// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include "ifcvf_base.h"

struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
	return container_of(hw, struct ifcvf_adapter, vf);
}

u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);

	return vp_ioread16(&cfg->queue_msix_vector);
}

u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(vector, &cfg->msix_config);

	return vp_ioread16(&cfg->msix_config);
}

static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	struct ifcvf_adapter *ifcvf;
	struct pci_dev *pdev;
	u32 length, offset;
	u8 bar;

	length = le32_to_cpu(cap->length);
	offset = le32_to_cpu(cap->offset);
	bar = cap->bar;

	ifcvf = vf_to_adapter(hw);
	pdev = ifcvf->pdev;

	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
		IFCVF_DBG(pdev,
			  "Invalid bar number %u to get capabilities\n", bar);
		return NULL;
	}

	if (offset + length > pci_resource_len(pdev, bar)) {
		IFCVF_DBG(pdev,
			  "offset(%u) + len(%u) overflows bar%u's capability\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}

static int ifcvf_read_config_range(struct pci_dev *dev,
				   uint32_t *val, int size, int where)
{
	int ret, i;

	for (i = 0; i < size; i += 4) {
		ret = pci_read_config_dword(dev, where + i, val + i / 4);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u16 notify_off;
	int ret;
	u8 pos;
	u32 i;

	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
		return -EIO;
	}

	while (pos) {
		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
					      sizeof(cap), pos);
		if (ret < 0) {
			IFCVF_ERR(pdev,
				  "Failed to get PCI capability at %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
				  hw->common_cfg);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
					le32_to_cpu(cap.offset);
			IFCVF_DBG(pdev, "hw->notify_base = %p\n",
				  hw->notify_base);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->isr == NULL || hw->dev_cfg == NULL) {
		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
		return -EIO;
	}
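
	/*
	 * Per the virtio 1.x spec, each queue's doorbell lives at
	 * notify_base + queue_notify_off * notify_off_multiplier.
	 * Cache both the virtual and the physical doorbell address of
	 * every vring so queue kicks and doorbell mappings need no
	 * per-access arithmetic.
	 */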
	hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);

	for (i = 0; i < hw->nr_vring; i++) {
		vp_iowrite16(i, &hw->common_cfg->queue_select);
		notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
		hw->vring[i].notify_addr = hw->notify_base +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].notify_pa = hw->notify_base_pa +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].irq = -EINVAL;
	}

	hw->lm_cfg = hw->base[IFCVF_LM_BAR];

	IFCVF_DBG(pdev,
		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
		  hw->common_cfg, hw->notify_base, hw->isr,
		  hw->dev_cfg, hw->notify_off_multiplier);

	hw->vqs_reused_irq = -EINVAL;
	hw->config_irq = -EINVAL;

	return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
	return vp_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	vp_iowrite8(status, &hw->common_cfg->device_status);
}

void ifcvf_reset(struct ifcvf_hw *hw)
{
	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;

	ifcvf_set_status(hw, 0);
	/* flush set_status, make sure VF is stopped, reset */
	ifcvf_get_status(hw);
}

static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
	/* a zero status requests a reset, so don't merge in stale bits */
	if (status != 0)
		status |= ifcvf_get_status(hw);

	ifcvf_set_status(hw, status);
	ifcvf_get_status(hw);
}

u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features_lo = vp_ioread32(&cfg->device_feature);

	vp_iowrite32(1, &cfg->device_feature_select);
	features_hi = vp_ioread32(&cfg->device_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}

u64 ifcvf_get_features(struct ifcvf_hw *hw)
{
	return hw->hw_features;
}

int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
	struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EINVAL;
	}

	return 0;
}

u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
	struct ifcvf_adapter *adapter;
	u32 config_size;

	adapter = vf_to_adapter(hw);
	switch (hw->dev_type) {
	case VIRTIO_ID_NET:
		config_size = sizeof(struct virtio_net_config);
		break;
	case VIRTIO_ID_BLOCK:
		config_size = sizeof(struct virtio_blk_config);
		break;
	default:
		config_size = 0;
		IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
	}

	return config_size;
}

void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > hw->config_size);
	/* retry the copy if the device changed the config underneath us */
	do {
		old_gen = vp_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = vp_ioread8(hw->dev_cfg + offset + i);

		new_gen = vp_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
			    const void *src, int length)
{
	const u8 *p;
	int i;
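
	/*
	 * Unlike reads, config writes need no generation loop: the
	 * virtio spec defines config_generation only for detecting
	 * torn reads of multi-byte fields.
	 */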
	p = src;
	WARN_ON(offset + length > hw->config_size);
	for (i = 0; i < length; i++)
		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}

static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);

	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}

static int ifcvf_config_features(struct ifcvf_hw *hw)
{
	struct ifcvf_adapter *ifcvf;

	ifcvf = vf_to_adapter(hw);
	ifcvf_set_features(hw, hw->req_features);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);

	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
		return -EIO;
	}

	return 0;
}

u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u16 last_avail_idx;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	/* two queues per pair; each LM entry holds both avail indices */
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	last_avail_idx = vp_ioread16(avail_idx_addr);

	return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	hw->vring[qid].last_avail_idx = num;
	vp_iowrite16(num, avail_idx_addr);

	return 0;
}

static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg;
	u32 i;

	cfg = hw->common_cfg;
	for (i = 0; i < hw->nr_vring; i++) {
		/* stop at the first vring the upper layer has not set up */
		if (!hw->vring[i].ready)
			break;

		vp_iowrite16(i, &cfg->queue_select);
		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				     &cfg->queue_desc_hi);
		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				     &cfg->queue_avail_hi);
		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				     &cfg->queue_used_hi);
		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
		vp_iowrite16(1, &cfg->queue_enable);
	}

	return 0;
}

static void ifcvf_hw_disable(struct ifcvf_hw *hw)
{
	u32 i;

	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
	for (i = 0; i < hw->nr_vring; i++)
		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
}

int ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

	if (ifcvf_config_features(hw) < 0)
		return -EINVAL;

	if (ifcvf_hw_enable(hw) < 0)
		return -EINVAL;

	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

	return 0;
}

void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}

void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	vp_iowrite16(qid, hw->vring[qid].notify_addr);
}