// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/pci.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <linux/virtio_pci_modern.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>

#include "vdpa_dev.h"
#include "aux_drv.h"
#include "cmds.h"
#include "debugfs.h"

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev);

static struct pds_vdpa_device *vdpa_to_pdsv(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct pds_vdpa_device, vdpa_dev);
}

static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].desc_addr = desc_addr;
	pdsv->vqs[qid].avail_addr = driver_addr;
	pdsv->vqs[qid].used_addr = device_addr;

	return 0;
}

static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].q_len = num;
}

static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	iowrite16(qid, pdsv->vqs[qid].notify);
}

static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
			       struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].event_cb = *cb;
}

static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *vq;

	vq = data;
	if (vq->event_cb.callback)
		vq->event_cb.callback(vq->event_cb.private);

	return IRQ_HANDLED;
}

static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
{
	if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
		return;

	free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
}

static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u16 invert_idx = 0;
	int irq;
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);
	if (ready == pdsv->vqs[qid].ready)
		return;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		invert_idx = PDS_VDPA_PACKED_INVERT_IDX;

	if (ready) {
		irq = pci_irq_vector(pdev, qid);
		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-%s-%d", dev_name(dev), qid);

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			return;
		}
		pdsv->vqs[qid].irq = irq;

		/* Pass vq setup info to DSC using adminq to gather up and
		 * send all info at once so FW can do its full set up in
		 * one easy operation
		 */
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "Failed to init vq %d: %pe\n",
				qid, ERR_PTR(err));
			pds_vdpa_release_irq(pdsv, qid);
			ready = false;
		}
	} else {
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err)
			dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
		pds_vdpa_release_irq(pdsv, qid);
	}

	pdsv->vqs[qid].ready = ready;
}
static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].ready;
}

static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Setting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail = state->packed.last_avail_idx |
			(state->packed.last_avail_counter << 15);
		used = state->packed.last_used_idx |
		       (state->packed.last_used_counter << 15);

		/* The avail and used index are stored with the packed wrap
		 * counter bit inverted. This way, in case set_vq_state is
		 * not called, the initial value can be set to zero prior to
		 * feature negotiation, and it is good for both packed and
		 * split vq.
		 */
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;
	} else {
		avail = state->split.avail_index;
		/* state->split does not provide a used_index:
		 * the vq will be set to "empty" here, and the vq will read
		 * the current used index the next time the vq is kicked.
		 */
		used = avail;
	}

	if (used != avail) {
		dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
		used = avail;
	}

	pdsv->vqs[qid].avail_idx = avail;
	pdsv->vqs[qid].used_idx = used;

	return 0;
}

static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Getting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	avail = pdsv->vqs[qid].avail_idx;
	used = pdsv->vqs[qid].used_idx;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;

		state->packed.last_avail_idx = avail & 0x7fff;
		state->packed.last_avail_counter = avail >> 15;
		state->packed.last_used_idx = used & 0x7fff;
		state->packed.last_used_counter = used >> 15;
	} else {
		state->split.avail_index = avail;
		/* state->split does not provide a used_index. */
	}

	return 0;
}
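
/* Worked example of the index encoding used by set/get_vq_state above.
 * This is an illustration only; it assumes PDS_VDPA_PACKED_INVERT_IDX is
 * the wrap-counter bit (bit 15), which is what the shift/mask arithmetic
 * above implies:
 *
 *   last_avail_idx = 0x0123, last_avail_counter = 1
 *   avail  = 0x0123 | (1 << 15)  = 0x8123
 *   stored = 0x8123 ^ 0x8000     = 0x0123   (wrap counter bit inverted)
 *
 * Decoding a never-set (all zero) avail_idx gives 0x0000 ^ 0x8000 = 0x8000,
 * i.e. index 0 with wrap counter 1, the initial state of a packed vq,
 * while a split vq simply sees avail_index 0.
 */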

static struct vdpa_notification_area
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct virtio_pci_modern_device *vd_mdev;
	struct vdpa_notification_area area;

	area.addr = pdsv->vqs[qid].notify_pa;

	vd_mdev = &pdsv->vdpa_aux->vd_mdev;
	if (!vd_mdev->notify_offset_multiplier)
		area.size = PDS_PAGE_SIZE;
	else
		area.size = vd_mdev->notify_offset_multiplier;

	return area;
}

static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].irq;
}

static u32 pds_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PDS_PAGE_SIZE;
}

static u32 pds_vdpa_get_vq_group(struct vdpa_device *vdpa_dev, u16 idx)
{
	return 0;
}

static u64 pds_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->supported_features;
}

static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u64 nego_features;
	u64 missing;

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EOPNOTSUPP;
	}

	pdsv->req_features = features;

	/* Check for valid feature bits */
	nego_features = features & le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
	missing = pdsv->req_features & ~nego_features;
	if (missing) {
		dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
			pdsv->req_features, missing);
		return -EOPNOTSUPP;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	dev_dbg(dev, "%s: %#llx => %#llx\n",
		__func__, driver_features, nego_features);

	if (driver_features == nego_features)
		return 0;

	vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);

	return 0;
}

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return vp_modern_get_driver_features(&pdsv->vdpa_aux->vd_mdev);
}

static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				   struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->config_cb.callback = cb->callback;
	pdsv->config_cb.private = cb->private;
}

static u16 pds_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* qemu has assert() that vq_num_max <= VIRTQUEUE_MAX_SIZE (1024) */
	return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
}

static u32 pds_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 pds_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_PENSANDO;
}

static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
}
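
/* Informational note, not DSC-specific: the virtio driver typically steps
 * the status through ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK and
 * writes 0 to reset.  set_status() below relies on that flow: the notify
 * regions are mapped when FEATURES_OK is newly set (feature negotiation is
 * complete by then), and a status of 0 also clears the locally cached
 * avail/used indices so a later set_vq_state() starts clean.
 */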
static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u8 old_status;
	int i;

	old_status = pds_vdpa_get_status(vdpa_dev);
	dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);

	pds_vdpa_cmd_set_status(pdsv, status);

	/* Note: still working with FW on the need for this reset cmd */
	if (status == 0) {
		pds_vdpa_cmd_reset(pdsv);

		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].avail_idx = 0;
			pdsv->vqs[i].used_idx = 0;
		}
	}

	if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].notify =
				vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							i, &pdsv->vqs[i].notify_pa);
		}
	}
}

static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev;
	int err = 0;
	u8 status;
	int i;

	dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	status = pds_vdpa_get_status(vdpa_dev);

	if (status == 0)
		return 0;

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vqs */
		for (i = 0; i < pdsv->num_vqs && !err; i++) {
			err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
			if (err)
				dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
					__func__, i, ERR_PTR(err));
			pds_vdpa_release_irq(pdsv, i);
			memset(&pdsv->vqs[i], 0, sizeof(pdsv->vqs[0]));
			pdsv->vqs[i].ready = false;
		}
	}

	pds_vdpa_set_status(vdpa_dev, 0);

	return 0;
}

static size_t pds_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	return sizeof(struct virtio_net_config);
}

static void pds_vdpa_get_config(struct vdpa_device *vdpa_dev,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad read, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_fromio(buf, device + offset, len);
}

static void pds_vdpa_set_config(struct vdpa_device *vdpa_dev,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad write, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_toio(device + offset, buf, len);
}
static const struct vdpa_config_ops pds_vdpa_ops = {
	.set_vq_address = pds_vdpa_set_vq_address,
	.set_vq_num = pds_vdpa_set_vq_num,
	.kick_vq = pds_vdpa_kick_vq,
	.set_vq_cb = pds_vdpa_set_vq_cb,
	.set_vq_ready = pds_vdpa_set_vq_ready,
	.get_vq_ready = pds_vdpa_get_vq_ready,
	.set_vq_state = pds_vdpa_set_vq_state,
	.get_vq_state = pds_vdpa_get_vq_state,
	.get_vq_notification = pds_vdpa_get_vq_notification,
	.get_vq_irq = pds_vdpa_get_vq_irq,
	.get_vq_align = pds_vdpa_get_vq_align,
	.get_vq_group = pds_vdpa_get_vq_group,

	.get_device_features = pds_vdpa_get_device_features,
	.set_driver_features = pds_vdpa_set_driver_features,
	.get_driver_features = pds_vdpa_get_driver_features,
	.set_config_cb = pds_vdpa_set_config_cb,
	.get_vq_num_max = pds_vdpa_get_vq_num_max,
	.get_device_id = pds_vdpa_get_device_id,
	.get_vendor_id = pds_vdpa_get_vendor_id,
	.get_status = pds_vdpa_get_status,
	.set_status = pds_vdpa_set_status,
	.reset = pds_vdpa_reset,
	.get_config_size = pds_vdpa_get_config_size,
	.get_config = pds_vdpa_get_config,
	.set_config = pds_vdpa_set_config,
};

static struct virtio_device_id pds_vdpa_id_table[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			    const struct vdpa_dev_set_config *add_config)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pds_vdpa_device *pdsv;
	struct vdpa_mgmt_dev *mgmt;
	u16 fw_max_vqs, vq_pairs;
	struct device *dma_dev;
	struct pci_dev *pdev;
	struct device *dev;
	u8 mac[ETH_ALEN];
	int err;
	int i;

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	dev = &vdpa_aux->padev->aux_dev.dev;
	mgmt = &vdpa_aux->vdpa_mdev;

	if (vdpa_aux->pdsv) {
		dev_warn(dev, "Multiple vDPA devices on a VF are not supported.\n");
		return -EOPNOTSUPP;
	}

	pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
				 dev, &pds_vdpa_ops, 1, 1, name, false);
	if (IS_ERR(pdsv)) {
		dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
		return PTR_ERR(pdsv);
	}

	vdpa_aux->pdsv = pdsv;
	pdsv->vdpa_aux = vdpa_aux;

	pdev = vdpa_aux->padev->vf_pdev;
	dma_dev = &pdev->dev;
	pdsv->vdpa_dev.dma_dev = dma_dev;

	pdsv->supported_features = mgmt->supported_features;

	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		u64 unsupp_features =
			add_config->device_features & ~mgmt->supported_features;

		if (unsupp_features) {
			dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
			err = -EOPNOTSUPP;
			goto err_unmap;
		}

		pdsv->supported_features = add_config->device_features;
	}

	err = pds_vdpa_cmd_reset(pdsv);
	if (err) {
		dev_err(dev, "Failed to reset hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	err = pds_vdpa_init_hw(pdsv);
	if (err) {
		dev_err(dev, "Failed to init hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
	vq_pairs = fw_max_vqs / 2;

	/* Make sure we have the queues being requested */
	if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
		vq_pairs = add_config->net.max_vq_pairs;

	pdsv->num_vqs = 2 * vq_pairs;
	if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		pdsv->num_vqs++;

	if (pdsv->num_vqs > fw_max_vqs) {
		dev_err(dev, "%s: queue count requested %u greater than max %u\n",
			__func__, pdsv->num_vqs, fw_max_vqs);
		err = -ENOSPC;
		goto err_unmap;
	}

	if (pdsv->num_vqs != fw_max_vqs) {
		err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
		if (err) {
			dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
				ERR_PTR(err));
			goto err_unmap;
		}
	}
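
	/* Illustrative queue accounting (numbers are made up): with
	 * ident.max_vqs = 16 and no max_vqp attribute, vq_pairs defaults to 8
	 * and num_vqs to 16; if VIRTIO_NET_F_CTRL_VQ is in the supported
	 * feature set, one more vq is counted for the control queue, so the
	 * user would need to ask for max_vqp <= 7 to stay within fw_max_vqs
	 * and avoid the -ENOSPC check above.
	 */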

	/* Set a mac, either from the user config if provided
	 * or set a random mac if default is 00:..:00
	 */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		ether_addr_copy(mac, add_config->net.mac);
		pds_vdpa_cmd_set_mac(pdsv, mac);
	} else {
		struct virtio_net_config __iomem *vc;

		vc = pdsv->vdpa_aux->vd_mdev.device;
		memcpy_fromio(mac, vc->mac, sizeof(mac));
		if (is_zero_ether_addr(mac)) {
			eth_random_addr(mac);
			dev_info(dev, "setting random mac %pM\n", mac);
			pds_vdpa_cmd_set_mac(pdsv, mac);
		}
	}

	for (i = 0; i < pdsv->num_vqs; i++) {
		pdsv->vqs[i].qid = i;
		pdsv->vqs[i].pdsv = pdsv;
		pdsv->vqs[i].irq = VIRTIO_MSI_NO_VECTOR;
		pdsv->vqs[i].notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							      i, &pdsv->vqs[i].notify_pa);
	}

	pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;

	/* We use the _vdpa_register_device() call rather than the
	 * vdpa_register_device() to avoid a deadlock because our
	 * dev_add() is called with the vdpa_dev_lock already set
	 * by vdpa_nl_cmd_dev_add_set_doit()
	 */
	err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
	if (err) {
		dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	pds_vdpa_debugfs_add_vdpadev(vdpa_aux);

	return 0;

err_unmap:
	put_device(&pdsv->vdpa_dev.dev);
	vdpa_aux->pdsv = NULL;
	return err;
}

static void pds_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
			     struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_aux *vdpa_aux;

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	_vdpa_unregister_device(vdpa_dev);

	pds_vdpa_cmd_reset(vdpa_aux->pdsv);
	pds_vdpa_debugfs_reset_vdpadev(vdpa_aux);

	vdpa_aux->pdsv = NULL;

	dev_info(&vdpa_aux->padev->aux_dev.dev, "Removed vdpa device\n");
}

static const struct vdpa_mgmtdev_ops pds_vdpa_mgmt_dev_ops = {
	.dev_add = pds_vdpa_dev_add,
	.dev_del = pds_vdpa_dev_del
};

int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
		.vdpa_ident.vf_id = cpu_to_le16(vdpa_aux->vf_id),
	};
	union pds_core_adminq_comp comp = {};
	struct vdpa_mgmt_dev *mgmt;
	struct pci_dev *pf_pdev;
	struct device *pf_dev;
	struct pci_dev *pdev;
	dma_addr_t ident_pa;
	struct device *dev;
	u16 dev_intrs;
	u16 max_vqs;
	int err;

	dev = &vdpa_aux->padev->aux_dev.dev;
	pdev = vdpa_aux->padev->vf_pdev;
	mgmt = &vdpa_aux->vdpa_mdev;

	/* Get resource info through the PF's adminq. It is a block of info,
	 * so we need to map some memory for PF to make available to the
	 * firmware for writing the data.
	 */
	pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
	pf_dev = &pf_pdev->dev;
	ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident,
				  sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (dma_mapping_error(pf_dev, ident_pa)) {
		dev_err(dev, "Failed to map ident space\n");
		return -ENOMEM;
	}

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
	cmd.vdpa_ident.len = cpu_to_le32(sizeof(vdpa_aux->ident));
	err = pds_client_adminq_cmd(vdpa_aux->padev, &cmd,
				    sizeof(cmd.vdpa_ident), &comp, 0);
	dma_unmap_single(pf_dev, ident_pa,
			 sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (err) {
		dev_err(dev, "Failed to ident hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));
		return err;
	}

	max_vqs = le16_to_cpu(vdpa_aux->ident.max_vqs);
	dev_intrs = pci_msix_vec_count(pdev);
	dev_dbg(dev, "ident.max_vqs %d dev_intrs %d\n", max_vqs, dev_intrs);

	max_vqs = min_t(u16, dev_intrs, max_vqs);
	mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
	vdpa_aux->nintrs = mgmt->max_supported_vqs;

	mgmt->ops = &pds_vdpa_mgmt_dev_ops;
	mgmt->id_table = pds_vdpa_id_table;
	mgmt->device = dev;
	mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);
	mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);

	err = pci_alloc_irq_vectors(pdev, vdpa_aux->nintrs, vdpa_aux->nintrs,
				    PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
			vdpa_aux->nintrs, ERR_PTR(err));
		return err;
	}
	vdpa_aux->nintrs = err;

	return 0;
}
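
/* Usage sketch (not part of the driver; device names are illustrative):
 * once the auxiliary device has registered this management device, a vDPA
 * device can be created and removed with the iproute2 vdpa tool, e.g.
 *
 *   vdpa mgmtdev show
 *   vdpa dev add name vdpa0 mgmtdev pci/0000:2b:00.1 mac 00:11:22:33:44:55 max_vqp 1
 *   vdpa dev del vdpa0
 *
 * which reaches pds_vdpa_dev_add()/pds_vdpa_dev_del() above through the
 * vdpa netlink interface (vdpa_nl_cmd_dev_add_set_doit() and friends).
 */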