// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <linux/virtio_pci_modern.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>

#include "vdpa_dev.h"
#include "aux_drv.h"
#include "cmds.h"
#include "debugfs.h"

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev);

static struct pds_vdpa_device *vdpa_to_pdsv(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct pds_vdpa_device, vdpa_dev);
}

static int pds_vdpa_notify_handler(struct notifier_block *nb,
				   unsigned long ecode,
				   void *data)
{
	struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;

	dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);

	if (ecode == PDS_EVENT_RESET || ecode == PDS_EVENT_LINK_CHANGE) {
		if (pdsv->config_cb.callback)
			pdsv->config_cb.callback(pdsv->config_cb.private);
	}

	return 0;
}

static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
{
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	struct notifier_block *nb = &pdsv->nb;
	int err;

	if (!nb->notifier_call) {
		nb->notifier_call = pds_vdpa_notify_handler;
		err = pdsc_register_notify(nb);
		if (err) {
			nb->notifier_call = NULL;
			dev_err(dev, "failed to register pds event handler: %pe\n",
				ERR_PTR(err));
			return -EINVAL;
		}
		dev_dbg(dev, "pds event handler registered\n");
	}

	return 0;
}

static void pds_vdpa_unregister_event_handler(struct pds_vdpa_device *pdsv)
{
	if (pdsv->nb.notifier_call) {
		pdsc_unregister_notify(&pdsv->nb);
		pdsv->nb.notifier_call = NULL;
	}
}

static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].desc_addr = desc_addr;
	pdsv->vqs[qid].avail_addr = driver_addr;
	pdsv->vqs[qid].used_addr = device_addr;

	return 0;
}

static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].q_len = num;
}

static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	iowrite16(qid, pdsv->vqs[qid].notify);
}

static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
			       struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].event_cb = *cb;
}

static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *vq;

	vq = data;
	if (vq->event_cb.callback)
		vq->event_cb.callback(vq->event_cb.private);

	return IRQ_HANDLED;
}

static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
{
	if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
		return;

	free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
}
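/* Enabling a vq sends its complete configuration (ring addresses, length,
 * starting indices) to the DSC firmware in a single init_vq adminq command;
 * disabling sends a reset_vq.  When the packed ring feature was negotiated,
 * the indices carry the inverted wrap-counter bit; see the comment above
 * pds_vdpa_set_vq_state() below.
 */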
static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u16 invert_idx = 0;
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);
	if (ready == pdsv->vqs[qid].ready)
		return;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		invert_idx = PDS_VDPA_PACKED_INVERT_IDX;

	if (ready) {
		/* Pass the gathered vq setup info to the DSC in a single
		 * adminq command so the FW can do its full setup in one
		 * operation.
		 */
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "Failed to init vq %d: %pe\n",
				qid, ERR_PTR(err));
			ready = false;
		}
	} else {
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err)
			dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
	}

	pdsv->vqs[qid].ready = ready;
}

static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].ready;
}
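/* The device keeps each ring index as a 16-bit value: bits 0-14 hold the
 * index and bit 15 holds the packed-ring wrap counter, stored with the wrap
 * bit inverted (this assumes PDS_VDPA_PACKED_INVERT_IDX is BIT(15)).  For
 * example, last_avail_idx = 5 with last_avail_counter = 1 packs to 0x8005
 * and is stored as 0x8005 ^ 0x8000 = 0x0005.  A freshly zeroed index is
 * therefore valid for both packed and split rings.
 */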
static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Setting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail = state->packed.last_avail_idx |
			(state->packed.last_avail_counter << 15);
		used = state->packed.last_used_idx |
		       (state->packed.last_used_counter << 15);

		/* The avail and used index are stored with the packed wrap
		 * counter bit inverted.  This way, if set_vq_state is never
		 * called, the initial value can be set to zero prior to
		 * feature negotiation, and it is valid for both packed and
		 * split vqs.
		 */
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;
	} else {
		avail = state->split.avail_index;
		/* state->split does not provide a used_index:
		 * the vq will be set to "empty" here, and the device will
		 * read the current used index the next time the vq is
		 * kicked.
		 */
		used = avail;
	}

	if (used != avail) {
		dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
		used = avail;
	}

	pdsv->vqs[qid].avail_idx = avail;
	pdsv->vqs[qid].used_idx = used;

	return 0;
}

static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Getting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	avail = pdsv->vqs[qid].avail_idx;
	used = pdsv->vqs[qid].used_idx;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;

		state->packed.last_avail_idx = avail & 0x7fff;
		state->packed.last_avail_counter = avail >> 15;
		state->packed.last_used_idx = used & 0x7fff;
		state->packed.last_used_counter = used >> 15;
	} else {
		state->split.avail_index = avail;
		/* state->split does not provide a used_index. */
	}

	return 0;
}

static struct vdpa_notification_area
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct virtio_pci_modern_device *vd_mdev;
	struct vdpa_notification_area area;

	area.addr = pdsv->vqs[qid].notify_pa;

	vd_mdev = &pdsv->vdpa_aux->vd_mdev;
	if (!vd_mdev->notify_offset_multiplier)
		area.size = PDS_PAGE_SIZE;
	else
		area.size = vd_mdev->notify_offset_multiplier;

	return area;
}

static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].irq;
}

static u32 pds_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PDS_PAGE_SIZE;
}

static u32 pds_vdpa_get_vq_group(struct vdpa_device *vdpa_dev, u16 idx)
{
	return 0;
}

static u64 pds_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->supported_features;
}
static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u64 nego_features;
	u64 hw_features;
	u64 missing;

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EOPNOTSUPP;
	}

	/* Check for valid feature bits */
	nego_features = features & pdsv->supported_features;
	missing = features & ~nego_features;
	if (missing) {
		dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
			features, missing);
		return -EOPNOTSUPP;
	}

	/* Capture the old negotiated features before overwriting them, so
	 * the debug message and the no-change check below are meaningful.
	 */
	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	pdsv->negotiated_features = nego_features;
	dev_dbg(dev, "%s: %#llx => %#llx\n",
		__func__, driver_features, nego_features);

	/* if we're faking the F_MAC, strip it before writing to the device */
	hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
	if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC)))
		nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);

	if (driver_features == nego_features)
		return 0;

	vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);

	return 0;
}

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->negotiated_features;
}

static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				   struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->config_cb.callback = cb->callback;
	pdsv->config_cb.private = cb->private;
}

static u16 pds_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* qemu has assert() that vq_num_max <= VIRTQUEUE_MAX_SIZE (1024) */
	return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
}

static u32 pds_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 pds_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_PENSANDO;
}

static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
}

static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	struct device *dev = &pdsv->vdpa_dev.dev;
	int max_vq, nintrs, qid, err;

	max_vq = vdpa_aux->vdpa_mdev.max_supported_vqs;

	nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX);
	if (nintrs < 0) {
		dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
			max_vq, ERR_PTR(nintrs));
		return nintrs;
	}

	for (qid = 0; qid < pdsv->num_vqs; ++qid) {
		int irq = pci_irq_vector(pdev, qid);

		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-%s-%d", dev_name(dev), qid);

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name,
				  &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			goto err_release;
		}

		pdsv->vqs[qid].irq = irq;
	}

	vdpa_aux->nintrs = nintrs;

	return 0;

err_release:
	while (qid--)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;

	return err;
}

static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	int qid;

	if (!vdpa_aux->nintrs)
		return;

	for (qid = 0; qid < pdsv->num_vqs; qid++)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;
}
static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u8 old_status;
	int i;

	old_status = pds_vdpa_get_status(vdpa_dev);
	dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);

	if (status & ~old_status & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (pds_vdpa_request_irqs(pdsv))
			status = old_status | VIRTIO_CONFIG_S_FAILED;
	}

	pds_vdpa_cmd_set_status(pdsv, status);

	if (status == 0) {
		struct vdpa_callback null_cb = { };

		pds_vdpa_set_config_cb(vdpa_dev, &null_cb);
		pds_vdpa_cmd_reset(pdsv);

		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].avail_idx = 0;
			pdsv->vqs[i].used_idx = 0;
		}

		pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
	}

	if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].notify =
				vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							i, &pdsv->vqs[i].notify_pa);
		}
	}

	if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK)
		pds_vdpa_release_irqs(pdsv);
}

static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
				    void __iomem *notify)
{
	memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
	pdsv->vqs[qid].qid = qid;
	pdsv->vqs[qid].pdsv = pdsv;
	pdsv->vqs[qid].ready = false;
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
	pdsv->vqs[qid].notify = notify;
}

static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev;
	int err = 0;
	u8 status;
	int i;

	dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	status = pds_vdpa_get_status(vdpa_dev);

	if (status == 0)
		return 0;

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vqs */
		for (i = 0; i < pdsv->num_vqs && !err; i++) {
			err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
			if (err)
				dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
					__func__, i, ERR_PTR(err));
		}
	}

	pds_vdpa_set_status(vdpa_dev, 0);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vq info */
		for (i = 0; i < pdsv->num_vqs && !err; i++)
			pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
	}

	return 0;
}
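/* Device config space accessors: accesses are bounds-checked against
 * struct virtio_net_config, then go directly to the device's mapped
 * config region with memcpy_fromio()/memcpy_toio().
 */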
static size_t pds_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	return sizeof(struct virtio_net_config);
}

static void pds_vdpa_get_config(struct vdpa_device *vdpa_dev,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad read, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_fromio(buf, device + offset, len);
}

static void pds_vdpa_set_config(struct vdpa_device *vdpa_dev,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad write, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_toio(device + offset, buf, len);
}

static const struct vdpa_config_ops pds_vdpa_ops = {
	.set_vq_address		= pds_vdpa_set_vq_address,
	.set_vq_num		= pds_vdpa_set_vq_num,
	.kick_vq		= pds_vdpa_kick_vq,
	.set_vq_cb		= pds_vdpa_set_vq_cb,
	.set_vq_ready		= pds_vdpa_set_vq_ready,
	.get_vq_ready		= pds_vdpa_get_vq_ready,
	.set_vq_state		= pds_vdpa_set_vq_state,
	.get_vq_state		= pds_vdpa_get_vq_state,
	.get_vq_notification	= pds_vdpa_get_vq_notification,
	.get_vq_irq		= pds_vdpa_get_vq_irq,
	.get_vq_align		= pds_vdpa_get_vq_align,
	.get_vq_group		= pds_vdpa_get_vq_group,

	.get_device_features	= pds_vdpa_get_device_features,
	.set_driver_features	= pds_vdpa_set_driver_features,
	.get_driver_features	= pds_vdpa_get_driver_features,
	.set_config_cb		= pds_vdpa_set_config_cb,
	.get_vq_num_max		= pds_vdpa_get_vq_num_max,
	.get_device_id		= pds_vdpa_get_device_id,
	.get_vendor_id		= pds_vdpa_get_vendor_id,
	.get_status		= pds_vdpa_get_status,
	.set_status		= pds_vdpa_set_status,
	.reset			= pds_vdpa_reset,
	.get_config_size	= pds_vdpa_get_config_size,
	.get_config		= pds_vdpa_get_config,
	.set_config		= pds_vdpa_set_config,
};

static struct virtio_device_id pds_vdpa_id_table[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};
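/* dev_add flow: allocate and wire up the pds_vdpa_device, check any
 * user-requested features against the device's supported set, reset and
 * re-init the hardware, size the vq set (two vqs per queue pair, plus one
 * more if the control vq feature is offered), choose a mac, and register
 * with the vDPA bus.
 */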
static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			    const struct vdpa_dev_set_config *add_config)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pds_vdpa_device *pdsv;
	struct vdpa_mgmt_dev *mgmt;
	u16 fw_max_vqs, vq_pairs;
	struct device *dma_dev;
	struct pci_dev *pdev;
	struct device *dev;
	int err;
	int i;

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	dev = &vdpa_aux->padev->aux_dev.dev;
	mgmt = &vdpa_aux->vdpa_mdev;

	if (vdpa_aux->pdsv) {
		dev_warn(dev, "Multiple vDPA devices on a VF is not supported.\n");
		return -EOPNOTSUPP;
	}

	pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
				 dev, &pds_vdpa_ops, 1, 1, name, false);
	if (IS_ERR(pdsv)) {
		dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
		return PTR_ERR(pdsv);
	}

	vdpa_aux->pdsv = pdsv;
	pdsv->vdpa_aux = vdpa_aux;

	pdev = vdpa_aux->padev->vf_pdev;
	dma_dev = &pdev->dev;
	pdsv->vdpa_dev.dma_dev = dma_dev;

	pdsv->supported_features = mgmt->supported_features;

	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		u64 unsupp_features =
			add_config->device_features & ~pdsv->supported_features;

		if (unsupp_features) {
			dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
			err = -EOPNOTSUPP;
			goto err_unmap;
		}

		pdsv->supported_features = add_config->device_features;
	}

	err = pds_vdpa_cmd_reset(pdsv);
	if (err) {
		dev_err(dev, "Failed to reset hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	err = pds_vdpa_init_hw(pdsv);
	if (err) {
		dev_err(dev, "Failed to init hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
	vq_pairs = fw_max_vqs / 2;

	/* Make sure we have the queues being requested */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
		vq_pairs = add_config->net.max_vq_pairs;

	pdsv->num_vqs = 2 * vq_pairs;
	if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		pdsv->num_vqs++;

	if (pdsv->num_vqs > fw_max_vqs) {
		dev_err(dev, "%s: queue count requested %u greater than max %u\n",
			__func__, pdsv->num_vqs, fw_max_vqs);
		err = -ENOSPC;
		goto err_unmap;
	}

	if (pdsv->num_vqs != fw_max_vqs) {
		err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
		if (err) {
			dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
				ERR_PTR(err));
			goto err_unmap;
		}
	}

	/* Set a mac: use the mac from the user config if provided, else
	 * the device's own mac if it isn't all zero, else a random mac.
	 */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		ether_addr_copy(pdsv->mac, add_config->net.mac);
	} else {
		struct virtio_net_config __iomem *vc;

		vc = pdsv->vdpa_aux->vd_mdev.device;
		memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac));
		if (is_zero_ether_addr(pdsv->mac) &&
		    (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) {
			eth_random_addr(pdsv->mac);
			dev_info(dev, "setting random mac %pM\n", pdsv->mac);
		}
	}
	pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);

	for (i = 0; i < pdsv->num_vqs; i++) {
		void __iomem *notify;

		notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
						 i, &pdsv->vqs[i].notify_pa);
		pds_vdpa_init_vqs_entry(pdsv, i, notify);
	}

	pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;

	err = pds_vdpa_register_event_handler(pdsv);
	if (err) {
		dev_err(dev, "Failed to register for PDS events: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	/* We use the _vdpa_register_device() call rather than
	 * vdpa_register_device() to avoid a deadlock, because our
	 * dev_add() is called with the vdpa_dev_lock already held
	 * by vdpa_nl_cmd_dev_add_set_doit().
	 */
	err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
	if (err) {
		dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err));
		goto err_unevent;
	}

	pds_vdpa_debugfs_add_vdpadev(vdpa_aux);

	return 0;

err_unevent:
	pds_vdpa_unregister_event_handler(pdsv);
err_unmap:
	put_device(&pdsv->vdpa_dev.dev);
	vdpa_aux->pdsv = NULL;
	return err;
}

static void pds_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
			     struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_vdpa_aux *vdpa_aux;

	pds_vdpa_unregister_event_handler(pdsv);

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	_vdpa_unregister_device(vdpa_dev);

	pds_vdpa_cmd_reset(vdpa_aux->pdsv);
	pds_vdpa_debugfs_reset_vdpadev(vdpa_aux);

	vdpa_aux->pdsv = NULL;

	dev_info(&vdpa_aux->padev->aux_dev.dev, "Removed vdpa device\n");
}

static const struct vdpa_mgmtdev_ops pds_vdpa_mgmt_dev_ops = {
	.dev_add = pds_vdpa_dev_add,
	.dev_del = pds_vdpa_dev_del
};
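/* The ident data is fetched through the PF's adminq: the ident struct is
 * DMA-mapped on the PF so the firmware can write into it, and the result
 * is used to size the management device's queue and feature limits before
 * registration.
 */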
int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
		.vdpa_ident.vf_id = cpu_to_le16(vdpa_aux->vf_id),
	};
	union pds_core_adminq_comp comp = {};
	struct vdpa_mgmt_dev *mgmt;
	struct pci_dev *pf_pdev;
	struct device *pf_dev;
	struct pci_dev *pdev;
	dma_addr_t ident_pa;
	struct device *dev;
	u16 dev_intrs;
	u16 max_vqs;
	int err;

	dev = &vdpa_aux->padev->aux_dev.dev;
	pdev = vdpa_aux->padev->vf_pdev;
	mgmt = &vdpa_aux->vdpa_mdev;

	/* Get resource info through the PF's adminq.  It is a block of
	 * info, so we need to map some memory for the PF to make
	 * available to the firmware for writing the data.
	 */
	pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
	pf_dev = &pf_pdev->dev;
	ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident,
				  sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (dma_mapping_error(pf_dev, ident_pa)) {
		dev_err(dev, "Failed to map ident space\n");
		return -ENOMEM;
	}

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
	cmd.vdpa_ident.len = cpu_to_le32(sizeof(vdpa_aux->ident));
	err = pds_client_adminq_cmd(vdpa_aux->padev, &cmd,
				    sizeof(cmd.vdpa_ident), &comp, 0);
	dma_unmap_single(pf_dev, ident_pa,
			 sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (err) {
		dev_err(dev, "Failed to ident hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));
		return err;
	}

	max_vqs = le16_to_cpu(vdpa_aux->ident.max_vqs);
	dev_intrs = pci_msix_vec_count(pdev);
	dev_dbg(dev, "ident.max_vqs %d dev_intrs %d\n", max_vqs, dev_intrs);

	max_vqs = min_t(u16, dev_intrs, max_vqs);
	mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
	vdpa_aux->nintrs = 0;

	mgmt->ops = &pds_vdpa_mgmt_dev_ops;
	mgmt->id_table = pds_vdpa_id_table;
	mgmt->device = dev;
	mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);

	/* Advertise F_MAC even if the device doesn't, so we can fake it
	 * with a locally generated mac address if needed.
	 */
	mgmt->supported_features |= BIT_ULL(VIRTIO_NET_F_MAC);

	mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);

	return 0;
}