// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR		"Intel Corporation"
#define IFCVF_DRIVER_NAME	"ifcvf"

static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	struct vring_info *vring;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vring = &vf->vring[i];
		if (vring->cb.callback)
			vring->cb.callback(vring->cb.private);
	}

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	u8 isr;

	isr = vp_ioread8(vf->isr);
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		ifcvf_config_changed(irq, arg);

	return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		if (vf->vring[i].irq != -EINVAL) {
			devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
			vf->vring[i].irq = -EINVAL;
		}
	}
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->vqs_reused_irq != -EINVAL) {
		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
		vf->vqs_reused_irq = -EINVAL;
	}
}

static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ifcvf_free_per_vq_irq(adapter);
	else
		ifcvf_free_vqs_reused_irq(adapter);
}

static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;

	if (vf->config_irq == -EINVAL)
		return;

	/* If the irq is shared by all vqs and the config interrupt,
	 * it was already freed in ifcvf_free_vq_irq, so the config irq
	 * only needs to be freed here when
	 * msix_vector_status != MSIX_VECTOR_DEV_SHARED.
	 */
	if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
		devm_free_irq(&pdev->dev, vf->config_irq, vf);
		vf->config_irq = -EINVAL;
	}
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ifcvf_free_vq_irq(adapter);
	ifcvf_free_config_irq(adapter);
	ifcvf_free_irq_vectors(pdev);
}

/* ifcvf MSI-X vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, or a negative
 * error value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int max_intr, ret;

	/* all queues and config interrupt */
	max_intr = vf->nr_vring + 1;
	ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	if (ret < max_intr)
		IFCVF_INFO(pdev,
			   "Requested %u vectors, however only %u allocated, lower performance\n",
			   max_intr, ret);

	return ret;
}

static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vf->vqs_reused_irq = -EINVAL;
	for (i = 0; i < vf->nr_vring; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
		vector = i;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_vq_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
			goto err;
		}

		vf->vring[i].irq = irq;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_vqs_reused_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_dev_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	vf->config_irq = irq;
	ret = ifcvf_set_config_vector(vf, vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

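/* MSI-X vector usage, as set up by the request helpers above:
 *
 * MSIX_VECTOR_PER_VQ_AND_CONFIG: vector i serves vq i
 *	(ifcvf_request_per_vq_irq), vector nr_vring serves the config
 *	interrupt (ifcvf_request_config_irq).
 * MSIX_VECTOR_SHARED_VQ_AND_CONFIG: vector 0 is shared by all vqs
 *	(ifcvf_request_vqs_reused_irq), vector 1 serves the config
 *	interrupt.
 * MSIX_VECTOR_DEV_SHARED: a single vector 0 serves all vqs and the
 *	config interrupt (ifcvf_request_dev_irq).
 */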
static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;
	int ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ret = ifcvf_request_per_vq_irq(adapter);
	else
		ret = ifcvf_request_vqs_reused_irq(adapter);

	return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int config_vector, ret;

	if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
		return 0;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		/* vectors 0 ~ nr_vring - 1 for the vqs, vector nr_vring for the config interrupt */
		config_vector = vf->nr_vring;

	if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
		/* vector 0 for vqs and 1 for config interrupt */
		config_vector = 1;

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
		 pci_name(pdev));
	vf->config_irq = pci_irq_vector(pdev, config_vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		goto err;
	}

	ret = ifcvf_set_config_vector(vf, config_vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(adapter);

	return -EFAULT;
}

static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = &adapter->vf;
	int nvectors, ret, max_intr;

	nvectors = ifcvf_alloc_vectors(adapter);
	if (nvectors <= 0)
		return -EFAULT;

	vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
	max_intr = vf->nr_vring + 1;
	if (nvectors < max_intr)
		vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

	if (nvectors == 1) {
		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
		ret = ifcvf_request_dev_irq(adapter);

		return ret;
	}

	ret = ifcvf_request_vq_irq(adapter);
	if (ret)
		return ret;

	ret = ifcvf_request_config_irq(adapter);
	if (ret)
		return ret;

	return 0;
}

static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u32 type = vf->dev_type;
	u64 features;

	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
		features = ifcvf_get_features(vf);
	else {
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	vf->req_features = features;

	return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->req_features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}

static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == 0)
		return 0;

	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter);
	}

	ifcvf_reset_vring(adapter);

	return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->config_size;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	if (vf->vqs_reused_irq < 0)
		return vf->vring[qid].irq;
	else
		return -EINVAL;
}

static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_device_features = ifcvf_vdpa_get_device_features,
	.set_driver_features = ifcvf_vdpa_set_driver_features,
	.get_driver_features = ifcvf_vdpa_get_driver_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.reset = ifcvf_vdpa_reset,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq = ifcvf_vdpa_get_vq_irq,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_config_size = ifcvf_vdpa_get_config_size,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
	 * so legacy devices and transitional devices in legacy
	 * mode will not work for vDPA; this driver will not
	 * drive devices with a legacy interface.
	 */

	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	return dev_type;
}

static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct ifcvf_adapter *adapter;
	struct pci_dev *pdev;
	struct ifcvf_hw *vf;
	struct device *dev;
	int ret, i;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	if (ifcvf_mgmt_dev->adapter)
		return -EOPNOTSUPP;

	pdev = ifcvf_mgmt_dev->pdev;
	dev = &pdev->dev;
	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops, name, false);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return PTR_ERR(adapter);
	}

	ifcvf_mgmt_dev->adapter = adapter;
	pci_set_drvdata(pdev, ifcvf_mgmt_dev);

	vf = &adapter->vf;
	vf->dev_type = get_dev_type(pdev);
	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);
	vf->config_size = ifcvf_get_config_size(vf);

	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
		goto err;
	}

	return 0;

err:
	put_device(&adapter->vdpa.dev);
	return ret;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	_vdpa_unregister_device(dev);
	ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
	.dev_add = ifcvf_vdpa_dev_add,
	.dev_del = ifcvf_vdpa_dev_del
};

static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct device *dev = &pdev->dev;
	u32 dev_type;
	int ret;

	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
	if (!ifcvf_mgmt_dev) {
		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
		return -ENOMEM;
	}

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		ifcvf_mgmt_dev->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
		break;
	default:
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
	ifcvf_mgmt_dev->mdev.device = dev;
	ifcvf_mgmt_dev->pdev = pdev;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		goto err;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		goto err;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		goto err;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed for adding devres for freeing irq vectors\n");
		goto err;
	}

	pci_set_master(pdev);

	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to initialize the management interfaces\n");
		goto err;
	}

	return 0;

err:
	kfree(ifcvf_mgmt_dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = pci_get_drvdata(pdev);
	vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
	kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");
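/* Module metadata: MODULE_AUTHOR reuses the otherwise unused DRIVER_AUTHOR
 * define from the top of this file, and MODULE_DESCRIPTION repeats the
 * driver summary from the file header comment.
 */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION("Intel IFC VF NIC driver for virtio dataplane offloading");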