// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Virtual Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>

#include "otx2_common.h"
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define DRV_NAME	"rvu_nicvf"
#define DRV_STRING	"Marvell RVU NIC Virtual Function Driver"

static const struct pci_device_id otx2_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ }
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);

/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0,
};

static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
					 struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(vf->dev,
			"Mbox msg with unknown ID %d\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(vf->dev,
			"Mbox msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	if (msg->rc == MBOX_MSG_INVALID) {
		dev_err(vf->dev,
			"PF/AF says the sent msg(s) %d were invalid\n",
			msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		vf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(vf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(vf->dev,
				"Mbox msg response has err %d, ID %d\n",
				msg->rc, msg->id);
	}
}

static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (af_mbox->num_msgs == 0)
		return;
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}

static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
				      struct mbox_msghdr *req)
{
	struct msg_rsp *rsp;
	int err;

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
	case MBOX_MSG_CGX_LINK_EVENT:
		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
						&vf->mbox.mbox_up, 0,
						sizeof(struct msg_rsp));
		if (!rsp)
			return -ENOMEM;

		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
		rsp->hdr.pcifunc = 0;
		rsp->hdr.rc = 0;
		err = otx2_mbox_up_handler_cgx_link_event(
				vf, (struct cgx_link_info_msg *)req, rsp);
		return err;
	default:
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}
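
/* Work handler for PF => VF "up" notifications: walks the messages queued
 * by the mbox interrupt handler, dispatches each one to
 * otx2vf_process_mbox_msg_up() and then sends the prepared responses back
 * to the PF in one shot via otx2_mbox_msg_send().
 */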
static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *vf;
	int offset, id;

	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	vf = vf_mbox->pfvf;
	mbox = &vf_mbox->mbox_up;
	mdev = &mbox->dev[0];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (vf_mbox->up_num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_mbox_msg_up(vf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_msg_send(mbox, 0);
}

static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;

	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

	/* Read latest mbox data */
	smp_rmb();

	/* Check for PF => VF response messages */
	mbox = &vf->mbox.mbox;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
	}
	/* Check for PF => VF notification messages */
	mbox = &vf->mbox.mbox_up;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.up_num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
	}

	return IRQ_HANDLED;
}

static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
{
	int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);

	/* Disable VF => PF mailbox IRQ */
	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, vf);
}
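
/* Hook up the VF <=> PF mailbox IRQ and, when @probe_pf is set, verify the
 * channel by sending a MBOX_MSG_READY handshake. An unresponsive AF makes
 * the caller retry later via -EPROBE_DEFER.
 */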
static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
	struct otx2_hw *hw = &vf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
	if (err) {
		dev_err(vf->dev,
			"RVUPF: IRQ registration failed for VFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from PF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_pf)
		return 0;

	/* Check mailbox communication with PF */
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
	if (!req) {
		otx2vf_disable_mbox_intr(vf);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&vf->mbox);
	if (err) {
		dev_warn(vf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2vf_disable_mbox_intr(vf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;

	if (vf->mbox_wq) {
		destroy_workqueue(vf->mbox_wq);
		vf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}

static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = vf;
	vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!vf->mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
	} else {
		/* Mailbox is a reserved memory (in RAM) region shared between
		 * admin function (i.e. PF0) and this VF, shouldn't be mapped as
		 * device memory to allow unaligned accesses.
		 */
		hwbase = ioremap_wc(pci_resource_start(vf->pdev,
						       PCI_MBOX_BAR_NUM),
				    pci_resource_len(vf->pdev,
						     PCI_MBOX_BAR_NUM));
		if (!hwbase) {
			dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
			err = -ENOMEM;
			goto exit;
		}
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, vf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap(hwbase);
	destroy_workqueue(vf->mbox_wq);
	return err;
}

static int otx2vf_open(struct net_device *netdev)
{
	struct otx2_nic *vf;
	int err;

	err = otx2_open(netdev);
	if (err)
		return err;

	/* LBKs do not receive link events so tell everyone we are up here */
	vf = netdev_priv(netdev);
	if (is_otx2_lbkvf(vf->pdev)) {
		pr_info("%s NIC Link is UP\n", netdev->name);
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	}

	return 0;
}

static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}
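
/* .ndo_start_xmit: queue the skb on the send queue selected by
 * skb_get_queue_mapping(). If the SQ has run out of descriptors, stop the
 * txq, re-check the free SQE count after a barrier (a completion interrupt
 * may have just freed SQBs) and ask the stack to retry via NETDEV_TX_BUSY.
 */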
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &vf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static void otx2vf_set_rx_mode(struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	queue_work(vf->otx2_wq, &vf->rx_mode_work);
}

static void otx2vf_do_set_rx_mode(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
	struct net_device *netdev = vf->netdev;
	unsigned int flags = netdev->flags;
	struct nix_rx_mode *req;

	mutex_lock(&vf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
	if (!req) {
		mutex_unlock(&vf->mbox.lock);
		return;
	}

	req->mode = NIX_RX_MODE_UCAST;

	if (flags & IFF_PROMISC)
		req->mode |= NIX_RX_MODE_PROMISC;
	if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	req->mode |= NIX_RX_MODE_USE_MCE;

	otx2_sync_mbox_msg(&vf->mbox);

	mutex_unlock(&vf->mbox.lock);
}

static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2vf_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2vf_open(netdev);

	return err;
}

static void otx2vf_reset_task(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

	rtnl_lock();

	if (netif_running(vf->netdev)) {
		otx2vf_stop(vf->netdev);
		vf->reset_count++;
		otx2vf_open(vf->netdev);
	}

	rtnl_unlock();
}

static int otx2vf_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	return otx2_handle_ntuple_tc_features(netdev, features);
}

static const struct net_device_ops otx2vf_netdev_ops = {
	.ndo_open = otx2vf_open,
	.ndo_stop = otx2vf_stop,
	.ndo_start_xmit = otx2vf_xmit,
	.ndo_set_rx_mode = otx2vf_set_rx_mode,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2vf_change_mtu,
	.ndo_set_features = otx2vf_set_features,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_tx_timeout = otx2_tx_timeout,
	.ndo_eth_ioctl = otx2_ioctl,
	.ndo_setup_tc = otx2_setup_tc,
};

static int otx2_wq_init(struct otx2_nic *vf)
{
	vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
	if (!vf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
	INIT_WORK(&vf->reset_task, otx2vf_reset_task);
	return 0;
}

static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
	struct otx2_hw *hw = &vf->hw;
	int num_vec, err;

	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2vf_disable_mbox_intr(vf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2vf_register_mbox_intr(vf, false);
}
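
/* Probe: map the CSR BAR, bring up the VF <=> PF mailbox, have the AF
 * attach NPA and NIX LFs, then resize the MSI-X vectors to the attached
 * resources before registering the netdev and its offload, ethtool and
 * devlink hooks.
 */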
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int num_vec = pci_msix_vec_count(pdev);
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *vf;
	struct otx2_hw *hw;
	int err, qcount;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	qcount = num_online_cpus();
	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	vf = netdev_priv(netdev);
	vf->netdev = netdev;
	vf->pdev = pdev;
	vf->dev = dev;
	vf->iommu_domain = iommu_get_domain_for_dev(dev);

	vf->flags |= OTX2_FLAG_INTF_DOWN;
	hw = &vf->hw;
	hw->pdev = vf->pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;
	hw->tot_tx_queues = qcount;
	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
	/* Use CQE of 128 byte descriptor size by default */
	hw->xqe_size = 128;

	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!vf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	otx2_setup_dev_hw_settings(vf);
	/* Init VF <=> PF mailbox stuff */
	err = otx2vf_vfaf_mbox_init(vf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2vf_register_mbox_intr(vf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this VF */
	err = otx2_attach_npa_nix(vf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2vf_realloc_msix_vectors(vf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, qcount, qcount);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_lmtst_init(vf);
	if (err)
		goto err_detach_rsrc;

	/* Don't check for error. Proceed without ptp */
	otx2_ptp_init(vf);

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_GSO_UDP_L4;
	netdev->features = netdev->hw_features;
	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	netdev->features |= netdev->hw_features;
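
	/* Switchable (default-off) features: runtime toggling of NTUPLE and
	 * HW_TC is handled by otx2vf_set_features().
	 */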
	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= NETIF_F_RXALL;
	netdev->hw_features |= NETIF_F_HW_TC;

	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2vf_netdev_ops;

	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(vf);

	/* To distinguish, for LBK VFs set netdev name explicitly */
	if (is_otx2_lbkvf(vf->pdev)) {
		int n;

		n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
		/* Need to subtract 1 to get proper VF number */
		n -= 1;
		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
	}

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_ptp_destroy;
	}

	err = otx2_wq_init(vf);
	if (err)
		goto err_unreg_netdev;

	otx2vf_set_ethtool_ops(netdev);

	err = otx2vf_mcam_flow_init(vf);
	if (err)
		goto err_unreg_netdev;

	err = otx2_init_tc(vf);
	if (err)
		goto err_unreg_netdev;

	err = otx2_register_dl(vf);
	if (err)
		goto err_shutdown_tc;

#ifdef CONFIG_DCB
	err = otx2_dcbnl_set_ops(netdev);
	if (err)
		goto err_shutdown_tc;
#endif

	return 0;

err_shutdown_tc:
	otx2_shutdown_tc(vf);
err_unreg_netdev:
	unregister_netdev(netdev);
err_ptp_destroy:
	otx2_ptp_destroy(vf);
err_detach_rsrc:
	free_percpu(vf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
	otx2vf_disable_mbox_intr(vf);
err_mbox_destroy:
	otx2vf_vfaf_mbox_destroy(vf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}
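
/* Teardown mirrors probe in reverse: quiesce pause/PFC config, unregister
 * the netdev and its helpers, detach NPA/NIX LFs from the AF and release
 * mailbox, IRQ and PCI resources. Also used as the PCI .shutdown handler.
 */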
static void otx2vf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *vf;

	if (!netdev)
		return;

	vf = netdev_priv(netdev);

	/* Disable 802.3x pause frames */
	if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
	    (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
		vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
		vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
		otx2_config_pause_frm(vf);
	}

#ifdef CONFIG_DCB
	/* Disable PFC config */
	if (vf->pfc_en) {
		vf->pfc_en = 0;
		otx2_config_priority_flow_ctrl(vf);
	}
#endif

	cancel_work_sync(&vf->reset_task);
	otx2_unregister_dl(vf);
	unregister_netdev(netdev);
	if (vf->otx2_wq)
		destroy_workqueue(vf->otx2_wq);
	otx2_ptp_destroy(vf);
	otx2_mcam_flow_del(vf);
	otx2_shutdown_tc(vf);
	otx2vf_disable_mbox_intr(vf);
	otx2_detach_resources(&vf->mbox);
	free_percpu(vf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2vf_vfaf_mbox_destroy(vf);
	pci_free_irq_vectors(vf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};

static int __init otx2vf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2vf_driver);
}

static void __exit otx2vf_cleanup_module(void)
{
	pci_unregister_driver(&otx2vf_driver);
}

module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);