// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Virtual Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>

#include "otx2_common.h"
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define DRV_NAME	"rvu_nicvf"
#define DRV_STRING	"Marvell RVU NIC Virtual Function Driver"

static const struct pci_device_id otx2_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ }
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);

/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0,
};

static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
					 struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(vf->dev,
			"Mbox msg with unknown ID %d\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(vf->dev,
			"Mbox msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	if (msg->rc == MBOX_MSG_INVALID) {
		dev_err(vf->dev,
			"PF/AF says the sent msg(s) %d were invalid\n",
			msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		vf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(vf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(vf->dev,
				"Mbox msg response has err %d, ID %d\n",
				msg->rc, msg->id);
	}
}

static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (af_mbox->num_msgs == 0)
		return;
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}

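/* "Up" messages travel in the opposite direction to the request/response
 * mailbox handled above: the PF pushes unsolicited notifications (currently
 * only CGX link events) down to the VF, and the VF queues a response for
 * each of them before kicking the mailbox back via otx2_mbox_msg_send().
 */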
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
				      struct mbox_msghdr *req)
{
	struct msg_rsp *rsp;
	int err;

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
	case MBOX_MSG_CGX_LINK_EVENT:
		rsp = (struct msg_rsp *)
			otx2_mbox_alloc_msg(&vf->mbox.mbox_up, 0,
					    sizeof(struct msg_rsp));
		if (!rsp)
			return -ENOMEM;

		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
		rsp->hdr.pcifunc = 0;
		rsp->hdr.rc = 0;
		err = otx2_mbox_up_handler_cgx_link_event(
				vf, (struct cgx_link_info_msg *)req, rsp);
		return err;
	default:
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}

static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *vf;
	int offset, id;

	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	vf = vf_mbox->pfvf;
	mbox = &vf_mbox->mbox_up;
	mdev = &mbox->dev[0];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (vf_mbox->up_num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_mbox_msg_up(vf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_msg_send(mbox, 0);
}

static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;

	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

	/* Read latest mbox data */
	smp_rmb();

	/* Check for PF => VF response messages */
	mbox = &vf->mbox.mbox;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
	}
	/* Check for PF => VF notification messages */
	mbox = &vf->mbox.mbox_up;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.up_num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
	}

	return IRQ_HANDLED;
}

static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
{
	int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);

	/* Disable VF => PF mailbox IRQ */
	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, vf);
}

static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
	struct otx2_hw *hw = &vf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
	if (err) {
		dev_err(vf->dev,
			"RVUVF: IRQ registration failed for VFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from PF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_pf)
		return 0;

	/* Check mailbox communication with PF */
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
	if (!req) {
		otx2vf_disable_mbox_intr(vf);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&vf->mbox);
	if (err) {
		dev_warn(vf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2vf_disable_mbox_intr(vf);
		return -EPROBE_DEFER;
	}
	return 0;
}

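/* Mailbox region setup/teardown. On CN10K the VF mailbox lives in the
 * device's own BAR2 register space and is reached through the already
 * mapped reg_base, so destroy has nothing extra to unmap; on OcteonTx2
 * it is a shared RAM region that this driver ioremaps itself.
 */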
static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;

	if (vf->mbox_wq) {
		destroy_workqueue(vf->mbox_wq);
		vf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}

static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = vf;
	vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
				      WQ_UNBOUND | WQ_HIGHPRI |
				      WQ_MEM_RECLAIM, 1);
	if (!vf->mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
	} else {
		/* Mailbox is a reserved memory (in RAM) region shared between
		 * admin function (i.e. PF0) and this VF, shouldn't be mapped
		 * as device memory to allow unaligned accesses.
		 */
		hwbase = ioremap_wc(pci_resource_start(vf->pdev,
						       PCI_MBOX_BAR_NUM),
				    pci_resource_len(vf->pdev,
						     PCI_MBOX_BAR_NUM));
		if (!hwbase) {
			dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
			err = -ENOMEM;
			goto exit;
		}
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, vf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap(hwbase);
	destroy_workqueue(vf->mbox_wq);
	return err;
}

static int otx2vf_open(struct net_device *netdev)
{
	struct otx2_nic *vf;
	int err;

	err = otx2_open(netdev);
	if (err)
		return err;

	/* LBKs do not receive link events so tell everyone we are up here */
	vf = netdev_priv(netdev);
	if (is_otx2_lbkvf(vf->pdev)) {
		pr_info("%s NIC Link is UP\n", netdev->name);
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	}

	return 0;
}

static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}

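/* Tx: if the send queue is full, stop the netdev queue, then re-check the
 * aura free count after a barrier so that a queue whose SQBs were freed
 * in the meantime is woken here rather than waiting for the next Tx
 * completion interrupt.
 */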
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &vf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static void otx2vf_set_rx_mode(struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	queue_work(vf->otx2_wq, &vf->rx_mode_work);
}

static void otx2vf_do_set_rx_mode(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
	struct net_device *netdev = vf->netdev;
	unsigned int flags = netdev->flags;
	struct nix_rx_mode *req;

	mutex_lock(&vf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
	if (!req) {
		mutex_unlock(&vf->mbox.lock);
		return;
	}

	req->mode = NIX_RX_MODE_UCAST;

	if (flags & IFF_PROMISC)
		req->mode |= NIX_RX_MODE_PROMISC;
	if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	req->mode |= NIX_RX_MODE_USE_MCE;

	otx2_sync_mbox_msg(&vf->mbox);

	mutex_unlock(&vf->mbox.lock);
}

static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2vf_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2vf_open(netdev);

	return err;
}

static void otx2vf_reset_task(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

	rtnl_lock();

	if (netif_running(vf->netdev)) {
		otx2vf_stop(vf->netdev);
		vf->reset_count++;
		otx2vf_open(vf->netdev);
	}

	rtnl_unlock();
}

static int otx2vf_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
	struct otx2_nic *vf = netdev_priv(netdev);

	if (changed & NETIF_F_NTUPLE) {
		if (!ntuple_enabled) {
			otx2_mcam_flow_del(vf);
			return 0;
		}

		if (!otx2_get_maxflows(vf->flow_cfg)) {
			netdev_err(netdev,
				   "Can't enable NTUPLE, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}
	return 0;
}

static const struct net_device_ops otx2vf_netdev_ops = {
	.ndo_open = otx2vf_open,
	.ndo_stop = otx2vf_stop,
	.ndo_start_xmit = otx2vf_xmit,
	.ndo_set_rx_mode = otx2vf_set_rx_mode,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2vf_change_mtu,
	.ndo_set_features = otx2vf_set_features,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_tx_timeout = otx2_tx_timeout,
	.ndo_eth_ioctl = otx2_ioctl,
};

static int otx2_wq_init(struct otx2_nic *vf)
{
	vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
	if (!vf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
	INIT_WORK(&vf->reset_task, otx2vf_reset_task);
	return 0;
}

static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
	struct otx2_hw *hw = &vf->hw;
	int num_vec, err;

	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2vf_disable_mbox_intr(vf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2vf_register_mbox_intr(vf, false);
}

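/* Probe order: map CSRs, bring up the VF <=> PF mailbox, ask the AF to
 * attach NPA/NIX LFs, reallocate MSI-X vectors to match what the attached
 * LFs need, then set up and register the netdev. Failures unwind through
 * the error labels at the bottom in reverse order.
 */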
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int num_vec = pci_msix_vec_count(pdev);
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *vf;
	struct otx2_hw *hw;
	int err, qcount;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	qcount = num_online_cpus();
	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	vf = netdev_priv(netdev);
	vf->netdev = netdev;
	vf->pdev = pdev;
	vf->dev = dev;
	vf->iommu_domain = iommu_get_domain_for_dev(dev);

	vf->flags |= OTX2_FLAG_INTF_DOWN;
	hw = &vf->hw;
	hw->pdev = vf->pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;
	hw->tot_tx_queues = qcount;

	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!vf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	otx2_setup_dev_hw_settings(vf);

	/* Init VF <=> PF mailbox stuff */
	err = otx2vf_vfaf_mbox_init(vf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2vf_register_mbox_intr(vf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this VF */
	err = otx2_attach_npa_nix(vf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2vf_realloc_msix_vectors(vf);
	if (err)
		goto err_mbox_destroy;

	err = otx2_set_real_num_queues(netdev, qcount, qcount);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_lmtst_init(vf);
	if (err)
		goto err_detach_rsrc;

	/* Don't check for error. Proceed without ptp */
	otx2_ptp_init(vf);

	/* Assign default MAC address */
	otx2_get_mac_from_af(netdev);

	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_GSO_UDP_L4;
	netdev->features = netdev->hw_features;
	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2vf_netdev_ops;

	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(vf);

	/* To distinguish LBK VFs, set the netdev name explicitly */
	if (is_otx2_lbkvf(vf->pdev)) {
		int n;

		n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
		/* Need to subtract 1 to get proper VF number */
		n -= 1;
		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
	}

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_detach_rsrc;
	}

	err = otx2_wq_init(vf);
	if (err)
		goto err_unreg_netdev;

	otx2vf_set_ethtool_ops(netdev);

	err = otx2vf_mcam_flow_init(vf);
	if (err)
		goto err_unreg_netdev;

	err = otx2_register_dl(vf);
	if (err)
		goto err_unreg_netdev;

	/* Enable pause frames by default */
	vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;

err_unreg_netdev:
	unregister_netdev(netdev);
err_detach_rsrc:
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
	otx2vf_disable_mbox_intr(vf);
err_mbox_destroy:
	otx2vf_vfaf_mbox_destroy(vf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}

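/* Teardown mirrors probe in reverse. This is also the PCI shutdown
 * handler, so it must tolerate a device that never finished probing
 * (hence the NULL drvdata check below).
 */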
static void otx2vf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *vf;

	if (!netdev)
		return;

	vf = netdev_priv(netdev);

	cancel_work_sync(&vf->reset_task);
	otx2_unregister_dl(vf);
	unregister_netdev(netdev);
	if (vf->otx2_wq)
		destroy_workqueue(vf->otx2_wq);
	otx2vf_disable_mbox_intr(vf);
	otx2_detach_resources(&vf->mbox);
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2vf_vfaf_mbox_destroy(vf);
	pci_free_irq_vectors(vf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};

static int __init otx2vf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2vf_driver);
}

static void __exit otx2vf_cleanup_module(void)
{
	pci_unregister_driver(&otx2vf_driver);
}

module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);