/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn23xx_vf_device.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct octnic_gather {
	/* List manipulation. Next and prev pointers. */
	struct list_head list;

	/* Size of the gather component at sg in bytes. */
	int sg_size;

	/* Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/* Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
104 */ 105 struct octeon_sg_entry *sg; 106 107 dma_addr_t sg_dma_ptr; 108 }; 109 110 static int 111 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 112 static void liquidio_vf_remove(struct pci_dev *pdev); 113 static int octeon_device_init(struct octeon_device *oct); 114 static int liquidio_stop(struct net_device *netdev); 115 116 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 117 { 118 struct octeon_device_priv *oct_priv = 119 (struct octeon_device_priv *)oct->priv; 120 int retry = MAX_IO_PENDING_PKT_COUNT; 121 int pkt_cnt = 0, pending_pkts; 122 int i; 123 124 do { 125 pending_pkts = 0; 126 127 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 128 if (!(oct->io_qmask.oq & BIT_ULL(i))) 129 continue; 130 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 131 } 132 if (pkt_cnt > 0) { 133 pending_pkts += pkt_cnt; 134 tasklet_schedule(&oct_priv->droq_tasklet); 135 } 136 pkt_cnt = 0; 137 schedule_timeout_uninterruptible(1); 138 139 } while (retry-- && pending_pkts); 140 141 return pkt_cnt; 142 } 143 144 /** 145 * \brief Cause device to go quiet so it can be safely removed/reset/etc 146 * @param oct Pointer to Octeon device 147 */ 148 static void pcierror_quiesce_device(struct octeon_device *oct) 149 { 150 int i; 151 152 /* Disable the input and output queues now. No more packets will 153 * arrive from Octeon, but we should wait for all packet processing 154 * to finish. 155 */ 156 157 /* To allow for in-flight requests */ 158 schedule_timeout_uninterruptible(100); 159 160 if (wait_for_pending_requests(oct)) 161 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 162 163 /* Force all requests waiting to be fetched by OCTEON to complete. */ 164 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 165 struct octeon_instr_queue *iq; 166 167 if (!(oct->io_qmask.iq & BIT_ULL(i))) 168 continue; 169 iq = oct->instr_queue[i]; 170 171 if (atomic_read(&iq->instr_pending)) { 172 spin_lock_bh(&iq->lock); 173 iq->fill_cnt = 0; 174 iq->octeon_read_index = iq->host_write_index; 175 iq->stats.instr_processed += 176 atomic_read(&iq->instr_pending); 177 lio_process_iq_request_list(oct, iq, 0); 178 spin_unlock_bh(&iq->lock); 179 } 180 } 181 182 /* Force all pending ordered list requests to time out. */ 183 lio_process_ordered_list(oct, 1); 184 185 /* We do not need to wait for output queue packets to be processed. */ 186 } 187 188 /** 189 * \brief Cleanup PCI AER uncorrectable error status 190 * @param dev Pointer to PCI device 191 */ 192 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 193 { 194 u32 status, mask; 195 int pos = 0x100; 196 197 pr_info("%s :\n", __func__); 198 199 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 200 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 201 if (dev->error_state == pci_channel_io_normal) 202 status &= ~mask; /* Clear corresponding nonfatal bits */ 203 else 204 status &= mask; /* Clear corresponding fatal bits */ 205 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 206 } 207 208 /** 209 * \brief Stop all PCI IO to a given device 210 * @param dev Pointer to Octeon device 211 */ 212 static void stop_pci_io(struct octeon_device *oct) 213 { 214 struct msix_entry *msix_entries; 215 int i; 216 217 /* No more instructions will be forwarded. 
*/ 218 atomic_set(&oct->status, OCT_DEV_IN_RESET); 219 220 for (i = 0; i < oct->ifcount; i++) 221 netif_device_detach(oct->props[i].netdev); 222 223 /* Disable interrupts */ 224 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 225 226 pcierror_quiesce_device(oct); 227 if (oct->msix_on) { 228 msix_entries = (struct msix_entry *)oct->msix_entries; 229 for (i = 0; i < oct->num_msix_irqs; i++) { 230 /* clear the affinity_cpumask */ 231 irq_set_affinity_hint(msix_entries[i].vector, 232 NULL); 233 free_irq(msix_entries[i].vector, 234 &oct->ioq_vector[i]); 235 } 236 pci_disable_msix(oct->pci_dev); 237 kfree(oct->msix_entries); 238 oct->msix_entries = NULL; 239 octeon_free_ioq_vector(oct); 240 } 241 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 242 lio_get_state_string(&oct->status)); 243 244 /* making it a common function for all OCTEON models */ 245 cleanup_aer_uncorrect_error_status(oct->pci_dev); 246 247 pci_disable_device(oct->pci_dev); 248 } 249 250 /** 251 * \brief called when PCI error is detected 252 * @param pdev Pointer to PCI device 253 * @param state The current pci connection state 254 * 255 * This function is called after a PCI bus error affecting 256 * this device has been detected. 257 */ 258 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 259 pci_channel_state_t state) 260 { 261 struct octeon_device *oct = pci_get_drvdata(pdev); 262 263 /* Non-correctable Non-fatal errors */ 264 if (state == pci_channel_io_normal) { 265 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 266 cleanup_aer_uncorrect_error_status(oct->pci_dev); 267 return PCI_ERS_RESULT_CAN_RECOVER; 268 } 269 270 /* Non-correctable Fatal errors */ 271 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 272 stop_pci_io(oct); 273 274 return PCI_ERS_RESULT_DISCONNECT; 275 } 276 277 /* For PCI-E Advanced Error Recovery (AER) Interface */ 278 static const struct pci_error_handlers liquidio_vf_err_handler = { 279 .error_detected = liquidio_pcie_error_detected, 280 }; 281 282 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 283 { 284 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 285 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 286 }, 287 { 288 0, 0, 0, 0, 0, 0, 0 289 } 290 }; 291 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 292 293 static struct pci_driver liquidio_vf_pci_driver = { 294 .name = "LiquidIO_VF", 295 .id_table = liquidio_vf_pci_tbl, 296 .probe = liquidio_vf_probe, 297 .remove = liquidio_vf_remove, 298 .err_handler = &liquidio_vf_err_handler, /* For AER */ 299 }; 300 301 /** 302 * \brief Stop Tx queues 303 * @param netdev network device 304 */ 305 static void txqs_stop(struct net_device *netdev) 306 { 307 if (netif_is_multiqueue(netdev)) { 308 int i; 309 310 for (i = 0; i < netdev->num_tx_queues; i++) 311 netif_stop_subqueue(netdev, i); 312 } else { 313 netif_stop_queue(netdev); 314 } 315 } 316 317 /** 318 * \brief Start Tx queues 319 * @param netdev network device 320 */ 321 static void txqs_start(struct net_device *netdev) 322 { 323 if (netif_is_multiqueue(netdev)) { 324 int i; 325 326 for (i = 0; i < netdev->num_tx_queues; i++) 327 netif_start_subqueue(netdev, i); 328 } else { 329 netif_start_queue(netdev); 330 } 331 } 332 333 /** 334 * \brief Wake Tx queues 335 * @param netdev network device 336 */ 337 static void txqs_wake(struct net_device *netdev) 338 { 339 struct lio *lio = GET_LIO(netdev); 340 341 if (netif_is_multiqueue(netdev)) { 342 int i; 343 344 for (i = 0; i < netdev->num_tx_queues; i++) { 345 int qno = 
lio->linfo.txpciq[i % lio->oct_dev->num_iqs] 346 .s.q_no; 347 if (__netif_subqueue_stopped(netdev, i)) { 348 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, 349 tx_restart, 1); 350 netif_wake_subqueue(netdev, i); 351 } 352 } 353 } else { 354 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 355 tx_restart, 1); 356 netif_wake_queue(netdev); 357 } 358 } 359 360 /** 361 * \brief Start Tx queue 362 * @param netdev network device 363 */ 364 static void start_txq(struct net_device *netdev) 365 { 366 struct lio *lio = GET_LIO(netdev); 367 368 if (lio->linfo.link.s.link_up) { 369 txqs_start(netdev); 370 return; 371 } 372 } 373 374 /** 375 * \brief Wake a queue 376 * @param netdev network device 377 * @param q which queue to wake 378 */ 379 static void wake_q(struct net_device *netdev, int q) 380 { 381 if (netif_is_multiqueue(netdev)) 382 netif_wake_subqueue(netdev, q); 383 else 384 netif_wake_queue(netdev); 385 } 386 387 /** 388 * \brief Stop a queue 389 * @param netdev network device 390 * @param q which queue to stop 391 */ 392 static void stop_q(struct net_device *netdev, int q) 393 { 394 if (netif_is_multiqueue(netdev)) 395 netif_stop_subqueue(netdev, q); 396 else 397 netif_stop_queue(netdev); 398 } 399 400 /** 401 * Remove the node at the head of the list. The list would be empty at 402 * the end of this call if there are no more nodes in the list. 403 */ 404 static struct list_head *list_delete_head(struct list_head *root) 405 { 406 struct list_head *node; 407 408 if ((root->prev == root) && (root->next == root)) 409 node = NULL; 410 else 411 node = root->next; 412 413 if (node) 414 list_del(node); 415 416 return node; 417 } 418 419 /** 420 * \brief Delete gather lists 421 * @param lio per-network private data 422 */ 423 static void delete_glists(struct lio *lio) 424 { 425 struct octnic_gather *g; 426 int i; 427 428 kfree(lio->glist_lock); 429 lio->glist_lock = NULL; 430 431 if (!lio->glist) 432 return; 433 434 for (i = 0; i < lio->linfo.num_txpciq; i++) { 435 do { 436 g = (struct octnic_gather *) 437 list_delete_head(&lio->glist[i]); 438 if (g) 439 kfree(g); 440 } while (g); 441 442 if (lio->glists_virt_base && lio->glists_virt_base[i] && 443 lio->glists_dma_base && lio->glists_dma_base[i]) { 444 lio_dma_free(lio->oct_dev, 445 lio->glist_entry_size * lio->tx_qsize, 446 lio->glists_virt_base[i], 447 lio->glists_dma_base[i]); 448 } 449 } 450 451 kfree(lio->glists_virt_base); 452 lio->glists_virt_base = NULL; 453 454 kfree(lio->glists_dma_base); 455 lio->glists_dma_base = NULL; 456 457 kfree(lio->glist); 458 lio->glist = NULL; 459 } 460 461 /** 462 * \brief Setup gather lists 463 * @param lio per-network private data 464 */ 465 static int setup_glists(struct lio *lio, int num_iqs) 466 { 467 struct octnic_gather *g; 468 int i, j; 469 470 lio->glist_lock = 471 kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); 472 if (!lio->glist_lock) 473 return -ENOMEM; 474 475 lio->glist = 476 kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); 477 if (!lio->glist) { 478 kfree(lio->glist_lock); 479 lio->glist_lock = NULL; 480 return -ENOMEM; 481 } 482 483 lio->glist_entry_size = 484 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); 485 486 /* allocate memory to store virtual and dma base address of 487 * per glist consistent memory 488 */ 489 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), 490 GFP_KERNEL); 491 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), 492 GFP_KERNEL); 493 494 if (!lio->glists_virt_base || !lio->glists_dma_base) { 495 
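		/* One of the base-address arrays failed to allocate; unwind
		 * everything that setup_glists() has built so far before
		 * bailing out.
		 */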
delete_glists(lio); 496 return -ENOMEM; 497 } 498 499 for (i = 0; i < num_iqs; i++) { 500 spin_lock_init(&lio->glist_lock[i]); 501 502 INIT_LIST_HEAD(&lio->glist[i]); 503 504 lio->glists_virt_base[i] = 505 lio_dma_alloc(lio->oct_dev, 506 lio->glist_entry_size * lio->tx_qsize, 507 &lio->glists_dma_base[i]); 508 509 if (!lio->glists_virt_base[i]) { 510 delete_glists(lio); 511 return -ENOMEM; 512 } 513 514 for (j = 0; j < lio->tx_qsize; j++) { 515 g = kzalloc(sizeof(*g), GFP_KERNEL); 516 if (!g) 517 break; 518 519 g->sg = lio->glists_virt_base[i] + 520 (j * lio->glist_entry_size); 521 522 g->sg_dma_ptr = lio->glists_dma_base[i] + 523 (j * lio->glist_entry_size); 524 525 list_add_tail(&g->list, &lio->glist[i]); 526 } 527 528 if (j != lio->tx_qsize) { 529 delete_glists(lio); 530 return -ENOMEM; 531 } 532 } 533 534 return 0; 535 } 536 537 /** 538 * \brief Print link information 539 * @param netdev network device 540 */ 541 static void print_link_info(struct net_device *netdev) 542 { 543 struct lio *lio = GET_LIO(netdev); 544 545 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 546 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 547 struct oct_link_info *linfo = &lio->linfo; 548 549 if (linfo->link.s.link_up) { 550 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 551 linfo->link.s.speed, 552 (linfo->link.s.duplex) ? "Full" : "Half"); 553 } else { 554 netif_info(lio, link, lio->netdev, "Link Down\n"); 555 } 556 } 557 } 558 559 /** 560 * \brief Routine to notify MTU change 561 * @param work work_struct data structure 562 */ 563 static void octnet_link_status_change(struct work_struct *work) 564 { 565 struct cavium_wk *wk = (struct cavium_wk *)work; 566 struct lio *lio = (struct lio *)wk->ctxptr; 567 568 rtnl_lock(); 569 call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); 570 rtnl_unlock(); 571 } 572 573 /** 574 * \brief Sets up the mtu status change work 575 * @param netdev network device 576 */ 577 static int setup_link_status_change_wq(struct net_device *netdev) 578 { 579 struct lio *lio = GET_LIO(netdev); 580 struct octeon_device *oct = lio->oct_dev; 581 582 lio->link_status_wq.wq = alloc_workqueue("link-status", 583 WQ_MEM_RECLAIM, 0); 584 if (!lio->link_status_wq.wq) { 585 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 586 return -1; 587 } 588 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 589 octnet_link_status_change); 590 lio->link_status_wq.wk.ctxptr = lio; 591 592 return 0; 593 } 594 595 static void cleanup_link_status_change_wq(struct net_device *netdev) 596 { 597 struct lio *lio = GET_LIO(netdev); 598 599 if (lio->link_status_wq.wq) { 600 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 601 destroy_workqueue(lio->link_status_wq.wq); 602 } 603 } 604 605 /** 606 * \brief Update link status 607 * @param netdev network device 608 * @param ls link status structure 609 * 610 * Called on receipt of a link status response from the core application to 611 * update each interface's link status. 
 */
static void update_link_status(struct net_device *netdev,
			       union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
		lio->linfo.link.u64 = ls->u64;

		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			txqs_stop(netdev);
		}

		if (lio->linfo.link.s.mtu != netdev->max_mtu) {
			dev_info(&oct->pci_dev->dev, "Max MTU Changed from %d to %d\n",
				 netdev->max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}

		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			lio->mtu = lio->linfo.link.s.mtu;
			netdev->mtu = lio->linfo.link.s.mtu;
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_vf_probe(struct pci_dev *pdev,
		  const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));

	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}
	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = pdev;

	if (octeon_device_init(oct_dev)) {
		liquidio_vf_remove(pdev);
		return -ENOMEM;
	}

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	pcie_flr(oct->pci_dev);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded.
		 */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:
		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs; i++) {
				if (oct->ioq_vector[i].vector) {
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
		}
		/* Soft reset the octeon device before exiting */
		if (oct->pci_dev->reset_fn)
			octeon_pci_flr(oct);
		else
			cn23xx_vf_ask_pf_to_do_flr(oct);

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}
}

/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct
= lio_get_device(ctx->octeon_id); 851 if (status) 852 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", 853 CVM_CAST64(status)); 854 WRITE_ONCE(ctx->cond, 1); 855 856 /* This barrier is required to be sure that the response has been 857 * written fully before waking up the handler 858 */ 859 wmb(); 860 861 wake_up_interruptible(&ctx->wc); 862 } 863 864 /** 865 * \brief Send Rx control command 866 * @param lio per-network private data 867 * @param start_stop whether to start or stop 868 */ 869 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 870 { 871 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 872 int ctx_size = sizeof(struct liquidio_rx_ctl_context); 873 struct liquidio_rx_ctl_context *ctx; 874 struct octeon_soft_command *sc; 875 union octnet_cmd *ncmd; 876 int retval; 877 878 if (oct->props[lio->ifidx].rx_on == start_stop) 879 return; 880 881 sc = (struct octeon_soft_command *) 882 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 883 16, ctx_size); 884 885 ncmd = (union octnet_cmd *)sc->virtdptr; 886 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 887 888 WRITE_ONCE(ctx->cond, 0); 889 ctx->octeon_id = lio_get_device_id(oct); 890 init_waitqueue_head(&ctx->wc); 891 892 ncmd->u64 = 0; 893 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 894 ncmd->s.param1 = start_stop; 895 896 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 897 898 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 899 900 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 901 OPCODE_NIC_CMD, 0, 0, 0); 902 903 sc->callback = rx_ctl_callback; 904 sc->callback_arg = sc; 905 sc->wait_time = 5000; 906 907 retval = octeon_send_soft_command(oct, sc); 908 if (retval == IQ_SEND_FAILED) { 909 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 910 } else { 911 /* Sleep on a wait queue till the cond flag indicates that the 912 * response arrived or timed-out. 913 */ 914 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) 915 return; 916 oct->props[lio->ifidx].rx_on = start_stop; 917 } 918 919 octeon_free_soft_command(oct, sc); 920 } 921 922 /** 923 * \brief Destroy NIC device interface 924 * @param oct octeon device 925 * @param ifidx which interface to destroy 926 * 927 * Cleanup associated with each interface for an Octeon device when NIC 928 * module is being unloaded or if initialization fails during load. 
929 */ 930 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 931 { 932 struct net_device *netdev = oct->props[ifidx].netdev; 933 struct napi_struct *napi, *n; 934 struct lio *lio; 935 936 if (!netdev) { 937 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 938 __func__, ifidx); 939 return; 940 } 941 942 lio = GET_LIO(netdev); 943 944 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 945 946 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 947 liquidio_stop(netdev); 948 949 if (oct->props[lio->ifidx].napi_enabled == 1) { 950 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 951 napi_disable(napi); 952 953 oct->props[lio->ifidx].napi_enabled = 0; 954 955 oct->droq[0]->ops.poll_mode = 0; 956 } 957 958 /* Delete NAPI */ 959 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 960 netif_napi_del(napi); 961 962 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 963 unregister_netdev(netdev); 964 965 cleanup_rx_oom_poll_fn(netdev); 966 967 cleanup_link_status_change_wq(netdev); 968 969 delete_glists(lio); 970 971 free_netdev(netdev); 972 973 oct->props[ifidx].gmxport = -1; 974 975 oct->props[ifidx].netdev = NULL; 976 } 977 978 /** 979 * \brief Stop complete NIC functionality 980 * @param oct octeon device 981 */ 982 static int liquidio_stop_nic_module(struct octeon_device *oct) 983 { 984 struct lio *lio; 985 int i, j; 986 987 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 988 if (!oct->ifcount) { 989 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 990 return 1; 991 } 992 993 spin_lock_bh(&oct->cmd_resp_wqlock); 994 oct->cmd_resp_state = OCT_DRV_OFFLINE; 995 spin_unlock_bh(&oct->cmd_resp_wqlock); 996 997 for (i = 0; i < oct->ifcount; i++) { 998 lio = GET_LIO(oct->props[i].netdev); 999 for (j = 0; j < oct->num_oqs; j++) 1000 octeon_unregister_droq_ops(oct, 1001 lio->linfo.rxpciq[j].s.q_no); 1002 } 1003 1004 for (i = 0; i < oct->ifcount; i++) 1005 liquidio_destroy_nic_device(oct, i); 1006 1007 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1008 return 0; 1009 } 1010 1011 /** 1012 * \brief Cleans up resources at unload time 1013 * @param pdev PCI device structure 1014 */ 1015 static void liquidio_vf_remove(struct pci_dev *pdev) 1016 { 1017 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1018 1019 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1020 1021 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 1022 liquidio_stop_nic_module(oct_dev); 1023 1024 /* Reset the octeon device and cleanup all memory allocated for 1025 * the octeon device by driver. 1026 */ 1027 octeon_destroy_resources(oct_dev); 1028 1029 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1030 1031 /* This octeon device has been removed. Update the global 1032 * data structure to reflect this. Free the device structure. 1033 */ 1034 octeon_free_device_mem(oct_dev); 1035 } 1036 1037 /** 1038 * \brief PCI initialization for each Octeon device. 
1039 * @param oct octeon device 1040 */ 1041 static int octeon_pci_os_setup(struct octeon_device *oct) 1042 { 1043 #ifdef CONFIG_PCI_IOV 1044 /* setup PCI stuff first */ 1045 if (!oct->pci_dev->physfn) 1046 octeon_pci_flr(oct); 1047 #endif 1048 1049 if (pci_enable_device(oct->pci_dev)) { 1050 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1051 return 1; 1052 } 1053 1054 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1055 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1056 pci_disable_device(oct->pci_dev); 1057 return 1; 1058 } 1059 1060 /* Enable PCI DMA Master. */ 1061 pci_set_master(oct->pci_dev); 1062 1063 return 0; 1064 } 1065 1066 static int skb_iq(struct lio *lio, struct sk_buff *skb) 1067 { 1068 int q = 0; 1069 1070 if (netif_is_multiqueue(lio->netdev)) 1071 q = skb->queue_mapping % lio->linfo.num_txpciq; 1072 1073 return q; 1074 } 1075 1076 /** 1077 * \brief Check Tx queue state for a given network buffer 1078 * @param lio per-network private data 1079 * @param skb network buffer 1080 */ 1081 static int check_txq_state(struct lio *lio, struct sk_buff *skb) 1082 { 1083 int q = 0, iq = 0; 1084 1085 if (netif_is_multiqueue(lio->netdev)) { 1086 q = skb->queue_mapping; 1087 iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no; 1088 } else { 1089 iq = lio->txq; 1090 q = iq; 1091 } 1092 1093 if (octnet_iq_is_full(lio->oct_dev, iq)) 1094 return 0; 1095 1096 if (__netif_subqueue_stopped(lio->netdev, q)) { 1097 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); 1098 wake_q(lio->netdev, q); 1099 } 1100 1101 return 1; 1102 } 1103 1104 /** 1105 * \brief Unmap and free network buffer 1106 * @param buf buffer 1107 */ 1108 static void free_netbuf(void *buf) 1109 { 1110 struct octnet_buf_free_info *finfo; 1111 struct sk_buff *skb; 1112 struct lio *lio; 1113 1114 finfo = (struct octnet_buf_free_info *)buf; 1115 skb = finfo->skb; 1116 lio = finfo->lio; 1117 1118 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1119 DMA_TO_DEVICE); 1120 1121 check_txq_state(lio, skb); 1122 1123 tx_buffer_free(skb); 1124 } 1125 1126 /** 1127 * \brief Unmap and free gather buffer 1128 * @param buf buffer 1129 */ 1130 static void free_netsgbuf(void *buf) 1131 { 1132 struct octnet_buf_free_info *finfo; 1133 struct octnic_gather *g; 1134 struct sk_buff *skb; 1135 int i, frags, iq; 1136 struct lio *lio; 1137 1138 finfo = (struct octnet_buf_free_info *)buf; 1139 skb = finfo->skb; 1140 lio = finfo->lio; 1141 g = finfo->g; 1142 frags = skb_shinfo(skb)->nr_frags; 1143 1144 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1145 g->sg[0].ptr[0], (skb->len - skb->data_len), 1146 DMA_TO_DEVICE); 1147 1148 i = 1; 1149 while (frags--) { 1150 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1151 1152 pci_unmap_page((lio->oct_dev)->pci_dev, 1153 g->sg[(i >> 2)].ptr[(i & 3)], 1154 frag->size, DMA_TO_DEVICE); 1155 i++; 1156 } 1157 1158 iq = skb_iq(lio, skb); 1159 1160 spin_lock(&lio->glist_lock[iq]); 1161 list_add_tail(&g->list, &lio->glist[iq]); 1162 spin_unlock(&lio->glist_lock[iq]); 1163 1164 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1165 1166 tx_buffer_free(skb); 1167 } 1168 1169 /** 1170 * \brief Unmap and free gather buffer with response 1171 * @param buf buffer 1172 */ 1173 static void free_netsgbuf_with_resp(void *buf) 1174 { 1175 struct octnet_buf_free_info *finfo; 1176 struct octeon_soft_command *sc; 1177 struct octnic_gather *g; 1178 struct sk_buff *skb; 1179 int i, frags, iq; 1180 struct lio *lio; 1181 
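	/* For REQTYPE_RESP_NET* completions the buffer handed back is the
	 * soft command itself; the skb rides in its callback_arg and the
	 * buffer-free info lives in skb->cb.
	 */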
1182 sc = (struct octeon_soft_command *)buf; 1183 skb = (struct sk_buff *)sc->callback_arg; 1184 finfo = (struct octnet_buf_free_info *)&skb->cb; 1185 1186 lio = finfo->lio; 1187 g = finfo->g; 1188 frags = skb_shinfo(skb)->nr_frags; 1189 1190 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1191 g->sg[0].ptr[0], (skb->len - skb->data_len), 1192 DMA_TO_DEVICE); 1193 1194 i = 1; 1195 while (frags--) { 1196 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1197 1198 pci_unmap_page((lio->oct_dev)->pci_dev, 1199 g->sg[(i >> 2)].ptr[(i & 3)], 1200 frag->size, DMA_TO_DEVICE); 1201 i++; 1202 } 1203 1204 iq = skb_iq(lio, skb); 1205 1206 spin_lock(&lio->glist_lock[iq]); 1207 list_add_tail(&g->list, &lio->glist[iq]); 1208 spin_unlock(&lio->glist_lock[iq]); 1209 1210 /* Don't free the skb yet */ 1211 1212 check_txq_state(lio, skb); 1213 } 1214 1215 /** 1216 * \brief Callback for getting interface configuration 1217 * @param status status of request 1218 * @param buf pointer to resp structure 1219 */ 1220 static void if_cfg_callback(struct octeon_device *oct, 1221 u32 status __attribute__((unused)), void *buf) 1222 { 1223 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1224 struct liquidio_if_cfg_context *ctx; 1225 struct liquidio_if_cfg_resp *resp; 1226 1227 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1228 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 1229 1230 oct = lio_get_device(ctx->octeon_id); 1231 if (resp->status) 1232 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 1233 CVM_CAST64(resp->status)); 1234 WRITE_ONCE(ctx->cond, 1); 1235 1236 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 1237 resp->cfg_info.liquidio_firmware_version); 1238 1239 /* This barrier is required to be sure that the response has been 1240 * written fully before waking up the handler 1241 */ 1242 wmb(); 1243 1244 wake_up_interruptible(&ctx->wc); 1245 } 1246 1247 /** 1248 * \brief Net device open for LiquidIO 1249 * @param netdev network device 1250 */ 1251 static int liquidio_open(struct net_device *netdev) 1252 { 1253 struct lio *lio = GET_LIO(netdev); 1254 struct octeon_device *oct = lio->oct_dev; 1255 struct napi_struct *napi, *n; 1256 1257 if (!oct->props[lio->ifidx].napi_enabled) { 1258 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1259 napi_enable(napi); 1260 1261 oct->props[lio->ifidx].napi_enabled = 1; 1262 1263 oct->droq[0]->ops.poll_mode = 1; 1264 } 1265 1266 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1267 1268 /* Ready for link status updates */ 1269 lio->intf_open = 1; 1270 1271 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1272 start_txq(netdev); 1273 1274 /* tell Octeon to start forwarding packets to host */ 1275 send_rx_ctrl_cmd(lio, 1); 1276 1277 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 1278 1279 return 0; 1280 } 1281 1282 /** 1283 * \brief Net device stop for LiquidIO 1284 * @param netdev network device 1285 */ 1286 static int liquidio_stop(struct net_device *netdev) 1287 { 1288 struct lio *lio = GET_LIO(netdev); 1289 struct octeon_device *oct = lio->oct_dev; 1290 struct napi_struct *napi, *n; 1291 1292 if (oct->props[lio->ifidx].napi_enabled) { 1293 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1294 napi_disable(napi); 1295 1296 oct->props[lio->ifidx].napi_enabled = 0; 1297 1298 oct->droq[0]->ops.poll_mode = 0; 1299 } 1300 1301 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 1302 /* Inform that netif carrier is down 
*/ 1303 lio->intf_open = 0; 1304 lio->linfo.link.s.link_up = 0; 1305 1306 netif_carrier_off(netdev); 1307 lio->link_changes++; 1308 1309 /* tell Octeon to stop forwarding packets to host */ 1310 send_rx_ctrl_cmd(lio, 0); 1311 1312 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1313 1314 txqs_stop(netdev); 1315 1316 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1317 1318 return 0; 1319 } 1320 1321 /** 1322 * \brief Converts a mask based on net device flags 1323 * @param netdev network device 1324 * 1325 * This routine generates a octnet_ifflags mask from the net device flags 1326 * received from the OS. 1327 */ 1328 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1329 { 1330 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1331 1332 if (netdev->flags & IFF_PROMISC) 1333 f |= OCTNET_IFFLAG_PROMISC; 1334 1335 if (netdev->flags & IFF_ALLMULTI) 1336 f |= OCTNET_IFFLAG_ALLMULTI; 1337 1338 if (netdev->flags & IFF_MULTICAST) { 1339 f |= OCTNET_IFFLAG_MULTICAST; 1340 1341 /* Accept all multicast addresses if there are more than we 1342 * can handle 1343 */ 1344 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1345 f |= OCTNET_IFFLAG_ALLMULTI; 1346 } 1347 1348 if (netdev->flags & IFF_BROADCAST) 1349 f |= OCTNET_IFFLAG_BROADCAST; 1350 1351 return f; 1352 } 1353 1354 static void liquidio_set_uc_list(struct net_device *netdev) 1355 { 1356 struct lio *lio = GET_LIO(netdev); 1357 struct octeon_device *oct = lio->oct_dev; 1358 struct octnic_ctrl_pkt nctrl; 1359 struct netdev_hw_addr *ha; 1360 u64 *mac; 1361 1362 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1363 return; 1364 1365 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1366 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1367 return; 1368 } 1369 1370 lio->netdev_uc_count = netdev_uc_count(netdev); 1371 1372 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1373 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1374 nctrl.ncmd.s.more = lio->netdev_uc_count; 1375 nctrl.ncmd.s.param1 = oct->vf_num; 1376 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1377 nctrl.netpndev = (u64)netdev; 1378 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1379 1380 /* copy all the addresses into the udd */ 1381 mac = &nctrl.udd[0]; 1382 netdev_for_each_uc_addr(ha, netdev) { 1383 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1384 mac++; 1385 } 1386 1387 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1388 } 1389 1390 /** 1391 * \brief Net device set_multicast_list 1392 * @param netdev network device 1393 */ 1394 static void liquidio_set_mcast_list(struct net_device *netdev) 1395 { 1396 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1397 struct lio *lio = GET_LIO(netdev); 1398 struct octeon_device *oct = lio->oct_dev; 1399 struct octnic_ctrl_pkt nctrl; 1400 struct netdev_hw_addr *ha; 1401 u64 *mc; 1402 int ret; 1403 1404 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1405 1406 /* Create a ctrl pkt command to be sent to core app. 
*/ 1407 nctrl.ncmd.u64 = 0; 1408 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1409 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1410 nctrl.ncmd.s.param2 = mc_count; 1411 nctrl.ncmd.s.more = mc_count; 1412 nctrl.netpndev = (u64)netdev; 1413 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1414 1415 /* copy all the addresses into the udd */ 1416 mc = &nctrl.udd[0]; 1417 netdev_for_each_mc_addr(ha, netdev) { 1418 *mc = 0; 1419 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1420 /* no need to swap bytes */ 1421 if (++mc > &nctrl.udd[mc_count]) 1422 break; 1423 } 1424 1425 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1426 1427 /* Apparently, any activity in this call from the kernel has to 1428 * be atomic. So we won't wait for response. 1429 */ 1430 nctrl.wait_time = 0; 1431 1432 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1433 if (ret < 0) { 1434 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1435 ret); 1436 } 1437 1438 liquidio_set_uc_list(netdev); 1439 } 1440 1441 /** 1442 * \brief Net device set_mac_address 1443 * @param netdev network device 1444 */ 1445 static int liquidio_set_mac(struct net_device *netdev, void *p) 1446 { 1447 struct sockaddr *addr = (struct sockaddr *)p; 1448 struct lio *lio = GET_LIO(netdev); 1449 struct octeon_device *oct = lio->oct_dev; 1450 struct octnic_ctrl_pkt nctrl; 1451 int ret = 0; 1452 1453 if (!is_valid_ether_addr(addr->sa_data)) 1454 return -EADDRNOTAVAIL; 1455 1456 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1457 return 0; 1458 1459 if (lio->linfo.macaddr_is_admin_asgnd) 1460 return -EPERM; 1461 1462 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1463 1464 nctrl.ncmd.u64 = 0; 1465 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1466 nctrl.ncmd.s.param1 = 0; 1467 nctrl.ncmd.s.more = 1; 1468 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1469 nctrl.netpndev = (u64)netdev; 1470 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1471 nctrl.wait_time = 100; 1472 1473 nctrl.udd[0] = 0; 1474 /* The MAC Address is presented in network byte order. 
*/ 1475 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1476 1477 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1478 if (ret < 0) { 1479 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1480 return -ENOMEM; 1481 } 1482 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1483 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1484 1485 return 0; 1486 } 1487 1488 /** 1489 * \brief Net device get_stats 1490 * @param netdev network device 1491 */ 1492 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 1493 { 1494 struct lio *lio = GET_LIO(netdev); 1495 struct net_device_stats *stats = &netdev->stats; 1496 u64 pkts = 0, drop = 0, bytes = 0; 1497 struct oct_droq_stats *oq_stats; 1498 struct oct_iq_stats *iq_stats; 1499 struct octeon_device *oct; 1500 int i, iq_no, oq_no; 1501 1502 oct = lio->oct_dev; 1503 1504 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1505 return stats; 1506 1507 for (i = 0; i < oct->num_iqs; i++) { 1508 iq_no = lio->linfo.txpciq[i].s.q_no; 1509 iq_stats = &oct->instr_queue[iq_no]->stats; 1510 pkts += iq_stats->tx_done; 1511 drop += iq_stats->tx_dropped; 1512 bytes += iq_stats->tx_tot_bytes; 1513 } 1514 1515 stats->tx_packets = pkts; 1516 stats->tx_bytes = bytes; 1517 stats->tx_dropped = drop; 1518 1519 pkts = 0; 1520 drop = 0; 1521 bytes = 0; 1522 1523 for (i = 0; i < oct->num_oqs; i++) { 1524 oq_no = lio->linfo.rxpciq[i].s.q_no; 1525 oq_stats = &oct->droq[oq_no]->stats; 1526 pkts += oq_stats->rx_pkts_received; 1527 drop += (oq_stats->rx_dropped + 1528 oq_stats->dropped_nodispatch + 1529 oq_stats->dropped_toomany + 1530 oq_stats->dropped_nomem); 1531 bytes += oq_stats->rx_bytes_received; 1532 } 1533 1534 stats->rx_bytes = bytes; 1535 stats->rx_packets = pkts; 1536 stats->rx_dropped = drop; 1537 1538 return stats; 1539 } 1540 1541 /** 1542 * \brief Net device change_mtu 1543 * @param netdev network device 1544 */ 1545 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 1546 { 1547 struct octnic_ctrl_pkt nctrl; 1548 struct octeon_device *oct; 1549 struct lio *lio; 1550 int ret = 0; 1551 1552 lio = GET_LIO(netdev); 1553 oct = lio->oct_dev; 1554 1555 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1556 1557 nctrl.ncmd.u64 = 0; 1558 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; 1559 nctrl.ncmd.s.param1 = new_mtu; 1560 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1561 nctrl.wait_time = LIO_CMD_WAIT_TM; 1562 nctrl.netpndev = (u64)netdev; 1563 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1564 1565 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1566 if (ret < 0) { 1567 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); 1568 return -EIO; 1569 } 1570 1571 lio->mtu = new_mtu; 1572 1573 return 0; 1574 } 1575 1576 /** 1577 * \brief Handler for SIOCSHWTSTAMP ioctl 1578 * @param netdev network device 1579 * @param ifr interface request 1580 * @param cmd command 1581 */ 1582 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 1583 { 1584 struct lio *lio = GET_LIO(netdev); 1585 struct hwtstamp_config conf; 1586 1587 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 1588 return -EFAULT; 1589 1590 if (conf.flags) 1591 return -EINVAL; 1592 1593 switch (conf.tx_type) { 1594 case HWTSTAMP_TX_ON: 1595 case HWTSTAMP_TX_OFF: 1596 break; 1597 default: 1598 return -ERANGE; 1599 } 1600 1601 switch (conf.rx_filter) { 1602 case HWTSTAMP_FILTER_NONE: 1603 break; 1604 case HWTSTAMP_FILTER_ALL: 1605 case HWTSTAMP_FILTER_SOME: 1606 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1607 case 
HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1608 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1609 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1610 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1611 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1612 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1613 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1614 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1615 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1616 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1617 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1618 case HWTSTAMP_FILTER_NTP_ALL: 1619 conf.rx_filter = HWTSTAMP_FILTER_ALL; 1620 break; 1621 default: 1622 return -ERANGE; 1623 } 1624 1625 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 1626 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1627 1628 else 1629 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1630 1631 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 1632 } 1633 1634 /** 1635 * \brief ioctl handler 1636 * @param netdev network device 1637 * @param ifr interface request 1638 * @param cmd command 1639 */ 1640 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1641 { 1642 switch (cmd) { 1643 case SIOCSHWTSTAMP: 1644 return hwtstamp_ioctl(netdev, ifr); 1645 default: 1646 return -EOPNOTSUPP; 1647 } 1648 } 1649 1650 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 1651 { 1652 struct sk_buff *skb = (struct sk_buff *)buf; 1653 struct octnet_buf_free_info *finfo; 1654 struct oct_timestamp_resp *resp; 1655 struct octeon_soft_command *sc; 1656 struct lio *lio; 1657 1658 finfo = (struct octnet_buf_free_info *)skb->cb; 1659 lio = finfo->lio; 1660 sc = finfo->sc; 1661 oct = lio->oct_dev; 1662 resp = (struct oct_timestamp_resp *)sc->virtrptr; 1663 1664 if (status != OCTEON_REQUEST_DONE) { 1665 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 1666 CVM_CAST64(status)); 1667 resp->timestamp = 0; 1668 } 1669 1670 octeon_swap_8B_data(&resp->timestamp, 1); 1671 1672 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 1673 struct skb_shared_hwtstamps ts; 1674 u64 ns = resp->timestamp; 1675 1676 netif_info(lio, tx_done, lio->netdev, 1677 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 1678 skb, (unsigned long long)ns); 1679 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 1680 skb_tstamp_tx(skb, &ts); 1681 } 1682 1683 octeon_free_soft_command(oct, sc); 1684 tx_buffer_free(skb); 1685 } 1686 1687 /* \brief Send a data packet that will be timestamped 1688 * @param oct octeon device 1689 * @param ndata pointer to network data 1690 * @param finfo pointer to private network data 1691 */ 1692 static int send_nic_timestamp_pkt(struct octeon_device *oct, 1693 struct octnic_data_pkt *ndata, 1694 struct octnet_buf_free_info *finfo) 1695 { 1696 struct octeon_soft_command *sc; 1697 int ring_doorbell; 1698 struct lio *lio; 1699 int retval; 1700 u32 len; 1701 1702 lio = finfo->lio; 1703 1704 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 1705 sizeof(struct oct_timestamp_resp)); 1706 finfo->sc = sc; 1707 1708 if (!sc) { 1709 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 1710 return IQ_SEND_FAILED; 1711 } 1712 1713 if (ndata->reqtype == REQTYPE_NORESP_NET) 1714 ndata->reqtype = REQTYPE_RESP_NET; 1715 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 1716 ndata->reqtype = REQTYPE_RESP_NET_SG; 1717 1718 sc->callback = handle_timestamp; 1719 sc->callback_arg = finfo->skb; 1720 sc->iq_no = ndata->q_no; 1721 1722 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 1723 1724 ring_doorbell = 1; 1725 1726 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 1727 sc, len, ndata->reqtype); 1728 1729 if (retval == IQ_SEND_FAILED) { 1730 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 1731 retval); 1732 octeon_free_soft_command(oct, sc); 1733 } else { 1734 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 1735 } 1736 1737 return retval; 1738 } 1739 1740 /** \brief Transmit networks packets to the Octeon interface 1741 * @param skbuff skbuff struct to be passed to network layer. 1742 * @param netdev pointer to network device 1743 * @returns whether the packet was transmitted to the device okay or not 1744 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 1745 */ 1746 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 1747 { 1748 struct octnet_buf_free_info *finfo; 1749 union octnic_cmd_setup cmdsetup; 1750 struct octnic_data_pkt ndata; 1751 struct octeon_instr_irh *irh; 1752 struct oct_iq_stats *stats; 1753 struct octeon_device *oct; 1754 int q_idx = 0, iq_no = 0; 1755 union tx_info *tx_info; 1756 struct lio *lio; 1757 int status = 0; 1758 u64 dptr = 0; 1759 u32 tag = 0; 1760 int j; 1761 1762 lio = GET_LIO(netdev); 1763 oct = lio->oct_dev; 1764 1765 if (netif_is_multiqueue(netdev)) { 1766 q_idx = skb->queue_mapping; 1767 q_idx = (q_idx % (lio->linfo.num_txpciq)); 1768 tag = q_idx; 1769 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 1770 } else { 1771 iq_no = lio->txq; 1772 } 1773 1774 stats = &oct->instr_queue[iq_no]->stats; 1775 1776 /* Check for all conditions in which the current packet cannot be 1777 * transmitted. 
1778 */ 1779 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 1780 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 1781 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 1782 lio->linfo.link.s.link_up); 1783 goto lio_xmit_failed; 1784 } 1785 1786 /* Use space in skb->cb to store info used to unmap and 1787 * free the buffers. 1788 */ 1789 finfo = (struct octnet_buf_free_info *)skb->cb; 1790 finfo->lio = lio; 1791 finfo->skb = skb; 1792 finfo->sc = NULL; 1793 1794 /* Prepare the attributes for the data to be passed to OSI. */ 1795 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 1796 1797 ndata.buf = finfo; 1798 1799 ndata.q_no = iq_no; 1800 1801 if (netif_is_multiqueue(netdev)) { 1802 if (octnet_iq_is_full(oct, ndata.q_no)) { 1803 /* defer sending if queue is full */ 1804 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1805 ndata.q_no); 1806 stats->tx_iq_busy++; 1807 return NETDEV_TX_BUSY; 1808 } 1809 } else { 1810 if (octnet_iq_is_full(oct, lio->txq)) { 1811 /* defer sending if queue is full */ 1812 stats->tx_iq_busy++; 1813 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1814 ndata.q_no); 1815 return NETDEV_TX_BUSY; 1816 } 1817 } 1818 1819 ndata.datasize = skb->len; 1820 1821 cmdsetup.u64 = 0; 1822 cmdsetup.s.iq_no = iq_no; 1823 1824 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1825 if (skb->encapsulation) { 1826 cmdsetup.s.tnl_csum = 1; 1827 stats->tx_vxlan++; 1828 } else { 1829 cmdsetup.s.transport_csum = 1; 1830 } 1831 } 1832 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1833 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1834 cmdsetup.s.timestamp = 1; 1835 } 1836 1837 if (!skb_shinfo(skb)->nr_frags) { 1838 cmdsetup.s.u.datasize = skb->len; 1839 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1840 /* Offload checksum calculation for TCP/UDP packets */ 1841 dptr = dma_map_single(&oct->pci_dev->dev, 1842 skb->data, 1843 skb->len, 1844 DMA_TO_DEVICE); 1845 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 1846 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 1847 __func__); 1848 return NETDEV_TX_BUSY; 1849 } 1850 1851 ndata.cmd.cmd3.dptr = dptr; 1852 finfo->dptr = dptr; 1853 ndata.reqtype = REQTYPE_NORESP_NET; 1854 1855 } else { 1856 struct skb_frag_struct *frag; 1857 struct octnic_gather *g; 1858 int i, frags; 1859 1860 spin_lock(&lio->glist_lock[q_idx]); 1861 g = (struct octnic_gather *)list_delete_head( 1862 &lio->glist[q_idx]); 1863 spin_unlock(&lio->glist_lock[q_idx]); 1864 1865 if (!g) { 1866 netif_info(lio, tx_err, lio->netdev, 1867 "Transmit scatter gather: glist null!\n"); 1868 goto lio_xmit_failed; 1869 } 1870 1871 cmdsetup.s.gather = 1; 1872 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 1873 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1874 1875 memset(g->sg, 0, g->sg_size); 1876 1877 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 1878 skb->data, 1879 (skb->len - skb->data_len), 1880 DMA_TO_DEVICE); 1881 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 1882 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 1883 __func__); 1884 return NETDEV_TX_BUSY; 1885 } 1886 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 1887 1888 frags = skb_shinfo(skb)->nr_frags; 1889 i = 1; 1890 while (frags--) { 1891 frag = &skb_shinfo(skb)->frags[i - 1]; 1892 1893 g->sg[(i >> 2)].ptr[(i & 3)] = 1894 dma_map_page(&oct->pci_dev->dev, 1895 frag->page.p, 1896 frag->page_offset, 1897 frag->size, 1898 DMA_TO_DEVICE); 1899 if 
(dma_mapping_error(&oct->pci_dev->dev, 1900 g->sg[i >> 2].ptr[i & 3])) { 1901 dma_unmap_single(&oct->pci_dev->dev, 1902 g->sg[0].ptr[0], 1903 skb->len - skb->data_len, 1904 DMA_TO_DEVICE); 1905 for (j = 1; j < i; j++) { 1906 frag = &skb_shinfo(skb)->frags[j - 1]; 1907 dma_unmap_page(&oct->pci_dev->dev, 1908 g->sg[j >> 2].ptr[j & 3], 1909 frag->size, 1910 DMA_TO_DEVICE); 1911 } 1912 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 1913 __func__); 1914 return NETDEV_TX_BUSY; 1915 } 1916 1917 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 1918 i++; 1919 } 1920 1921 dptr = g->sg_dma_ptr; 1922 1923 ndata.cmd.cmd3.dptr = dptr; 1924 finfo->dptr = dptr; 1925 finfo->g = g; 1926 1927 ndata.reqtype = REQTYPE_NORESP_NET_SG; 1928 } 1929 1930 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 1931 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 1932 1933 if (skb_shinfo(skb)->gso_size) { 1934 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 1935 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 1936 } 1937 1938 /* HW insert VLAN tag */ 1939 if (skb_vlan_tag_present(skb)) { 1940 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 1941 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 1942 } 1943 1944 if (unlikely(cmdsetup.s.timestamp)) 1945 status = send_nic_timestamp_pkt(oct, &ndata, finfo); 1946 else 1947 status = octnet_send_nic_data_pkt(oct, &ndata); 1948 if (status == IQ_SEND_FAILED) 1949 goto lio_xmit_failed; 1950 1951 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 1952 1953 if (status == IQ_SEND_STOP) { 1954 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 1955 iq_no); 1956 stop_q(lio->netdev, q_idx); 1957 } 1958 1959 netif_trans_update(netdev); 1960 1961 if (tx_info->s.gso_segs) 1962 stats->tx_done += tx_info->s.gso_segs; 1963 else 1964 stats->tx_done++; 1965 stats->tx_tot_bytes += ndata.datasize; 1966 1967 return NETDEV_TX_OK; 1968 1969 lio_xmit_failed: 1970 stats->tx_dropped++; 1971 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 1972 iq_no, stats->tx_dropped); 1973 if (dptr) 1974 dma_unmap_single(&oct->pci_dev->dev, dptr, 1975 ndata.datasize, DMA_TO_DEVICE); 1976 tx_buffer_free(skb); 1977 return NETDEV_TX_OK; 1978 } 1979 1980 /** \brief Network device Tx timeout 1981 * @param netdev pointer to network device 1982 */ 1983 static void liquidio_tx_timeout(struct net_device *netdev) 1984 { 1985 struct lio *lio; 1986 1987 lio = GET_LIO(netdev); 1988 1989 netif_info(lio, tx_err, lio->netdev, 1990 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 1991 netdev->stats.tx_dropped); 1992 netif_trans_update(netdev); 1993 txqs_wake(netdev); 1994 } 1995 1996 static int 1997 liquidio_vlan_rx_add_vid(struct net_device *netdev, 1998 __be16 proto __attribute__((unused)), u16 vid) 1999 { 2000 struct lio *lio = GET_LIO(netdev); 2001 struct octeon_device *oct = lio->oct_dev; 2002 struct octnic_ctrl_pkt nctrl; 2003 struct completion compl; 2004 u16 response_code; 2005 int ret = 0; 2006 2007 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2008 2009 nctrl.ncmd.u64 = 0; 2010 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2011 nctrl.ncmd.s.param1 = vid; 2012 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2013 nctrl.wait_time = 100; 2014 nctrl.netpndev = (u64)netdev; 2015 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2016 init_completion(&compl); 2017 nctrl.completion = &compl; 2018 nctrl.response_code = &response_code; 2019 2020 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2021 if (ret < 0) { 2022 
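		/* The control packet could not be queued to the firmware. */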
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
		return -EIO;
	}

	if (!wait_for_completion_timeout(&compl,
					 msecs_to_jiffies(nctrl.wait_time)))
		return -EPERM;

	if (READ_ONCE(response_code))
		return -EPERM;

	return 0;
}

static int
liquidio_vlan_rx_kill_vid(struct net_device *netdev,
			  __be16 proto __attribute__((unused)), u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}
	return ret;
}

/** Sending command to enable/disable RX checksum offload
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/
 *                   OCTNET_CMD_RXCSUM_DISABLE
 * @returns SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
			ret);
	}
	return ret;
}

/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
			ret);
	}
	return ret;
}

/** \brief Net device fix features
 * @param netdev pointer to network device
 * @param request
/** Send command to add/delete a VxLAN UDP port to firmware
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
			ret);
	}
	return ret;
}

/** \brief Net device fix features
 * @param netdev pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}

/** \brief Net device set features
 * @param netdev pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}

static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}
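/* Illustrative example (not part of the driver): when a VXLAN device is
 * created on top of this interface, the UDP tunnel core calls
 * liquidio_add_vxlan_port() above, which forwards the tunnel's destination
 * port (the IANA default is 4789) to the firmware:
 *
 *	liquidio_vxlan_port_command(netdev, OCTNET_CMD_VXLAN_PORT_CONFIG,
 *				    htons(ti->port),
 *				    OCTNET_CMD_VXLAN_PORT_ADD);
 *
 * The matching delete path issues the same command with
 * OCTNET_CMD_VXLAN_PORT_DEL once the port is no longer in use.
 */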
static const struct net_device_ops lionetdevops = {
	.ndo_open = liquidio_open,
	.ndo_stop = liquidio_stop,
	.ndo_start_xmit = liquidio_xmit,
	.ndo_get_stats = liquidio_get_stats,
	.ndo_set_mac_address = liquidio_set_mac,
	.ndo_set_rx_mode = liquidio_set_mcast_list,
	.ndo_tx_timeout = liquidio_tx_timeout,
	.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu = liquidio_change_mtu,
	.ndo_do_ioctl = liquidio_ioctl,
	.ndo_fix_features = liquidio_fix_features,
	.ndo_set_features = liquidio_set_features,
	.ndo_udp_tunnel_add = liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del = liquidio_del_vxlan_port,
};

static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	union oct_link_status *ls;
	int gmxport = 0;
	int i;

	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
				       OCT_DROQ_INFO_SIZE);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);

	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}
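/* Illustrative note: lio_nic_info() is not called directly by the driver;
 * it is registered as a dispatch handler for link-status messages pushed by
 * the firmware. setup_nic_devices() below wires it up with:
 *
 *	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
 *				    lio_nic_info, octeon_dev);
 *
 * so every NIC_INFO packet arriving on an output queue lands here and is
 * translated into an update_link_status() call for the matching netdev.
 */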
/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	int retval, num_iqueues, num_oqueues;
	struct liquidio_if_cfg_context *ctx;
	u32 resp_size, ctx_size, data_size;
	struct liquidio_if_cfg_resp *resp;
	struct octeon_soft_command *sc;
	union oct_nic_if_cfg if_cfg;
	struct octdev_props *props;
	struct net_device *netdev;
	struct lio_version *vdata;
	struct lio *lio = NULL;
	u8 mac[ETH_ALEN], i, j;
	u32 ifidx_or_pfnum;

	ifidx_or_pfnum = octeon_dev->pf_num;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Soft command allocation failed\n");
			goto setup_nic_wait_intr;
		}
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;

		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.base_queue = 0;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
					    0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 5000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n", retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
			goto setup_nic_wait_intr;
		}

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);

		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
2405 */ 2406 netdev->netdev_ops = &lionetdevops; 2407 2408 lio = GET_LIO(netdev); 2409 2410 memset(lio, 0, sizeof(struct lio)); 2411 2412 lio->ifidx = ifidx_or_pfnum; 2413 2414 props = &octeon_dev->props[i]; 2415 props->gmxport = resp->cfg_info.linfo.gmxport; 2416 props->netdev = netdev; 2417 2418 lio->linfo.num_rxpciq = num_oqueues; 2419 lio->linfo.num_txpciq = num_iqueues; 2420 2421 for (j = 0; j < num_oqueues; j++) { 2422 lio->linfo.rxpciq[j].u64 = 2423 resp->cfg_info.linfo.rxpciq[j].u64; 2424 } 2425 for (j = 0; j < num_iqueues; j++) { 2426 lio->linfo.txpciq[j].u64 = 2427 resp->cfg_info.linfo.txpciq[j].u64; 2428 } 2429 2430 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 2431 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 2432 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 2433 lio->linfo.macaddr_is_admin_asgnd = 2434 resp->cfg_info.linfo.macaddr_is_admin_asgnd; 2435 2436 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 2437 2438 lio->dev_capability = NETIF_F_HIGHDMA 2439 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2440 | NETIF_F_SG | NETIF_F_RXCSUM 2441 | NETIF_F_TSO | NETIF_F_TSO6 2442 | NETIF_F_GRO 2443 | NETIF_F_LRO; 2444 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 2445 2446 /* Copy of transmit encapsulation capabilities: 2447 * TSO, TSO6, Checksums for this device 2448 */ 2449 lio->enc_dev_capability = NETIF_F_IP_CSUM 2450 | NETIF_F_IPV6_CSUM 2451 | NETIF_F_GSO_UDP_TUNNEL 2452 | NETIF_F_HW_CSUM | NETIF_F_SG 2453 | NETIF_F_RXCSUM 2454 | NETIF_F_TSO | NETIF_F_TSO6 2455 | NETIF_F_LRO; 2456 2457 netdev->hw_enc_features = 2458 (lio->enc_dev_capability & ~NETIF_F_LRO); 2459 netdev->vlan_features = lio->dev_capability; 2460 /* Add any unchangeable hw features */ 2461 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 2462 NETIF_F_HW_VLAN_CTAG_RX | 2463 NETIF_F_HW_VLAN_CTAG_TX; 2464 2465 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 2466 2467 netdev->hw_features = lio->dev_capability; 2468 2469 /* MTU range: 68 - 16000 */ 2470 netdev->min_mtu = LIO_MIN_MTU_SIZE; 2471 netdev->max_mtu = LIO_MAX_MTU_SIZE; 2472 2473 /* Point to the properties for octeon device to which this 2474 * interface belongs. 
2475 */ 2476 lio->oct_dev = octeon_dev; 2477 lio->octprops = props; 2478 lio->netdev = netdev; 2479 2480 dev_dbg(&octeon_dev->pci_dev->dev, 2481 "if%d gmx: %d hw_addr: 0x%llx\n", i, 2482 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 2483 2484 /* 64-bit swap required on LE machines */ 2485 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 2486 for (j = 0; j < ETH_ALEN; j++) 2487 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 2488 2489 /* Copy MAC Address to OS network device structure */ 2490 ether_addr_copy(netdev->dev_addr, mac); 2491 2492 if (liquidio_setup_io_queues(octeon_dev, i, 2493 lio->linfo.num_txpciq, 2494 lio->linfo.num_rxpciq)) { 2495 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 2496 goto setup_nic_dev_fail; 2497 } 2498 2499 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 2500 2501 /* For VFs, enable Octeon device interrupts here, 2502 * as this is contingent upon IO queue setup 2503 */ 2504 octeon_dev->fn_list.enable_interrupt(octeon_dev, 2505 OCTEON_ALL_INTR); 2506 2507 /* By default all interfaces on a single Octeon uses the same 2508 * tx and rx queues 2509 */ 2510 lio->txq = lio->linfo.txpciq[0].s.q_no; 2511 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 2512 2513 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 2514 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 2515 2516 if (setup_glists(lio, num_iqueues)) { 2517 dev_err(&octeon_dev->pci_dev->dev, 2518 "Gather list allocation failed\n"); 2519 goto setup_nic_dev_fail; 2520 } 2521 2522 /* Register ethtool support */ 2523 liquidio_set_ethtool_ops(netdev); 2524 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) 2525 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 2526 else 2527 octeon_dev->priv_flags = 0x0; 2528 2529 if (netdev->features & NETIF_F_LRO) 2530 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2531 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2532 2533 if (setup_link_status_change_wq(netdev)) 2534 goto setup_nic_dev_fail; 2535 2536 if (setup_rx_oom_poll_fn(netdev)) 2537 goto setup_nic_dev_fail; 2538 2539 /* Register the network device with the OS */ 2540 if (register_netdev(netdev)) { 2541 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 2542 goto setup_nic_dev_fail; 2543 } 2544 2545 dev_dbg(&octeon_dev->pci_dev->dev, 2546 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 2547 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 2548 netif_carrier_off(netdev); 2549 lio->link_changes++; 2550 2551 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 2552 2553 /* Sending command to firmware to enable Rx checksum offload 2554 * by default at the time of setup of Liquidio driver for 2555 * this device 2556 */ 2557 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2558 OCTNET_CMD_RXCSUM_ENABLE); 2559 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 2560 OCTNET_CMD_TXCSUM_ENABLE); 2561 2562 dev_dbg(&octeon_dev->pci_dev->dev, 2563 "NIC ifidx:%d Setup successful\n", i); 2564 2565 octeon_free_soft_command(octeon_dev, sc); 2566 } 2567 2568 return 0; 2569 2570 setup_nic_dev_fail: 2571 2572 octeon_free_soft_command(octeon_dev, sc); 2573 2574 setup_nic_wait_intr: 2575 2576 while (i--) { 2577 dev_err(&octeon_dev->pci_dev->dev, 2578 "NIC ifidx:%d Setup failed\n", i); 2579 liquidio_destroy_nic_device(octeon_dev, i); 2580 } 2581 return -ENODEV; 2582 } 2583 2584 /** 2585 * \brief initialize the NIC 2586 * @param oct octeon device 2587 * 2588 * This initialization routine is called once the Octeon device application is 2589 * up and running 2590 */ 2591 static int 
/**
 * \brief Initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 */
static int
liquidio_init_nic_module(struct octeon_device *oct)
{
	int num_nic_ports = 1;
	int i, retval = 0;

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* Only the default iq and oq were initialized; initialize the rest
	 * as well and run the port_config command for each port.
	 */
	oct->ifcount = num_nic_ports;
	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}
2688 */ 2689 if (octeon_setup_response_list(oct)) { 2690 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); 2691 return 1; 2692 } 2693 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); 2694 2695 if (octeon_setup_output_queues(oct)) { 2696 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); 2697 return 1; 2698 } 2699 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); 2700 2701 if (oct->fn_list.setup_mbox(oct)) { 2702 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 2703 return 1; 2704 } 2705 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); 2706 2707 if (octeon_allocate_ioq_vector(oct)) { 2708 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); 2709 return 1; 2710 } 2711 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 2712 2713 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n", 2714 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); 2715 2716 /* Setup the interrupt handler and record the INT SUM register address*/ 2717 if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf)) 2718 return 1; 2719 2720 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); 2721 2722 /* *************************************************************** 2723 * The interrupts need to be enabled for the PF<-->VF handshake. 2724 * They are [re]-enabled after the PF<-->VF handshake so that the 2725 * correct OQ tick value is used (i.e. the value retrieved from 2726 * the PF as part of the handshake). 2727 */ 2728 2729 /* Enable Octeon device interrupts */ 2730 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 2731 2732 if (cn23xx_octeon_pfvf_handshake(oct)) 2733 return 1; 2734 2735 /* Here we [re]-enable the interrupts so that the correct OQ tick value 2736 * is used (i.e. the value that was retrieved during the handshake) 2737 */ 2738 2739 /* Enable Octeon device interrupts */ 2740 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 2741 /* *************************************************************** */ 2742 2743 /* Enable the input and output queues for this Octeon device */ 2744 if (oct->fn_list.enable_io_queues(oct)) { 2745 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); 2746 return 1; 2747 } 2748 2749 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); 2750 2751 atomic_set(&oct->status, OCT_DEV_HOST_OK); 2752 2753 /* Send Credit for Octeon Output queues. Credits are always sent after 2754 * the output queue is enabled. 2755 */ 2756 for (j = 0; j < oct->num_oqs; j++) 2757 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); 2758 2759 /* Packets can start arriving on the output queues from this point. */ 2760 2761 atomic_set(&oct->status, OCT_DEV_CORE_OK); 2762 2763 atomic_set(&oct->status, OCT_DEV_RUNNING); 2764 2765 if (liquidio_init_nic_module(oct)) 2766 return 1; 2767 2768 return 0; 2769 } 2770 2771 static int __init liquidio_vf_init(void) 2772 { 2773 octeon_init_device_list(0); 2774 return pci_register_driver(&liquidio_vf_pci_driver); 2775 } 2776 2777 static void __exit liquidio_vf_exit(void) 2778 { 2779 pci_unregister_driver(&liquidio_vf_pci_driver); 2780 2781 pr_info("LiquidIO_VF network module is now unloaded\n"); 2782 } 2783 2784 module_init(liquidio_vf_init); 2785 module_exit(liquidio_vf_exit); 2786