1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/pci.h> 19 #include <linux/firmware.h> 20 #include <net/vxlan.h> 21 #include <linux/kthread.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn66xx_regs.h" 31 #include "cn66xx_device.h" 32 #include "cn68xx_device.h" 33 #include "cn23xx_pf_device.h" 34 #include "liquidio_image.h" 35 36 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 37 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); 38 MODULE_LICENSE("GPL"); 39 MODULE_VERSION(LIQUIDIO_VERSION); 40 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX); 41 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX); 42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX); 43 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX); 44 45 static int ddr_timeout = 10000; 46 module_param(ddr_timeout, int, 0644); 47 MODULE_PARM_DESC(ddr_timeout, 48 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); 49 50 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 51 52 static int debug = -1; 53 module_param(debug, int, 0644); 54 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 55 56 static char fw_type[LIO_MAX_FW_TYPE_LEN]; 57 module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); 58 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. 
Default \"nic\""); 59 60 static int ptp_enable = 1; 61 62 /* Bit mask values for lio->ifstate */ 63 #define LIO_IFSTATE_DROQ_OPS 0x01 64 #define LIO_IFSTATE_REGISTERED 0x02 65 #define LIO_IFSTATE_RUNNING 0x04 66 #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 67 68 /* Polling interval for determining when NIC application is alive */ 69 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 70 71 /* runtime link query interval */ 72 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 73 74 struct liquidio_if_cfg_context { 75 int octeon_id; 76 77 wait_queue_head_t wc; 78 79 int cond; 80 }; 81 82 struct liquidio_if_cfg_resp { 83 u64 rh; 84 struct liquidio_if_cfg_info cfg_info; 85 u64 status; 86 }; 87 88 struct liquidio_rx_ctl_context { 89 int octeon_id; 90 91 wait_queue_head_t wc; 92 93 int cond; 94 }; 95 96 struct oct_link_status_resp { 97 u64 rh; 98 struct oct_link_info link_info; 99 u64 status; 100 }; 101 102 struct oct_timestamp_resp { 103 u64 rh; 104 u64 timestamp; 105 u64 status; 106 }; 107 108 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) 109 110 union tx_info { 111 u64 u64; 112 struct { 113 #ifdef __BIG_ENDIAN_BITFIELD 114 u16 gso_size; 115 u16 gso_segs; 116 u32 reserved; 117 #else 118 u32 reserved; 119 u16 gso_segs; 120 u16 gso_size; 121 #endif 122 } s; 123 }; 124 125 /** Octeon device properties to be used by the NIC module. 126 * Each octeon device in the system will be represented 127 * by this structure in the NIC module. 128 */ 129 130 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS) 131 132 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 133 #define OCTNIC_GSO_MAX_SIZE \ 134 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 135 136 /** Structure of a node in list of gather components maintained by 137 * NIC driver for each network device. 138 */ 139 struct octnic_gather { 140 /** List manipulation. Next and prev pointers. */ 141 struct list_head list; 142 143 /** Size of the gather component at sg in bytes. */ 144 int sg_size; 145 146 /** Number of bytes that sg was adjusted to make it 8B-aligned. */ 147 int adjust; 148 149 /** Gather component that can accommodate max sized fragment list 150 * received from the IP layer. 151 */ 152 struct octeon_sg_entry *sg; 153 154 u64 sg_dma_ptr; 155 }; 156 157 struct handshake { 158 struct completion init; 159 struct completion started; 160 struct pci_dev *pci_dev; 161 int init_ok; 162 int started_ok; 163 }; 164 165 struct octeon_device_priv { 166 /** Tasklet structures for this device. 
*/ 167 struct tasklet_struct droq_tasklet; 168 unsigned long napi_mask; 169 }; 170 171 #ifdef CONFIG_PCI_IOV 172 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); 173 #endif 174 175 static int octeon_device_init(struct octeon_device *); 176 static int liquidio_stop(struct net_device *netdev); 177 static void liquidio_remove(struct pci_dev *pdev); 178 static int liquidio_probe(struct pci_dev *pdev, 179 const struct pci_device_id *ent); 180 181 static struct handshake handshake[MAX_OCTEON_DEVICES]; 182 static struct completion first_stage; 183 184 static void octeon_droq_bh(unsigned long pdev) 185 { 186 int q_no; 187 int reschedule = 0; 188 struct octeon_device *oct = (struct octeon_device *)pdev; 189 struct octeon_device_priv *oct_priv = 190 (struct octeon_device_priv *)oct->priv; 191 192 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { 193 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) 194 continue; 195 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 196 MAX_PACKET_BUDGET); 197 lio_enable_irq(oct->droq[q_no], NULL); 198 199 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 200 /* set time and cnt interrupt thresholds for this DROQ 201 * for NAPI 202 */ 203 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; 204 205 octeon_write_csr64( 206 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), 207 0x5700000040ULL); 208 octeon_write_csr64( 209 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); 210 } 211 } 212 213 if (reschedule) 214 tasklet_schedule(&oct_priv->droq_tasklet); 215 } 216 217 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 218 { 219 struct octeon_device_priv *oct_priv = 220 (struct octeon_device_priv *)oct->priv; 221 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 222 int i; 223 224 do { 225 pending_pkts = 0; 226 227 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 228 if (!(oct->io_qmask.oq & BIT_ULL(i))) 229 continue; 230 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 231 } 232 if (pkt_cnt > 0) { 233 pending_pkts += pkt_cnt; 234 tasklet_schedule(&oct_priv->droq_tasklet); 235 } 236 pkt_cnt = 0; 237 schedule_timeout_uninterruptible(1); 238 239 } while (retry-- && pending_pkts); 240 241 return pkt_cnt; 242 } 243 244 /** 245 * \brief Forces all IO queues off on a given device 246 * @param oct Pointer to Octeon device 247 */ 248 static void force_io_queues_off(struct octeon_device *oct) 249 { 250 if ((oct->chip_id == OCTEON_CN66XX) || 251 (oct->chip_id == OCTEON_CN68XX)) { 252 /* Reset the Enable bits for Input Queues. */ 253 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 254 255 /* Reset the Enable bits for Output Queues. */ 256 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); 257 } 258 } 259 260 /** 261 * \brief wait for all pending requests to complete 262 * @param oct Pointer to Octeon device 263 * 264 * Called during shutdown sequence 265 */ 266 static int wait_for_pending_requests(struct octeon_device *oct) 267 { 268 int i, pcount = 0; 269 270 for (i = 0; i < 100; i++) { 271 pcount = 272 atomic_read(&oct->response_list 273 [OCTEON_ORDERED_SC_LIST].pending_req_count); 274 if (pcount) 275 schedule_timeout_uninterruptible(HZ / 10); 276 else 277 break; 278 } 279 280 if (pcount) 281 return 1; 282 283 return 0; 284 } 285 286 /** 287 * \brief Cause device to go quiet so it can be safely removed/reset/etc 288 * @param oct Pointer to Octeon device 289 */ 290 static inline void pcierror_quiesce_device(struct octeon_device *oct) 291 { 292 int i; 293 294 /* Disable the input and output queues now. 
No more packets will 295 * arrive from Octeon, but we should wait for all packet processing 296 * to finish. 297 */ 298 force_io_queues_off(oct); 299 300 /* To allow for in-flight requests */ 301 schedule_timeout_uninterruptible(100); 302 303 if (wait_for_pending_requests(oct)) 304 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 305 306 /* Force all requests waiting to be fetched by OCTEON to complete. */ 307 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 308 struct octeon_instr_queue *iq; 309 310 if (!(oct->io_qmask.iq & BIT_ULL(i))) 311 continue; 312 iq = oct->instr_queue[i]; 313 314 if (atomic_read(&iq->instr_pending)) { 315 spin_lock_bh(&iq->lock); 316 iq->fill_cnt = 0; 317 iq->octeon_read_index = iq->host_write_index; 318 iq->stats.instr_processed += 319 atomic_read(&iq->instr_pending); 320 lio_process_iq_request_list(oct, iq, 0); 321 spin_unlock_bh(&iq->lock); 322 } 323 } 324 325 /* Force all pending ordered list requests to time out. */ 326 lio_process_ordered_list(oct, 1); 327 328 /* We do not need to wait for output queue packets to be processed. */ 329 } 330 331 /** 332 * \brief Cleanup PCI AER uncorrectable error status 333 * @param dev Pointer to PCI device 334 */ 335 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 336 { 337 int pos = 0x100; 338 u32 status, mask; 339 340 pr_info("%s :\n", __func__); 341 342 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 343 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 344 if (dev->error_state == pci_channel_io_normal) 345 status &= ~mask; /* Clear corresponding nonfatal bits */ 346 else 347 status &= mask; /* Clear corresponding fatal bits */ 348 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 349 } 350 351 /** 352 * \brief Stop all PCI IO to a given device 353 * @param dev Pointer to Octeon device 354 */ 355 static void stop_pci_io(struct octeon_device *oct) 356 { 357 /* No more instructions will be forwarded. */ 358 atomic_set(&oct->status, OCT_DEV_IN_RESET); 359 360 pci_disable_device(oct->pci_dev); 361 362 /* Disable interrupts */ 363 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 364 365 pcierror_quiesce_device(oct); 366 367 /* Release the interrupt line */ 368 free_irq(oct->pci_dev->irq, oct); 369 370 if (oct->flags & LIO_FLAG_MSI_ENABLED) 371 pci_disable_msi(oct->pci_dev); 372 373 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 374 lio_get_state_string(&oct->status)); 375 376 /* making it a common function for all OCTEON models */ 377 cleanup_aer_uncorrect_error_status(oct->pci_dev); 378 } 379 380 /** 381 * \brief called when PCI error is detected 382 * @param pdev Pointer to PCI device 383 * @param state The current pci connection state 384 * 385 * This function is called after a PCI bus error affecting 386 * this device has been detected. 387 */ 388 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 389 pci_channel_state_t state) 390 { 391 struct octeon_device *oct = pci_get_drvdata(pdev); 392 393 /* Non-correctable Non-fatal errors */ 394 if (state == pci_channel_io_normal) { 395 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 396 cleanup_aer_uncorrect_error_status(oct->pci_dev); 397 return PCI_ERS_RESULT_CAN_RECOVER; 398 } 399 400 /* Non-correctable Fatal errors */ 401 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 402 stop_pci_io(oct); 403 404 /* Always return a DISCONNECT. 
There is no support for recovery but only 405 * for a clean shutdown. 406 */ 407 return PCI_ERS_RESULT_DISCONNECT; 408 } 409 410 /** 411 * \brief mmio handler 412 * @param pdev Pointer to PCI device 413 */ 414 static pci_ers_result_t liquidio_pcie_mmio_enabled( 415 struct pci_dev *pdev __attribute__((unused))) 416 { 417 /* We should never hit this since we never ask for a reset for a Fatal 418 * Error. We always return DISCONNECT in io_error above. 419 * But play safe and return RECOVERED for now. 420 */ 421 return PCI_ERS_RESULT_RECOVERED; 422 } 423 424 /** 425 * \brief called after the pci bus has been reset. 426 * @param pdev Pointer to PCI device 427 * 428 * Restart the card from scratch, as if from a cold-boot. Implementation 429 * resembles the first-half of the octeon_resume routine. 430 */ 431 static pci_ers_result_t liquidio_pcie_slot_reset( 432 struct pci_dev *pdev __attribute__((unused))) 433 { 434 /* We should never hit this since we never ask for a reset for a Fatal 435 * Error. We always return DISCONNECT in io_error above. 436 * But play safe and return RECOVERED for now. 437 */ 438 return PCI_ERS_RESULT_RECOVERED; 439 } 440 441 /** 442 * \brief called when traffic can start flowing again. 443 * @param pdev Pointer to PCI device 444 * 445 * This callback is called when the error recovery driver tells us that 446 * its OK to resume normal operation. Implementation resembles the 447 * second-half of the octeon_resume routine. 448 */ 449 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused))) 450 { 451 /* Nothing to be done here. */ 452 } 453 454 #ifdef CONFIG_PM 455 /** 456 * \brief called when suspending 457 * @param pdev Pointer to PCI device 458 * @param state state to suspend to 459 */ 460 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)), 461 pm_message_t state __attribute__((unused))) 462 { 463 return 0; 464 } 465 466 /** 467 * \brief called when resuming 468 * @param pdev Pointer to PCI device 469 */ 470 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused))) 471 { 472 return 0; 473 } 474 #endif 475 476 /* For PCI-E Advanced Error Recovery (AER) Interface */ 477 static const struct pci_error_handlers liquidio_err_handler = { 478 .error_detected = liquidio_pcie_error_detected, 479 .mmio_enabled = liquidio_pcie_mmio_enabled, 480 .slot_reset = liquidio_pcie_slot_reset, 481 .resume = liquidio_pcie_resume, 482 }; 483 484 static const struct pci_device_id liquidio_pci_tbl[] = { 485 { /* 68xx */ 486 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 487 }, 488 { /* 66xx */ 489 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 490 }, 491 { /* 23xx pf */ 492 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 493 }, 494 { 495 0, 0, 0, 0, 0, 0, 0 496 } 497 }; 498 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); 499 500 static struct pci_driver liquidio_pci_driver = { 501 .name = "LiquidIO", 502 .id_table = liquidio_pci_tbl, 503 .probe = liquidio_probe, 504 .remove = liquidio_remove, 505 .err_handler = &liquidio_err_handler, /* For AER */ 506 507 #ifdef CONFIG_PM 508 .suspend = liquidio_suspend, 509 .resume = liquidio_resume, 510 #endif 511 #ifdef CONFIG_PCI_IOV 512 .sriov_configure = liquidio_enable_sriov, 513 #endif 514 }; 515 516 /** 517 * \brief register PCI driver 518 */ 519 static int liquidio_init_pci(void) 520 { 521 return pci_register_driver(&liquidio_pci_driver); 522 } 523 524 /** 525 * \brief unregister PCI driver 526 */ 527 static void liquidio_deinit_pci(void) 528 { 529 
pci_unregister_driver(&liquidio_pci_driver); 530 } 531 532 /** 533 * \brief check interface state 534 * @param lio per-network private data 535 * @param state_flag flag state to check 536 */ 537 static inline int ifstate_check(struct lio *lio, int state_flag) 538 { 539 return atomic_read(&lio->ifstate) & state_flag; 540 } 541 542 /** 543 * \brief set interface state 544 * @param lio per-network private data 545 * @param state_flag flag state to set 546 */ 547 static inline void ifstate_set(struct lio *lio, int state_flag) 548 { 549 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); 550 } 551 552 /** 553 * \brief clear interface state 554 * @param lio per-network private data 555 * @param state_flag flag state to clear 556 */ 557 static inline void ifstate_reset(struct lio *lio, int state_flag) 558 { 559 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); 560 } 561 562 /** 563 * \brief Stop Tx queues 564 * @param netdev network device 565 */ 566 static inline void txqs_stop(struct net_device *netdev) 567 { 568 if (netif_is_multiqueue(netdev)) { 569 int i; 570 571 for (i = 0; i < netdev->num_tx_queues; i++) 572 netif_stop_subqueue(netdev, i); 573 } else { 574 netif_stop_queue(netdev); 575 } 576 } 577 578 /** 579 * \brief Start Tx queues 580 * @param netdev network device 581 */ 582 static inline void txqs_start(struct net_device *netdev) 583 { 584 if (netif_is_multiqueue(netdev)) { 585 int i; 586 587 for (i = 0; i < netdev->num_tx_queues; i++) 588 netif_start_subqueue(netdev, i); 589 } else { 590 netif_start_queue(netdev); 591 } 592 } 593 594 /** 595 * \brief Wake Tx queues 596 * @param netdev network device 597 */ 598 static inline void txqs_wake(struct net_device *netdev) 599 { 600 struct lio *lio = GET_LIO(netdev); 601 602 if (netif_is_multiqueue(netdev)) { 603 int i; 604 605 for (i = 0; i < netdev->num_tx_queues; i++) { 606 int qno = lio->linfo.txpciq[i % 607 (lio->linfo.num_txpciq)].s.q_no; 608 609 if (__netif_subqueue_stopped(netdev, i)) { 610 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, 611 tx_restart, 1); 612 netif_wake_subqueue(netdev, i); 613 } 614 } 615 } else { 616 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 617 tx_restart, 1); 618 netif_wake_queue(netdev); 619 } 620 } 621 622 /** 623 * \brief Stop Tx queue 624 * @param netdev network device 625 */ 626 static void stop_txq(struct net_device *netdev) 627 { 628 txqs_stop(netdev); 629 } 630 631 /** 632 * \brief Start Tx queue 633 * @param netdev network device 634 */ 635 static void start_txq(struct net_device *netdev) 636 { 637 struct lio *lio = GET_LIO(netdev); 638 639 if (lio->linfo.link.s.link_up) { 640 txqs_start(netdev); 641 return; 642 } 643 } 644 645 /** 646 * \brief Wake a queue 647 * @param netdev network device 648 * @param q which queue to wake 649 */ 650 static inline void wake_q(struct net_device *netdev, int q) 651 { 652 if (netif_is_multiqueue(netdev)) 653 netif_wake_subqueue(netdev, q); 654 else 655 netif_wake_queue(netdev); 656 } 657 658 /** 659 * \brief Stop a queue 660 * @param netdev network device 661 * @param q which queue to stop 662 */ 663 static inline void stop_q(struct net_device *netdev, int q) 664 { 665 if (netif_is_multiqueue(netdev)) 666 netif_stop_subqueue(netdev, q); 667 else 668 netif_stop_queue(netdev); 669 } 670 671 /** 672 * \brief Check Tx queue status, and take appropriate action 673 * @param lio per-network private data 674 * @returns 0 if full, number of queues woken up otherwise 675 */ 676 static inline int check_txq_status(struct lio *lio) 677 
{ 678 int ret_val = 0; 679 680 if (netif_is_multiqueue(lio->netdev)) { 681 int numqs = lio->netdev->num_tx_queues; 682 int q, iq = 0; 683 684 /* check each sub-queue state */ 685 for (q = 0; q < numqs; q++) { 686 iq = lio->linfo.txpciq[q % 687 (lio->linfo.num_txpciq)].s.q_no; 688 if (octnet_iq_is_full(lio->oct_dev, iq)) 689 continue; 690 if (__netif_subqueue_stopped(lio->netdev, q)) { 691 wake_q(lio->netdev, q); 692 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, 693 tx_restart, 1); 694 ret_val++; 695 } 696 } 697 } else { 698 if (octnet_iq_is_full(lio->oct_dev, lio->txq)) 699 return 0; 700 wake_q(lio->netdev, lio->txq); 701 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 702 tx_restart, 1); 703 ret_val = 1; 704 } 705 return ret_val; 706 } 707 708 /** 709 * Remove the node at the head of the list. The list would be empty at 710 * the end of this call if there are no more nodes in the list. 711 */ 712 static inline struct list_head *list_delete_head(struct list_head *root) 713 { 714 struct list_head *node; 715 716 if ((root->prev == root) && (root->next == root)) 717 node = NULL; 718 else 719 node = root->next; 720 721 if (node) 722 list_del(node); 723 724 return node; 725 } 726 727 /** 728 * \brief Delete gather lists 729 * @param lio per-network private data 730 */ 731 static void delete_glists(struct lio *lio) 732 { 733 struct octnic_gather *g; 734 int i; 735 736 if (!lio->glist) 737 return; 738 739 for (i = 0; i < lio->linfo.num_txpciq; i++) { 740 do { 741 g = (struct octnic_gather *) 742 list_delete_head(&lio->glist[i]); 743 if (g) { 744 if (g->sg) { 745 dma_unmap_single(&lio->oct_dev-> 746 pci_dev->dev, 747 g->sg_dma_ptr, 748 g->sg_size, 749 DMA_TO_DEVICE); 750 kfree((void *)((unsigned long)g->sg - 751 g->adjust)); 752 } 753 kfree(g); 754 } 755 } while (g); 756 } 757 758 kfree((void *)lio->glist); 759 kfree((void *)lio->glist_lock); 760 } 761 762 /** 763 * \brief Setup gather lists 764 * @param lio per-network private data 765 */ 766 static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) 767 { 768 int i, j; 769 struct octnic_gather *g; 770 771 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), 772 GFP_KERNEL); 773 if (!lio->glist_lock) 774 return 1; 775 776 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), 777 GFP_KERNEL); 778 if (!lio->glist) { 779 kfree((void *)lio->glist_lock); 780 return 1; 781 } 782 783 for (i = 0; i < num_iqs; i++) { 784 int numa_node = cpu_to_node(i % num_online_cpus()); 785 786 spin_lock_init(&lio->glist_lock[i]); 787 788 INIT_LIST_HEAD(&lio->glist[i]); 789 790 for (j = 0; j < lio->tx_qsize; j++) { 791 g = kzalloc_node(sizeof(*g), GFP_KERNEL, 792 numa_node); 793 if (!g) 794 g = kzalloc(sizeof(*g), GFP_KERNEL); 795 if (!g) 796 break; 797 798 g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * 799 OCT_SG_ENTRY_SIZE); 800 801 g->sg = kmalloc_node(g->sg_size + 8, 802 GFP_KERNEL, numa_node); 803 if (!g->sg) 804 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); 805 if (!g->sg) { 806 kfree(g); 807 break; 808 } 809 810 /* The gather component should be aligned on 64-bit 811 * boundary 812 */ 813 if (((unsigned long)g->sg) & 7) { 814 g->adjust = 8 - (((unsigned long)g->sg) & 7); 815 g->sg = (struct octeon_sg_entry *) 816 ((unsigned long)g->sg + g->adjust); 817 } 818 g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev, 819 g->sg, g->sg_size, 820 DMA_TO_DEVICE); 821 if (dma_mapping_error(&oct->pci_dev->dev, 822 g->sg_dma_ptr)) { 823 kfree((void *)((unsigned long)g->sg - 824 g->adjust)); 825 kfree(g); 826 break; 827 } 828 829 list_add_tail(&g->list, 
&lio->glist[i]); 830 } 831 832 if (j != lio->tx_qsize) { 833 delete_glists(lio); 834 return 1; 835 } 836 } 837 838 return 0; 839 } 840 841 /** 842 * \brief Print link information 843 * @param netdev network device 844 */ 845 static void print_link_info(struct net_device *netdev) 846 { 847 struct lio *lio = GET_LIO(netdev); 848 849 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { 850 struct oct_link_info *linfo = &lio->linfo; 851 852 if (linfo->link.s.link_up) { 853 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 854 linfo->link.s.speed, 855 (linfo->link.s.duplex) ? "Full" : "Half"); 856 } else { 857 netif_info(lio, link, lio->netdev, "Link Down\n"); 858 } 859 } 860 } 861 862 /** 863 * \brief Routine to notify MTU change 864 * @param work work_struct data structure 865 */ 866 static void octnet_link_status_change(struct work_struct *work) 867 { 868 struct cavium_wk *wk = (struct cavium_wk *)work; 869 struct lio *lio = (struct lio *)wk->ctxptr; 870 871 rtnl_lock(); 872 call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); 873 rtnl_unlock(); 874 } 875 876 /** 877 * \brief Sets up the mtu status change work 878 * @param netdev network device 879 */ 880 static inline int setup_link_status_change_wq(struct net_device *netdev) 881 { 882 struct lio *lio = GET_LIO(netdev); 883 struct octeon_device *oct = lio->oct_dev; 884 885 lio->link_status_wq.wq = alloc_workqueue("link-status", 886 WQ_MEM_RECLAIM, 0); 887 if (!lio->link_status_wq.wq) { 888 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 889 return -1; 890 } 891 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 892 octnet_link_status_change); 893 lio->link_status_wq.wk.ctxptr = lio; 894 895 return 0; 896 } 897 898 static inline void cleanup_link_status_change_wq(struct net_device *netdev) 899 { 900 struct lio *lio = GET_LIO(netdev); 901 902 if (lio->link_status_wq.wq) { 903 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 904 destroy_workqueue(lio->link_status_wq.wq); 905 } 906 } 907 908 /** 909 * \brief Update link status 910 * @param netdev network device 911 * @param ls link status structure 912 * 913 * Called on receipt of a link status response from the core application to 914 * update each interface's link status. 915 */ 916 static inline void update_link_status(struct net_device *netdev, 917 union oct_link_status *ls) 918 { 919 struct lio *lio = GET_LIO(netdev); 920 int changed = (lio->linfo.link.u64 != ls->u64); 921 922 lio->linfo.link.u64 = ls->u64; 923 924 if ((lio->intf_open) && (changed)) { 925 print_link_info(netdev); 926 lio->link_changes++; 927 928 if (lio->linfo.link.s.link_up) { 929 netif_carrier_on(netdev); 930 txqs_wake(netdev); 931 } else { 932 netif_carrier_off(netdev); 933 stop_txq(netdev); 934 } 935 } 936 } 937 938 /* Runs in interrupt context. */ 939 static void update_txq_status(struct octeon_device *oct, int iq_num) 940 { 941 struct net_device *netdev; 942 struct lio *lio; 943 struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; 944 945 netdev = oct->props[iq->ifidx].netdev; 946 947 /* This is needed because the first IQ does not have 948 * a netdev associated with it. 
949 */ 950 if (!netdev) 951 return; 952 953 lio = GET_LIO(netdev); 954 if (netif_is_multiqueue(netdev)) { 955 if (__netif_subqueue_stopped(netdev, iq->q_index) && 956 lio->linfo.link.s.link_up && 957 (!octnet_iq_is_full(oct, iq_num))) { 958 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, 959 tx_restart, 1); 960 netif_wake_subqueue(netdev, iq->q_index); 961 } else { 962 if (!octnet_iq_is_full(oct, lio->txq)) { 963 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, 964 lio->txq, 965 tx_restart, 1); 966 wake_q(netdev, lio->txq); 967 } 968 } 969 } 970 } 971 972 static 973 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) 974 { 975 struct octeon_device *oct = droq->oct_dev; 976 struct octeon_device_priv *oct_priv = 977 (struct octeon_device_priv *)oct->priv; 978 979 if (droq->ops.poll_mode) { 980 droq->ops.napi_fn(droq); 981 } else { 982 if (ret & MSIX_PO_INT) { 983 tasklet_schedule(&oct_priv->droq_tasklet); 984 return 1; 985 } 986 /* this will be flushed periodically by check iq db */ 987 if (ret & MSIX_PI_INT) 988 return 0; 989 } 990 return 0; 991 } 992 993 /** 994 * \brief Droq packet processor sceduler 995 * @param oct octeon device 996 */ 997 static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) 998 { 999 struct octeon_device_priv *oct_priv = 1000 (struct octeon_device_priv *)oct->priv; 1001 u64 oq_no; 1002 struct octeon_droq *droq; 1003 1004 if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { 1005 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); 1006 oq_no++) { 1007 if (!(oct->droq_intr & BIT_ULL(oq_no))) 1008 continue; 1009 1010 droq = oct->droq[oq_no]; 1011 1012 if (droq->ops.poll_mode) { 1013 droq->ops.napi_fn(droq); 1014 oct_priv->napi_mask |= (1 << oq_no); 1015 } else { 1016 tasklet_schedule(&oct_priv->droq_tasklet); 1017 } 1018 } 1019 } 1020 } 1021 1022 static irqreturn_t 1023 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) 1024 { 1025 u64 ret; 1026 struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; 1027 struct octeon_device *oct = ioq_vector->oct_dev; 1028 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; 1029 1030 ret = oct->fn_list.msix_interrupt_handler(ioq_vector); 1031 1032 if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) 1033 liquidio_schedule_msix_droq_pkt_handler(droq, ret); 1034 1035 return IRQ_HANDLED; 1036 } 1037 1038 /** 1039 * \brief Interrupt handler for octeon 1040 * @param irq unused 1041 * @param dev octeon device 1042 */ 1043 static 1044 irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), 1045 void *dev) 1046 { 1047 struct octeon_device *oct = (struct octeon_device *)dev; 1048 irqreturn_t ret; 1049 1050 /* Disable our interrupts for the duration of ISR */ 1051 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 1052 1053 ret = oct->fn_list.process_interrupt_regs(oct); 1054 1055 if (ret == IRQ_HANDLED) 1056 liquidio_schedule_droq_pkt_handlers(oct); 1057 1058 /* Re-enable our interrupts */ 1059 if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) 1060 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 1061 1062 return ret; 1063 } 1064 1065 /** 1066 * \brief Setup interrupt for octeon device 1067 * @param oct octeon device 1068 * 1069 * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
1070 */ 1071 static int octeon_setup_interrupt(struct octeon_device *oct) 1072 { 1073 int irqret, err; 1074 struct msix_entry *msix_entries; 1075 int i; 1076 int num_ioq_vectors; 1077 int num_alloc_ioq_vectors; 1078 1079 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 1080 oct->num_msix_irqs = oct->sriov_info.num_pf_rings; 1081 /* one non ioq interrupt for handling sli_mac_pf_int_sum */ 1082 oct->num_msix_irqs += 1; 1083 1084 oct->msix_entries = kcalloc( 1085 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); 1086 if (!oct->msix_entries) 1087 return 1; 1088 1089 msix_entries = (struct msix_entry *)oct->msix_entries; 1090 /*Assumption is that pf msix vectors start from pf srn to pf to 1091 * trs and not from 0. if not change this code 1092 */ 1093 for (i = 0; i < oct->num_msix_irqs - 1; i++) 1094 msix_entries[i].entry = oct->sriov_info.pf_srn + i; 1095 msix_entries[oct->num_msix_irqs - 1].entry = 1096 oct->sriov_info.trs; 1097 num_alloc_ioq_vectors = pci_enable_msix_range( 1098 oct->pci_dev, msix_entries, 1099 oct->num_msix_irqs, 1100 oct->num_msix_irqs); 1101 if (num_alloc_ioq_vectors < 0) { 1102 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); 1103 kfree(oct->msix_entries); 1104 oct->msix_entries = NULL; 1105 return 1; 1106 } 1107 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); 1108 1109 num_ioq_vectors = oct->num_msix_irqs; 1110 1111 /** For PF, there is one non-ioq interrupt handler */ 1112 num_ioq_vectors -= 1; 1113 irqret = request_irq(msix_entries[num_ioq_vectors].vector, 1114 liquidio_legacy_intr_handler, 0, "octeon", 1115 oct); 1116 if (irqret) { 1117 dev_err(&oct->pci_dev->dev, 1118 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 1119 irqret); 1120 pci_disable_msix(oct->pci_dev); 1121 kfree(oct->msix_entries); 1122 oct->msix_entries = NULL; 1123 return 1; 1124 } 1125 1126 for (i = 0; i < num_ioq_vectors; i++) { 1127 irqret = request_irq(msix_entries[i].vector, 1128 liquidio_msix_intr_handler, 0, 1129 "octeon", &oct->ioq_vector[i]); 1130 if (irqret) { 1131 dev_err(&oct->pci_dev->dev, 1132 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 1133 irqret); 1134 /** Freeing the non-ioq irq vector here . */ 1135 free_irq(msix_entries[num_ioq_vectors].vector, 1136 oct); 1137 1138 while (i) { 1139 i--; 1140 /** clearing affinity mask. */ 1141 irq_set_affinity_hint( 1142 msix_entries[i].vector, NULL); 1143 free_irq(msix_entries[i].vector, 1144 &oct->ioq_vector[i]); 1145 } 1146 pci_disable_msix(oct->pci_dev); 1147 kfree(oct->msix_entries); 1148 oct->msix_entries = NULL; 1149 return 1; 1150 } 1151 oct->ioq_vector[i].vector = msix_entries[i].vector; 1152 /* assign the cpu mask for this msix interrupt vector */ 1153 irq_set_affinity_hint( 1154 msix_entries[i].vector, 1155 (&oct->ioq_vector[i].affinity_mask)); 1156 } 1157 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n", 1158 oct->octeon_id); 1159 } else { 1160 err = pci_enable_msi(oct->pci_dev); 1161 if (err) 1162 dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. 
Error: %d\n", 1163 err); 1164 else 1165 oct->flags |= LIO_FLAG_MSI_ENABLED; 1166 1167 irqret = request_irq(oct->pci_dev->irq, 1168 liquidio_legacy_intr_handler, IRQF_SHARED, 1169 "octeon", oct); 1170 if (irqret) { 1171 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1172 pci_disable_msi(oct->pci_dev); 1173 dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", 1174 irqret); 1175 return 1; 1176 } 1177 } 1178 return 0; 1179 } 1180 1181 static int liquidio_watchdog(void *param) 1182 { 1183 u64 wdog; 1184 u16 mask_of_stuck_cores = 0; 1185 u16 mask_of_crashed_cores = 0; 1186 int core_num; 1187 u8 core_is_stuck[LIO_MAX_CORES]; 1188 u8 core_crashed[LIO_MAX_CORES]; 1189 struct octeon_device *oct = param; 1190 1191 memset(core_is_stuck, 0, sizeof(core_is_stuck)); 1192 memset(core_crashed, 0, sizeof(core_crashed)); 1193 1194 while (!kthread_should_stop()) { 1195 mask_of_crashed_cores = 1196 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); 1197 1198 for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) { 1199 if (!core_is_stuck[core_num]) { 1200 wdog = lio_pci_readq(oct, CIU3_WDOG(core_num)); 1201 1202 /* look at watchdog state field */ 1203 wdog &= CIU3_WDOG_MASK; 1204 if (wdog) { 1205 /* this watchdog timer has expired */ 1206 core_is_stuck[core_num] = 1207 LIO_MONITOR_WDOG_EXPIRE; 1208 mask_of_stuck_cores |= (1 << core_num); 1209 } 1210 } 1211 1212 if (!core_crashed[core_num]) 1213 core_crashed[core_num] = 1214 (mask_of_crashed_cores >> core_num) & 1; 1215 } 1216 1217 if (mask_of_stuck_cores) { 1218 for (core_num = 0; core_num < LIO_MAX_CORES; 1219 core_num++) { 1220 if (core_is_stuck[core_num] == 1) { 1221 dev_err(&oct->pci_dev->dev, 1222 "ERROR: Octeon core %d is stuck!\n", 1223 core_num); 1224 /* 2 means we have printk'd an error 1225 * so no need to repeat the same printk 1226 */ 1227 core_is_stuck[core_num] = 1228 LIO_MONITOR_CORE_STUCK_MSGD; 1229 } 1230 } 1231 } 1232 1233 if (mask_of_crashed_cores) { 1234 for (core_num = 0; core_num < LIO_MAX_CORES; 1235 core_num++) { 1236 if (core_crashed[core_num] == 1) { 1237 dev_err(&oct->pci_dev->dev, 1238 "ERROR: Octeon core %d crashed! 
See oct-fwdump for details.\n", 1239 core_num); 1240 /* 2 means we have printk'd an error 1241 * so no need to repeat the same printk 1242 */ 1243 core_crashed[core_num] = 1244 LIO_MONITOR_CORE_STUCK_MSGD; 1245 } 1246 } 1247 } 1248 #ifdef CONFIG_MODULE_UNLOAD 1249 if (mask_of_stuck_cores || mask_of_crashed_cores) { 1250 /* make module refcount=0 so that rmmod will work */ 1251 long refcount; 1252 1253 refcount = module_refcount(THIS_MODULE); 1254 1255 while (refcount > 0) { 1256 module_put(THIS_MODULE); 1257 refcount = module_refcount(THIS_MODULE); 1258 } 1259 1260 /* compensate for and withstand an unlikely (but still 1261 * possible) race condition 1262 */ 1263 while (refcount < 0) { 1264 try_module_get(THIS_MODULE); 1265 refcount = module_refcount(THIS_MODULE); 1266 } 1267 } 1268 #endif 1269 /* sleep for two seconds */ 1270 set_current_state(TASK_INTERRUPTIBLE); 1271 schedule_timeout(2 * HZ); 1272 } 1273 1274 return 0; 1275 } 1276 1277 /** 1278 * \brief PCI probe handler 1279 * @param pdev PCI device structure 1280 * @param ent unused 1281 */ 1282 static int 1283 liquidio_probe(struct pci_dev *pdev, 1284 const struct pci_device_id *ent __attribute__((unused))) 1285 { 1286 struct octeon_device *oct_dev = NULL; 1287 struct handshake *hs; 1288 1289 oct_dev = octeon_allocate_device(pdev->device, 1290 sizeof(struct octeon_device_priv)); 1291 if (!oct_dev) { 1292 dev_err(&pdev->dev, "Unable to allocate device\n"); 1293 return -ENOMEM; 1294 } 1295 1296 if (pdev->device == OCTEON_CN23XX_PF_VID) 1297 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 1298 1299 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 1300 (u32)pdev->vendor, (u32)pdev->device); 1301 1302 /* Assign octeon_device for this device to the private data area. */ 1303 pci_set_drvdata(pdev, oct_dev); 1304 1305 /* set linux specific device pointer */ 1306 oct_dev->pci_dev = (void *)pdev; 1307 1308 hs = &handshake[oct_dev->octeon_id]; 1309 init_completion(&hs->init); 1310 init_completion(&hs->started); 1311 hs->pci_dev = pdev; 1312 1313 if (oct_dev->octeon_id == 0) 1314 /* first LiquidIO NIC is detected */ 1315 complete(&first_stage); 1316 1317 if (octeon_device_init(oct_dev)) { 1318 complete(&hs->init); 1319 liquidio_remove(pdev); 1320 return -ENOMEM; 1321 } 1322 1323 if (OCTEON_CN23XX_PF(oct_dev)) { 1324 u64 scratch1; 1325 u8 bus, device, function; 1326 1327 scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1); 1328 if (!(scratch1 & 4ULL)) { 1329 /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that 1330 * the lio watchdog kernel thread is running for this 1331 * NIC. Each NIC gets one watchdog kernel thread. 
1332 */ 1333 scratch1 |= 4ULL; 1334 octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1, 1335 scratch1); 1336 1337 bus = pdev->bus->number; 1338 device = PCI_SLOT(pdev->devfn); 1339 function = PCI_FUNC(pdev->devfn); 1340 oct_dev->watchdog_task = kthread_create( 1341 liquidio_watchdog, oct_dev, 1342 "liowd/%02hhx:%02hhx.%hhx", bus, device, function); 1343 if (!IS_ERR(oct_dev->watchdog_task)) { 1344 wake_up_process(oct_dev->watchdog_task); 1345 } else { 1346 oct_dev->watchdog_task = NULL; 1347 dev_err(&oct_dev->pci_dev->dev, 1348 "failed to create kernel_thread\n"); 1349 liquidio_remove(pdev); 1350 return -1; 1351 } 1352 } 1353 } 1354 1355 oct_dev->rx_pause = 1; 1356 oct_dev->tx_pause = 1; 1357 1358 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 1359 1360 return 0; 1361 } 1362 1363 /** 1364 *\brief Destroy resources associated with octeon device 1365 * @param pdev PCI device structure 1366 * @param ent unused 1367 */ 1368 static void octeon_destroy_resources(struct octeon_device *oct) 1369 { 1370 int i; 1371 struct msix_entry *msix_entries; 1372 struct octeon_device_priv *oct_priv = 1373 (struct octeon_device_priv *)oct->priv; 1374 1375 struct handshake *hs; 1376 1377 switch (atomic_read(&oct->status)) { 1378 case OCT_DEV_RUNNING: 1379 case OCT_DEV_CORE_OK: 1380 1381 /* No more instructions will be forwarded. */ 1382 atomic_set(&oct->status, OCT_DEV_IN_RESET); 1383 1384 oct->app_mode = CVM_DRV_INVALID_APP; 1385 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 1386 lio_get_state_string(&oct->status)); 1387 1388 schedule_timeout_uninterruptible(HZ / 10); 1389 1390 /* fallthrough */ 1391 case OCT_DEV_HOST_OK: 1392 1393 /* fallthrough */ 1394 case OCT_DEV_CONSOLE_INIT_DONE: 1395 /* Remove any consoles */ 1396 octeon_remove_consoles(oct); 1397 1398 /* fallthrough */ 1399 case OCT_DEV_IO_QUEUES_DONE: 1400 if (wait_for_pending_requests(oct)) 1401 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1402 1403 if (lio_wait_for_instr_fetch(oct)) 1404 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1405 1406 /* Disable the input and output queues now. No more packets will 1407 * arrive from Octeon, but we should wait for all packet 1408 * processing to finish. 
1409 */ 1410 oct->fn_list.disable_io_queues(oct); 1411 1412 if (lio_wait_for_oq_pkts(oct)) 1413 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 1414 1415 /* fallthrough */ 1416 case OCT_DEV_INTR_SET_DONE: 1417 /* Disable interrupts */ 1418 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 1419 1420 if (oct->msix_on) { 1421 msix_entries = (struct msix_entry *)oct->msix_entries; 1422 for (i = 0; i < oct->num_msix_irqs - 1; i++) { 1423 /* clear the affinity_cpumask */ 1424 irq_set_affinity_hint(msix_entries[i].vector, 1425 NULL); 1426 free_irq(msix_entries[i].vector, 1427 &oct->ioq_vector[i]); 1428 } 1429 /* non-iov vector's argument is oct struct */ 1430 free_irq(msix_entries[i].vector, oct); 1431 1432 pci_disable_msix(oct->pci_dev); 1433 kfree(oct->msix_entries); 1434 oct->msix_entries = NULL; 1435 } else { 1436 /* Release the interrupt line */ 1437 free_irq(oct->pci_dev->irq, oct); 1438 1439 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1440 pci_disable_msi(oct->pci_dev); 1441 } 1442 1443 /* fallthrough */ 1444 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 1445 if (OCTEON_CN23XX_PF(oct)) 1446 octeon_free_ioq_vector(oct); 1447 1448 /* fallthrough */ 1449 case OCT_DEV_MBOX_SETUP_DONE: 1450 if (OCTEON_CN23XX_PF(oct)) 1451 oct->fn_list.free_mbox(oct); 1452 1453 /* fallthrough */ 1454 case OCT_DEV_IN_RESET: 1455 case OCT_DEV_DROQ_INIT_DONE: 1456 /* Wait for any pending operations */ 1457 mdelay(100); 1458 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1459 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1460 continue; 1461 octeon_delete_droq(oct, i); 1462 } 1463 1464 /* Force any pending handshakes to complete */ 1465 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 1466 hs = &handshake[i]; 1467 1468 if (hs->pci_dev) { 1469 handshake[oct->octeon_id].init_ok = 0; 1470 complete(&handshake[oct->octeon_id].init); 1471 handshake[oct->octeon_id].started_ok = 0; 1472 complete(&handshake[oct->octeon_id].started); 1473 } 1474 } 1475 1476 /* fallthrough */ 1477 case OCT_DEV_RESP_LIST_INIT_DONE: 1478 octeon_delete_response_list(oct); 1479 1480 /* fallthrough */ 1481 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1482 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1483 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1484 continue; 1485 octeon_delete_instr_queue(oct, i); 1486 } 1487 #ifdef CONFIG_PCI_IOV 1488 if (oct->sriov_info.sriov_enabled) 1489 pci_disable_sriov(oct->pci_dev); 1490 #endif 1491 /* fallthrough */ 1492 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 1493 octeon_free_sc_buffer_pool(oct); 1494 1495 /* fallthrough */ 1496 case OCT_DEV_DISPATCH_INIT_DONE: 1497 octeon_delete_dispatch_list(oct); 1498 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1499 1500 /* fallthrough */ 1501 case OCT_DEV_PCI_MAP_DONE: 1502 /* Soft reset the octeon device before exiting */ 1503 if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id) 1504 oct->fn_list.soft_reset(oct); 1505 1506 octeon_unmap_pci_barx(oct, 0); 1507 octeon_unmap_pci_barx(oct, 1); 1508 1509 /* fallthrough */ 1510 case OCT_DEV_PCI_ENABLE_DONE: 1511 pci_clear_master(oct->pci_dev); 1512 /* Disable the device, releasing the PCI INT */ 1513 pci_disable_device(oct->pci_dev); 1514 1515 /* fallthrough */ 1516 case OCT_DEV_BEGIN_STATE: 1517 /* Nothing to be done here either */ 1518 break; 1519 } /* end switch (oct->status) */ 1520 1521 tasklet_kill(&oct_priv->droq_tasklet); 1522 } 1523 1524 /** 1525 * \brief Callback for rx ctrl 1526 * @param status status of request 1527 * @param buf pointer to resp structure 1528 */ 1529 static void rx_ctl_callback(struct octeon_device *oct, 1530 u32 status, 
1531 void *buf) 1532 { 1533 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1534 struct liquidio_rx_ctl_context *ctx; 1535 1536 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1537 1538 oct = lio_get_device(ctx->octeon_id); 1539 if (status) 1540 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", 1541 CVM_CAST64(status)); 1542 WRITE_ONCE(ctx->cond, 1); 1543 1544 /* This barrier is required to be sure that the response has been 1545 * written fully before waking up the handler 1546 */ 1547 wmb(); 1548 1549 wake_up_interruptible(&ctx->wc); 1550 } 1551 1552 /** 1553 * \brief Send Rx control command 1554 * @param lio per-network private data 1555 * @param start_stop whether to start or stop 1556 */ 1557 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1558 { 1559 struct octeon_soft_command *sc; 1560 struct liquidio_rx_ctl_context *ctx; 1561 union octnet_cmd *ncmd; 1562 int ctx_size = sizeof(struct liquidio_rx_ctl_context); 1563 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1564 int retval; 1565 1566 if (oct->props[lio->ifidx].rx_on == start_stop) 1567 return; 1568 1569 sc = (struct octeon_soft_command *) 1570 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1571 16, ctx_size); 1572 1573 ncmd = (union octnet_cmd *)sc->virtdptr; 1574 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1575 1576 WRITE_ONCE(ctx->cond, 0); 1577 ctx->octeon_id = lio_get_device_id(oct); 1578 init_waitqueue_head(&ctx->wc); 1579 1580 ncmd->u64 = 0; 1581 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1582 ncmd->s.param1 = start_stop; 1583 1584 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1585 1586 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1587 1588 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1589 OPCODE_NIC_CMD, 0, 0, 0); 1590 1591 sc->callback = rx_ctl_callback; 1592 sc->callback_arg = sc; 1593 sc->wait_time = 5000; 1594 1595 retval = octeon_send_soft_command(oct, sc); 1596 if (retval == IQ_SEND_FAILED) { 1597 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1598 } else { 1599 /* Sleep on a wait queue till the cond flag indicates that the 1600 * response arrived or timed-out. 1601 */ 1602 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) 1603 return; 1604 oct->props[lio->ifidx].rx_on = start_stop; 1605 } 1606 1607 octeon_free_soft_command(oct, sc); 1608 } 1609 1610 /** 1611 * \brief Destroy NIC device interface 1612 * @param oct octeon device 1613 * @param ifidx which interface to destroy 1614 * 1615 * Cleanup associated with each interface for an Octeon device when NIC 1616 * module is being unloaded or if initialization fails during load. 
1617 */ 1618 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1619 { 1620 struct net_device *netdev = oct->props[ifidx].netdev; 1621 struct lio *lio; 1622 struct napi_struct *napi, *n; 1623 1624 if (!netdev) { 1625 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1626 __func__, ifidx); 1627 return; 1628 } 1629 1630 lio = GET_LIO(netdev); 1631 1632 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1633 1634 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1635 liquidio_stop(netdev); 1636 1637 if (oct->props[lio->ifidx].napi_enabled == 1) { 1638 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1639 napi_disable(napi); 1640 1641 oct->props[lio->ifidx].napi_enabled = 0; 1642 1643 if (OCTEON_CN23XX_PF(oct)) 1644 oct->droq[0]->ops.poll_mode = 0; 1645 } 1646 1647 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1648 unregister_netdev(netdev); 1649 1650 cleanup_link_status_change_wq(netdev); 1651 1652 delete_glists(lio); 1653 1654 free_netdev(netdev); 1655 1656 oct->props[ifidx].gmxport = -1; 1657 1658 oct->props[ifidx].netdev = NULL; 1659 } 1660 1661 /** 1662 * \brief Stop complete NIC functionality 1663 * @param oct octeon device 1664 */ 1665 static int liquidio_stop_nic_module(struct octeon_device *oct) 1666 { 1667 int i, j; 1668 struct lio *lio; 1669 1670 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1671 if (!oct->ifcount) { 1672 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1673 return 1; 1674 } 1675 1676 spin_lock_bh(&oct->cmd_resp_wqlock); 1677 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1678 spin_unlock_bh(&oct->cmd_resp_wqlock); 1679 1680 for (i = 0; i < oct->ifcount; i++) { 1681 lio = GET_LIO(oct->props[i].netdev); 1682 for (j = 0; j < lio->linfo.num_rxpciq; j++) 1683 octeon_unregister_droq_ops(oct, 1684 lio->linfo.rxpciq[j].s.q_no); 1685 } 1686 1687 for (i = 0; i < oct->ifcount; i++) 1688 liquidio_destroy_nic_device(oct, i); 1689 1690 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1691 return 0; 1692 } 1693 1694 /** 1695 * \brief Cleans up resources at unload time 1696 * @param pdev PCI device structure 1697 */ 1698 static void liquidio_remove(struct pci_dev *pdev) 1699 { 1700 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1701 1702 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1703 1704 if (oct_dev->watchdog_task) 1705 kthread_stop(oct_dev->watchdog_task); 1706 1707 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1708 liquidio_stop_nic_module(oct_dev); 1709 1710 /* Reset the octeon device and cleanup all memory allocated for 1711 * the octeon device by driver. 1712 */ 1713 octeon_destroy_resources(oct_dev); 1714 1715 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1716 1717 /* This octeon device has been removed. Update the global 1718 * data structure to reflect this. Free the device structure. 
1719 */ 1720 octeon_free_device_mem(oct_dev); 1721 } 1722 1723 /** 1724 * \brief Identify the Octeon device and to map the BAR address space 1725 * @param oct octeon device 1726 */ 1727 static int octeon_chip_specific_setup(struct octeon_device *oct) 1728 { 1729 u32 dev_id, rev_id; 1730 int ret = 1; 1731 char *s; 1732 1733 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1734 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1735 oct->rev_id = rev_id & 0xff; 1736 1737 switch (dev_id) { 1738 case OCTEON_CN68XX_PCIID: 1739 oct->chip_id = OCTEON_CN68XX; 1740 ret = lio_setup_cn68xx_octeon_device(oct); 1741 s = "CN68XX"; 1742 break; 1743 1744 case OCTEON_CN66XX_PCIID: 1745 oct->chip_id = OCTEON_CN66XX; 1746 ret = lio_setup_cn66xx_octeon_device(oct); 1747 s = "CN66XX"; 1748 break; 1749 1750 case OCTEON_CN23XX_PCIID_PF: 1751 oct->chip_id = OCTEON_CN23XX_PF_VID; 1752 ret = setup_cn23xx_octeon_pf_device(oct); 1753 s = "CN23XX"; 1754 break; 1755 1756 default: 1757 s = "?"; 1758 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1759 dev_id); 1760 } 1761 1762 if (!ret) 1763 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s, 1764 OCTEON_MAJOR_REV(oct), 1765 OCTEON_MINOR_REV(oct), 1766 octeon_get_conf(oct)->card_name, 1767 LIQUIDIO_VERSION); 1768 1769 return ret; 1770 } 1771 1772 /** 1773 * \brief PCI initialization for each Octeon device. 1774 * @param oct octeon device 1775 */ 1776 static int octeon_pci_os_setup(struct octeon_device *oct) 1777 { 1778 /* setup PCI stuff first */ 1779 if (pci_enable_device(oct->pci_dev)) { 1780 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1781 return 1; 1782 } 1783 1784 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1785 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1786 pci_disable_device(oct->pci_dev); 1787 return 1; 1788 } 1789 1790 /* Enable PCI DMA Master. 
*/ 1791 pci_set_master(oct->pci_dev); 1792 1793 return 0; 1794 } 1795 1796 static inline int skb_iq(struct lio *lio, struct sk_buff *skb) 1797 { 1798 int q = 0; 1799 1800 if (netif_is_multiqueue(lio->netdev)) 1801 q = skb->queue_mapping % lio->linfo.num_txpciq; 1802 1803 return q; 1804 } 1805 1806 /** 1807 * \brief Check Tx queue state for a given network buffer 1808 * @param lio per-network private data 1809 * @param skb network buffer 1810 */ 1811 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) 1812 { 1813 int q = 0, iq = 0; 1814 1815 if (netif_is_multiqueue(lio->netdev)) { 1816 q = skb->queue_mapping; 1817 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; 1818 } else { 1819 iq = lio->txq; 1820 q = iq; 1821 } 1822 1823 if (octnet_iq_is_full(lio->oct_dev, iq)) 1824 return 0; 1825 1826 if (__netif_subqueue_stopped(lio->netdev, q)) { 1827 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); 1828 wake_q(lio->netdev, q); 1829 } 1830 return 1; 1831 } 1832 1833 /** 1834 * \brief Unmap and free network buffer 1835 * @param buf buffer 1836 */ 1837 static void free_netbuf(void *buf) 1838 { 1839 struct sk_buff *skb; 1840 struct octnet_buf_free_info *finfo; 1841 struct lio *lio; 1842 1843 finfo = (struct octnet_buf_free_info *)buf; 1844 skb = finfo->skb; 1845 lio = finfo->lio; 1846 1847 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1848 DMA_TO_DEVICE); 1849 1850 check_txq_state(lio, skb); 1851 1852 tx_buffer_free(skb); 1853 } 1854 1855 /** 1856 * \brief Unmap and free gather buffer 1857 * @param buf buffer 1858 */ 1859 static void free_netsgbuf(void *buf) 1860 { 1861 struct octnet_buf_free_info *finfo; 1862 struct sk_buff *skb; 1863 struct lio *lio; 1864 struct octnic_gather *g; 1865 int i, frags, iq; 1866 1867 finfo = (struct octnet_buf_free_info *)buf; 1868 skb = finfo->skb; 1869 lio = finfo->lio; 1870 g = finfo->g; 1871 frags = skb_shinfo(skb)->nr_frags; 1872 1873 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1874 g->sg[0].ptr[0], (skb->len - skb->data_len), 1875 DMA_TO_DEVICE); 1876 1877 i = 1; 1878 while (frags--) { 1879 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1880 1881 pci_unmap_page((lio->oct_dev)->pci_dev, 1882 g->sg[(i >> 2)].ptr[(i & 3)], 1883 frag->size, DMA_TO_DEVICE); 1884 i++; 1885 } 1886 1887 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, 1888 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); 1889 1890 iq = skb_iq(lio, skb); 1891 spin_lock(&lio->glist_lock[iq]); 1892 list_add_tail(&g->list, &lio->glist[iq]); 1893 spin_unlock(&lio->glist_lock[iq]); 1894 1895 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1896 1897 tx_buffer_free(skb); 1898 } 1899 1900 /** 1901 * \brief Unmap and free gather buffer with response 1902 * @param buf buffer 1903 */ 1904 static void free_netsgbuf_with_resp(void *buf) 1905 { 1906 struct octeon_soft_command *sc; 1907 struct octnet_buf_free_info *finfo; 1908 struct sk_buff *skb; 1909 struct lio *lio; 1910 struct octnic_gather *g; 1911 int i, frags, iq; 1912 1913 sc = (struct octeon_soft_command *)buf; 1914 skb = (struct sk_buff *)sc->callback_arg; 1915 finfo = (struct octnet_buf_free_info *)&skb->cb; 1916 1917 lio = finfo->lio; 1918 g = finfo->g; 1919 frags = skb_shinfo(skb)->nr_frags; 1920 1921 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1922 g->sg[0].ptr[0], (skb->len - skb->data_len), 1923 DMA_TO_DEVICE); 1924 1925 i = 1; 1926 while (frags--) { 1927 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1928 1929 
pci_unmap_page((lio->oct_dev)->pci_dev, 1930 g->sg[(i >> 2)].ptr[(i & 3)], 1931 frag->size, DMA_TO_DEVICE); 1932 i++; 1933 } 1934 1935 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, 1936 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); 1937 1938 iq = skb_iq(lio, skb); 1939 1940 spin_lock(&lio->glist_lock[iq]); 1941 list_add_tail(&g->list, &lio->glist[iq]); 1942 spin_unlock(&lio->glist_lock[iq]); 1943 1944 /* Don't free the skb yet */ 1945 1946 check_txq_state(lio, skb); 1947 } 1948 1949 /** 1950 * \brief Adjust ptp frequency 1951 * @param ptp PTP clock info 1952 * @param ppb how much to adjust by, in parts-per-billion 1953 */ 1954 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 1955 { 1956 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1957 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1958 u64 comp, delta; 1959 unsigned long flags; 1960 bool neg_adj = false; 1961 1962 if (ppb < 0) { 1963 neg_adj = true; 1964 ppb = -ppb; 1965 } 1966 1967 /* The hardware adds the clock compensation value to the 1968 * PTP clock on every coprocessor clock cycle, so we 1969 * compute the delta in terms of coprocessor clocks. 1970 */ 1971 delta = (u64)ppb << 32; 1972 do_div(delta, oct->coproc_clock_rate); 1973 1974 spin_lock_irqsave(&lio->ptp_lock, flags); 1975 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); 1976 if (neg_adj) 1977 comp -= delta; 1978 else 1979 comp += delta; 1980 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1981 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1982 1983 return 0; 1984 } 1985 1986 /** 1987 * \brief Adjust ptp time 1988 * @param ptp PTP clock info 1989 * @param delta how much to adjust by, in nanosecs 1990 */ 1991 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 1992 { 1993 unsigned long flags; 1994 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1995 1996 spin_lock_irqsave(&lio->ptp_lock, flags); 1997 lio->ptp_adjust += delta; 1998 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1999 2000 return 0; 2001 } 2002 2003 /** 2004 * \brief Get hardware clock time, including any adjustment 2005 * @param ptp PTP clock info 2006 * @param ts timespec 2007 */ 2008 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, 2009 struct timespec64 *ts) 2010 { 2011 u64 ns; 2012 unsigned long flags; 2013 struct lio *lio = container_of(ptp, struct lio, ptp_info); 2014 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 2015 2016 spin_lock_irqsave(&lio->ptp_lock, flags); 2017 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); 2018 ns += lio->ptp_adjust; 2019 spin_unlock_irqrestore(&lio->ptp_lock, flags); 2020 2021 *ts = ns_to_timespec64(ns); 2022 2023 return 0; 2024 } 2025 2026 /** 2027 * \brief Set hardware clock time. 
Reset adjustment 2028 * @param ptp PTP clock info 2029 * @param ts timespec 2030 */ 2031 static int liquidio_ptp_settime(struct ptp_clock_info *ptp, 2032 const struct timespec64 *ts) 2033 { 2034 u64 ns; 2035 unsigned long flags; 2036 struct lio *lio = container_of(ptp, struct lio, ptp_info); 2037 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 2038 2039 ns = timespec_to_ns(ts); 2040 2041 spin_lock_irqsave(&lio->ptp_lock, flags); 2042 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 2043 lio->ptp_adjust = 0; 2044 spin_unlock_irqrestore(&lio->ptp_lock, flags); 2045 2046 return 0; 2047 } 2048 2049 /** 2050 * \brief Check if PTP is enabled 2051 * @param ptp PTP clock info 2052 * @param rq request 2053 * @param on is it on 2054 */ 2055 static int 2056 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)), 2057 struct ptp_clock_request *rq __attribute__((unused)), 2058 int on __attribute__((unused))) 2059 { 2060 return -EOPNOTSUPP; 2061 } 2062 2063 /** 2064 * \brief Open PTP clock source 2065 * @param netdev network device 2066 */ 2067 static void oct_ptp_open(struct net_device *netdev) 2068 { 2069 struct lio *lio = GET_LIO(netdev); 2070 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 2071 2072 spin_lock_init(&lio->ptp_lock); 2073 2074 snprintf(lio->ptp_info.name, 16, "%s", netdev->name); 2075 lio->ptp_info.owner = THIS_MODULE; 2076 lio->ptp_info.max_adj = 250000000; 2077 lio->ptp_info.n_alarm = 0; 2078 lio->ptp_info.n_ext_ts = 0; 2079 lio->ptp_info.n_per_out = 0; 2080 lio->ptp_info.pps = 0; 2081 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq; 2082 lio->ptp_info.adjtime = liquidio_ptp_adjtime; 2083 lio->ptp_info.gettime64 = liquidio_ptp_gettime; 2084 lio->ptp_info.settime64 = liquidio_ptp_settime; 2085 lio->ptp_info.enable = liquidio_ptp_enable; 2086 2087 lio->ptp_adjust = 0; 2088 2089 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 2090 &oct->pci_dev->dev); 2091 2092 if (IS_ERR(lio->ptp_clock)) 2093 lio->ptp_clock = NULL; 2094 } 2095 2096 /** 2097 * \brief Init PTP clock 2098 * @param oct octeon device 2099 */ 2100 static void liquidio_ptp_init(struct octeon_device *oct) 2101 { 2102 u64 clock_comp, cfg; 2103 2104 clock_comp = (u64)NSEC_PER_SEC << 32; 2105 do_div(clock_comp, oct->coproc_clock_rate); 2106 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 2107 2108 /* Enable */ 2109 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 2110 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 2111 } 2112 2113 /** 2114 * \brief Load firmware to device 2115 * @param oct octeon device 2116 * 2117 * Maps device to firmware filename, requests firmware, and downloads it 2118 */ 2119 static int load_firmware(struct octeon_device *oct) 2120 { 2121 int ret = 0; 2122 const struct firmware *fw; 2123 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 2124 char *tmp_fw_type; 2125 2126 if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, 2127 sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) { 2128 dev_info(&oct->pci_dev->dev, "Skipping firmware load\n"); 2129 return ret; 2130 } 2131 2132 if (fw_type[0] == '\0') 2133 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 2134 else 2135 tmp_fw_type = fw_type; 2136 2137 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 2138 octeon_get_conf(oct)->card_name, tmp_fw_type, 2139 LIO_FW_NAME_SUFFIX); 2140 2141 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 2142 if (ret) { 2143 dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n.", 2144 fw_name); 2145 release_firmware(fw); 2146 return ret; 2147 } 2148 2149 ret = octeon_download_firmware(oct, fw->data, fw->size); 2150 2151 release_firmware(fw); 2152 2153 return ret; 2154 } 2155 2156 /** 2157 * \brief Setup output queue 2158 * @param oct octeon device 2159 * @param q_no which queue 2160 * @param num_descs how many descriptors 2161 * @param desc_size size of each descriptor 2162 * @param app_ctx application context 2163 */ 2164 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, 2165 int desc_size, void *app_ctx) 2166 { 2167 int ret_val = 0; 2168 2169 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 2170 /* droq creation and local register settings. */ 2171 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 2172 if (ret_val < 0) 2173 return ret_val; 2174 2175 if (ret_val == 1) { 2176 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); 2177 return 0; 2178 } 2179 /* tasklet creation for the droq */ 2180 2181 /* Enable the droq queues */ 2182 octeon_set_droq_pkt_op(oct, q_no, 1); 2183 2184 /* Send Credit for Octeon Output queues. Credits are always 2185 * sent after the output queue is enabled. 2186 */ 2187 writel(oct->droq[q_no]->max_count, 2188 oct->droq[q_no]->pkts_credit_reg); 2189 2190 return ret_val; 2191 } 2192 2193 /** 2194 * \brief Callback for getting interface configuration 2195 * @param status status of request 2196 * @param buf pointer to resp structure 2197 */ 2198 static void if_cfg_callback(struct octeon_device *oct, 2199 u32 status __attribute__((unused)), 2200 void *buf) 2201 { 2202 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 2203 struct liquidio_if_cfg_resp *resp; 2204 struct liquidio_if_cfg_context *ctx; 2205 2206 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 2207 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 2208 2209 oct = lio_get_device(ctx->octeon_id); 2210 if (resp->status) 2211 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 2212 CVM_CAST64(resp->status)); 2213 WRITE_ONCE(ctx->cond, 1); 2214 2215 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 2216 resp->cfg_info.liquidio_firmware_version); 2217 2218 /* This barrier is required to be sure that the response has been 2219 * written fully before waking up the handler 2220 */ 2221 wmb(); 2222 2223 wake_up_interruptible(&ctx->wc); 2224 } 2225 2226 /** 2227 * \brief Select queue based on hash 2228 * @param dev Net device 2229 * @param skb sk_buff structure 2230 * @returns selected queue number 2231 */ 2232 static u16 select_q(struct net_device *dev, struct sk_buff *skb, 2233 void *accel_priv __attribute__((unused)), 2234 select_queue_fallback_t fallback __attribute__((unused))) 2235 { 2236 u32 qindex = 0; 2237 struct lio *lio; 2238 2239 lio = GET_LIO(dev); 2240 qindex = skb_tx_hash(dev, skb); 2241 2242 return (u16)(qindex % (lio->linfo.num_txpciq)); 2243 } 2244 2245 /** Routine to push packets arriving on Octeon interface upto network layer. 2246 * @param oct_id - octeon device id. 2247 * @param skbuff - skbuff struct to be passed to network layer. 2248 * @param len - size of total data received. 
2249 * @param rh - Control header associated with the packet 2250 * @param param - additional control data with the packet 2251 * @param arg - farg registered in droq_ops 2252 */ 2253 static void 2254 liquidio_push_packet(u32 octeon_id __attribute__((unused)), 2255 void *skbuff, 2256 u32 len, 2257 union octeon_rh *rh, 2258 void *param, 2259 void *arg) 2260 { 2261 struct napi_struct *napi = param; 2262 struct sk_buff *skb = (struct sk_buff *)skbuff; 2263 struct skb_shared_hwtstamps *shhwtstamps; 2264 u64 ns; 2265 u16 vtag = 0; 2266 struct net_device *netdev = (struct net_device *)arg; 2267 struct octeon_droq *droq = container_of(param, struct octeon_droq, 2268 napi); 2269 if (netdev) { 2270 int packet_was_received; 2271 struct lio *lio = GET_LIO(netdev); 2272 struct octeon_device *oct = lio->oct_dev; 2273 2274 /* Do not proceed if the interface is not in RUNNING state. */ 2275 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { 2276 recv_buffer_free(skb); 2277 droq->stats.rx_dropped++; 2278 return; 2279 } 2280 2281 skb->dev = netdev; 2282 2283 skb_record_rx_queue(skb, droq->q_no); 2284 if (likely(len > MIN_SKB_SIZE)) { 2285 struct octeon_skb_page_info *pg_info; 2286 unsigned char *va; 2287 2288 pg_info = ((struct octeon_skb_page_info *)(skb->cb)); 2289 if (pg_info->page) { 2290 /* For Paged allocation use the frags */ 2291 va = page_address(pg_info->page) + 2292 pg_info->page_offset; 2293 memcpy(skb->data, va, MIN_SKB_SIZE); 2294 skb_put(skb, MIN_SKB_SIZE); 2295 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2296 pg_info->page, 2297 pg_info->page_offset + 2298 MIN_SKB_SIZE, 2299 len - MIN_SKB_SIZE, 2300 LIO_RXBUFFER_SZ); 2301 } 2302 } else { 2303 struct octeon_skb_page_info *pg_info = 2304 ((struct octeon_skb_page_info *)(skb->cb)); 2305 skb_copy_to_linear_data(skb, page_address(pg_info->page) 2306 + pg_info->page_offset, len); 2307 skb_put(skb, len); 2308 put_page(pg_info->page); 2309 } 2310 2311 if (((oct->chip_id == OCTEON_CN66XX) || 2312 (oct->chip_id == OCTEON_CN68XX)) && 2313 ptp_enable) { 2314 if (rh->r_dh.has_hwtstamp) { 2315 /* timestamp is included from the hardware at 2316 * the beginning of the packet. 2317 */ 2318 if (ifstate_check 2319 (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { 2320 /* Nanoseconds are in the first 64-bits 2321 * of the packet. 
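 * The stamp is consumed only when RX timestamping is enabled;
 * skb_pull() below strips the 8 bytes either way, so the frame
 * data that follows ends up at skb->data.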
2322 */ 2323 memcpy(&ns, (skb->data), sizeof(ns)); 2324 shhwtstamps = skb_hwtstamps(skb); 2325 shhwtstamps->hwtstamp = 2326 ns_to_ktime(ns + 2327 lio->ptp_adjust); 2328 } 2329 skb_pull(skb, sizeof(ns)); 2330 } 2331 } 2332 2333 skb->protocol = eth_type_trans(skb, skb->dev); 2334 if ((netdev->features & NETIF_F_RXCSUM) && 2335 (((rh->r_dh.encap_on) && 2336 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || 2337 (!(rh->r_dh.encap_on) && 2338 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) 2339 /* checksum has already been verified */ 2340 skb->ip_summed = CHECKSUM_UNNECESSARY; 2341 else 2342 skb->ip_summed = CHECKSUM_NONE; 2343 2344 /* Setting Encapsulation field on basis of status received 2345 * from the firmware 2346 */ 2347 if (rh->r_dh.encap_on) { 2348 skb->encapsulation = 1; 2349 skb->csum_level = 1; 2350 droq->stats.rx_vxlan++; 2351 } 2352 2353 /* inbound VLAN tag */ 2354 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 2355 (rh->r_dh.vlan != 0)) { 2356 u16 vid = rh->r_dh.vlan; 2357 u16 priority = rh->r_dh.priority; 2358 2359 vtag = priority << 13 | vid; 2360 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); 2361 } 2362 2363 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; 2364 2365 if (packet_was_received) { 2366 droq->stats.rx_bytes_received += len; 2367 droq->stats.rx_pkts_received++; 2368 netdev->last_rx = jiffies; 2369 } else { 2370 droq->stats.rx_dropped++; 2371 netif_info(lio, rx_err, lio->netdev, 2372 "droq:%d error rx_dropped:%llu\n", 2373 droq->q_no, droq->stats.rx_dropped); 2374 } 2375 2376 } else { 2377 recv_buffer_free(skb); 2378 } 2379 } 2380 2381 /** 2382 * \brief wrapper for calling napi_schedule 2383 * @param param parameters to pass to napi_schedule 2384 * 2385 * Used when scheduling on different CPUs 2386 */ 2387 static void napi_schedule_wrapper(void *param) 2388 { 2389 struct napi_struct *napi = param; 2390 2391 napi_schedule(napi); 2392 } 2393 2394 /** 2395 * \brief callback when receive interrupt occurs and we are in NAPI mode 2396 * @param arg pointer to octeon output queue 2397 */ 2398 static void liquidio_napi_drv_callback(void *arg) 2399 { 2400 struct octeon_device *oct; 2401 struct octeon_droq *droq = arg; 2402 int this_cpu = smp_processor_id(); 2403 2404 oct = droq->oct_dev; 2405 2406 if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) { 2407 napi_schedule_irqoff(&droq->napi); 2408 } else { 2409 struct call_single_data *csd = &droq->csd; 2410 2411 csd->func = napi_schedule_wrapper; 2412 csd->info = &droq->napi; 2413 csd->flags = 0; 2414 2415 smp_call_function_single_async(droq->cpu_id, csd); 2416 } 2417 } 2418 2419 /** 2420 * \brief Entry point for NAPI polling 2421 * @param napi NAPI structure 2422 * @param budget maximum number of items to process 2423 */ 2424 static int liquidio_napi_poll(struct napi_struct *napi, int budget) 2425 { 2426 struct octeon_droq *droq; 2427 int work_done; 2428 int tx_done = 0, iq_no; 2429 struct octeon_instr_queue *iq; 2430 struct octeon_device *oct; 2431 2432 droq = container_of(napi, struct octeon_droq, napi); 2433 oct = droq->oct_dev; 2434 iq_no = droq->q_no; 2435 /* Handle Droq descriptors */ 2436 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, 2437 POLL_EVENT_PROCESS_PKTS, 2438 budget); 2439 2440 /* Flush the instruction queue */ 2441 iq = oct->instr_queue[iq_no]; 2442 if (iq) { 2443 /* Process iq buffers with in the budget limits */ 2444 tx_done = octeon_flush_iq(oct, iq, 1, budget); 2445 /* Update iq read-index rather than waiting for next interrupt. 
2446 * Return back if tx_done is false. 2447 */ 2448 update_txq_status(oct, iq_no); 2449 } else { 2450 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", 2451 __func__, iq_no); 2452 } 2453 2454 if ((work_done < budget) && (tx_done)) { 2455 napi_complete(napi); 2456 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, 2457 POLL_EVENT_ENABLE_INTR, 0); 2458 return 0; 2459 } 2460 2461 return (!tx_done) ? (budget) : (work_done); 2462 } 2463 2464 /** 2465 * \brief Setup input and output queues 2466 * @param octeon_dev octeon device 2467 * @param ifidx Interface Index 2468 * 2469 * Note: Queues are with respect to the octeon device. Thus 2470 * an input queue is for egress packets, and output queues 2471 * are for ingress packets. 2472 */ 2473 static inline int setup_io_queues(struct octeon_device *octeon_dev, 2474 int ifidx) 2475 { 2476 struct octeon_droq_ops droq_ops; 2477 struct net_device *netdev; 2478 static int cpu_id; 2479 static int cpu_id_modulus; 2480 struct octeon_droq *droq; 2481 struct napi_struct *napi; 2482 int q, q_no, retval = 0; 2483 struct lio *lio; 2484 int num_tx_descs; 2485 2486 netdev = octeon_dev->props[ifidx].netdev; 2487 2488 lio = GET_LIO(netdev); 2489 2490 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); 2491 2492 droq_ops.fptr = liquidio_push_packet; 2493 droq_ops.farg = (void *)netdev; 2494 2495 droq_ops.poll_mode = 1; 2496 droq_ops.napi_fn = liquidio_napi_drv_callback; 2497 cpu_id = 0; 2498 cpu_id_modulus = num_present_cpus(); 2499 2500 /* set up DROQs. */ 2501 for (q = 0; q < lio->linfo.num_rxpciq; q++) { 2502 q_no = lio->linfo.rxpciq[q].s.q_no; 2503 dev_dbg(&octeon_dev->pci_dev->dev, 2504 "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n", 2505 q, q_no); 2506 retval = octeon_setup_droq(octeon_dev, q_no, 2507 CFG_GET_NUM_RX_DESCS_NIC_IF 2508 (octeon_get_conf(octeon_dev), 2509 lio->ifidx), 2510 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF 2511 (octeon_get_conf(octeon_dev), 2512 lio->ifidx), NULL); 2513 if (retval) { 2514 dev_err(&octeon_dev->pci_dev->dev, 2515 "%s : Runtime DROQ(RxQ) creation failed.\n", 2516 __func__); 2517 return 1; 2518 } 2519 2520 droq = octeon_dev->droq[q_no]; 2521 napi = &droq->napi; 2522 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n", 2523 (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num); 2524 netif_napi_add(netdev, napi, liquidio_napi_poll, 64); 2525 2526 /* designate a CPU for this droq */ 2527 droq->cpu_id = cpu_id; 2528 cpu_id++; 2529 if (cpu_id >= cpu_id_modulus) 2530 cpu_id = 0; 2531 2532 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); 2533 } 2534 2535 if (OCTEON_CN23XX_PF(octeon_dev)) { 2536 /* 23XX PF can receive control messages (via the first PF-owned 2537 * droq) from the firmware even if the ethX interface is down, 2538 * so that's why poll_mode must be off for the first droq. 2539 */ 2540 octeon_dev->droq[0]->ops.poll_mode = 0; 2541 } 2542 2543 /* set up IQs. 
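 * One instruction queue is created per tx queue advertised by the
 * firmware in linfo.num_txpciq.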
*/ 2544 for (q = 0; q < lio->linfo.num_txpciq; q++) { 2545 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf 2546 (octeon_dev), 2547 lio->ifidx); 2548 retval = octeon_setup_iq(octeon_dev, ifidx, q, 2549 lio->linfo.txpciq[q], num_tx_descs, 2550 netdev_get_tx_queue(netdev, q)); 2551 if (retval) { 2552 dev_err(&octeon_dev->pci_dev->dev, 2553 " %s : Runtime IQ(TxQ) creation failed.\n", 2554 __func__); 2555 return 1; 2556 } 2557 } 2558 2559 return 0; 2560 } 2561 2562 /** 2563 * \brief Poll routine for checking transmit queue status 2564 * @param work work_struct data structure 2565 */ 2566 static void octnet_poll_check_txq_status(struct work_struct *work) 2567 { 2568 struct cavium_wk *wk = (struct cavium_wk *)work; 2569 struct lio *lio = (struct lio *)wk->ctxptr; 2570 2571 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 2572 return; 2573 2574 check_txq_status(lio); 2575 queue_delayed_work(lio->txq_status_wq.wq, 2576 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2577 } 2578 2579 /** 2580 * \brief Sets up the txq poll check 2581 * @param netdev network device 2582 */ 2583 static inline int setup_tx_poll_fn(struct net_device *netdev) 2584 { 2585 struct lio *lio = GET_LIO(netdev); 2586 struct octeon_device *oct = lio->oct_dev; 2587 2588 lio->txq_status_wq.wq = alloc_workqueue("txq-status", 2589 WQ_MEM_RECLAIM, 0); 2590 if (!lio->txq_status_wq.wq) { 2591 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 2592 return -1; 2593 } 2594 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 2595 octnet_poll_check_txq_status); 2596 lio->txq_status_wq.wk.ctxptr = lio; 2597 queue_delayed_work(lio->txq_status_wq.wq, 2598 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2599 return 0; 2600 } 2601 2602 static inline void cleanup_tx_poll_fn(struct net_device *netdev) 2603 { 2604 struct lio *lio = GET_LIO(netdev); 2605 2606 if (lio->txq_status_wq.wq) { 2607 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 2608 destroy_workqueue(lio->txq_status_wq.wq); 2609 } 2610 } 2611 2612 /** 2613 * \brief Net device open for LiquidIO 2614 * @param netdev network device 2615 */ 2616 static int liquidio_open(struct net_device *netdev) 2617 { 2618 struct lio *lio = GET_LIO(netdev); 2619 struct octeon_device *oct = lio->oct_dev; 2620 struct napi_struct *napi, *n; 2621 2622 if (oct->props[lio->ifidx].napi_enabled == 0) { 2623 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 2624 napi_enable(napi); 2625 2626 oct->props[lio->ifidx].napi_enabled = 1; 2627 2628 if (OCTEON_CN23XX_PF(oct)) 2629 oct->droq[0]->ops.poll_mode = 1; 2630 } 2631 2632 oct_ptp_open(netdev); 2633 2634 ifstate_set(lio, LIO_IFSTATE_RUNNING); 2635 2636 /* Ready for link status updates */ 2637 lio->intf_open = 1; 2638 2639 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 2640 2641 if (OCTEON_CN23XX_PF(oct)) { 2642 if (!oct->msix_on) 2643 if (setup_tx_poll_fn(netdev)) 2644 return -1; 2645 } else { 2646 if (setup_tx_poll_fn(netdev)) 2647 return -1; 2648 } 2649 2650 start_txq(netdev); 2651 2652 /* tell Octeon to start forwarding packets to host */ 2653 send_rx_ctrl_cmd(lio, 1); 2654 2655 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 2656 netdev->name); 2657 2658 return 0; 2659 } 2660 2661 /** 2662 * \brief Net device stop for LiquidIO 2663 * @param netdev network device 2664 */ 2665 static int liquidio_stop(struct net_device *netdev) 2666 { 2667 struct lio *lio = GET_LIO(netdev); 2668 struct octeon_device *oct = lio->oct_dev; 2669 2670 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 2671 2672 
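	/* Keep the stack from queueing new transmits while the carrier and
	 * link state are torn down below.
	 */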
netif_tx_disable(netdev); 2673 2674 /* Inform that netif carrier is down */ 2675 netif_carrier_off(netdev); 2676 lio->intf_open = 0; 2677 lio->linfo.link.s.link_up = 0; 2678 lio->link_changes++; 2679 2680 /* Pause for a moment and wait for Octeon to flush out (to the wire) any 2681 * egress packets that are in-flight. 2682 */ 2683 set_current_state(TASK_INTERRUPTIBLE); 2684 schedule_timeout(msecs_to_jiffies(100)); 2685 2686 /* Now it should be safe to tell Octeon that nic interface is down. */ 2687 send_rx_ctrl_cmd(lio, 0); 2688 2689 if (OCTEON_CN23XX_PF(oct)) { 2690 if (!oct->msix_on) 2691 cleanup_tx_poll_fn(netdev); 2692 } else { 2693 cleanup_tx_poll_fn(netdev); 2694 } 2695 2696 if (lio->ptp_clock) { 2697 ptp_clock_unregister(lio->ptp_clock); 2698 lio->ptp_clock = NULL; 2699 } 2700 2701 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 2702 2703 return 0; 2704 } 2705 2706 /** 2707 * \brief Converts a mask based on net device flags 2708 * @param netdev network device 2709 * 2710 * This routine generates a octnet_ifflags mask from the net device flags 2711 * received from the OS. 2712 */ 2713 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 2714 { 2715 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 2716 2717 if (netdev->flags & IFF_PROMISC) 2718 f |= OCTNET_IFFLAG_PROMISC; 2719 2720 if (netdev->flags & IFF_ALLMULTI) 2721 f |= OCTNET_IFFLAG_ALLMULTI; 2722 2723 if (netdev->flags & IFF_MULTICAST) { 2724 f |= OCTNET_IFFLAG_MULTICAST; 2725 2726 /* Accept all multicast addresses if there are more than we 2727 * can handle 2728 */ 2729 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 2730 f |= OCTNET_IFFLAG_ALLMULTI; 2731 } 2732 2733 if (netdev->flags & IFF_BROADCAST) 2734 f |= OCTNET_IFFLAG_BROADCAST; 2735 2736 return f; 2737 } 2738 2739 /** 2740 * \brief Net device set_multicast_list 2741 * @param netdev network device 2742 */ 2743 static void liquidio_set_mcast_list(struct net_device *netdev) 2744 { 2745 struct lio *lio = GET_LIO(netdev); 2746 struct octeon_device *oct = lio->oct_dev; 2747 struct octnic_ctrl_pkt nctrl; 2748 struct netdev_hw_addr *ha; 2749 u64 *mc; 2750 int ret; 2751 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 2752 2753 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2754 2755 /* Create a ctrl pkt command to be sent to core app. */ 2756 nctrl.ncmd.u64 = 0; 2757 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 2758 nctrl.ncmd.s.param1 = get_new_flags(netdev); 2759 nctrl.ncmd.s.param2 = mc_count; 2760 nctrl.ncmd.s.more = mc_count; 2761 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2762 nctrl.netpndev = (u64)netdev; 2763 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2764 2765 /* copy all the addresses into the udd */ 2766 mc = &nctrl.udd[0]; 2767 netdev_for_each_mc_addr(ha, netdev) { 2768 *mc = 0; 2769 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 2770 /* no need to swap bytes */ 2771 2772 if (++mc > &nctrl.udd[mc_count]) 2773 break; 2774 } 2775 2776 /* Apparently, any activity in this call from the kernel has to 2777 * be atomic. So we won't wait for response. 
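	 * (wait_time is left at zero below, so the command is not waited on.)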
2778 */ 2779 nctrl.wait_time = 0; 2780 2781 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2782 if (ret < 0) { 2783 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 2784 ret); 2785 } 2786 } 2787 2788 /** 2789 * \brief Net device set_mac_address 2790 * @param netdev network device 2791 */ 2792 static int liquidio_set_mac(struct net_device *netdev, void *p) 2793 { 2794 int ret = 0; 2795 struct lio *lio = GET_LIO(netdev); 2796 struct octeon_device *oct = lio->oct_dev; 2797 struct sockaddr *addr = (struct sockaddr *)p; 2798 struct octnic_ctrl_pkt nctrl; 2799 2800 if (!is_valid_ether_addr(addr->sa_data)) 2801 return -EADDRNOTAVAIL; 2802 2803 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2804 2805 nctrl.ncmd.u64 = 0; 2806 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2807 nctrl.ncmd.s.param1 = 0; 2808 nctrl.ncmd.s.more = 1; 2809 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2810 nctrl.netpndev = (u64)netdev; 2811 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2812 nctrl.wait_time = 100; 2813 2814 nctrl.udd[0] = 0; 2815 /* The MAC Address is presented in network byte order. */ 2816 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2817 2818 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2819 if (ret < 0) { 2820 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2821 return -ENOMEM; 2822 } 2823 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2824 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2825 2826 return 0; 2827 } 2828 2829 /** 2830 * \brief Net device get_stats 2831 * @param netdev network device 2832 */ 2833 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 2834 { 2835 struct lio *lio = GET_LIO(netdev); 2836 struct net_device_stats *stats = &netdev->stats; 2837 struct octeon_device *oct; 2838 u64 pkts = 0, drop = 0, bytes = 0; 2839 struct oct_droq_stats *oq_stats; 2840 struct oct_iq_stats *iq_stats; 2841 int i, iq_no, oq_no; 2842 2843 oct = lio->oct_dev; 2844 2845 for (i = 0; i < lio->linfo.num_txpciq; i++) { 2846 iq_no = lio->linfo.txpciq[i].s.q_no; 2847 iq_stats = &oct->instr_queue[iq_no]->stats; 2848 pkts += iq_stats->tx_done; 2849 drop += iq_stats->tx_dropped; 2850 bytes += iq_stats->tx_tot_bytes; 2851 } 2852 2853 stats->tx_packets = pkts; 2854 stats->tx_bytes = bytes; 2855 stats->tx_dropped = drop; 2856 2857 pkts = 0; 2858 drop = 0; 2859 bytes = 0; 2860 2861 for (i = 0; i < lio->linfo.num_rxpciq; i++) { 2862 oq_no = lio->linfo.rxpciq[i].s.q_no; 2863 oq_stats = &oct->droq[oq_no]->stats; 2864 pkts += oq_stats->rx_pkts_received; 2865 drop += (oq_stats->rx_dropped + 2866 oq_stats->dropped_nodispatch + 2867 oq_stats->dropped_toomany + 2868 oq_stats->dropped_nomem); 2869 bytes += oq_stats->rx_bytes_received; 2870 } 2871 2872 stats->rx_bytes = bytes; 2873 stats->rx_packets = pkts; 2874 stats->rx_dropped = drop; 2875 2876 return stats; 2877 } 2878 2879 /** 2880 * \brief Net device change_mtu 2881 * @param netdev network device 2882 */ 2883 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 2884 { 2885 struct lio *lio = GET_LIO(netdev); 2886 struct octeon_device *oct = lio->oct_dev; 2887 struct octnic_ctrl_pkt nctrl; 2888 int ret = 0; 2889 2890 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2891 2892 nctrl.ncmd.u64 = 0; 2893 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; 2894 nctrl.ncmd.s.param1 = new_mtu; 2895 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2896 nctrl.wait_time = 100; 2897 nctrl.netpndev = (u64)netdev; 2898 nctrl.cb_fn = 
liquidio_link_ctrl_cmd_completion; 2899 2900 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2901 if (ret < 0) { 2902 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); 2903 return -1; 2904 } 2905 2906 lio->mtu = new_mtu; 2907 2908 return 0; 2909 } 2910 2911 /** 2912 * \brief Handler for SIOCSHWTSTAMP ioctl 2913 * @param netdev network device 2914 * @param ifr interface request 2915 * @param cmd command 2916 */ 2917 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2918 { 2919 struct hwtstamp_config conf; 2920 struct lio *lio = GET_LIO(netdev); 2921 2922 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2923 return -EFAULT; 2924 2925 if (conf.flags) 2926 return -EINVAL; 2927 2928 switch (conf.tx_type) { 2929 case HWTSTAMP_TX_ON: 2930 case HWTSTAMP_TX_OFF: 2931 break; 2932 default: 2933 return -ERANGE; 2934 } 2935 2936 switch (conf.rx_filter) { 2937 case HWTSTAMP_FILTER_NONE: 2938 break; 2939 case HWTSTAMP_FILTER_ALL: 2940 case HWTSTAMP_FILTER_SOME: 2941 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2942 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2943 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2944 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2945 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2946 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2947 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2948 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2949 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2950 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2951 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2952 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2953 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2954 break; 2955 default: 2956 return -ERANGE; 2957 } 2958 2959 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2960 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2961 2962 else 2963 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2964 2965 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2966 } 2967 2968 /** 2969 * \brief ioctl handler 2970 * @param netdev network device 2971 * @param ifr interface request 2972 * @param cmd command 2973 */ 2974 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2975 { 2976 switch (cmd) { 2977 case SIOCSHWTSTAMP: 2978 return hwtstamp_ioctl(netdev, ifr); 2979 default: 2980 return -EOPNOTSUPP; 2981 } 2982 } 2983 2984 /** 2985 * \brief handle a Tx timestamp response 2986 * @param status response status 2987 * @param buf pointer to skb 2988 */ 2989 static void handle_timestamp(struct octeon_device *oct, 2990 u32 status, 2991 void *buf) 2992 { 2993 struct octnet_buf_free_info *finfo; 2994 struct octeon_soft_command *sc; 2995 struct oct_timestamp_resp *resp; 2996 struct lio *lio; 2997 struct sk_buff *skb = (struct sk_buff *)buf; 2998 2999 finfo = (struct octnet_buf_free_info *)skb->cb; 3000 lio = finfo->lio; 3001 sc = finfo->sc; 3002 oct = lio->oct_dev; 3003 resp = (struct oct_timestamp_resp *)sc->virtrptr; 3004 3005 if (status != OCTEON_REQUEST_DONE) { 3006 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 3007 CVM_CAST64(status)); 3008 resp->timestamp = 0; 3009 } 3010 3011 octeon_swap_8B_data(&resp->timestamp, 1); 3012 3013 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 3014 struct skb_shared_hwtstamps ts; 3015 u64 ns = resp->timestamp; 3016 3017 netif_info(lio, tx_done, lio->netdev, 3018 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 3019 skb, (unsigned long long)ns); 3020 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 3021 skb_tstamp_tx(skb, &ts); 3022 } 3023 3024 octeon_free_soft_command(oct, sc); 3025 tx_buffer_free(skb); 3026 } 3027 3028 /* \brief Send a data packet that will be timestamped 3029 * @param oct octeon device 3030 * @param ndata pointer to network data 3031 * @param finfo pointer to private network data 3032 */ 3033 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 3034 struct octnic_data_pkt *ndata, 3035 struct octnet_buf_free_info *finfo) 3036 { 3037 int retval; 3038 struct octeon_soft_command *sc; 3039 struct lio *lio; 3040 int ring_doorbell; 3041 u32 len; 3042 3043 lio = finfo->lio; 3044 3045 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 3046 sizeof(struct oct_timestamp_resp)); 3047 finfo->sc = sc; 3048 3049 if (!sc) { 3050 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 3051 return IQ_SEND_FAILED; 3052 } 3053 3054 if (ndata->reqtype == REQTYPE_NORESP_NET) 3055 ndata->reqtype = REQTYPE_RESP_NET; 3056 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 3057 ndata->reqtype = REQTYPE_RESP_NET_SG; 3058 3059 sc->callback = handle_timestamp; 3060 sc->callback_arg = finfo->skb; 3061 sc->iq_no = ndata->q_no; 3062 3063 if (OCTEON_CN23XX_PF(oct)) 3064 len = (u32)((struct octeon_instr_ih3 *) 3065 (&sc->cmd.cmd3.ih3))->dlengsz; 3066 else 3067 len = (u32)((struct octeon_instr_ih2 *) 3068 (&sc->cmd.cmd2.ih2))->dlengsz; 3069 3070 ring_doorbell = 1; 3071 3072 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 3073 sc, len, ndata->reqtype); 3074 3075 if (retval == IQ_SEND_FAILED) { 3076 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 3077 retval); 3078 octeon_free_soft_command(oct, sc); 3079 } else { 3080 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 3081 } 3082 3083 return retval; 3084 } 3085 3086 /** \brief Transmit networks packets to the Octeon interface 3087 * @param skbuff skbuff struct to be passed to network layer. 3088 * @param netdev pointer to network device 3089 * @returns whether the packet was transmitted to the device okay or not 3090 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 3091 */ 3092 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 3093 { 3094 struct lio *lio; 3095 struct octnet_buf_free_info *finfo; 3096 union octnic_cmd_setup cmdsetup; 3097 struct octnic_data_pkt ndata; 3098 struct octeon_device *oct; 3099 struct oct_iq_stats *stats; 3100 struct octeon_instr_irh *irh; 3101 union tx_info *tx_info; 3102 int status = 0; 3103 int q_idx = 0, iq_no = 0; 3104 int j; 3105 u64 dptr = 0; 3106 u32 tag = 0; 3107 3108 lio = GET_LIO(netdev); 3109 oct = lio->oct_dev; 3110 3111 if (netif_is_multiqueue(netdev)) { 3112 q_idx = skb->queue_mapping; 3113 q_idx = (q_idx % (lio->linfo.num_txpciq)); 3114 tag = q_idx; 3115 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 3116 } else { 3117 iq_no = lio->txq; 3118 } 3119 3120 stats = &oct->instr_queue[iq_no]->stats; 3121 3122 /* Check for all conditions in which the current packet cannot be 3123 * transmitted. 
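	 * (interface not in RUNNING state, link down, or a zero-length skb)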
3124 */ 3125 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 3126 (!lio->linfo.link.s.link_up) || 3127 (skb->len <= 0)) { 3128 netif_info(lio, tx_err, lio->netdev, 3129 "Transmit failed link_status : %d\n", 3130 lio->linfo.link.s.link_up); 3131 goto lio_xmit_failed; 3132 } 3133 3134 /* Use space in skb->cb to store info used to unmap and 3135 * free the buffers. 3136 */ 3137 finfo = (struct octnet_buf_free_info *)skb->cb; 3138 finfo->lio = lio; 3139 finfo->skb = skb; 3140 finfo->sc = NULL; 3141 3142 /* Prepare the attributes for the data to be passed to OSI. */ 3143 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 3144 3145 ndata.buf = (void *)finfo; 3146 3147 ndata.q_no = iq_no; 3148 3149 if (netif_is_multiqueue(netdev)) { 3150 if (octnet_iq_is_full(oct, ndata.q_no)) { 3151 /* defer sending if queue is full */ 3152 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 3153 ndata.q_no); 3154 stats->tx_iq_busy++; 3155 return NETDEV_TX_BUSY; 3156 } 3157 } else { 3158 if (octnet_iq_is_full(oct, lio->txq)) { 3159 /* defer sending if queue is full */ 3160 stats->tx_iq_busy++; 3161 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 3162 lio->txq); 3163 return NETDEV_TX_BUSY; 3164 } 3165 } 3166 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 3167 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 3168 */ 3169 3170 ndata.datasize = skb->len; 3171 3172 cmdsetup.u64 = 0; 3173 cmdsetup.s.iq_no = iq_no; 3174 3175 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3176 if (skb->encapsulation) { 3177 cmdsetup.s.tnl_csum = 1; 3178 stats->tx_vxlan++; 3179 } else { 3180 cmdsetup.s.transport_csum = 1; 3181 } 3182 } 3183 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 3184 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3185 cmdsetup.s.timestamp = 1; 3186 } 3187 3188 if (skb_shinfo(skb)->nr_frags == 0) { 3189 cmdsetup.s.u.datasize = skb->len; 3190 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 3191 3192 /* Offload checksum calculation for TCP/UDP packets */ 3193 dptr = dma_map_single(&oct->pci_dev->dev, 3194 skb->data, 3195 skb->len, 3196 DMA_TO_DEVICE); 3197 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 3198 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 3199 __func__); 3200 return NETDEV_TX_BUSY; 3201 } 3202 3203 if (OCTEON_CN23XX_PF(oct)) 3204 ndata.cmd.cmd3.dptr = dptr; 3205 else 3206 ndata.cmd.cmd2.dptr = dptr; 3207 finfo->dptr = dptr; 3208 ndata.reqtype = REQTYPE_NORESP_NET; 3209 3210 } else { 3211 int i, frags; 3212 struct skb_frag_struct *frag; 3213 struct octnic_gather *g; 3214 3215 spin_lock(&lio->glist_lock[q_idx]); 3216 g = (struct octnic_gather *) 3217 list_delete_head(&lio->glist[q_idx]); 3218 spin_unlock(&lio->glist_lock[q_idx]); 3219 3220 if (!g) { 3221 netif_info(lio, tx_err, lio->netdev, 3222 "Transmit scatter gather: glist null!\n"); 3223 goto lio_xmit_failed; 3224 } 3225 3226 cmdsetup.s.gather = 1; 3227 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 3228 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 3229 3230 memset(g->sg, 0, g->sg_size); 3231 3232 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 3233 skb->data, 3234 (skb->len - skb->data_len), 3235 DMA_TO_DEVICE); 3236 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 3237 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 3238 __func__); 3239 return NETDEV_TX_BUSY; 3240 } 3241 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 3242 3243 frags = skb_shinfo(skb)->nr_frags; 3244 i = 1; 3245 while 
(frags--) { 3246 frag = &skb_shinfo(skb)->frags[i - 1]; 3247 3248 g->sg[(i >> 2)].ptr[(i & 3)] = 3249 dma_map_page(&oct->pci_dev->dev, 3250 frag->page.p, 3251 frag->page_offset, 3252 frag->size, 3253 DMA_TO_DEVICE); 3254 3255 if (dma_mapping_error(&oct->pci_dev->dev, 3256 g->sg[i >> 2].ptr[i & 3])) { 3257 dma_unmap_single(&oct->pci_dev->dev, 3258 g->sg[0].ptr[0], 3259 skb->len - skb->data_len, 3260 DMA_TO_DEVICE); 3261 for (j = 1; j < i; j++) { 3262 frag = &skb_shinfo(skb)->frags[j - 1]; 3263 dma_unmap_page(&oct->pci_dev->dev, 3264 g->sg[j >> 2].ptr[j & 3], 3265 frag->size, 3266 DMA_TO_DEVICE); 3267 } 3268 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 3269 __func__); 3270 return NETDEV_TX_BUSY; 3271 } 3272 3273 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 3274 i++; 3275 } 3276 3277 dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr, 3278 g->sg_size, DMA_TO_DEVICE); 3279 dptr = g->sg_dma_ptr; 3280 3281 if (OCTEON_CN23XX_PF(oct)) 3282 ndata.cmd.cmd3.dptr = dptr; 3283 else 3284 ndata.cmd.cmd2.dptr = dptr; 3285 finfo->dptr = dptr; 3286 finfo->g = g; 3287 3288 ndata.reqtype = REQTYPE_NORESP_NET_SG; 3289 } 3290 3291 if (OCTEON_CN23XX_PF(oct)) { 3292 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 3293 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 3294 } else { 3295 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 3296 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 3297 } 3298 3299 if (skb_shinfo(skb)->gso_size) { 3300 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 3301 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 3302 stats->tx_gso++; 3303 } 3304 3305 /* HW insert VLAN tag */ 3306 if (skb_vlan_tag_present(skb)) { 3307 irh->priority = skb_vlan_tag_get(skb) >> 13; 3308 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 3309 } 3310 3311 if (unlikely(cmdsetup.s.timestamp)) 3312 status = send_nic_timestamp_pkt(oct, &ndata, finfo); 3313 else 3314 status = octnet_send_nic_data_pkt(oct, &ndata); 3315 if (status == IQ_SEND_FAILED) 3316 goto lio_xmit_failed; 3317 3318 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 3319 3320 if (status == IQ_SEND_STOP) 3321 stop_q(lio->netdev, q_idx); 3322 3323 netif_trans_update(netdev); 3324 3325 if (skb_shinfo(skb)->gso_size) 3326 stats->tx_done += skb_shinfo(skb)->gso_segs; 3327 else 3328 stats->tx_done++; 3329 stats->tx_tot_bytes += skb->len; 3330 3331 return NETDEV_TX_OK; 3332 3333 lio_xmit_failed: 3334 stats->tx_dropped++; 3335 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 3336 iq_no, stats->tx_dropped); 3337 if (dptr) 3338 dma_unmap_single(&oct->pci_dev->dev, dptr, 3339 ndata.datasize, DMA_TO_DEVICE); 3340 tx_buffer_free(skb); 3341 return NETDEV_TX_OK; 3342 } 3343 3344 /** \brief Network device Tx timeout 3345 * @param netdev pointer to network device 3346 */ 3347 static void liquidio_tx_timeout(struct net_device *netdev) 3348 { 3349 struct lio *lio; 3350 3351 lio = GET_LIO(netdev); 3352 3353 netif_info(lio, tx_err, lio->netdev, 3354 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 3355 netdev->stats.tx_dropped); 3356 netif_trans_update(netdev); 3357 txqs_wake(netdev); 3358 } 3359 3360 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 3361 __be16 proto __attribute__((unused)), 3362 u16 vid) 3363 { 3364 struct lio *lio = GET_LIO(netdev); 3365 struct octeon_device *oct = lio->oct_dev; 3366 struct octnic_ctrl_pkt nctrl; 3367 int ret = 0; 3368 3369 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3370 3371 nctrl.ncmd.u64 = 0; 3372 
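	/* Ask the firmware to add a VLAN filter entry for this VID; the
	 * request is sent as a control packet on the interface's first tx
	 * queue with the completion callback set below.
	 */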
nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 3373 nctrl.ncmd.s.param1 = vid; 3374 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3375 nctrl.wait_time = 100; 3376 nctrl.netpndev = (u64)netdev; 3377 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 3378 3379 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 3380 if (ret < 0) { 3381 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 3382 ret); 3383 } 3384 3385 return ret; 3386 } 3387 3388 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 3389 __be16 proto __attribute__((unused)), 3390 u16 vid) 3391 { 3392 struct lio *lio = GET_LIO(netdev); 3393 struct octeon_device *oct = lio->oct_dev; 3394 struct octnic_ctrl_pkt nctrl; 3395 int ret = 0; 3396 3397 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3398 3399 nctrl.ncmd.u64 = 0; 3400 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 3401 nctrl.ncmd.s.param1 = vid; 3402 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3403 nctrl.wait_time = 100; 3404 nctrl.netpndev = (u64)netdev; 3405 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 3406 3407 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 3408 if (ret < 0) { 3409 dev_err(&oct->pci_dev->dev, "Kill VLAN filter failed in core (ret: 0x%x)\n", 3410 ret); 3411 } 3412 return ret; 3413 } 3414 3415 /** Sending command to enable/disable RX checksum offload 3416 * @param netdev pointer to network device 3417 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 3418 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/ 3419 * OCTNET_CMD_RXCSUM_DISABLE 3420 * @returns SUCCESS or FAILURE 3421 */ 3422 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 3423 u8 rx_cmd) 3424 { 3425 struct lio *lio = GET_LIO(netdev); 3426 struct octeon_device *oct = lio->oct_dev; 3427 struct octnic_ctrl_pkt nctrl; 3428 int ret = 0; 3429 3430 nctrl.ncmd.u64 = 0; 3431 nctrl.ncmd.s.cmd = command; 3432 nctrl.ncmd.s.param1 = rx_cmd; 3433 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3434 nctrl.wait_time = 100; 3435 nctrl.netpndev = (u64)netdev; 3436 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 3437 3438 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 3439 if (ret < 0) { 3440 dev_err(&oct->pci_dev->dev, 3441 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 3442 ret); 3443 } 3444 return ret; 3445 } 3446 3447 /** Sending command to add/delete VxLAN UDP port to firmware 3448 * @param netdev pointer to network device 3449 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 3450 * @param vxlan_port VxLAN port to be added or deleted 3451 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 3452 * OCTNET_CMD_VXLAN_PORT_DEL 3453 * @returns SUCCESS or FAILURE 3454 */ 3455 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 3456 u16 vxlan_port, u8 vxlan_cmd_bit) 3457 { 3458 struct lio *lio = GET_LIO(netdev); 3459 struct octeon_device *oct = lio->oct_dev; 3460 struct octnic_ctrl_pkt nctrl; 3461 int ret = 0; 3462 3463 nctrl.ncmd.u64 = 0; 3464 nctrl.ncmd.s.cmd = command; 3465 nctrl.ncmd.s.more = vxlan_cmd_bit; 3466 nctrl.ncmd.s.param1 = vxlan_port; 3467 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3468 nctrl.wait_time = 100; 3469 nctrl.netpndev = (u64)netdev; 3470 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 3471 3472 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 3473 if (ret < 0) { 3474 dev_err(&oct->pci_dev->dev, 3475 "VxLAN port add/delete failed in core (ret:0x%x)\n", 3476 ret); 3477 } 3478 return ret; 3479 } 3480 3481 /** \brief Net device fix features 3482 * @param netdev pointer to network device 3483 * 
@param request features requested 3484 * @returns updated features list 3485 */ 3486 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 3487 netdev_features_t request) 3488 { 3489 struct lio *lio = netdev_priv(netdev); 3490 3491 if ((request & NETIF_F_RXCSUM) && 3492 !(lio->dev_capability & NETIF_F_RXCSUM)) 3493 request &= ~NETIF_F_RXCSUM; 3494 3495 if ((request & NETIF_F_HW_CSUM) && 3496 !(lio->dev_capability & NETIF_F_HW_CSUM)) 3497 request &= ~NETIF_F_HW_CSUM; 3498 3499 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 3500 request &= ~NETIF_F_TSO; 3501 3502 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 3503 request &= ~NETIF_F_TSO6; 3504 3505 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 3506 request &= ~NETIF_F_LRO; 3507 3508 /*Disable LRO if RXCSUM is off */ 3509 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 3510 (lio->dev_capability & NETIF_F_LRO)) 3511 request &= ~NETIF_F_LRO; 3512 3513 return request; 3514 } 3515 3516 /** \brief Net device set features 3517 * @param netdev pointer to network device 3518 * @param features features to enable/disable 3519 */ 3520 static int liquidio_set_features(struct net_device *netdev, 3521 netdev_features_t features) 3522 { 3523 struct lio *lio = netdev_priv(netdev); 3524 3525 if (!((netdev->features ^ features) & NETIF_F_LRO)) 3526 return 0; 3527 3528 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 3529 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3530 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3531 else if (!(features & NETIF_F_LRO) && 3532 (lio->dev_capability & NETIF_F_LRO)) 3533 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 3534 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3535 3536 /* Sending command to firmware to enable/disable RX checksum 3537 * offload settings using ethtool 3538 */ 3539 if (!(netdev->features & NETIF_F_RXCSUM) && 3540 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 3541 (features & NETIF_F_RXCSUM)) 3542 liquidio_set_rxcsum_command(netdev, 3543 OCTNET_CMD_TNL_RX_CSUM_CTL, 3544 OCTNET_CMD_RXCSUM_ENABLE); 3545 else if ((netdev->features & NETIF_F_RXCSUM) && 3546 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 3547 !(features & NETIF_F_RXCSUM)) 3548 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3549 OCTNET_CMD_RXCSUM_DISABLE); 3550 3551 return 0; 3552 } 3553 3554 static void liquidio_add_vxlan_port(struct net_device *netdev, 3555 struct udp_tunnel_info *ti) 3556 { 3557 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3558 return; 3559 3560 liquidio_vxlan_port_command(netdev, 3561 OCTNET_CMD_VXLAN_PORT_CONFIG, 3562 htons(ti->port), 3563 OCTNET_CMD_VXLAN_PORT_ADD); 3564 } 3565 3566 static void liquidio_del_vxlan_port(struct net_device *netdev, 3567 struct udp_tunnel_info *ti) 3568 { 3569 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3570 return; 3571 3572 liquidio_vxlan_port_command(netdev, 3573 OCTNET_CMD_VXLAN_PORT_CONFIG, 3574 htons(ti->port), 3575 OCTNET_CMD_VXLAN_PORT_DEL); 3576 } 3577 3578 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 3579 u8 *mac, bool is_admin_assigned) 3580 { 3581 struct lio *lio = GET_LIO(netdev); 3582 struct octeon_device *oct = lio->oct_dev; 3583 struct octnic_ctrl_pkt nctrl; 3584 3585 if (!is_valid_ether_addr(mac)) 3586 return -EINVAL; 3587 3588 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 3589 return -EINVAL; 3590 3591 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3592 3593 nctrl.ncmd.u64 = 0; 3594 nctrl.ncmd.s.cmd = 
OCTNET_CMD_CHANGE_MACADDR; 3595 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3596 nctrl.ncmd.s.param1 = vfidx + 1; 3597 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0); 3598 nctrl.ncmd.s.more = 1; 3599 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3600 nctrl.cb_fn = 0; 3601 nctrl.wait_time = LIO_CMD_WAIT_TM; 3602 3603 nctrl.udd[0] = 0; 3604 /* The MAC Address is presented in network byte order. */ 3605 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 3606 3607 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 3608 3609 octnet_send_nic_ctrl_pkt(oct, &nctrl); 3610 3611 return 0; 3612 } 3613 3614 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 3615 { 3616 struct lio *lio = GET_LIO(netdev); 3617 struct octeon_device *oct = lio->oct_dev; 3618 int retval; 3619 3620 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 3621 if (!retval) 3622 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 3623 3624 return retval; 3625 } 3626 3627 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 3628 u16 vlan, u8 qos, __be16 vlan_proto) 3629 { 3630 struct lio *lio = GET_LIO(netdev); 3631 struct octeon_device *oct = lio->oct_dev; 3632 struct octnic_ctrl_pkt nctrl; 3633 u16 vlantci; 3634 3635 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3636 return -EINVAL; 3637 3638 if (vlan_proto != htons(ETH_P_8021Q)) 3639 return -EPROTONOSUPPORT; 3640 3641 if (vlan >= VLAN_N_VID || qos > 7) 3642 return -EINVAL; 3643 3644 if (vlan) 3645 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 3646 else 3647 vlantci = 0; 3648 3649 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 3650 return 0; 3651 3652 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3653 3654 if (vlan) 3655 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 3656 else 3657 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 3658 3659 nctrl.ncmd.s.param1 = vlantci; 3660 nctrl.ncmd.s.param2 = 3661 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 3662 nctrl.ncmd.s.more = 0; 3663 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3664 nctrl.cb_fn = 0; 3665 nctrl.wait_time = LIO_CMD_WAIT_TM; 3666 3667 octnet_send_nic_ctrl_pkt(oct, &nctrl); 3668 3669 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 3670 3671 return 0; 3672 } 3673 3674 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 3675 struct ifla_vf_info *ivi) 3676 { 3677 struct lio *lio = GET_LIO(netdev); 3678 struct octeon_device *oct = lio->oct_dev; 3679 u8 *macaddr; 3680 3681 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3682 return -EINVAL; 3683 3684 ivi->vf = vfidx; 3685 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 3686 ether_addr_copy(&ivi->mac[0], macaddr); 3687 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 3688 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 3689 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 3690 return 0; 3691 } 3692 3693 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3694 int linkstate) 3695 { 3696 struct lio *lio = GET_LIO(netdev); 3697 struct octeon_device *oct = lio->oct_dev; 3698 struct octnic_ctrl_pkt nctrl; 3699 3700 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3701 return -EINVAL; 3702 3703 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3704 return 0; 3705 3706 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3707 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3708 nctrl.ncmd.s.param1 = 3709 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3710 
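	/* param2 carries the requested VF link state; the new value is
	 * cached in sriov_info once the command has been sent.
	 */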
nctrl.ncmd.s.param2 = linkstate; 3711 nctrl.ncmd.s.more = 0; 3712 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3713 nctrl.cb_fn = 0; 3714 nctrl.wait_time = LIO_CMD_WAIT_TM; 3715 3716 octnet_send_nic_ctrl_pkt(oct, &nctrl); 3717 3718 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 3719 3720 return 0; 3721 } 3722 3723 static const struct net_device_ops lionetdevops = { 3724 .ndo_open = liquidio_open, 3725 .ndo_stop = liquidio_stop, 3726 .ndo_start_xmit = liquidio_xmit, 3727 .ndo_get_stats = liquidio_get_stats, 3728 .ndo_set_mac_address = liquidio_set_mac, 3729 .ndo_set_rx_mode = liquidio_set_mcast_list, 3730 .ndo_tx_timeout = liquidio_tx_timeout, 3731 3732 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3733 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3734 .ndo_change_mtu = liquidio_change_mtu, 3735 .ndo_do_ioctl = liquidio_ioctl, 3736 .ndo_fix_features = liquidio_fix_features, 3737 .ndo_set_features = liquidio_set_features, 3738 .ndo_udp_tunnel_add = liquidio_add_vxlan_port, 3739 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 3740 .ndo_set_vf_mac = liquidio_set_vf_mac, 3741 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3742 .ndo_get_vf_config = liquidio_get_vf_config, 3743 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3744 .ndo_select_queue = select_q 3745 }; 3746 3747 /** \brief Entry point for the liquidio module 3748 */ 3749 static int __init liquidio_init(void) 3750 { 3751 int i; 3752 struct handshake *hs; 3753 3754 init_completion(&first_stage); 3755 3756 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3757 3758 if (liquidio_init_pci()) 3759 return -EINVAL; 3760 3761 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3762 3763 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3764 hs = &handshake[i]; 3765 if (hs->pci_dev) { 3766 wait_for_completion(&hs->init); 3767 if (!hs->init_ok) { 3768 /* init handshake failed */ 3769 dev_err(&hs->pci_dev->dev, 3770 "Failed to init device\n"); 3771 liquidio_deinit_pci(); 3772 return -EIO; 3773 } 3774 } 3775 } 3776 3777 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3778 hs = &handshake[i]; 3779 if (hs->pci_dev) { 3780 wait_for_completion_timeout(&hs->started, 3781 msecs_to_jiffies(30000)); 3782 if (!hs->started_ok) { 3783 /* starter handshake failed */ 3784 dev_err(&hs->pci_dev->dev, 3785 "Firmware failed to start\n"); 3786 liquidio_deinit_pci(); 3787 return -EIO; 3788 } 3789 } 3790 } 3791 3792 return 0; 3793 } 3794 3795 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3796 { 3797 struct octeon_device *oct = (struct octeon_device *)buf; 3798 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3799 int gmxport = 0; 3800 union oct_link_status *ls; 3801 int i; 3802 3803 if (recv_pkt->buffer_size[0] != sizeof(*ls)) { 3804 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3805 recv_pkt->buffer_size[0], 3806 recv_pkt->rh.r_nic_info.gmxport); 3807 goto nic_info_err; 3808 } 3809 3810 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3811 ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); 3812 3813 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3814 for (i = 0; i < oct->ifcount; i++) { 3815 if (oct->props[i].gmxport == gmxport) { 3816 update_link_status(oct->props[i].netdev, ls); 3817 break; 3818 } 3819 } 3820 3821 nic_info_err: 3822 for (i = 0; i < recv_pkt->buffer_count; i++) 3823 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3824 octeon_free_recv_info(recv_info); 3825 return 0; 3826 } 3827 3828 /** 3829 * \brief Setup network interfaces 3830 * @param octeon_dev 
octeon device 3831 * 3832 * Called during init time for each device. It assumes the NIC 3833 * is already up and running. The link information for each 3834 * interface is passed in link_info. 3835 */ 3836 static int setup_nic_devices(struct octeon_device *octeon_dev) 3837 { 3838 struct lio *lio = NULL; 3839 struct net_device *netdev; 3840 u8 mac[6], i, j; 3841 struct octeon_soft_command *sc; 3842 struct liquidio_if_cfg_context *ctx; 3843 struct liquidio_if_cfg_resp *resp; 3844 struct octdev_props *props; 3845 int retval, num_iqueues, num_oqueues; 3846 union oct_nic_if_cfg if_cfg; 3847 unsigned int base_queue; 3848 unsigned int gmx_port_id; 3849 u32 resp_size, ctx_size, data_size; 3850 u32 ifidx_or_pfnum; 3851 struct lio_version *vdata; 3852 3853 /* This is to handle link status changes */ 3854 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3855 OPCODE_NIC_INFO, 3856 lio_nic_info, octeon_dev); 3857 3858 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3859 * They are handled directly. 3860 */ 3861 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3862 free_netbuf); 3863 3864 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3865 free_netsgbuf); 3866 3867 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3868 free_netsgbuf_with_resp); 3869 3870 for (i = 0; i < octeon_dev->ifcount; i++) { 3871 resp_size = sizeof(struct liquidio_if_cfg_resp); 3872 ctx_size = sizeof(struct liquidio_if_cfg_context); 3873 data_size = sizeof(struct lio_version); 3874 sc = (struct octeon_soft_command *) 3875 octeon_alloc_soft_command(octeon_dev, data_size, 3876 resp_size, ctx_size); 3877 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3878 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 3879 vdata = (struct lio_version *)sc->virtdptr; 3880 3881 *((u64 *)vdata) = 0; 3882 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 3883 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 3884 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 3885 3886 if (OCTEON_CN23XX_PF(octeon_dev)) { 3887 num_iqueues = octeon_dev->sriov_info.num_pf_rings; 3888 num_oqueues = octeon_dev->sriov_info.num_pf_rings; 3889 base_queue = octeon_dev->sriov_info.pf_srn; 3890 3891 gmx_port_id = octeon_dev->pf_num; 3892 ifidx_or_pfnum = octeon_dev->pf_num; 3893 } else { 3894 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( 3895 octeon_get_conf(octeon_dev), i); 3896 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( 3897 octeon_get_conf(octeon_dev), i); 3898 base_queue = CFG_GET_BASE_QUE_NIC_IF( 3899 octeon_get_conf(octeon_dev), i); 3900 gmx_port_id = CFG_GET_GMXID_NIC_IF( 3901 octeon_get_conf(octeon_dev), i); 3902 ifidx_or_pfnum = i; 3903 } 3904 3905 dev_dbg(&octeon_dev->pci_dev->dev, 3906 "requesting config for interface %d, iqs %d, oqs %d\n", 3907 ifidx_or_pfnum, num_iqueues, num_oqueues); 3908 WRITE_ONCE(ctx->cond, 0); 3909 ctx->octeon_id = lio_get_device_id(octeon_dev); 3910 init_waitqueue_head(&ctx->wc); 3911 3912 if_cfg.u64 = 0; 3913 if_cfg.s.num_iqueues = num_iqueues; 3914 if_cfg.s.num_oqueues = num_oqueues; 3915 if_cfg.s.base_queue = base_queue; 3916 if_cfg.s.gmx_port_id = gmx_port_id; 3917 3918 sc->iq_no = 0; 3919 3920 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3921 OPCODE_NIC_IF_CFG, 0, 3922 if_cfg.u64, 0); 3923 3924 sc->callback = if_cfg_callback; 3925 sc->callback_arg = sc; 3926 sc->wait_time = 3000; 3927 3928 retval = octeon_send_soft_command(octeon_dev, sc); 3929 if (retval == IQ_SEND_FAILED) { 3930 dev_err(&octeon_dev->pci_dev->dev, 3931 "iq/oq 
config failed status: %x\n", 3932 retval); 3933 /* Soft instr is freed by driver in case of failure. */ 3934 goto setup_nic_dev_fail; 3935 } 3936 3937 /* Sleep on a wait queue till the cond flag indicates that the 3938 * response arrived or timed-out. 3939 */ 3940 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 3941 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); 3942 goto setup_nic_wait_intr; 3943 } 3944 3945 retval = resp->status; 3946 if (retval) { 3947 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3948 goto setup_nic_dev_fail; 3949 } 3950 3951 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3952 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3953 3954 num_iqueues = hweight64(resp->cfg_info.iqmask); 3955 num_oqueues = hweight64(resp->cfg_info.oqmask); 3956 3957 if (!(num_iqueues) || !(num_oqueues)) { 3958 dev_err(&octeon_dev->pci_dev->dev, 3959 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3960 resp->cfg_info.iqmask, 3961 resp->cfg_info.oqmask); 3962 goto setup_nic_dev_fail; 3963 } 3964 dev_dbg(&octeon_dev->pci_dev->dev, 3965 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 3966 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3967 num_iqueues, num_oqueues); 3968 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 3969 3970 if (!netdev) { 3971 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3972 goto setup_nic_dev_fail; 3973 } 3974 3975 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 3976 3977 /* Associate the routines that will handle different 3978 * netdev tasks. 3979 */ 3980 netdev->netdev_ops = &lionetdevops; 3981 3982 lio = GET_LIO(netdev); 3983 3984 memset(lio, 0, sizeof(struct lio)); 3985 3986 lio->ifidx = ifidx_or_pfnum; 3987 3988 props = &octeon_dev->props[i]; 3989 props->gmxport = resp->cfg_info.linfo.gmxport; 3990 props->netdev = netdev; 3991 3992 lio->linfo.num_rxpciq = num_oqueues; 3993 lio->linfo.num_txpciq = num_iqueues; 3994 for (j = 0; j < num_oqueues; j++) { 3995 lio->linfo.rxpciq[j].u64 = 3996 resp->cfg_info.linfo.rxpciq[j].u64; 3997 } 3998 for (j = 0; j < num_iqueues; j++) { 3999 lio->linfo.txpciq[j].u64 = 4000 resp->cfg_info.linfo.txpciq[j].u64; 4001 } 4002 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 4003 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 4004 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 4005 4006 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 4007 4008 if (OCTEON_CN23XX_PF(octeon_dev) || 4009 OCTEON_CN6XXX(octeon_dev)) { 4010 lio->dev_capability = NETIF_F_HIGHDMA 4011 | NETIF_F_IP_CSUM 4012 | NETIF_F_IPV6_CSUM 4013 | NETIF_F_SG | NETIF_F_RXCSUM 4014 | NETIF_F_GRO 4015 | NETIF_F_TSO | NETIF_F_TSO6 4016 | NETIF_F_LRO; 4017 } 4018 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 4019 4020 /* Copy of transmit encapsulation capabilities: 4021 * TSO, TSO6, Checksums for this device 4022 */ 4023 lio->enc_dev_capability = NETIF_F_IP_CSUM 4024 | NETIF_F_IPV6_CSUM 4025 | NETIF_F_GSO_UDP_TUNNEL 4026 | NETIF_F_HW_CSUM | NETIF_F_SG 4027 | NETIF_F_RXCSUM 4028 | NETIF_F_TSO | NETIF_F_TSO6 4029 | NETIF_F_LRO; 4030 4031 netdev->hw_enc_features = (lio->enc_dev_capability & 4032 ~NETIF_F_LRO); 4033 4034 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; 4035 4036 netdev->vlan_features = lio->dev_capability; 4037 /* Add any unchangeable hw features */ 4038 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 4039 NETIF_F_HW_VLAN_CTAG_RX | 4040 NETIF_F_HW_VLAN_CTAG_TX; 4041 4042 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 4043 4044 
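		/* Expose the full capability set through hw_features so that
		 * optional offloads such as LRO can be enabled via ethtool
		 * later, even though LRO is left out of the default features
		 * above.
		 */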
		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
		netdev->hw_features = netdev->hw_features &
				      ~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
			u8 vfmac[ETH_ALEN];

			random_ether_addr(&vfmac[0]);
			if (__liquidio_set_vf_mac(netdev, j,
						  &vfmac[0], false)) {
				dev_err(&octeon_dev->pci_dev->dev,
					"Error setting VF%d MAC address\n",
					j);
				goto setup_nic_dev_fail;
			}
		}

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (setup_io_queues(octeon_dev, i)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_fail;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Send a command to firmware to enable Rx checksum offload
		 * by default at the time this interface is set up.
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

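		/* Everything needed from the response buffer has been copied
		 * into lio/netdev above, so the soft command that owns it can
		 * be returned to the pool before the next interface.
		 */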
		octeon_free_soft_command(octeon_dev, sc);
	}

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

setup_nic_wait_intr:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}

#ifdef CONFIG_PCI_IOV
static int octeon_enable_sriov(struct octeon_device *oct)
{
	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
	struct pci_dev *vfdev;
	int err;
	u32 u;

	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
		err = pci_enable_sriov(oct->pci_dev,
				       oct->sriov_info.num_vfs_alloced);
		if (err) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Failed to enable PCI sriov: %d\n",
				err);
			oct->sriov_info.num_vfs_alloced = 0;
			return err;
		}
		oct->sriov_info.sriov_enabled = 1;

		/* init lookup table that maps DPI ring number to VF pci_dev
		 * struct pointer
		 */
		u = 0;
		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				       OCTEON_CN23XX_VF_VID, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn &&
			    (vfdev->physfn == oct->pci_dev)) {
				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
					vfdev;
				u += oct->sriov_info.rings_per_vf;
			}
			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
					       OCTEON_CN23XX_VF_VID, vfdev);
		}
	}

	return num_vfs_alloced;
}

static int lio_pci_sriov_disable(struct octeon_device *oct)
{
	int u;

	if (pci_vfs_assigned(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
		return -EPERM;
	}

	pci_disable_sriov(oct->pci_dev);

	u = 0;
	while (u < MAX_POSSIBLE_VFS) {
		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
		u += oct->sriov_info.rings_per_vf;
	}

	oct->sriov_info.num_vfs_alloced = 0;
	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
		 oct->pf_num);

	return 0;
}

static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	struct octeon_device *oct = pci_get_drvdata(dev);
	int ret = 0;

	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
	    (oct->sriov_info.sriov_enabled)) {
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
			 oct->pf_num, num_vfs);
		return 0;
	}

	if (!num_vfs) {
		ret = lio_pci_sriov_disable(oct);
	} else if (num_vfs > oct->sriov_info.max_vfs) {
		dev_err(&oct->pci_dev->dev,
			"OCTEON: Max allowed VFs:%d user requested:%d",
			oct->sriov_info.max_vfs, num_vfs);
		ret = -EPERM;
	} else {
		oct->sriov_info.num_vfs_alloced = num_vfs;
		ret = octeon_enable_sriov(oct);
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
			 oct->pf_num, num_vfs);
	}

	return ret;
}
#endif

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	struct oct_intrmod_cfg *intrmod_cfg;
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
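	/* ifcount, set just below, is the loop bound that setup_nic_devices()
	 * iterates over; it is cleared again on the octnet_init_failure path.
	 */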
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	liquidio_ptp_init(oct);

	/* Initialize interrupt moderation params */
	intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
	intrmod_cfg->rx_enable = 1;
	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	intrmod_cfg->tx_enable = 1;
	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param octptr work struct work_struct
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	int fw_loaded = 0;
	char bootcmd[] = "\n";
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;
	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (!cn23xx_fw_loaded(octeon_dev)) {
			fw_loaded = 0;
			/* Do a soft reset of the Octeon device. */
			if (octeon_dev->fn_list.soft_reset(octeon_dev))
				return 1;
			/* things might have changed */
			if (!cn23xx_fw_loaded(octeon_dev))
				fw_loaded = 0;
			else
				fw_loaded = 1;
		} else {
			fw_loaded = 1;
		}
	} else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
		return 1;
	}

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
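	/* Once the dispatch list exists, two handlers are registered below:
	 * octeon_core_drv_init() runs when the core application reports itself
	 * active, and octeon_recv_vf_drv_notice() handles VF driver
	 * load/unload/MAC-change notices.
	 */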
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);
	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool
	 */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Set up the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

		if (octeon_allocate_ioq_vector(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	} else {
		/* The input and output queue registers were set up earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing. */
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Set up the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset. */
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		ret = octeon_add_console(octeon_dev, 0);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}
		/* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
		 * loaded
		 */
		if (OCTEON_CN23XX_PF(octeon_dev))
			octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
					   2ULL);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
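	/* Writing a droq's max_count into its pkts_credit_reg tells the
	 * hardware that this many receive buffers are available, after which
	 * packet delivery can begin.
	 */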
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */
	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);
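
/* Usage sketch (hypothetical values): the ddr_timeout and debug module
 * parameters used in octeon_device_init() and setup_nic_devices() above can
 * be set at load time, e.g.
 *
 *	modprobe liquidio ddr_timeout=20000 debug=0x27
 *
 * If ddr_timeout is left at zero, octeon_device_init() keeps polling until it
 * is changed to a non-zero value, which can be done at runtime through
 * /sys/module/liquidio/parameters/ddr_timeout.
 */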