1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2015 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more 17 * details. 18 * 19 * This file may also be available under a different license from Cavium. 20 * Contact Cavium, Inc. for more information 21 **********************************************************************/ 22 #include <linux/version.h> 23 #include <linux/module.h> 24 #include <linux/crc32.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/pci.h> 27 #include <linux/pci_ids.h> 28 #include <linux/ip.h> 29 #include <net/ip.h> 30 #include <linux/ipv6.h> 31 #include <linux/net_tstamp.h> 32 #include <linux/if_vlan.h> 33 #include <linux/firmware.h> 34 #include <linux/ethtool.h> 35 #include <linux/ptp_clock_kernel.h> 36 #include <linux/types.h> 37 #include <linux/list.h> 38 #include <linux/workqueue.h> 39 #include <linux/interrupt.h> 40 #include "octeon_config.h" 41 #include "liquidio_common.h" 42 #include "octeon_droq.h" 43 #include "octeon_iq.h" 44 #include "response_manager.h" 45 #include "octeon_device.h" 46 #include "octeon_nic.h" 47 #include "octeon_main.h" 48 #include "octeon_network.h" 49 #include "cn66xx_regs.h" 50 #include "cn66xx_device.h" 51 #include "cn68xx_regs.h" 52 #include "cn68xx_device.h" 53 #include "liquidio_image.h" 54 55 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 56 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); 57 MODULE_LICENSE("GPL"); 58 MODULE_VERSION(LIQUIDIO_VERSION); 59 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX); 60 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX); 61 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX); 62 63 static int ddr_timeout = 10000; 64 module_param(ddr_timeout, int, 0644); 65 MODULE_PARM_DESC(ddr_timeout, 66 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); 67 68 static u32 console_bitmask; 69 module_param(console_bitmask, int, 0644); 70 MODULE_PARM_DESC(console_bitmask, 71 "Bitmask indicating which consoles have debug output redirected to syslog."); 72 73 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 74 75 static int debug = -1; 76 module_param(debug, int, 0644); 77 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 78 79 static char fw_type[LIO_MAX_FW_TYPE_LEN]; 80 module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); 81 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. 
Default \"nic\""); 82 83 static int conf_type; 84 module_param(conf_type, int, 0); 85 MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs"); 86 87 /* Bit mask values for lio->ifstate */ 88 #define LIO_IFSTATE_DROQ_OPS 0x01 89 #define LIO_IFSTATE_REGISTERED 0x02 90 #define LIO_IFSTATE_RUNNING 0x04 91 #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 92 93 /* Polling interval for determining when NIC application is alive */ 94 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 95 96 /* runtime link query interval */ 97 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 98 99 struct liquidio_if_cfg_context { 100 int octeon_id; 101 102 wait_queue_head_t wc; 103 104 int cond; 105 }; 106 107 struct liquidio_if_cfg_resp { 108 u64 rh; 109 struct liquidio_if_cfg_info cfg_info; 110 u64 status; 111 }; 112 113 struct oct_link_status_resp { 114 u64 rh; 115 struct oct_link_info link_info; 116 u64 status; 117 }; 118 119 struct oct_timestamp_resp { 120 u64 rh; 121 u64 timestamp; 122 u64 status; 123 }; 124 125 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) 126 127 union tx_info { 128 u64 u64; 129 struct { 130 #ifdef __BIG_ENDIAN_BITFIELD 131 u16 gso_size; 132 u16 gso_segs; 133 u32 reserved; 134 #else 135 u32 reserved; 136 u16 gso_segs; 137 u16 gso_size; 138 #endif 139 } s; 140 }; 141 142 /** Octeon device properties to be used by the NIC module. 143 * Each octeon device in the system will be represented 144 * by this structure in the NIC module. 145 */ 146 147 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS) 148 149 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 150 #define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE) 151 152 /** Structure of a node in list of gather components maintained by 153 * NIC driver for each network device. 154 */ 155 struct octnic_gather { 156 /** List manipulation. Next and prev pointers. */ 157 struct list_head list; 158 159 /** Size of the gather component at sg in bytes. */ 160 int sg_size; 161 162 /** Number of bytes that sg was adjusted to make it 8B-aligned. */ 163 int adjust; 164 165 /** Gather component that can accommodate max sized fragment list 166 * received from the IP layer. 167 */ 168 struct octeon_sg_entry *sg; 169 }; 170 171 /** This structure is used by NIC driver to store information required 172 * to free the sk_buff when the packet has been fetched by Octeon. 173 * Bytes offset below assume worst-case of a 64-bit system. 174 */ 175 struct octnet_buf_free_info { 176 /** Bytes 1-8. Pointer to network device private structure. */ 177 struct lio *lio; 178 179 /** Bytes 9-16. Pointer to sk_buff. */ 180 struct sk_buff *skb; 181 182 /** Bytes 17-24. Pointer to gather list. */ 183 struct octnic_gather *g; 184 185 /** Bytes 25-32. Physical address of skb->data or gather list. */ 186 u64 dptr; 187 188 /** Bytes 33-47. Piggybacked soft command, if any */ 189 struct octeon_soft_command *sc; 190 }; 191 192 struct handshake { 193 struct completion init; 194 struct completion started; 195 struct pci_dev *pci_dev; 196 int init_ok; 197 int started_ok; 198 }; 199 200 struct octeon_device_priv { 201 /** Tasklet structures for this device. 
*/ 202 struct tasklet_struct droq_tasklet; 203 unsigned long napi_mask; 204 }; 205 206 static int octeon_device_init(struct octeon_device *); 207 static void liquidio_remove(struct pci_dev *pdev); 208 static int liquidio_probe(struct pci_dev *pdev, 209 const struct pci_device_id *ent); 210 211 static struct handshake handshake[MAX_OCTEON_DEVICES]; 212 static struct completion first_stage; 213 214 static void octeon_droq_bh(unsigned long pdev) 215 { 216 int q_no; 217 int reschedule = 0; 218 struct octeon_device *oct = (struct octeon_device *)pdev; 219 struct octeon_device_priv *oct_priv = 220 (struct octeon_device_priv *)oct->priv; 221 222 /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ 223 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { 224 if (!(oct->io_qmask.oq & (1UL << q_no))) 225 continue; 226 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 227 MAX_PACKET_BUDGET); 228 } 229 230 if (reschedule) 231 tasklet_schedule(&oct_priv->droq_tasklet); 232 } 233 234 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 235 { 236 struct octeon_device_priv *oct_priv = 237 (struct octeon_device_priv *)oct->priv; 238 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 239 int i; 240 241 do { 242 pending_pkts = 0; 243 244 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 245 if (!(oct->io_qmask.oq & (1UL << i))) 246 continue; 247 pkt_cnt += octeon_droq_check_hw_for_pkts(oct, 248 oct->droq[i]); 249 } 250 if (pkt_cnt > 0) { 251 pending_pkts += pkt_cnt; 252 tasklet_schedule(&oct_priv->droq_tasklet); 253 } 254 pkt_cnt = 0; 255 schedule_timeout_uninterruptible(1); 256 257 } while (retry-- && pending_pkts); 258 259 return pkt_cnt; 260 } 261 262 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, 263 unsigned int bytes_compl) 264 { 265 struct netdev_queue *netdev_queue = txq; 266 267 netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl); 268 } 269 270 void octeon_update_tx_completion_counters(void *buf, int reqtype, 271 unsigned int *pkts_compl, 272 unsigned int *bytes_compl) 273 { 274 struct octnet_buf_free_info *finfo; 275 struct sk_buff *skb = NULL; 276 struct octeon_soft_command *sc; 277 278 switch (reqtype) { 279 case REQTYPE_NORESP_NET: 280 case REQTYPE_NORESP_NET_SG: 281 finfo = buf; 282 skb = finfo->skb; 283 break; 284 285 case REQTYPE_RESP_NET_SG: 286 case REQTYPE_RESP_NET: 287 sc = buf; 288 skb = sc->callback_arg; 289 break; 290 291 default: 292 return; 293 } 294 295 (*pkts_compl)++; 296 *bytes_compl += skb->len; 297 } 298 299 void octeon_report_sent_bytes_to_bql(void *buf, int reqtype) 300 { 301 struct octnet_buf_free_info *finfo; 302 struct sk_buff *skb; 303 struct octeon_soft_command *sc; 304 struct netdev_queue *txq; 305 306 switch (reqtype) { 307 case REQTYPE_NORESP_NET: 308 case REQTYPE_NORESP_NET_SG: 309 finfo = buf; 310 skb = finfo->skb; 311 break; 312 313 case REQTYPE_RESP_NET_SG: 314 case REQTYPE_RESP_NET: 315 sc = buf; 316 skb = sc->callback_arg; 317 break; 318 319 default: 320 return; 321 } 322 323 txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb)); 324 netdev_tx_sent_queue(txq, skb->len); 325 } 326 327 int octeon_console_debug_enabled(u32 console) 328 { 329 return (console_bitmask >> (console)) & 0x1; 330 } 331 332 /** 333 * \brief Forces all IO queues off on a given device 334 * @param oct Pointer to Octeon device 335 */ 336 static void force_io_queues_off(struct octeon_device *oct) 337 { 338 if ((oct->chip_id == OCTEON_CN66XX) || 339 (oct->chip_id == OCTEON_CN68XX)) { 340 /* Reset the Enable bits for Input 
Queues. */ 341 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 342 343 /* Reset the Enable bits for Output Queues. */ 344 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); 345 } 346 } 347 348 /** 349 * \brief wait for all pending requests to complete 350 * @param oct Pointer to Octeon device 351 * 352 * Called during shutdown sequence 353 */ 354 static int wait_for_pending_requests(struct octeon_device *oct) 355 { 356 int i, pcount = 0; 357 358 for (i = 0; i < 100; i++) { 359 pcount = 360 atomic_read(&oct->response_list 361 [OCTEON_ORDERED_SC_LIST].pending_req_count); 362 if (pcount) 363 schedule_timeout_uninterruptible(HZ / 10); 364 else 365 break; 366 } 367 368 if (pcount) 369 return 1; 370 371 return 0; 372 } 373 374 /** 375 * \brief Cause device to go quiet so it can be safely removed/reset/etc 376 * @param oct Pointer to Octeon device 377 */ 378 static inline void pcierror_quiesce_device(struct octeon_device *oct) 379 { 380 int i; 381 382 /* Disable the input and output queues now. No more packets will 383 * arrive from Octeon, but we should wait for all packet processing 384 * to finish. 385 */ 386 force_io_queues_off(oct); 387 388 /* To allow for in-flight requests */ 389 schedule_timeout_uninterruptible(100); 390 391 if (wait_for_pending_requests(oct)) 392 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 393 394 /* Force all requests waiting to be fetched by OCTEON to complete. */ 395 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 396 struct octeon_instr_queue *iq; 397 398 if (!(oct->io_qmask.iq & (1UL << i))) 399 continue; 400 iq = oct->instr_queue[i]; 401 402 if (atomic_read(&iq->instr_pending)) { 403 spin_lock_bh(&iq->lock); 404 iq->fill_cnt = 0; 405 iq->octeon_read_index = iq->host_write_index; 406 iq->stats.instr_processed += 407 atomic_read(&iq->instr_pending); 408 lio_process_iq_request_list(oct, iq); 409 spin_unlock_bh(&iq->lock); 410 } 411 } 412 413 /* Force all pending ordered list requests to time out. */ 414 lio_process_ordered_list(oct, 1); 415 416 /* We do not need to wait for output queue packets to be processed. */ 417 } 418 419 /** 420 * \brief Cleanup PCI AER uncorrectable error status 421 * @param dev Pointer to PCI device 422 */ 423 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 424 { 425 int pos = 0x100; 426 u32 status, mask; 427 428 pr_info("%s :\n", __func__); 429 430 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 431 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 432 if (dev->error_state == pci_channel_io_normal) 433 status &= ~mask; /* Clear corresponding nonfatal bits */ 434 else 435 status &= mask; /* Clear corresponding fatal bits */ 436 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 437 } 438 439 /** 440 * \brief Stop all PCI IO to a given device 441 * @param dev Pointer to Octeon device 442 */ 443 static void stop_pci_io(struct octeon_device *oct) 444 { 445 /* No more instructions will be forwarded. 
*/ 446 atomic_set(&oct->status, OCT_DEV_IN_RESET); 447 448 pci_disable_device(oct->pci_dev); 449 450 /* Disable interrupts */ 451 oct->fn_list.disable_interrupt(oct->chip); 452 453 pcierror_quiesce_device(oct); 454 455 /* Release the interrupt line */ 456 free_irq(oct->pci_dev->irq, oct); 457 458 if (oct->flags & LIO_FLAG_MSI_ENABLED) 459 pci_disable_msi(oct->pci_dev); 460 461 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 462 lio_get_state_string(&oct->status)); 463 464 /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */ 465 /* making it a common function for all OCTEON models */ 466 cleanup_aer_uncorrect_error_status(oct->pci_dev); 467 } 468 469 /** 470 * \brief called when PCI error is detected 471 * @param pdev Pointer to PCI device 472 * @param state The current pci connection state 473 * 474 * This function is called after a PCI bus error affecting 475 * this device has been detected. 476 */ 477 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 478 pci_channel_state_t state) 479 { 480 struct octeon_device *oct = pci_get_drvdata(pdev); 481 482 /* Non-correctable Non-fatal errors */ 483 if (state == pci_channel_io_normal) { 484 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 485 cleanup_aer_uncorrect_error_status(oct->pci_dev); 486 return PCI_ERS_RESULT_CAN_RECOVER; 487 } 488 489 /* Non-correctable Fatal errors */ 490 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 491 stop_pci_io(oct); 492 493 /* Always return a DISCONNECT. There is no support for recovery but only 494 * for a clean shutdown. 495 */ 496 return PCI_ERS_RESULT_DISCONNECT; 497 } 498 499 /** 500 * \brief mmio handler 501 * @param pdev Pointer to PCI device 502 */ 503 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev) 504 { 505 /* We should never hit this since we never ask for a reset for a Fatal 506 * Error. We always return DISCONNECT in io_error above. 507 * But play safe and return RECOVERED for now. 508 */ 509 return PCI_ERS_RESULT_RECOVERED; 510 } 511 512 /** 513 * \brief called after the pci bus has been reset. 514 * @param pdev Pointer to PCI device 515 * 516 * Restart the card from scratch, as if from a cold-boot. Implementation 517 * resembles the first-half of the octeon_resume routine. 518 */ 519 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev) 520 { 521 /* We should never hit this since we never ask for a reset for a Fatal 522 * Error. We always return DISCONNECT in io_error above. 523 * But play safe and return RECOVERED for now. 524 */ 525 return PCI_ERS_RESULT_RECOVERED; 526 } 527 528 /** 529 * \brief called when traffic can start flowing again. 530 * @param pdev Pointer to PCI device 531 * 532 * This callback is called when the error recovery driver tells us that 533 * its OK to resume normal operation. Implementation resembles the 534 * second-half of the octeon_resume routine. 535 */ 536 static void liquidio_pcie_resume(struct pci_dev *pdev) 537 { 538 /* Nothing to be done here. 
*/ 539 } 540 541 #ifdef CONFIG_PM 542 /** 543 * \brief called when suspending 544 * @param pdev Pointer to PCI device 545 * @param state state to suspend to 546 */ 547 static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) 548 { 549 return 0; 550 } 551 552 /** 553 * \brief called when resuming 554 * @param pdev Pointer to PCI device 555 */ 556 static int liquidio_resume(struct pci_dev *pdev) 557 { 558 return 0; 559 } 560 #endif 561 562 /* For PCI-E Advanced Error Recovery (AER) Interface */ 563 static struct pci_error_handlers liquidio_err_handler = { 564 .error_detected = liquidio_pcie_error_detected, 565 .mmio_enabled = liquidio_pcie_mmio_enabled, 566 .slot_reset = liquidio_pcie_slot_reset, 567 .resume = liquidio_pcie_resume, 568 }; 569 570 static const struct pci_device_id liquidio_pci_tbl[] = { 571 { /* 68xx */ 572 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 573 }, 574 { /* 66xx */ 575 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 576 }, 577 { 578 0, 0, 0, 0, 0, 0, 0 579 } 580 }; 581 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); 582 583 static struct pci_driver liquidio_pci_driver = { 584 .name = "LiquidIO", 585 .id_table = liquidio_pci_tbl, 586 .probe = liquidio_probe, 587 .remove = liquidio_remove, 588 .err_handler = &liquidio_err_handler, /* For AER */ 589 590 #ifdef CONFIG_PM 591 .suspend = liquidio_suspend, 592 .resume = liquidio_resume, 593 #endif 594 595 }; 596 597 /** 598 * \brief register PCI driver 599 */ 600 static int liquidio_init_pci(void) 601 { 602 return pci_register_driver(&liquidio_pci_driver); 603 } 604 605 /** 606 * \brief unregister PCI driver 607 */ 608 static void liquidio_deinit_pci(void) 609 { 610 pci_unregister_driver(&liquidio_pci_driver); 611 } 612 613 /** 614 * \brief check interface state 615 * @param lio per-network private data 616 * @param state_flag flag state to check 617 */ 618 static inline int ifstate_check(struct lio *lio, int state_flag) 619 { 620 return atomic_read(&lio->ifstate) & state_flag; 621 } 622 623 /** 624 * \brief set interface state 625 * @param lio per-network private data 626 * @param state_flag flag state to set 627 */ 628 static inline void ifstate_set(struct lio *lio, int state_flag) 629 { 630 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); 631 } 632 633 /** 634 * \brief clear interface state 635 * @param lio per-network private data 636 * @param state_flag flag state to clear 637 */ 638 static inline void ifstate_reset(struct lio *lio, int state_flag) 639 { 640 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); 641 } 642 643 /** 644 * \brief Stop Tx queues 645 * @param netdev network device 646 */ 647 static inline void txqs_stop(struct net_device *netdev) 648 { 649 if (netif_is_multiqueue(netdev)) { 650 int i; 651 652 for (i = 0; i < netdev->num_tx_queues; i++) 653 netif_stop_subqueue(netdev, i); 654 } else { 655 netif_stop_queue(netdev); 656 } 657 } 658 659 /** 660 * \brief Start Tx queues 661 * @param netdev network device 662 */ 663 static inline void txqs_start(struct net_device *netdev) 664 { 665 if (netif_is_multiqueue(netdev)) { 666 int i; 667 668 for (i = 0; i < netdev->num_tx_queues; i++) 669 netif_start_subqueue(netdev, i); 670 } else { 671 netif_start_queue(netdev); 672 } 673 } 674 675 /** 676 * \brief Wake Tx queues 677 * @param netdev network device 678 */ 679 static inline void txqs_wake(struct net_device *netdev) 680 { 681 if (netif_is_multiqueue(netdev)) { 682 int i; 683 684 for (i = 0; i < netdev->num_tx_queues; i++) 685 
netif_wake_subqueue(netdev, i); 686 } else { 687 netif_wake_queue(netdev); 688 } 689 } 690 691 /** 692 * \brief Stop Tx queue 693 * @param netdev network device 694 */ 695 static void stop_txq(struct net_device *netdev) 696 { 697 txqs_stop(netdev); 698 } 699 700 /** 701 * \brief Start Tx queue 702 * @param netdev network device 703 */ 704 static void start_txq(struct net_device *netdev) 705 { 706 struct lio *lio = GET_LIO(netdev); 707 708 if (lio->linfo.link.s.status) { 709 txqs_start(netdev); 710 return; 711 } 712 } 713 714 /** 715 * \brief Wake a queue 716 * @param netdev network device 717 * @param q which queue to wake 718 */ 719 static inline void wake_q(struct net_device *netdev, int q) 720 { 721 if (netif_is_multiqueue(netdev)) 722 netif_wake_subqueue(netdev, q); 723 else 724 netif_wake_queue(netdev); 725 } 726 727 /** 728 * \brief Stop a queue 729 * @param netdev network device 730 * @param q which queue to stop 731 */ 732 static inline void stop_q(struct net_device *netdev, int q) 733 { 734 if (netif_is_multiqueue(netdev)) 735 netif_stop_subqueue(netdev, q); 736 else 737 netif_stop_queue(netdev); 738 } 739 740 /** 741 * \brief Check Tx queue status, and take appropriate action 742 * @param lio per-network private data 743 * @returns 0 if full, number of queues woken up otherwise 744 */ 745 static inline int check_txq_status(struct lio *lio) 746 { 747 int ret_val = 0; 748 749 if (netif_is_multiqueue(lio->netdev)) { 750 int numqs = lio->netdev->num_tx_queues; 751 int q, iq = 0; 752 753 /* check each sub-queue state */ 754 for (q = 0; q < numqs; q++) { 755 iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)]; 756 if (octnet_iq_is_full(lio->oct_dev, iq)) 757 continue; 758 wake_q(lio->netdev, q); 759 ret_val++; 760 } 761 } else { 762 if (octnet_iq_is_full(lio->oct_dev, lio->txq)) 763 return 0; 764 wake_q(lio->netdev, lio->txq); 765 ret_val = 1; 766 } 767 return ret_val; 768 } 769 770 /** 771 * Remove the node at the head of the list. The list would be empty at 772 * the end of this call if there are no more nodes in the list. 
773 */ 774 static inline struct list_head *list_delete_head(struct list_head *root) 775 { 776 struct list_head *node; 777 778 if ((root->prev == root) && (root->next == root)) 779 node = NULL; 780 else 781 node = root->next; 782 783 if (node) 784 list_del(node); 785 786 return node; 787 } 788 789 /** 790 * \brief Delete gather list 791 * @param lio per-network private data 792 */ 793 static void delete_glist(struct lio *lio) 794 { 795 struct octnic_gather *g; 796 797 do { 798 g = (struct octnic_gather *) 799 list_delete_head(&lio->glist); 800 if (g) { 801 if (g->sg) 802 kfree((void *)((unsigned long)g->sg - 803 g->adjust)); 804 kfree(g); 805 } 806 } while (g); 807 } 808 809 /** 810 * \brief Setup gather list 811 * @param lio per-network private data 812 */ 813 static int setup_glist(struct lio *lio) 814 { 815 int i; 816 struct octnic_gather *g; 817 818 INIT_LIST_HEAD(&lio->glist); 819 820 for (i = 0; i < lio->tx_qsize; i++) { 821 g = kmalloc(sizeof(*g), GFP_KERNEL); 822 if (!g) 823 break; 824 memset(g, 0, sizeof(struct octnic_gather)); 825 826 g->sg_size = 827 ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); 828 829 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); 830 if (!g->sg) { 831 kfree(g); 832 break; 833 } 834 835 /* The gather component should be aligned on 64-bit boundary */ 836 if (((unsigned long)g->sg) & 7) { 837 g->adjust = 8 - (((unsigned long)g->sg) & 7); 838 g->sg = (struct octeon_sg_entry *) 839 ((unsigned long)g->sg + g->adjust); 840 } 841 list_add_tail(&g->list, &lio->glist); 842 } 843 844 if (i == lio->tx_qsize) 845 return 0; 846 847 delete_glist(lio); 848 return 1; 849 } 850 851 /** 852 * \brief Print link information 853 * @param netdev network device 854 */ 855 static void print_link_info(struct net_device *netdev) 856 { 857 struct lio *lio = GET_LIO(netdev); 858 859 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { 860 struct oct_link_info *linfo = &lio->linfo; 861 862 if (linfo->link.s.status) { 863 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 864 linfo->link.s.speed, 865 (linfo->link.s.duplex) ? "Full" : "Half"); 866 } else { 867 netif_info(lio, link, lio->netdev, "Link Down\n"); 868 } 869 } 870 } 871 872 /** 873 * \brief Update link status 874 * @param netdev network device 875 * @param ls link status structure 876 * 877 * Called on receipt of a link status response from the core application to 878 * update each interface's link status. 
879 */ 880 static inline void update_link_status(struct net_device *netdev, 881 union oct_link_status *ls) 882 { 883 struct lio *lio = GET_LIO(netdev); 884 885 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 886 lio->linfo.link.u64 = ls->u64; 887 888 print_link_info(netdev); 889 890 if (lio->linfo.link.s.status) { 891 netif_carrier_on(netdev); 892 /* start_txq(netdev); */ 893 txqs_wake(netdev); 894 } else { 895 netif_carrier_off(netdev); 896 stop_txq(netdev); 897 } 898 } 899 } 900 901 /** 902 * \brief Droq packet processor sceduler 903 * @param oct octeon device 904 */ 905 static 906 void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) 907 { 908 struct octeon_device_priv *oct_priv = 909 (struct octeon_device_priv *)oct->priv; 910 u64 oq_no; 911 struct octeon_droq *droq; 912 913 if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { 914 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { 915 if (!(oct->droq_intr & (1 << oq_no))) 916 continue; 917 918 droq = oct->droq[oq_no]; 919 920 if (droq->ops.poll_mode) { 921 droq->ops.napi_fn(droq); 922 oct_priv->napi_mask |= (1 << oq_no); 923 } else { 924 tasklet_schedule(&oct_priv->droq_tasklet); 925 } 926 } 927 } 928 } 929 930 /** 931 * \brief Interrupt handler for octeon 932 * @param irq unused 933 * @param dev octeon device 934 */ 935 static 936 irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev) 937 { 938 struct octeon_device *oct = (struct octeon_device *)dev; 939 irqreturn_t ret; 940 941 /* Disable our interrupts for the duration of ISR */ 942 oct->fn_list.disable_interrupt(oct->chip); 943 944 ret = oct->fn_list.process_interrupt_regs(oct); 945 946 if (ret == IRQ_HANDLED) 947 liquidio_schedule_droq_pkt_handlers(oct); 948 949 /* Re-enable our interrupts */ 950 if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) 951 oct->fn_list.enable_interrupt(oct->chip); 952 953 return ret; 954 } 955 956 /** 957 * \brief Setup interrupt for octeon device 958 * @param oct octeon device 959 * 960 * Enable interrupt in Octeon device as given in the PCI interrupt mask. 961 */ 962 static int octeon_setup_interrupt(struct octeon_device *oct) 963 { 964 int irqret, err; 965 966 err = pci_enable_msi(oct->pci_dev); 967 if (err) 968 dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", 969 err); 970 else 971 oct->flags |= LIO_FLAG_MSI_ENABLED; 972 973 irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler, 974 IRQF_SHARED, "octeon", oct); 975 if (irqret) { 976 if (oct->flags & LIO_FLAG_MSI_ENABLED) 977 pci_disable_msi(oct->pci_dev); 978 dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", 979 irqret); 980 return 1; 981 } 982 983 return 0; 984 } 985 986 /** 987 * \brief PCI probe handler 988 * @param pdev PCI device structure 989 * @param ent unused 990 */ 991 static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 992 { 993 struct octeon_device *oct_dev = NULL; 994 struct handshake *hs; 995 996 oct_dev = octeon_allocate_device(pdev->device, 997 sizeof(struct octeon_device_priv)); 998 if (!oct_dev) { 999 dev_err(&pdev->dev, "Unable to allocate device\n"); 1000 return -ENOMEM; 1001 } 1002 1003 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 1004 (u32)pdev->vendor, (u32)pdev->device); 1005 1006 /* Assign octeon_device for this device to the private data area. 
*/ 1007 pci_set_drvdata(pdev, oct_dev); 1008 1009 /* set linux specific device pointer */ 1010 oct_dev->pci_dev = (void *)pdev; 1011 1012 hs = &handshake[oct_dev->octeon_id]; 1013 init_completion(&hs->init); 1014 init_completion(&hs->started); 1015 hs->pci_dev = pdev; 1016 1017 if (oct_dev->octeon_id == 0) 1018 /* first LiquidIO NIC is detected */ 1019 complete(&first_stage); 1020 1021 if (octeon_device_init(oct_dev)) { 1022 liquidio_remove(pdev); 1023 return -ENOMEM; 1024 } 1025 1026 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 1027 1028 return 0; 1029 } 1030 1031 /** 1032 *\brief Destroy resources associated with octeon device 1033 * @param pdev PCI device structure 1034 * @param ent unused 1035 */ 1036 static void octeon_destroy_resources(struct octeon_device *oct) 1037 { 1038 int i; 1039 struct octeon_device_priv *oct_priv = 1040 (struct octeon_device_priv *)oct->priv; 1041 1042 struct handshake *hs; 1043 1044 switch (atomic_read(&oct->status)) { 1045 case OCT_DEV_RUNNING: 1046 case OCT_DEV_CORE_OK: 1047 1048 /* No more instructions will be forwarded. */ 1049 atomic_set(&oct->status, OCT_DEV_IN_RESET); 1050 1051 oct->app_mode = CVM_DRV_INVALID_APP; 1052 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 1053 lio_get_state_string(&oct->status)); 1054 1055 schedule_timeout_uninterruptible(HZ / 10); 1056 1057 /* fallthrough */ 1058 case OCT_DEV_HOST_OK: 1059 1060 /* fallthrough */ 1061 case OCT_DEV_CONSOLE_INIT_DONE: 1062 /* Remove any consoles */ 1063 octeon_remove_consoles(oct); 1064 1065 /* fallthrough */ 1066 case OCT_DEV_IO_QUEUES_DONE: 1067 if (wait_for_pending_requests(oct)) 1068 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1069 1070 if (lio_wait_for_instr_fetch(oct)) 1071 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1072 1073 /* Disable the input and output queues now. No more packets will 1074 * arrive from Octeon, but we should wait for all packet 1075 * processing to finish. 
1076 */ 1077 oct->fn_list.disable_io_queues(oct); 1078 1079 if (lio_wait_for_oq_pkts(oct)) 1080 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 1081 1082 /* Disable interrupts */ 1083 oct->fn_list.disable_interrupt(oct->chip); 1084 1085 /* Release the interrupt line */ 1086 free_irq(oct->pci_dev->irq, oct); 1087 1088 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1089 pci_disable_msi(oct->pci_dev); 1090 1091 /* Soft reset the octeon device before exiting */ 1092 oct->fn_list.soft_reset(oct); 1093 1094 /* Disable the device, releasing the PCI INT */ 1095 pci_disable_device(oct->pci_dev); 1096 1097 /* fallthrough */ 1098 case OCT_DEV_IN_RESET: 1099 case OCT_DEV_DROQ_INIT_DONE: 1100 /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/ 1101 mdelay(100); 1102 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 1103 if (!(oct->io_qmask.oq & (1UL << i))) 1104 continue; 1105 octeon_delete_droq(oct, i); 1106 } 1107 1108 /* Force any pending handshakes to complete */ 1109 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 1110 hs = &handshake[i]; 1111 1112 if (hs->pci_dev) { 1113 handshake[oct->octeon_id].init_ok = 0; 1114 complete(&handshake[oct->octeon_id].init); 1115 handshake[oct->octeon_id].started_ok = 0; 1116 complete(&handshake[oct->octeon_id].started); 1117 } 1118 } 1119 1120 /* fallthrough */ 1121 case OCT_DEV_RESP_LIST_INIT_DONE: 1122 octeon_delete_response_list(oct); 1123 1124 /* fallthrough */ 1125 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 1126 octeon_free_sc_buffer_pool(oct); 1127 1128 /* fallthrough */ 1129 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1130 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 1131 if (!(oct->io_qmask.iq & (1UL << i))) 1132 continue; 1133 octeon_delete_instr_queue(oct, i); 1134 } 1135 1136 /* fallthrough */ 1137 case OCT_DEV_DISPATCH_INIT_DONE: 1138 octeon_delete_dispatch_list(oct); 1139 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1140 1141 /* fallthrough */ 1142 case OCT_DEV_PCI_MAP_DONE: 1143 octeon_unmap_pci_barx(oct, 0); 1144 octeon_unmap_pci_barx(oct, 1); 1145 1146 /* fallthrough */ 1147 case OCT_DEV_BEGIN_STATE: 1148 /* Nothing to be done here either */ 1149 break; 1150 } /* end switch(oct->status) */ 1151 1152 tasklet_kill(&oct_priv->droq_tasklet); 1153 } 1154 1155 /** 1156 * \brief Send Rx control command 1157 * @param lio per-network private data 1158 * @param start_stop whether to start or stop 1159 */ 1160 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1161 { 1162 struct octnic_ctrl_pkt nctrl; 1163 struct octnic_ctrl_params nparams; 1164 1165 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1166 1167 nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL; 1168 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 1169 nctrl.ncmd.s.param2 = start_stop; 1170 nctrl.netpndev = (u64)lio->netdev; 1171 1172 nparams.resp_order = OCTEON_RESP_NORESPONSE; 1173 1174 if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0) 1175 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1176 } 1177 1178 /** 1179 * \brief Destroy NIC device interface 1180 * @param oct octeon device 1181 * @param ifidx which interface to destroy 1182 * 1183 * Cleanup associated with each interface for an Octeon device when NIC 1184 * module is being unloaded or if initialization fails during load. 
1185 */ 1186 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1187 { 1188 struct net_device *netdev = oct->props[ifidx].netdev; 1189 struct lio *lio; 1190 1191 if (!netdev) { 1192 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1193 __func__, ifidx); 1194 return; 1195 } 1196 1197 lio = GET_LIO(netdev); 1198 1199 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1200 1201 send_rx_ctrl_cmd(lio, 0); 1202 1203 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1204 txqs_stop(netdev); 1205 1206 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1207 unregister_netdev(netdev); 1208 1209 delete_glist(lio); 1210 1211 free_netdev(netdev); 1212 1213 oct->props[ifidx].netdev = NULL; 1214 } 1215 1216 /** 1217 * \brief Stop complete NIC functionality 1218 * @param oct octeon device 1219 */ 1220 static int liquidio_stop_nic_module(struct octeon_device *oct) 1221 { 1222 int i, j; 1223 struct lio *lio; 1224 1225 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1226 if (!oct->ifcount) { 1227 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1228 return 1; 1229 } 1230 1231 for (i = 0; i < oct->ifcount; i++) { 1232 lio = GET_LIO(oct->props[i].netdev); 1233 for (j = 0; j < lio->linfo.num_rxpciq; j++) 1234 octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]); 1235 } 1236 1237 for (i = 0; i < oct->ifcount; i++) 1238 liquidio_destroy_nic_device(oct, i); 1239 1240 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1241 return 0; 1242 } 1243 1244 /** 1245 * \brief Cleans up resources at unload time 1246 * @param pdev PCI device structure 1247 */ 1248 static void liquidio_remove(struct pci_dev *pdev) 1249 { 1250 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1251 1252 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1253 1254 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1255 liquidio_stop_nic_module(oct_dev); 1256 1257 /* Reset the octeon device and cleanup all memory allocated for 1258 * the octeon device by driver. 1259 */ 1260 octeon_destroy_resources(oct_dev); 1261 1262 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1263 1264 /* This octeon device has been removed. Update the global 1265 * data structure to reflect this. Free the device structure. 1266 */ 1267 octeon_free_device_mem(oct_dev); 1268 } 1269 1270 /** 1271 * \brief Identify the Octeon device and to map the BAR address space 1272 * @param oct octeon device 1273 */ 1274 static int octeon_chip_specific_setup(struct octeon_device *oct) 1275 { 1276 u32 dev_id, rev_id; 1277 int ret = 1; 1278 1279 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1280 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1281 oct->rev_id = rev_id & 0xff; 1282 1283 switch (dev_id) { 1284 case OCTEON_CN68XX_PCIID: 1285 oct->chip_id = OCTEON_CN68XX; 1286 ret = lio_setup_cn68xx_octeon_device(oct); 1287 break; 1288 1289 case OCTEON_CN66XX_PCIID: 1290 oct->chip_id = OCTEON_CN66XX; 1291 ret = lio_setup_cn66xx_octeon_device(oct); 1292 break; 1293 default: 1294 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1295 dev_id); 1296 } 1297 1298 if (!ret) 1299 dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n", 1300 OCTEON_MAJOR_REV(oct), 1301 OCTEON_MINOR_REV(oct), 1302 octeon_get_conf(oct)->card_name); 1303 1304 return ret; 1305 } 1306 1307 /** 1308 * \brief PCI initialization for each Octeon device. 
1309 * @param oct octeon device 1310 */ 1311 static int octeon_pci_os_setup(struct octeon_device *oct) 1312 { 1313 /* setup PCI stuff first */ 1314 if (pci_enable_device(oct->pci_dev)) { 1315 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1316 return 1; 1317 } 1318 1319 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1320 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1321 return 1; 1322 } 1323 1324 /* Enable PCI DMA Master. */ 1325 pci_set_master(oct->pci_dev); 1326 1327 return 0; 1328 } 1329 1330 /** 1331 * \brief Check Tx queue state for a given network buffer 1332 * @param lio per-network private data 1333 * @param skb network buffer 1334 */ 1335 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) 1336 { 1337 int q = 0, iq = 0; 1338 1339 if (netif_is_multiqueue(lio->netdev)) { 1340 q = skb->queue_mapping; 1341 iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))]; 1342 } else { 1343 iq = lio->txq; 1344 } 1345 1346 if (octnet_iq_is_full(lio->oct_dev, iq)) 1347 return 0; 1348 wake_q(lio->netdev, q); 1349 return 1; 1350 } 1351 1352 /** 1353 * \brief Unmap and free network buffer 1354 * @param buf buffer 1355 */ 1356 static void free_netbuf(void *buf) 1357 { 1358 struct sk_buff *skb; 1359 struct octnet_buf_free_info *finfo; 1360 struct lio *lio; 1361 1362 finfo = (struct octnet_buf_free_info *)buf; 1363 skb = finfo->skb; 1364 lio = finfo->lio; 1365 1366 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1367 DMA_TO_DEVICE); 1368 1369 check_txq_state(lio, skb); 1370 1371 recv_buffer_free((struct sk_buff *)skb); 1372 } 1373 1374 /** 1375 * \brief Unmap and free gather buffer 1376 * @param buf buffer 1377 */ 1378 static void free_netsgbuf(void *buf) 1379 { 1380 struct octnet_buf_free_info *finfo; 1381 struct sk_buff *skb; 1382 struct lio *lio; 1383 struct octnic_gather *g; 1384 int i, frags; 1385 1386 finfo = (struct octnet_buf_free_info *)buf; 1387 skb = finfo->skb; 1388 lio = finfo->lio; 1389 g = finfo->g; 1390 frags = skb_shinfo(skb)->nr_frags; 1391 1392 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1393 g->sg[0].ptr[0], (skb->len - skb->data_len), 1394 DMA_TO_DEVICE); 1395 1396 i = 1; 1397 while (frags--) { 1398 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1399 1400 pci_unmap_page((lio->oct_dev)->pci_dev, 1401 g->sg[(i >> 2)].ptr[(i & 3)], 1402 frag->size, DMA_TO_DEVICE); 1403 i++; 1404 } 1405 1406 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1407 finfo->dptr, g->sg_size, 1408 DMA_TO_DEVICE); 1409 1410 spin_lock(&lio->lock); 1411 list_add_tail(&g->list, &lio->glist); 1412 spin_unlock(&lio->lock); 1413 1414 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1415 1416 recv_buffer_free((struct sk_buff *)skb); 1417 } 1418 1419 /** 1420 * \brief Unmap and free gather buffer with response 1421 * @param buf buffer 1422 */ 1423 static void free_netsgbuf_with_resp(void *buf) 1424 { 1425 struct octeon_soft_command *sc; 1426 struct octnet_buf_free_info *finfo; 1427 struct sk_buff *skb; 1428 struct lio *lio; 1429 struct octnic_gather *g; 1430 int i, frags; 1431 1432 sc = (struct octeon_soft_command *)buf; 1433 skb = (struct sk_buff *)sc->callback_arg; 1434 finfo = (struct octnet_buf_free_info *)&skb->cb; 1435 1436 lio = finfo->lio; 1437 g = finfo->g; 1438 frags = skb_shinfo(skb)->nr_frags; 1439 1440 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1441 g->sg[0].ptr[0], (skb->len - skb->data_len), 1442 DMA_TO_DEVICE); 1443 1444 i = 1; 1445 while (frags--) { 1446 struct 
skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1447 1448 pci_unmap_page((lio->oct_dev)->pci_dev, 1449 g->sg[(i >> 2)].ptr[(i & 3)], 1450 frag->size, DMA_TO_DEVICE); 1451 i++; 1452 } 1453 1454 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1455 finfo->dptr, g->sg_size, 1456 DMA_TO_DEVICE); 1457 1458 spin_lock(&lio->lock); 1459 list_add_tail(&g->list, &lio->glist); 1460 spin_unlock(&lio->lock); 1461 1462 /* Don't free the skb yet */ 1463 1464 check_txq_state(lio, skb); 1465 } 1466 1467 /** 1468 * \brief Adjust ptp frequency 1469 * @param ptp PTP clock info 1470 * @param ppb how much to adjust by, in parts-per-billion 1471 */ 1472 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 1473 { 1474 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1475 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1476 u64 comp, delta; 1477 unsigned long flags; 1478 bool neg_adj = false; 1479 1480 if (ppb < 0) { 1481 neg_adj = true; 1482 ppb = -ppb; 1483 } 1484 1485 /* The hardware adds the clock compensation value to the 1486 * PTP clock on every coprocessor clock cycle, so we 1487 * compute the delta in terms of coprocessor clocks. 1488 */ 1489 delta = (u64)ppb << 32; 1490 do_div(delta, oct->coproc_clock_rate); 1491 1492 spin_lock_irqsave(&lio->ptp_lock, flags); 1493 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); 1494 if (neg_adj) 1495 comp -= delta; 1496 else 1497 comp += delta; 1498 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1499 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1500 1501 return 0; 1502 } 1503 1504 /** 1505 * \brief Adjust ptp time 1506 * @param ptp PTP clock info 1507 * @param delta how much to adjust by, in nanosecs 1508 */ 1509 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 1510 { 1511 unsigned long flags; 1512 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1513 1514 spin_lock_irqsave(&lio->ptp_lock, flags); 1515 lio->ptp_adjust += delta; 1516 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1517 1518 return 0; 1519 } 1520 1521 /** 1522 * \brief Get hardware clock time, including any adjustment 1523 * @param ptp PTP clock info 1524 * @param ts timespec 1525 */ 1526 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, 1527 struct timespec64 *ts) 1528 { 1529 u64 ns; 1530 u32 remainder; 1531 unsigned long flags; 1532 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1533 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1534 1535 spin_lock_irqsave(&lio->ptp_lock, flags); 1536 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); 1537 ns += lio->ptp_adjust; 1538 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1539 1540 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); 1541 ts->tv_nsec = remainder; 1542 1543 return 0; 1544 } 1545 1546 /** 1547 * \brief Set hardware clock time. 
Reset adjustment 1548 * @param ptp PTP clock info 1549 * @param ts timespec 1550 */ 1551 static int liquidio_ptp_settime(struct ptp_clock_info *ptp, 1552 const struct timespec64 *ts) 1553 { 1554 u64 ns; 1555 unsigned long flags; 1556 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1557 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1558 1559 ns = timespec_to_ns(ts); 1560 1561 spin_lock_irqsave(&lio->ptp_lock, flags); 1562 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1563 lio->ptp_adjust = 0; 1564 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1565 1566 return 0; 1567 } 1568 1569 /** 1570 * \brief Check if PTP is enabled 1571 * @param ptp PTP clock info 1572 * @param rq request 1573 * @param on is it on 1574 */ 1575 static int liquidio_ptp_enable(struct ptp_clock_info *ptp, 1576 struct ptp_clock_request *rq, int on) 1577 { 1578 return -EOPNOTSUPP; 1579 } 1580 1581 /** 1582 * \brief Open PTP clock source 1583 * @param netdev network device 1584 */ 1585 static void oct_ptp_open(struct net_device *netdev) 1586 { 1587 struct lio *lio = GET_LIO(netdev); 1588 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1589 1590 spin_lock_init(&lio->ptp_lock); 1591 1592 snprintf(lio->ptp_info.name, 16, "%s", netdev->name); 1593 lio->ptp_info.owner = THIS_MODULE; 1594 lio->ptp_info.max_adj = 250000000; 1595 lio->ptp_info.n_alarm = 0; 1596 lio->ptp_info.n_ext_ts = 0; 1597 lio->ptp_info.n_per_out = 0; 1598 lio->ptp_info.pps = 0; 1599 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq; 1600 lio->ptp_info.adjtime = liquidio_ptp_adjtime; 1601 lio->ptp_info.gettime64 = liquidio_ptp_gettime; 1602 lio->ptp_info.settime64 = liquidio_ptp_settime; 1603 lio->ptp_info.enable = liquidio_ptp_enable; 1604 1605 lio->ptp_adjust = 0; 1606 1607 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 1608 &oct->pci_dev->dev); 1609 1610 if (IS_ERR(lio->ptp_clock)) 1611 lio->ptp_clock = NULL; 1612 } 1613 1614 /** 1615 * \brief Init PTP clock 1616 * @param oct octeon device 1617 */ 1618 static void liquidio_ptp_init(struct octeon_device *oct) 1619 { 1620 u64 clock_comp, cfg; 1621 1622 clock_comp = (u64)NSEC_PER_SEC << 32; 1623 do_div(clock_comp, oct->coproc_clock_rate); 1624 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1625 1626 /* Enable */ 1627 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 1628 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 1629 } 1630 1631 /** 1632 * \brief Load firmware to device 1633 * @param oct octeon device 1634 * 1635 * Maps device to firmware filename, requests firmware, and downloads it 1636 */ 1637 static int load_firmware(struct octeon_device *oct) 1638 { 1639 int ret = 0; 1640 const struct firmware *fw; 1641 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 1642 char *tmp_fw_type; 1643 1644 if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, 1645 sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) { 1646 dev_info(&oct->pci_dev->dev, "Skipping firmware load\n"); 1647 return ret; 1648 } 1649 1650 if (fw_type[0] == '\0') 1651 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 1652 else 1653 tmp_fw_type = fw_type; 1654 1655 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 1656 octeon_get_conf(oct)->card_name, tmp_fw_type, 1657 LIO_FW_NAME_SUFFIX); 1658 1659 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 1660 if (ret) { 1661 dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n.", 1662 fw_name); 1663 return ret; 1664 } 1665 1666 ret = octeon_download_firmware(oct, fw->data, fw->size); 1667 1668 release_firmware(fw); 1669 1670 return ret; 1671 } 1672 1673 /** 1674 * \brief Setup output queue 1675 * @param oct octeon device 1676 * @param q_no which queue 1677 * @param num_descs how many descriptors 1678 * @param desc_size size of each descriptor 1679 * @param app_ctx application context 1680 */ 1681 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, 1682 int desc_size, void *app_ctx) 1683 { 1684 int ret_val = 0; 1685 1686 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1687 /* droq creation and local register settings. */ 1688 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1689 if (ret_val == -1) 1690 return ret_val; 1691 1692 if (ret_val == 1) { 1693 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); 1694 return 0; 1695 } 1696 /* tasklet creation for the droq */ 1697 1698 /* Enable the droq queues */ 1699 octeon_set_droq_pkt_op(oct, q_no, 1); 1700 1701 /* Send Credit for Octeon Output queues. Credits are always 1702 * sent after the output queue is enabled. 1703 */ 1704 writel(oct->droq[q_no]->max_count, 1705 oct->droq[q_no]->pkts_credit_reg); 1706 1707 return ret_val; 1708 } 1709 1710 /** 1711 * \brief Callback for getting interface configuration 1712 * @param status status of request 1713 * @param buf pointer to resp structure 1714 */ 1715 static void if_cfg_callback(struct octeon_device *oct, 1716 u32 status, 1717 void *buf) 1718 { 1719 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1720 struct liquidio_if_cfg_resp *resp; 1721 struct liquidio_if_cfg_context *ctx; 1722 1723 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1724 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 1725 1726 oct = lio_get_device(ctx->octeon_id); 1727 if (resp->status) 1728 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 1729 CVM_CAST64(resp->status)); 1730 ACCESS_ONCE(ctx->cond) = 1; 1731 1732 /* This barrier is required to be sure that the response has been 1733 * written fully before waking up the handler 1734 */ 1735 wmb(); 1736 1737 wake_up_interruptible(&ctx->wc); 1738 } 1739 1740 /** 1741 * \brief Select queue based on hash 1742 * @param dev Net device 1743 * @param skb sk_buff structure 1744 * @returns selected queue number 1745 */ 1746 static u16 select_q(struct net_device *dev, struct sk_buff *skb, 1747 void *accel_priv, select_queue_fallback_t fallback) 1748 { 1749 int qindex; 1750 struct lio *lio; 1751 1752 lio = GET_LIO(dev); 1753 /* select queue on chosen queue_mapping or core */ 1754 qindex = skb_rx_queue_recorded(skb) ? 1755 skb_get_rx_queue(skb) : smp_processor_id(); 1756 return (u16)(qindex & (lio->linfo.num_txpciq - 1)); 1757 } 1758 1759 /** Routine to push packets arriving on Octeon interface upto network layer. 1760 * @param oct_id - octeon device id. 1761 * @param skbuff - skbuff struct to be passed to network layer. 1762 * @param len - size of total data received. 
1763 * @param rh - Control header associated with the packet 1764 * @param param - additional control data with the packet 1765 */ 1766 static void 1767 liquidio_push_packet(u32 octeon_id, 1768 void *skbuff, 1769 u32 len, 1770 union octeon_rh *rh, 1771 void *param) 1772 { 1773 struct napi_struct *napi = param; 1774 struct octeon_device *oct = lio_get_device(octeon_id); 1775 struct sk_buff *skb = (struct sk_buff *)skbuff; 1776 struct skb_shared_hwtstamps *shhwtstamps; 1777 u64 ns; 1778 struct net_device *netdev = 1779 (struct net_device *)oct->props[rh->r_dh.link].netdev; 1780 struct octeon_droq *droq = container_of(param, struct octeon_droq, 1781 napi); 1782 if (netdev) { 1783 int packet_was_received; 1784 struct lio *lio = GET_LIO(netdev); 1785 1786 /* Do not proceed if the interface is not in RUNNING state. */ 1787 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { 1788 recv_buffer_free(skb); 1789 droq->stats.rx_dropped++; 1790 return; 1791 } 1792 1793 skb->dev = netdev; 1794 1795 if (rh->r_dh.has_hwtstamp) { 1796 /* timestamp is included from the hardware at the 1797 * beginning of the packet. 1798 */ 1799 if (ifstate_check(lio, 1800 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { 1801 /* Nanoseconds are in the first 64-bits 1802 * of the packet. 1803 */ 1804 memcpy(&ns, (skb->data), sizeof(ns)); 1805 shhwtstamps = skb_hwtstamps(skb); 1806 shhwtstamps->hwtstamp = 1807 ns_to_ktime(ns + lio->ptp_adjust); 1808 } 1809 skb_pull(skb, sizeof(ns)); 1810 } 1811 1812 skb->protocol = eth_type_trans(skb, skb->dev); 1813 1814 if ((netdev->features & NETIF_F_RXCSUM) && 1815 (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED)) 1816 /* checksum has already been verified */ 1817 skb->ip_summed = CHECKSUM_UNNECESSARY; 1818 else 1819 skb->ip_summed = CHECKSUM_NONE; 1820 1821 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; 1822 1823 if (packet_was_received) { 1824 droq->stats.rx_bytes_received += len; 1825 droq->stats.rx_pkts_received++; 1826 netdev->last_rx = jiffies; 1827 } else { 1828 droq->stats.rx_dropped++; 1829 netif_info(lio, rx_err, lio->netdev, 1830 "droq:%d error rx_dropped:%llu\n", 1831 droq->q_no, droq->stats.rx_dropped); 1832 } 1833 1834 } else { 1835 recv_buffer_free(skb); 1836 } 1837 } 1838 1839 /** 1840 * \brief wrapper for calling napi_schedule 1841 * @param param parameters to pass to napi_schedule 1842 * 1843 * Used when scheduling on different CPUs 1844 */ 1845 static void napi_schedule_wrapper(void *param) 1846 { 1847 struct napi_struct *napi = param; 1848 1849 napi_schedule(napi); 1850 } 1851 1852 /** 1853 * \brief callback when receive interrupt occurs and we are in NAPI mode 1854 * @param arg pointer to octeon output queue 1855 */ 1856 static void liquidio_napi_drv_callback(void *arg) 1857 { 1858 struct octeon_droq *droq = arg; 1859 int this_cpu = smp_processor_id(); 1860 1861 if (droq->cpu_id == this_cpu) { 1862 napi_schedule(&droq->napi); 1863 } else { 1864 struct call_single_data *csd = &droq->csd; 1865 1866 csd->func = napi_schedule_wrapper; 1867 csd->info = &droq->napi; 1868 csd->flags = 0; 1869 1870 smp_call_function_single_async(droq->cpu_id, csd); 1871 } 1872 } 1873 1874 /** 1875 * \brief Main NAPI poll function 1876 * @param droq octeon output queue 1877 * @param budget maximum number of items to process 1878 */ 1879 static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget) 1880 { 1881 int work_done; 1882 struct lio *lio = GET_LIO(droq->napi.dev); 1883 struct octeon_device *oct = lio->oct_dev; 1884 1885 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, 1886 
POLL_EVENT_PROCESS_PKTS, 1887 budget); 1888 if (work_done < 0) { 1889 netif_info(lio, rx_err, lio->netdev, 1890 "Receive work_done < 0, rxq:%d\n", droq->q_no); 1891 goto octnet_napi_finish; 1892 } 1893 1894 if (work_done > budget) 1895 dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n", 1896 __func__, work_done, budget); 1897 1898 return work_done; 1899 1900 octnet_napi_finish: 1901 napi_complete(&droq->napi); 1902 octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR, 1903 0); 1904 return 0; 1905 } 1906 1907 /** 1908 * \brief Entry point for NAPI polling 1909 * @param napi NAPI structure 1910 * @param budget maximum number of items to process 1911 */ 1912 static int liquidio_napi_poll(struct napi_struct *napi, int budget) 1913 { 1914 struct octeon_droq *droq; 1915 int work_done; 1916 1917 droq = container_of(napi, struct octeon_droq, napi); 1918 1919 work_done = liquidio_napi_do_rx(droq, budget); 1920 1921 if (work_done < budget) { 1922 napi_complete(napi); 1923 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, 1924 POLL_EVENT_ENABLE_INTR, 0); 1925 return 0; 1926 } 1927 1928 return work_done; 1929 } 1930 1931 /** 1932 * \brief Setup input and output queues 1933 * @param octeon_dev octeon device 1934 * @param net_device Net device 1935 * 1936 * Note: Queues are with respect to the octeon device. Thus 1937 * an input queue is for egress packets, and output queues 1938 * are for ingress packets. 1939 */ 1940 static inline int setup_io_queues(struct octeon_device *octeon_dev, 1941 struct net_device *net_device) 1942 { 1943 static int first_time = 1; 1944 static struct octeon_droq_ops droq_ops; 1945 static int cpu_id; 1946 static int cpu_id_modulus; 1947 struct octeon_droq *droq; 1948 struct napi_struct *napi; 1949 int q, q_no, retval = 0; 1950 struct lio *lio; 1951 int num_tx_descs; 1952 1953 lio = GET_LIO(net_device); 1954 if (first_time) { 1955 first_time = 0; 1956 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); 1957 1958 droq_ops.fptr = liquidio_push_packet; 1959 1960 droq_ops.poll_mode = 1; 1961 droq_ops.napi_fn = liquidio_napi_drv_callback; 1962 cpu_id = 0; 1963 cpu_id_modulus = num_present_cpus(); 1964 } 1965 1966 /* set up DROQs. */ 1967 for (q = 0; q < lio->linfo.num_rxpciq; q++) { 1968 q_no = lio->linfo.rxpciq[q]; 1969 1970 retval = octeon_setup_droq(octeon_dev, q_no, 1971 CFG_GET_NUM_RX_DESCS_NIC_IF 1972 (octeon_get_conf(octeon_dev), 1973 lio->ifidx), 1974 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF 1975 (octeon_get_conf(octeon_dev), 1976 lio->ifidx), NULL); 1977 if (retval) { 1978 dev_err(&octeon_dev->pci_dev->dev, 1979 " %s : Runtime DROQ(RxQ) creation failed.\n", 1980 __func__); 1981 return 1; 1982 } 1983 1984 droq = octeon_dev->droq[q_no]; 1985 napi = &droq->napi; 1986 netif_napi_add(net_device, napi, liquidio_napi_poll, 64); 1987 1988 /* designate a CPU for this droq */ 1989 droq->cpu_id = cpu_id; 1990 cpu_id++; 1991 if (cpu_id >= cpu_id_modulus) 1992 cpu_id = 0; 1993 1994 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); 1995 } 1996 1997 /* set up IQs. 
*/ 1998 for (q = 0; q < lio->linfo.num_txpciq; q++) { 1999 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf 2000 (octeon_dev), 2001 lio->ifidx); 2002 retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q], 2003 num_tx_descs, 2004 netdev_get_tx_queue(net_device, q)); 2005 if (retval) { 2006 dev_err(&octeon_dev->pci_dev->dev, 2007 " %s : Runtime IQ(TxQ) creation failed.\n", 2008 __func__); 2009 return 1; 2010 } 2011 } 2012 2013 return 0; 2014 } 2015 2016 /** 2017 * \brief Poll routine for checking transmit queue status 2018 * @param work work_struct data structure 2019 */ 2020 static void octnet_poll_check_txq_status(struct work_struct *work) 2021 { 2022 struct cavium_wk *wk = (struct cavium_wk *)work; 2023 struct lio *lio = (struct lio *)wk->ctxptr; 2024 2025 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 2026 return; 2027 2028 check_txq_status(lio); 2029 queue_delayed_work(lio->txq_status_wq.wq, 2030 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2031 } 2032 2033 /** 2034 * \brief Sets up the txq poll check 2035 * @param netdev network device 2036 */ 2037 static inline void setup_tx_poll_fn(struct net_device *netdev) 2038 { 2039 struct lio *lio = GET_LIO(netdev); 2040 struct octeon_device *oct = lio->oct_dev; 2041 2042 lio->txq_status_wq.wq = create_workqueue("txq-status"); 2043 if (!lio->txq_status_wq.wq) { 2044 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 2045 return; 2046 } 2047 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 2048 octnet_poll_check_txq_status); 2049 lio->txq_status_wq.wk.ctxptr = lio; 2050 queue_delayed_work(lio->txq_status_wq.wq, 2051 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2052 } 2053 2054 /** 2055 * \brief Net device open for LiquidIO 2056 * @param netdev network device 2057 */ 2058 static int liquidio_open(struct net_device *netdev) 2059 { 2060 struct lio *lio = GET_LIO(netdev); 2061 struct octeon_device *oct = lio->oct_dev; 2062 struct napi_struct *napi, *n; 2063 2064 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 2065 napi_enable(napi); 2066 2067 oct_ptp_open(netdev); 2068 2069 ifstate_set(lio, LIO_IFSTATE_RUNNING); 2070 setup_tx_poll_fn(netdev); 2071 start_txq(netdev); 2072 2073 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 2074 try_module_get(THIS_MODULE); 2075 2076 /* tell Octeon to start forwarding packets to host */ 2077 send_rx_ctrl_cmd(lio, 1); 2078 2079 /* Ready for link status updates */ 2080 lio->intf_open = 1; 2081 2082 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 2083 netdev->name); 2084 2085 return 0; 2086 } 2087 2088 /** 2089 * \brief Net device stop for LiquidIO 2090 * @param netdev network device 2091 */ 2092 static int liquidio_stop(struct net_device *netdev) 2093 { 2094 struct napi_struct *napi, *n; 2095 struct lio *lio = GET_LIO(netdev); 2096 struct octeon_device *oct = lio->oct_dev; 2097 2098 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 2099 /* Inform that netif carrier is down */ 2100 lio->intf_open = 0; 2101 lio->linfo.link.s.status = 0; 2102 2103 netif_carrier_off(netdev); 2104 2105 /* tell Octeon to stop forwarding packets to host */ 2106 send_rx_ctrl_cmd(lio, 0); 2107 2108 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 2109 flush_workqueue(lio->txq_status_wq.wq); 2110 destroy_workqueue(lio->txq_status_wq.wq); 2111 2112 if (lio->ptp_clock) { 2113 ptp_clock_unregister(lio->ptp_clock); 2114 lio->ptp_clock = NULL; 2115 } 2116 2117 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 2118 2119 /* This is a hack that 
allows DHCP to continue working. */ 2120 set_bit(__LINK_STATE_START, &lio->netdev->state); 2121 2122 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 2123 napi_disable(napi); 2124 2125 txqs_stop(netdev); 2126 2127 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 2128 module_put(THIS_MODULE); 2129 2130 return 0; 2131 } 2132 2133 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) 2134 { 2135 struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr; 2136 struct net_device *netdev = (struct net_device *)nctrl->netpndev; 2137 struct lio *lio = GET_LIO(netdev); 2138 struct octeon_device *oct = lio->oct_dev; 2139 2140 switch (nctrl->ncmd.s.cmd) { 2141 case OCTNET_CMD_CHANGE_DEVFLAGS: 2142 case OCTNET_CMD_SET_MULTI_LIST: 2143 break; 2144 2145 case OCTNET_CMD_CHANGE_MACADDR: 2146 /* If command is successful, change the MACADDR. */ 2147 netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n", 2148 CVM_CAST64(nctrl->udd[0])); 2149 dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n", 2150 netdev->name, CVM_CAST64(nctrl->udd[0])); 2151 memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN); 2152 break; 2153 2154 case OCTNET_CMD_CHANGE_MTU: 2155 /* If command is successful, change the MTU. */ 2156 netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n", 2157 netdev->mtu, nctrl->ncmd.s.param2); 2158 dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", 2159 netdev->name, netdev->mtu, 2160 nctrl->ncmd.s.param2); 2161 netdev->mtu = nctrl->ncmd.s.param2; 2162 break; 2163 2164 case OCTNET_CMD_GPIO_ACCESS: 2165 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n"); 2166 2167 break; 2168 2169 case OCTNET_CMD_LRO_ENABLE: 2170 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name); 2171 break; 2172 2173 case OCTNET_CMD_LRO_DISABLE: 2174 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n", 2175 netdev->name); 2176 break; 2177 2178 case OCTNET_CMD_VERBOSE_ENABLE: 2179 dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n", netdev->name); 2180 break; 2181 2182 case OCTNET_CMD_VERBOSE_DISABLE: 2183 dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n", 2184 netdev->name); 2185 break; 2186 2187 case OCTNET_CMD_SET_SETTINGS: 2188 dev_info(&oct->pci_dev->dev, "%s settings changed\n", 2189 netdev->name); 2190 2191 break; 2192 2193 default: 2194 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, 2195 nctrl->ncmd.s.cmd); 2196 } 2197 } 2198 2199 /** 2200 * \brief Converts a mask based on net device flags 2201 * @param netdev network device 2202 * 2203 * This routine generates an octnet_ifflags mask from the net device flags 2204 * received from the OS.
2205 */ 2206 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 2207 { 2208 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 2209 2210 if (netdev->flags & IFF_PROMISC) 2211 f |= OCTNET_IFFLAG_PROMISC; 2212 2213 if (netdev->flags & IFF_ALLMULTI) 2214 f |= OCTNET_IFFLAG_ALLMULTI; 2215 2216 if (netdev->flags & IFF_MULTICAST) { 2217 f |= OCTNET_IFFLAG_MULTICAST; 2218 2219 /* Accept all multicast addresses if there are more than we 2220 * can handle 2221 */ 2222 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 2223 f |= OCTNET_IFFLAG_ALLMULTI; 2224 } 2225 2226 if (netdev->flags & IFF_BROADCAST) 2227 f |= OCTNET_IFFLAG_BROADCAST; 2228 2229 return f; 2230 } 2231 2232 /** 2233 * \brief Net device set_multicast_list 2234 * @param netdev network device 2235 */ 2236 static void liquidio_set_mcast_list(struct net_device *netdev) 2237 { 2238 struct lio *lio = GET_LIO(netdev); 2239 struct octeon_device *oct = lio->oct_dev; 2240 struct octnic_ctrl_pkt nctrl; 2241 struct octnic_ctrl_params nparams; 2242 struct netdev_hw_addr *ha; 2243 u64 *mc; 2244 int ret, i; 2245 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 2246 2247 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2248 2249 /* Create a ctrl pkt command to be sent to core app. */ 2250 nctrl.ncmd.u64 = 0; 2251 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 2252 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2253 nctrl.ncmd.s.param2 = get_new_flags(netdev); 2254 nctrl.ncmd.s.param3 = mc_count; 2255 nctrl.ncmd.s.more = mc_count; 2256 nctrl.netpndev = (u64)netdev; 2257 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2258 2259 /* copy all the addresses into the udd */ 2260 i = 0; 2261 mc = &nctrl.udd[0]; 2262 netdev_for_each_mc_addr(ha, netdev) { 2263 *mc = 0; 2264 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 2265 /* no need to swap bytes */ 2266 2267 if (++mc > &nctrl.udd[mc_count]) 2268 break; 2269 } 2270 2271 /* Apparently, any activity in this call from the kernel has to 2272 * be atomic. So we won't wait for response. 2273 */ 2274 nctrl.wait_time = 0; 2275 2276 nparams.resp_order = OCTEON_RESP_NORESPONSE; 2277 2278 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2279 if (ret < 0) { 2280 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 2281 ret); 2282 } 2283 } 2284 2285 /** 2286 * \brief Net device set_mac_address 2287 * @param netdev network device 2288 */ 2289 static int liquidio_set_mac(struct net_device *netdev, void *p) 2290 { 2291 int ret = 0; 2292 struct lio *lio = GET_LIO(netdev); 2293 struct octeon_device *oct = lio->oct_dev; 2294 struct sockaddr *addr = (struct sockaddr *)p; 2295 struct octnic_ctrl_pkt nctrl; 2296 struct octnic_ctrl_params nparams; 2297 2298 if ((!is_valid_ether_addr(addr->sa_data)) || 2299 (ifstate_check(lio, LIO_IFSTATE_RUNNING))) 2300 return -EADDRNOTAVAIL; 2301 2302 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2303 2304 nctrl.ncmd.u64 = 0; 2305 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2306 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2307 nctrl.ncmd.s.param2 = 0; 2308 nctrl.ncmd.s.more = 1; 2309 nctrl.netpndev = (u64)netdev; 2310 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2311 nctrl.wait_time = 100; 2312 2313 nctrl.udd[0] = 0; 2314 /* The MAC Address is presented in network byte order. 
*/ 2315 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2316 2317 nparams.resp_order = OCTEON_RESP_ORDERED; 2318 2319 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2320 if (ret < 0) { 2321 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2322 return -ENOMEM; 2323 } 2324 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2325 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2326 2327 return 0; 2328 } 2329 2330 /** 2331 * \brief Net device get_stats 2332 * @param netdev network device 2333 */ 2334 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 2335 { 2336 struct lio *lio = GET_LIO(netdev); 2337 struct net_device_stats *stats = &netdev->stats; 2338 struct octeon_device *oct; 2339 u64 pkts = 0, drop = 0, bytes = 0; 2340 struct oct_droq_stats *oq_stats; 2341 struct oct_iq_stats *iq_stats; 2342 int i, iq_no, oq_no; 2343 2344 oct = lio->oct_dev; 2345 2346 for (i = 0; i < lio->linfo.num_txpciq; i++) { 2347 iq_no = lio->linfo.txpciq[i]; 2348 iq_stats = &oct->instr_queue[iq_no]->stats; 2349 pkts += iq_stats->tx_done; 2350 drop += iq_stats->tx_dropped; 2351 bytes += iq_stats->tx_tot_bytes; 2352 } 2353 2354 stats->tx_packets = pkts; 2355 stats->tx_bytes = bytes; 2356 stats->tx_dropped = drop; 2357 2358 pkts = 0; 2359 drop = 0; 2360 bytes = 0; 2361 2362 for (i = 0; i < lio->linfo.num_rxpciq; i++) { 2363 oq_no = lio->linfo.rxpciq[i]; 2364 oq_stats = &oct->droq[oq_no]->stats; 2365 pkts += oq_stats->rx_pkts_received; 2366 drop += (oq_stats->rx_dropped + 2367 oq_stats->dropped_nodispatch + 2368 oq_stats->dropped_toomany + 2369 oq_stats->dropped_nomem); 2370 bytes += oq_stats->rx_bytes_received; 2371 } 2372 2373 stats->rx_bytes = bytes; 2374 stats->rx_packets = pkts; 2375 stats->rx_dropped = drop; 2376 2377 return stats; 2378 } 2379 2380 /** 2381 * \brief Net device change_mtu 2382 * @param netdev network device 2383 */ 2384 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 2385 { 2386 struct lio *lio = GET_LIO(netdev); 2387 struct octeon_device *oct = lio->oct_dev; 2388 struct octnic_ctrl_pkt nctrl; 2389 struct octnic_ctrl_params nparams; 2390 int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE; 2391 int ret = 0; 2392 2393 /* Limit the MTU to make sure the ethernet packets are between 64 bytes 2394 * and 65535 bytes 2395 */ 2396 if ((max_frm_size < OCTNET_MIN_FRM_SIZE) || 2397 (max_frm_size > OCTNET_MAX_FRM_SIZE)) { 2398 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu); 2399 dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n", 2400 (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE), 2401 (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)); 2402 return -EINVAL; 2403 } 2404 2405 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2406 2407 nctrl.ncmd.u64 = 0; 2408 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; 2409 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2410 nctrl.ncmd.s.param2 = new_mtu; 2411 nctrl.wait_time = 100; 2412 nctrl.netpndev = (u64)netdev; 2413 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2414 2415 nparams.resp_order = OCTEON_RESP_ORDERED; 2416 2417 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2418 if (ret < 0) { 2419 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); 2420 return -1; 2421 } 2422 2423 lio->mtu = new_mtu; 2424 2425 return 0; 2426 } 2427 2428 /** 2429 * \brief Handler for SIOCSHWTSTAMP ioctl 2430 * @param netdev network device 2431 * @param ifr interface request 2432 * @param cmd command 2433 */ 2434 static int hwtstamp_ioctl(struct net_device 
*netdev, struct ifreq *ifr, int cmd) 2435 { 2436 struct hwtstamp_config conf; 2437 struct lio *lio = GET_LIO(netdev); 2438 2439 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2440 return -EFAULT; 2441 2442 if (conf.flags) 2443 return -EINVAL; 2444 2445 switch (conf.tx_type) { 2446 case HWTSTAMP_TX_ON: 2447 case HWTSTAMP_TX_OFF: 2448 break; 2449 default: 2450 return -ERANGE; 2451 } 2452 2453 switch (conf.rx_filter) { 2454 case HWTSTAMP_FILTER_NONE: 2455 break; 2456 case HWTSTAMP_FILTER_ALL: 2457 case HWTSTAMP_FILTER_SOME: 2458 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2459 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2460 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2461 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2462 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2463 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2464 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2465 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2466 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2467 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2468 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2469 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2470 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2471 break; 2472 default: 2473 return -ERANGE; 2474 } 2475 2476 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2477 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2478 2479 else 2480 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2481 2482 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2483 } 2484 2485 /** 2486 * \brief ioctl handler 2487 * @param netdev network device 2488 * @param ifr interface request 2489 * @param cmd command 2490 */ 2491 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2492 { 2493 switch (cmd) { 2494 case SIOCSHWTSTAMP: 2495 return hwtstamp_ioctl(netdev, ifr, cmd); 2496 default: 2497 return -EOPNOTSUPP; 2498 } 2499 } 2500 2501 /** 2502 * \brief handle a Tx timestamp response 2503 * @param status response status 2504 * @param buf pointer to skb 2505 */ 2506 static void handle_timestamp(struct octeon_device *oct, 2507 u32 status, 2508 void *buf) 2509 { 2510 struct octnet_buf_free_info *finfo; 2511 struct octeon_soft_command *sc; 2512 struct oct_timestamp_resp *resp; 2513 struct lio *lio; 2514 struct sk_buff *skb = (struct sk_buff *)buf; 2515 2516 finfo = (struct octnet_buf_free_info *)skb->cb; 2517 lio = finfo->lio; 2518 sc = finfo->sc; 2519 oct = lio->oct_dev; 2520 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2521 2522 if (status != OCTEON_REQUEST_DONE) { 2523 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 2524 CVM_CAST64(status)); 2525 resp->timestamp = 0; 2526 } 2527 2528 octeon_swap_8B_data(&resp->timestamp, 1); 2529 2530 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 2531 struct skb_shared_hwtstamps ts; 2532 u64 ns = resp->timestamp; 2533 2534 netif_info(lio, tx_done, lio->netdev, 2535 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2536 skb, (unsigned long long)ns); 2537 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2538 skb_tstamp_tx(skb, &ts); 2539 } 2540 2541 octeon_free_soft_command(oct, sc); 2542 recv_buffer_free(skb); 2543 } 2544 2545 /** \brief Send a data packet that will be timestamped 2546 * @param oct octeon device 2547 * @param ndata pointer to network data 2548 * @param finfo pointer to private network data 2549 */ 2550 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 2551 struct octnic_data_pkt *ndata, 2552 struct octnet_buf_free_info *finfo, 2553 int xmit_more) 2554 { 2555 int retval; 2556 struct octeon_soft_command *sc; 2557 struct octeon_instr_ih *ih; 2558 struct octeon_instr_rdp *rdp; 2559 struct lio *lio; 2560 int ring_doorbell; 2561 2562 lio = finfo->lio; 2563 2564 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2565 sizeof(struct oct_timestamp_resp)); 2566 finfo->sc = sc; 2567 2568 if (!sc) { 2569 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2570 return IQ_SEND_FAILED; 2571 } 2572 2573 if (ndata->reqtype == REQTYPE_NORESP_NET) 2574 ndata->reqtype = REQTYPE_RESP_NET; 2575 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2576 ndata->reqtype = REQTYPE_RESP_NET_SG; 2577 2578 sc->callback = handle_timestamp; 2579 sc->callback_arg = finfo->skb; 2580 sc->iq_no = ndata->q_no; 2581 2582 ih = (struct octeon_instr_ih *)&sc->cmd.ih; 2583 rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; 2584 2585 ring_doorbell = !xmit_more; 2586 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2587 sc, ih->dlengsz, ndata->reqtype); 2588 2589 if (retval) { 2590 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2591 retval); 2592 octeon_free_soft_command(oct, sc); 2593 } else { 2594 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2595 } 2596 2597 return retval; 2598 } 2599 2600 static inline int is_ipv4(struct sk_buff *skb) 2601 { 2602 return (skb->protocol == htons(ETH_P_IP)) && 2603 (ip_hdr(skb)->version == 4); 2604 } 2605 2606 static inline int is_vlan(struct sk_buff *skb) 2607 { 2608 return skb->protocol == htons(ETH_P_8021Q); 2609 } 2610 2611 static inline int is_ip_fragmented(struct sk_buff *skb) 2612 { 2613 /* The Don't fragment and Reserved flag fields are ignored. 2614 * IP is fragmented if 2615 * - the More fragments bit is set (indicating this IP is a fragment 2616 * with more to follow; the current offset could be 0). 2617 * - the offset field is non-zero. 2618 */ 2619 return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
1 : 0; 2620 } 2621 2622 static inline int is_ipv6(struct sk_buff *skb) 2623 { 2624 return (skb->protocol == htons(ETH_P_IPV6)) && 2625 (ipv6_hdr(skb)->version == 6); 2626 } 2627 2628 static inline int is_with_extn_hdr(struct sk_buff *skb) 2629 { 2630 return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) && 2631 (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP); 2632 } 2633 2634 static inline int is_tcpudp(struct sk_buff *skb) 2635 { 2636 return (ip_hdr(skb)->protocol == IPPROTO_TCP) || 2637 (ip_hdr(skb)->protocol == IPPROTO_UDP); 2638 } 2639 2640 static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb) 2641 { 2642 u32 tag; 2643 struct iphdr *iphdr = ip_hdr(skb); 2644 2645 tag = crc32(0, &iphdr->protocol, 1); 2646 tag = crc32(tag, (u8 *)&iphdr->saddr, 8); 2647 tag = crc32(tag, skb_transport_header(skb), 4); 2648 return tag; 2649 } 2650 2651 static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb) 2652 { 2653 u32 tag; 2654 struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); 2655 2656 tag = crc32(0, &ipv6hdr->nexthdr, 1); 2657 tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32); 2658 tag = crc32(tag, skb_transport_header(skb), 4); 2659 return tag; 2660 } 2661 2662 /** \brief Transmit networks packets to the Octeon interface 2663 * @param skbuff skbuff struct to be passed to network layer. 2664 * @param netdev pointer to network device 2665 * @returns whether the packet was transmitted to the device okay or not 2666 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2667 */ 2668 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2669 { 2670 struct lio *lio; 2671 struct octnet_buf_free_info *finfo; 2672 union octnic_cmd_setup cmdsetup; 2673 struct octnic_data_pkt ndata; 2674 struct octeon_device *oct; 2675 struct oct_iq_stats *stats; 2676 int cpu = 0, status = 0; 2677 int q_idx = 0, iq_no = 0; 2678 int xmit_more; 2679 u32 tag = 0; 2680 2681 lio = GET_LIO(netdev); 2682 oct = lio->oct_dev; 2683 2684 if (netif_is_multiqueue(netdev)) { 2685 cpu = skb->queue_mapping; 2686 q_idx = (cpu & (lio->linfo.num_txpciq - 1)); 2687 iq_no = lio->linfo.txpciq[q_idx]; 2688 } else { 2689 iq_no = lio->txq; 2690 } 2691 2692 stats = &oct->instr_queue[iq_no]->stats; 2693 2694 /* Check for all conditions in which the current packet cannot be 2695 * transmitted. 2696 */ 2697 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2698 (!lio->linfo.link.s.status) || 2699 (skb->len <= 0)) { 2700 netif_info(lio, tx_err, lio->netdev, 2701 "Transmit failed link_status : %d\n", 2702 lio->linfo.link.s.status); 2703 goto lio_xmit_failed; 2704 } 2705 2706 /* Use space in skb->cb to store info used to unmap and 2707 * free the buffers. 2708 */ 2709 finfo = (struct octnet_buf_free_info *)skb->cb; 2710 finfo->lio = lio; 2711 finfo->skb = skb; 2712 finfo->sc = NULL; 2713 2714 /* Prepare the attributes for the data to be passed to OSI. 
*/ 2715 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2716 2717 ndata.buf = (void *)finfo; 2718 2719 ndata.q_no = iq_no; 2720 2721 if (netif_is_multiqueue(netdev)) { 2722 if (octnet_iq_is_full(oct, ndata.q_no)) { 2723 /* defer sending if queue is full */ 2724 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2725 ndata.q_no); 2726 stats->tx_iq_busy++; 2727 return NETDEV_TX_BUSY; 2728 } 2729 } else { 2730 if (octnet_iq_is_full(oct, lio->txq)) { 2731 /* defer sending if queue is full */ 2732 stats->tx_iq_busy++; 2733 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2734 ndata.q_no); 2735 return NETDEV_TX_BUSY; 2736 } 2737 } 2738 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2739 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no ); 2740 */ 2741 2742 ndata.datasize = skb->len; 2743 2744 cmdsetup.u64 = 0; 2745 cmdsetup.s.ifidx = lio->linfo.ifidx; 2746 2747 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2748 if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) { 2749 tag = get_ipv4_5tuple_tag(skb); 2750 2751 cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; 2752 2753 if (ip_hdr(skb)->ihl > 5) 2754 cmdsetup.s.ipv4opts_ipv6exthdr = 2755 OCT_PKT_PARAM_IPV4OPTS; 2756 2757 } else if (is_ipv6(skb)) { 2758 tag = get_ipv6_5tuple_tag(skb); 2759 2760 cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; 2761 2762 if (is_with_extn_hdr(skb)) 2763 cmdsetup.s.ipv4opts_ipv6exthdr = 2764 OCT_PKT_PARAM_IPV6EXTHDR; 2765 2766 } else if (is_vlan(skb)) { 2767 if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto 2768 == htons(ETH_P_IP) && 2769 !is_ip_fragmented(skb) && is_tcpudp(skb)) { 2770 tag = get_ipv4_5tuple_tag(skb); 2771 2772 cmdsetup.s.cksum_offset = 2773 sizeof(struct vlan_ethhdr) + 1; 2774 2775 if (ip_hdr(skb)->ihl > 5) 2776 cmdsetup.s.ipv4opts_ipv6exthdr = 2777 OCT_PKT_PARAM_IPV4OPTS; 2778 2779 } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto 2780 == htons(ETH_P_IPV6)) { 2781 tag = get_ipv6_5tuple_tag(skb); 2782 2783 cmdsetup.s.cksum_offset = 2784 sizeof(struct vlan_ethhdr) + 1; 2785 2786 if (is_with_extn_hdr(skb)) 2787 cmdsetup.s.ipv4opts_ipv6exthdr = 2788 OCT_PKT_PARAM_IPV6EXTHDR; 2789 } 2790 } 2791 } 2792 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2793 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2794 cmdsetup.s.timestamp = 1; 2795 } 2796 2797 if (skb_shinfo(skb)->nr_frags == 0) { 2798 cmdsetup.s.u.datasize = skb->len; 2799 octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); 2800 /* Offload checksum calculation for TCP/UDP packets */ 2801 ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, 2802 skb->data, 2803 skb->len, 2804 DMA_TO_DEVICE); 2805 if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { 2806 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2807 __func__); 2808 return NETDEV_TX_BUSY; 2809 } 2810 2811 finfo->dptr = ndata.cmd.dptr; 2812 2813 ndata.reqtype = REQTYPE_NORESP_NET; 2814 2815 } else { 2816 int i, frags; 2817 struct skb_frag_struct *frag; 2818 struct octnic_gather *g; 2819 2820 spin_lock(&lio->lock); 2821 g = (struct octnic_gather *)list_delete_head(&lio->glist); 2822 spin_unlock(&lio->lock); 2823 2824 if (!g) { 2825 netif_info(lio, tx_err, lio->netdev, 2826 "Transmit scatter gather: glist null!\n"); 2827 goto lio_xmit_failed; 2828 } 2829 2830 cmdsetup.s.gather = 1; 2831 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2832 octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); 2833 2834 memset(g->sg, 0, g->sg_size); 2835 2836 g->sg[0].ptr[0] = 
dma_map_single(&oct->pci_dev->dev, 2837 skb->data, 2838 (skb->len - skb->data_len), 2839 DMA_TO_DEVICE); 2840 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2841 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2842 __func__); 2843 return NETDEV_TX_BUSY; 2844 } 2845 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2846 2847 frags = skb_shinfo(skb)->nr_frags; 2848 i = 1; 2849 while (frags--) { 2850 frag = &skb_shinfo(skb)->frags[i - 1]; 2851 2852 g->sg[(i >> 2)].ptr[(i & 3)] = 2853 dma_map_page(&oct->pci_dev->dev, 2854 frag->page.p, 2855 frag->page_offset, 2856 frag->size, 2857 DMA_TO_DEVICE); 2858 2859 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 2860 i++; 2861 } 2862 2863 ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, 2864 g->sg, g->sg_size, 2865 DMA_TO_DEVICE); 2866 if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { 2867 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2868 __func__); 2869 dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], 2870 skb->len - skb->data_len, 2871 DMA_TO_DEVICE); 2872 return NETDEV_TX_BUSY; 2873 } 2874 2875 finfo->dptr = ndata.cmd.dptr; 2876 finfo->g = g; 2877 2878 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2879 } 2880 2881 if (skb_shinfo(skb)->gso_size) { 2882 struct octeon_instr_irh *irh = 2883 (struct octeon_instr_irh *)&ndata.cmd.irh; 2884 union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0]; 2885 2886 irh->len = 1; /* to indicate that ossp[0] contains tx_info */ 2887 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2888 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2889 } 2890 2891 xmit_more = skb->xmit_more; 2892 2893 if (unlikely(cmdsetup.s.timestamp)) 2894 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2895 else 2896 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2897 if (status == IQ_SEND_FAILED) 2898 goto lio_xmit_failed; 2899 2900 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2901 2902 if (status == IQ_SEND_STOP) 2903 stop_q(lio->netdev, q_idx); 2904 2905 netdev->trans_start = jiffies; 2906 2907 stats->tx_done++; 2908 stats->tx_tot_bytes += skb->len; 2909 2910 return NETDEV_TX_OK; 2911 2912 lio_xmit_failed: 2913 stats->tx_dropped++; 2914 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2915 iq_no, stats->tx_dropped); 2916 dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr, 2917 ndata.datasize, DMA_TO_DEVICE); 2918 recv_buffer_free(skb); 2919 return NETDEV_TX_OK; 2920 } 2921 2922 /** \brief Network device Tx timeout 2923 * @param netdev pointer to network device 2924 */ 2925 static void liquidio_tx_timeout(struct net_device *netdev) 2926 { 2927 struct lio *lio; 2928 2929 lio = GET_LIO(netdev); 2930 2931 netif_info(lio, tx_err, lio->netdev, 2932 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2933 netdev->stats.tx_dropped); 2934 netdev->trans_start = jiffies; 2935 txqs_wake(netdev); 2936 } 2937 2938 int liquidio_set_feature(struct net_device *netdev, int cmd) 2939 { 2940 struct lio *lio = GET_LIO(netdev); 2941 struct octeon_device *oct = lio->oct_dev; 2942 struct octnic_ctrl_pkt nctrl; 2943 struct octnic_ctrl_params nparams; 2944 int ret = 0; 2945 2946 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2947 2948 nctrl.ncmd.u64 = 0; 2949 nctrl.ncmd.s.cmd = cmd; 2950 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2951 nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6; 2952 nctrl.wait_time = 100; 2953 nctrl.netpndev = (u64)netdev; 2954 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2955 2956 
nparams.resp_order = OCTEON_RESP_NORESPONSE; 2957 2958 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2959 if (ret < 0) { 2960 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", 2961 ret); 2962 } 2963 return ret; 2964 } 2965 2966 /** \brief Net device fix features 2967 * @param netdev pointer to network device 2968 * @param request features requested 2969 * @returns updated features list 2970 */ 2971 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2972 netdev_features_t request) 2973 { 2974 struct lio *lio = netdev_priv(netdev); 2975 2976 if ((request & NETIF_F_RXCSUM) && 2977 !(lio->dev_capability & NETIF_F_RXCSUM)) 2978 request &= ~NETIF_F_RXCSUM; 2979 2980 if ((request & NETIF_F_HW_CSUM) && 2981 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2982 request &= ~NETIF_F_HW_CSUM; 2983 2984 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2985 request &= ~NETIF_F_TSO; 2986 2987 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2988 request &= ~NETIF_F_TSO6; 2989 2990 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2991 request &= ~NETIF_F_LRO; 2992 2993 /*Disable LRO if RXCSUM is off */ 2994 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2995 (lio->dev_capability & NETIF_F_LRO)) 2996 request &= ~NETIF_F_LRO; 2997 2998 return request; 2999 } 3000 3001 /** \brief Net device set features 3002 * @param netdev pointer to network device 3003 * @param features features to enable/disable 3004 */ 3005 static int liquidio_set_features(struct net_device *netdev, 3006 netdev_features_t features) 3007 { 3008 struct lio *lio = netdev_priv(netdev); 3009 3010 if (!((netdev->features ^ features) & NETIF_F_LRO)) 3011 return 0; 3012 3013 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 3014 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); 3015 else if (!(features & NETIF_F_LRO) && 3016 (lio->dev_capability & NETIF_F_LRO)) 3017 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE); 3018 3019 return 0; 3020 } 3021 3022 static struct net_device_ops lionetdevops = { 3023 .ndo_open = liquidio_open, 3024 .ndo_stop = liquidio_stop, 3025 .ndo_start_xmit = liquidio_xmit, 3026 .ndo_get_stats = liquidio_get_stats, 3027 .ndo_set_mac_address = liquidio_set_mac, 3028 .ndo_set_rx_mode = liquidio_set_mcast_list, 3029 .ndo_tx_timeout = liquidio_tx_timeout, 3030 .ndo_change_mtu = liquidio_change_mtu, 3031 .ndo_do_ioctl = liquidio_ioctl, 3032 .ndo_fix_features = liquidio_fix_features, 3033 .ndo_set_features = liquidio_set_features, 3034 }; 3035 3036 /** \brief Entry point for the liquidio module 3037 */ 3038 static int __init liquidio_init(void) 3039 { 3040 int i; 3041 struct handshake *hs; 3042 3043 init_completion(&first_stage); 3044 3045 octeon_init_device_list(conf_type); 3046 3047 if (liquidio_init_pci()) 3048 return -EINVAL; 3049 3050 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3051 3052 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3053 hs = &handshake[i]; 3054 if (hs->pci_dev) { 3055 wait_for_completion(&hs->init); 3056 if (!hs->init_ok) { 3057 /* init handshake failed */ 3058 dev_err(&hs->pci_dev->dev, 3059 "Failed to init device\n"); 3060 liquidio_deinit_pci(); 3061 return -EIO; 3062 } 3063 } 3064 } 3065 3066 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3067 hs = &handshake[i]; 3068 if (hs->pci_dev) { 3069 wait_for_completion_timeout(&hs->started, 3070 msecs_to_jiffies(30000)); 3071 if (!hs->started_ok) { 3072 /* starter handshake 
failed */ 3073 dev_err(&hs->pci_dev->dev, 3074 "Firmware failed to start\n"); 3075 liquidio_deinit_pci(); 3076 return -EIO; 3077 } 3078 } 3079 } 3080 3081 return 0; 3082 } 3083 3084 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3085 { 3086 struct octeon_device *oct = (struct octeon_device *)buf; 3087 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3088 int ifidx = 0; 3089 union oct_link_status *ls; 3090 int i; 3091 3092 if ((recv_pkt->buffer_size[0] != sizeof(*ls)) || 3093 (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) { 3094 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3095 recv_pkt->buffer_size[0], 3096 recv_pkt->rh.r_nic_info.ifidx); 3097 goto nic_info_err; 3098 } 3099 3100 ifidx = recv_pkt->rh.r_nic_info.ifidx; 3101 ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); 3102 3103 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3104 3105 update_link_status(oct->props[ifidx].netdev, ls); 3106 3107 nic_info_err: 3108 for (i = 0; i < recv_pkt->buffer_count; i++) 3109 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3110 octeon_free_recv_info(recv_info); 3111 return 0; 3112 } 3113 3114 /** 3115 * \brief Setup network interfaces 3116 * @param octeon_dev octeon device 3117 * 3118 * Called during init time for each device. It assumes the NIC 3119 * is already up and running. The link information for each 3120 * interface is passed in link_info. 3121 */ 3122 static int setup_nic_devices(struct octeon_device *octeon_dev) 3123 { 3124 struct lio *lio = NULL; 3125 struct net_device *netdev; 3126 u8 mac[6], i, j; 3127 struct octeon_soft_command *sc; 3128 struct liquidio_if_cfg_context *ctx; 3129 struct liquidio_if_cfg_resp *resp; 3130 struct octdev_props *props; 3131 int retval, num_iqueues, num_oqueues, q_no; 3132 u64 q_mask; 3133 int num_cpus = num_online_cpus(); 3134 union oct_nic_if_cfg if_cfg; 3135 unsigned int base_queue; 3136 unsigned int gmx_port_id; 3137 u32 resp_size, ctx_size; 3138 3139 /* This is to handle link status changes */ 3140 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3141 OPCODE_NIC_INFO, 3142 lio_nic_info, octeon_dev); 3143 3144 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3145 * They are handled directly. 
3146 */ 3147 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3148 free_netbuf); 3149 3150 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3151 free_netsgbuf); 3152 3153 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3154 free_netsgbuf_with_resp); 3155 3156 for (i = 0; i < octeon_dev->ifcount; i++) { 3157 resp_size = sizeof(struct liquidio_if_cfg_resp); 3158 ctx_size = sizeof(struct liquidio_if_cfg_context); 3159 sc = (struct octeon_soft_command *) 3160 octeon_alloc_soft_command(octeon_dev, 0, 3161 resp_size, ctx_size); 3162 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3163 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 3164 3165 num_iqueues = 3166 CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i); 3167 num_oqueues = 3168 CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i); 3169 base_queue = 3170 CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i); 3171 gmx_port_id = 3172 CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i); 3173 if (num_iqueues > num_cpus) 3174 num_iqueues = num_cpus; 3175 if (num_oqueues > num_cpus) 3176 num_oqueues = num_cpus; 3177 dev_dbg(&octeon_dev->pci_dev->dev, 3178 "requesting config for interface %d, iqs %d, oqs %d\n", 3179 i, num_iqueues, num_oqueues); 3180 ACCESS_ONCE(ctx->cond) = 0; 3181 ctx->octeon_id = lio_get_device_id(octeon_dev); 3182 init_waitqueue_head(&ctx->wc); 3183 3184 if_cfg.u64 = 0; 3185 if_cfg.s.num_iqueues = num_iqueues; 3186 if_cfg.s.num_oqueues = num_oqueues; 3187 if_cfg.s.base_queue = base_queue; 3188 if_cfg.s.gmx_port_id = gmx_port_id; 3189 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3190 OPCODE_NIC_IF_CFG, i, 3191 if_cfg.u64, 0); 3192 3193 sc->callback = if_cfg_callback; 3194 sc->callback_arg = sc; 3195 sc->wait_time = 1000; 3196 3197 retval = octeon_send_soft_command(octeon_dev, sc); 3198 if (retval) { 3199 dev_err(&octeon_dev->pci_dev->dev, 3200 "iq/oq config failed status: %x\n", 3201 retval); 3202 /* Soft instr is freed by driver in case of failure. */ 3203 goto setup_nic_dev_fail; 3204 } 3205 3206 /* Sleep on a wait queue till the cond flag indicates that the 3207 * response arrived or timed-out. 3208 */ 3209 sleep_cond(&ctx->wc, &ctx->cond); 3210 retval = resp->status; 3211 if (retval) { 3212 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3213 goto setup_nic_dev_fail; 3214 } 3215 3216 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3217 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3218 3219 num_iqueues = hweight64(resp->cfg_info.iqmask); 3220 num_oqueues = hweight64(resp->cfg_info.oqmask); 3221 3222 if (!(num_iqueues) || !(num_oqueues)) { 3223 dev_err(&octeon_dev->pci_dev->dev, 3224 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3225 resp->cfg_info.iqmask, 3226 resp->cfg_info.oqmask); 3227 goto setup_nic_dev_fail; 3228 } 3229 dev_dbg(&octeon_dev->pci_dev->dev, 3230 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 3231 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3232 num_iqueues, num_oqueues); 3233 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 3234 3235 if (!netdev) { 3236 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3237 goto setup_nic_dev_fail; 3238 } 3239 3240 props = &octeon_dev->props[i]; 3241 props->netdev = netdev; 3242 3243 if (num_iqueues > 1) 3244 lionetdevops.ndo_select_queue = select_q; 3245 3246 /* Associate the routines that will handle different 3247 * netdev tasks. 
3248 */ 3249 netdev->netdev_ops = &lionetdevops; 3250 3251 lio = GET_LIO(netdev); 3252 3253 memset(lio, 0, sizeof(struct lio)); 3254 3255 lio->linfo.ifidx = resp->cfg_info.ifidx; 3256 lio->ifidx = resp->cfg_info.ifidx; 3257 3258 lio->linfo.num_rxpciq = num_oqueues; 3259 lio->linfo.num_txpciq = num_iqueues; 3260 q_mask = resp->cfg_info.oqmask; 3261 /* q_mask is 0-based and already verified mask is nonzero */ 3262 for (j = 0; j < num_oqueues; j++) { 3263 q_no = __ffs64(q_mask); 3264 q_mask &= (~(1UL << q_no)); 3265 lio->linfo.rxpciq[j] = q_no; 3266 } 3267 q_mask = resp->cfg_info.iqmask; 3268 for (j = 0; j < num_iqueues; j++) { 3269 q_no = __ffs64(q_mask); 3270 q_mask &= (~(1UL << q_no)); 3271 lio->linfo.txpciq[j] = q_no; 3272 } 3273 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3274 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3275 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3276 3277 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3278 3279 lio->dev_capability = NETIF_F_HIGHDMA 3280 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 3281 | NETIF_F_SG | NETIF_F_RXCSUM 3282 | NETIF_F_TSO | NETIF_F_TSO6 3283 | NETIF_F_LRO; 3284 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3285 3286 netdev->features = lio->dev_capability; 3287 netdev->vlan_features = lio->dev_capability; 3288 3289 netdev->hw_features = lio->dev_capability; 3290 3291 /* Point to the properties for octeon device to which this 3292 * interface belongs. 3293 */ 3294 lio->oct_dev = octeon_dev; 3295 lio->octprops = props; 3296 lio->netdev = netdev; 3297 spin_lock_init(&lio->lock); 3298 3299 dev_dbg(&octeon_dev->pci_dev->dev, 3300 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3301 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3302 3303 /* 64-bit swap required on LE machines */ 3304 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3305 for (j = 0; j < 6; j++) 3306 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3307 3308 /* Copy MAC Address to OS network device structure */ 3309 3310 ether_addr_copy(netdev->dev_addr, mac); 3311 3312 if (setup_io_queues(octeon_dev, netdev)) { 3313 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3314 goto setup_nic_dev_fail; 3315 } 3316 3317 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3318 3319 /* By default all interfaces on a single Octeon uses the same 3320 * tx and rx queues 3321 */ 3322 lio->txq = lio->linfo.txpciq[0]; 3323 lio->rxq = lio->linfo.rxpciq[0]; 3324 3325 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3326 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3327 3328 if (setup_glist(lio)) { 3329 dev_err(&octeon_dev->pci_dev->dev, 3330 "Gather list allocation failed\n"); 3331 goto setup_nic_dev_fail; 3332 } 3333 3334 /* Register ethtool support */ 3335 liquidio_set_ethtool_ops(netdev); 3336 3337 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); 3338 3339 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3340 liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE); 3341 3342 /* Register the network device with the OS */ 3343 if (register_netdev(netdev)) { 3344 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3345 goto setup_nic_dev_fail; 3346 } 3347 3348 dev_dbg(&octeon_dev->pci_dev->dev, 3349 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3350 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3351 netif_carrier_off(netdev); 3352 3353 if (lio->linfo.link.s.status) { 3354 netif_carrier_on(netdev); 3355 start_txq(netdev); 3356 } else { 3357 netif_carrier_off(netdev); 3358 } 3359 3360 ifstate_set(lio, 
LIO_IFSTATE_REGISTERED); 3361 3362 dev_dbg(&octeon_dev->pci_dev->dev, 3363 "NIC ifidx:%d Setup successful\n", i); 3364 3365 octeon_free_soft_command(octeon_dev, sc); 3366 } 3367 3368 return 0; 3369 3370 setup_nic_dev_fail: 3371 3372 octeon_free_soft_command(octeon_dev, sc); 3373 3374 while (i--) { 3375 dev_err(&octeon_dev->pci_dev->dev, 3376 "NIC ifidx:%d Setup failed\n", i); 3377 liquidio_destroy_nic_device(octeon_dev, i); 3378 } 3379 return -ENODEV; 3380 } 3381 3382 /** 3383 * \brief initialize the NIC 3384 * @param oct octeon device 3385 * 3386 * This initialization routine is called once the Octeon device application is 3387 * up and running 3388 */ 3389 static int liquidio_init_nic_module(struct octeon_device *oct) 3390 { 3391 struct oct_intrmod_cfg *intrmod_cfg; 3392 int retval = 0; 3393 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3394 3395 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3396 3397 /* only default iq and oq were initialized 3398 * initialize the rest as well 3399 */ 3400 /* run port_config command for each port */ 3401 oct->ifcount = num_nic_ports; 3402 3403 memset(oct->props, 0, 3404 sizeof(struct octdev_props) * num_nic_ports); 3405 3406 retval = setup_nic_devices(oct); 3407 if (retval) { 3408 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3409 goto octnet_init_failure; 3410 } 3411 3412 liquidio_ptp_init(oct); 3413 3414 /* Initialize interrupt moderation params */ 3415 intrmod_cfg = &((struct octeon_device *)oct)->intrmod; 3416 intrmod_cfg->intrmod_enable = 1; 3417 intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; 3418 intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; 3419 intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; 3420 intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER; 3421 intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER; 3422 intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER; 3423 intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER; 3424 3425 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3426 3427 return retval; 3428 3429 octnet_init_failure: 3430 3431 oct->ifcount = 0; 3432 3433 return retval; 3434 } 3435 3436 /** 3437 * \brief starter callback that invokes the remaining initialization work after 3438 * the NIC is up and running. 3439 * @param octptr work struct work_struct 3440 */ 3441 static void nic_starter(struct work_struct *work) 3442 { 3443 struct octeon_device *oct; 3444 struct cavium_wk *wk = (struct cavium_wk *)work; 3445 3446 oct = (struct octeon_device *)wk->ctxptr; 3447 3448 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3449 return; 3450 3451 /* If the status of the device is CORE_OK, the core 3452 * application has reported its application type. Call 3453 * any registered handlers now and move to the RUNNING 3454 * state. 3455 */ 3456 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { 3457 schedule_delayed_work(&oct->nic_poll_work.work, 3458 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3459 return; 3460 } 3461 3462 atomic_set(&oct->status, OCT_DEV_RUNNING); 3463 3464 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { 3465 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); 3466 3467 if (liquidio_init_nic_module(oct)) 3468 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); 3469 else 3470 handshake[oct->octeon_id].started_ok = 1; 3471 } else { 3472 dev_err(&oct->pci_dev->dev, 3473 "Unexpected application running on NIC (%d). 
Check firmware.\n", 3474 oct->app_mode); 3475 } 3476 3477 complete(&handshake[oct->octeon_id].started); 3478 } 3479 3480 /** 3481 * \brief Device initialization for each Octeon device that is probed 3482 * @param octeon_dev octeon device 3483 */ 3484 static int octeon_device_init(struct octeon_device *octeon_dev) 3485 { 3486 int j, ret; 3487 struct octeon_device_priv *oct_priv = 3488 (struct octeon_device_priv *)octeon_dev->priv; 3489 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); 3490 3491 /* Enable access to the octeon device and make its DMA capability 3492 * known to the OS. 3493 */ 3494 if (octeon_pci_os_setup(octeon_dev)) 3495 return 1; 3496 3497 /* Identify the Octeon type and map the BAR address space. */ 3498 if (octeon_chip_specific_setup(octeon_dev)) { 3499 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n"); 3500 return 1; 3501 } 3502 3503 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE); 3504 3505 octeon_dev->app_mode = CVM_DRV_INVALID_APP; 3506 3507 /* Do a soft reset of the Octeon device. */ 3508 if (octeon_dev->fn_list.soft_reset(octeon_dev)) 3509 return 1; 3510 3511 /* Initialize the dispatch mechanism used to push packets arriving on 3512 * Octeon Output queues. 3513 */ 3514 if (octeon_init_dispatch_list(octeon_dev)) 3515 return 1; 3516 3517 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3518 OPCODE_NIC_CORE_DRV_ACTIVE, 3519 octeon_core_drv_init, 3520 octeon_dev); 3521 3522 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); 3523 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; 3524 schedule_delayed_work(&octeon_dev->nic_poll_work.work, 3525 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3526 3527 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); 3528 3529 octeon_set_io_queues_off(octeon_dev); 3530 3531 /* Setup the data structures that manage this Octeon's Input queues. */ 3532 if (octeon_setup_instr_queues(octeon_dev)) { 3533 dev_err(&octeon_dev->pci_dev->dev, 3534 "instruction queue initialization failed\n"); 3535 /* On error, release any previously allocated queues */ 3536 for (j = 0; j < octeon_dev->num_iqs; j++) 3537 octeon_delete_instr_queue(octeon_dev, j); 3538 return 1; 3539 } 3540 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 3541 3542 /* Initialize soft command buffer pool 3543 */ 3544 if (octeon_setup_sc_buffer_pool(octeon_dev)) { 3545 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n"); 3546 return 1; 3547 } 3548 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 3549 3550 /* Initialize lists to manage the requests of different types that 3551 * arrive from user & kernel applications for this octeon device. 3552 */ 3553 if (octeon_setup_response_list(octeon_dev)) { 3554 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n"); 3555 return 1; 3556 } 3557 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); 3558 3559 if (octeon_setup_output_queues(octeon_dev)) { 3560 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); 3561 /* Release any previously allocated queues */ 3562 for (j = 0; j < octeon_dev->num_oqs; j++) 3563 octeon_delete_droq(octeon_dev, j); 3564 return 1; } 3565 3566 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); 3567 3568 /* The input and output queue registers were setup earlier (the queues 3569 * were not enabled). Any additional registers that need to be 3570 * programmed should be done now.
3571 */ 3572 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 3573 if (ret) { 3574 dev_err(&octeon_dev->pci_dev->dev, 3575 "Failed to configure device registers\n"); 3576 return ret; 3577 } 3578 3579 /* Initialize the tasklet that handles output queue packet processing.*/ 3580 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); 3581 tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh, 3582 (unsigned long)octeon_dev); 3583 3584 /* Setup the interrupt handler and record the INT SUM register address 3585 */ 3586 octeon_setup_interrupt(octeon_dev); 3587 3588 /* Enable Octeon device interrupts */ 3589 octeon_dev->fn_list.enable_interrupt(octeon_dev->chip); 3590 3591 /* Enable the input and output queues for this Octeon device */ 3592 octeon_dev->fn_list.enable_io_queues(octeon_dev); 3593 3594 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); 3595 3596 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); 3597 3598 if (ddr_timeout == 0) { 3599 dev_info(&octeon_dev->pci_dev->dev, 3600 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); 3601 } 3602 3603 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); 3604 3605 /* Wait for the octeon to initialize DDR after the soft-reset. */ 3606 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); 3607 if (ret) { 3608 dev_err(&octeon_dev->pci_dev->dev, 3609 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", 3610 ret); 3611 return 1; 3612 } 3613 3614 if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) { 3615 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); 3616 return 1; 3617 } 3618 3619 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); 3620 ret = octeon_init_consoles(octeon_dev); 3621 if (ret) { 3622 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); 3623 return 1; 3624 } 3625 ret = octeon_add_console(octeon_dev, 0); 3626 if (ret) { 3627 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); 3628 return 1; 3629 } 3630 3631 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); 3632 3633 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); 3634 ret = load_firmware(octeon_dev); 3635 if (ret) { 3636 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); 3637 return 1; 3638 } 3639 3640 handshake[octeon_dev->octeon_id].init_ok = 1; 3641 complete(&handshake[octeon_dev->octeon_id].init); 3642 3643 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); 3644 3645 /* Send Credit for Octeon Output queues. Credits are always sent after 3646 * the output queue is enabled. 3647 */ 3648 for (j = 0; j < octeon_dev->num_oqs; j++) 3649 writel(octeon_dev->droq[j]->max_count, 3650 octeon_dev->droq[j]->pkts_credit_reg); 3651 3652 /* Packets can start arriving on the output queues from this point. */ 3653 3654 return 0; 3655 } 3656 3657 /** 3658 * \brief Exits the module 3659 */ 3660 static void __exit liquidio_exit(void) 3661 { 3662 liquidio_deinit_pci(); 3663 3664 pr_info("LiquidIO network module is now unloaded\n"); 3665 } 3666 3667 module_init(liquidio_init); 3668 module_exit(liquidio_exit); 3669