/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <linux/ethtool.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

/* Declared as int so the type matches module_param(..., int, ...). */
static int console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
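/* Usage note (illustrative): parameters registered with mode 0644 above
 * (ddr_timeout, console_bitmask, debug) can be set at module load time and
 * also read or changed through sysfs. Assuming the module is built as
 * liquidio.ko, for example:
 *
 *   modprobe liquidio ddr_timeout=5000 console_bitmask=0x1 debug=0x7
 *   cat /sys/module/liquidio/parameters/ddr_timeout
 *
 * fw_type is registered with mode 0000, so it is load-time only and has no
 * sysfs entry.
 */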
static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");

/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS               0x01
#define   LIO_IFSTATE_REGISTERED             0x02
#define   LIO_IFSTATE_RUNNING                0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED   0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in the list of gather components maintained by
 * the NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted by to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate the max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;
};

/** This structure is used by the NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-47. Piggybacked soft command, if any */
	struct octeon_soft_command *sc;
};

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

struct octeon_device_priv {
	/** Tasklet structures for this device.
*/ 202 struct tasklet_struct droq_tasklet; 203 unsigned long napi_mask; 204 }; 205 206 static int octeon_device_init(struct octeon_device *); 207 static void liquidio_remove(struct pci_dev *pdev); 208 static int liquidio_probe(struct pci_dev *pdev, 209 const struct pci_device_id *ent); 210 211 static struct handshake handshake[MAX_OCTEON_DEVICES]; 212 static struct completion first_stage; 213 214 static void octeon_droq_bh(unsigned long pdev) 215 { 216 int q_no; 217 int reschedule = 0; 218 struct octeon_device *oct = (struct octeon_device *)pdev; 219 struct octeon_device_priv *oct_priv = 220 (struct octeon_device_priv *)oct->priv; 221 222 /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ 223 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { 224 if (!(oct->io_qmask.oq & (1UL << q_no))) 225 continue; 226 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 227 MAX_PACKET_BUDGET); 228 } 229 230 if (reschedule) 231 tasklet_schedule(&oct_priv->droq_tasklet); 232 } 233 234 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 235 { 236 struct octeon_device_priv *oct_priv = 237 (struct octeon_device_priv *)oct->priv; 238 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 239 int i; 240 241 do { 242 pending_pkts = 0; 243 244 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 245 if (!(oct->io_qmask.oq & (1UL << i))) 246 continue; 247 pkt_cnt += octeon_droq_check_hw_for_pkts(oct, 248 oct->droq[i]); 249 } 250 if (pkt_cnt > 0) { 251 pending_pkts += pkt_cnt; 252 tasklet_schedule(&oct_priv->droq_tasklet); 253 } 254 pkt_cnt = 0; 255 schedule_timeout_uninterruptible(1); 256 257 } while (retry-- && pending_pkts); 258 259 return pkt_cnt; 260 } 261 262 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, 263 unsigned int bytes_compl) 264 { 265 struct netdev_queue *netdev_queue = txq; 266 267 netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl); 268 } 269 270 void octeon_update_tx_completion_counters(void *buf, int reqtype, 271 unsigned int *pkts_compl, 272 unsigned int *bytes_compl) 273 { 274 struct octnet_buf_free_info *finfo; 275 struct sk_buff *skb = NULL; 276 struct octeon_soft_command *sc; 277 278 switch (reqtype) { 279 case REQTYPE_NORESP_NET: 280 case REQTYPE_NORESP_NET_SG: 281 finfo = buf; 282 skb = finfo->skb; 283 break; 284 285 case REQTYPE_RESP_NET_SG: 286 case REQTYPE_RESP_NET: 287 sc = buf; 288 skb = sc->callback_arg; 289 break; 290 291 default: 292 return; 293 } 294 295 (*pkts_compl)++; 296 *bytes_compl += skb->len; 297 } 298 299 void octeon_report_sent_bytes_to_bql(void *buf, int reqtype) 300 { 301 struct octnet_buf_free_info *finfo; 302 struct sk_buff *skb; 303 struct octeon_soft_command *sc; 304 struct netdev_queue *txq; 305 306 switch (reqtype) { 307 case REQTYPE_NORESP_NET: 308 case REQTYPE_NORESP_NET_SG: 309 finfo = buf; 310 skb = finfo->skb; 311 break; 312 313 case REQTYPE_RESP_NET_SG: 314 case REQTYPE_RESP_NET: 315 sc = buf; 316 skb = sc->callback_arg; 317 break; 318 319 default: 320 return; 321 } 322 323 txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb)); 324 netdev_tx_sent_queue(txq, skb->len); 325 } 326 327 int octeon_console_debug_enabled(u32 console) 328 { 329 return (console_bitmask >> (console)) & 0x1; 330 } 331 332 /** 333 * \brief Forces all IO queues off on a given device 334 * @param oct Pointer to Octeon device 335 */ 336 static void force_io_queues_off(struct octeon_device *oct) 337 { 338 if ((oct->chip_id == OCTEON_CN66XX) || 339 (oct->chip_id == OCTEON_CN68XX)) { 340 /* Reset the Enable bits for Input 
Queues. */ 341 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 342 343 /* Reset the Enable bits for Output Queues. */ 344 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); 345 } 346 } 347 348 /** 349 * \brief wait for all pending requests to complete 350 * @param oct Pointer to Octeon device 351 * 352 * Called during shutdown sequence 353 */ 354 static int wait_for_pending_requests(struct octeon_device *oct) 355 { 356 int i, pcount = 0; 357 358 for (i = 0; i < 100; i++) { 359 pcount = 360 atomic_read(&oct->response_list 361 [OCTEON_ORDERED_SC_LIST].pending_req_count); 362 if (pcount) 363 schedule_timeout_uninterruptible(HZ / 10); 364 else 365 break; 366 } 367 368 if (pcount) 369 return 1; 370 371 return 0; 372 } 373 374 /** 375 * \brief Cause device to go quiet so it can be safely removed/reset/etc 376 * @param oct Pointer to Octeon device 377 */ 378 static inline void pcierror_quiesce_device(struct octeon_device *oct) 379 { 380 int i; 381 382 /* Disable the input and output queues now. No more packets will 383 * arrive from Octeon, but we should wait for all packet processing 384 * to finish. 385 */ 386 force_io_queues_off(oct); 387 388 /* To allow for in-flight requests */ 389 schedule_timeout_uninterruptible(100); 390 391 if (wait_for_pending_requests(oct)) 392 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 393 394 /* Force all requests waiting to be fetched by OCTEON to complete. */ 395 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 396 struct octeon_instr_queue *iq; 397 398 if (!(oct->io_qmask.iq & (1UL << i))) 399 continue; 400 iq = oct->instr_queue[i]; 401 402 if (atomic_read(&iq->instr_pending)) { 403 spin_lock_bh(&iq->lock); 404 iq->fill_cnt = 0; 405 iq->octeon_read_index = iq->host_write_index; 406 iq->stats.instr_processed += 407 atomic_read(&iq->instr_pending); 408 lio_process_iq_request_list(oct, iq); 409 spin_unlock_bh(&iq->lock); 410 } 411 } 412 413 /* Force all pending ordered list requests to time out. */ 414 lio_process_ordered_list(oct, 1); 415 416 /* We do not need to wait for output queue packets to be processed. */ 417 } 418 419 /** 420 * \brief Cleanup PCI AER uncorrectable error status 421 * @param dev Pointer to PCI device 422 */ 423 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 424 { 425 int pos = 0x100; 426 u32 status, mask; 427 428 pr_info("%s :\n", __func__); 429 430 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 431 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 432 if (dev->error_state == pci_channel_io_normal) 433 status &= ~mask; /* Clear corresponding nonfatal bits */ 434 else 435 status &= mask; /* Clear corresponding fatal bits */ 436 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 437 } 438 439 /** 440 * \brief Stop all PCI IO to a given device 441 * @param dev Pointer to Octeon device 442 */ 443 static void stop_pci_io(struct octeon_device *oct) 444 { 445 /* No more instructions will be forwarded. 
*/ 446 atomic_set(&oct->status, OCT_DEV_IN_RESET); 447 448 pci_disable_device(oct->pci_dev); 449 450 /* Disable interrupts */ 451 oct->fn_list.disable_interrupt(oct->chip); 452 453 pcierror_quiesce_device(oct); 454 455 /* Release the interrupt line */ 456 free_irq(oct->pci_dev->irq, oct); 457 458 if (oct->flags & LIO_FLAG_MSI_ENABLED) 459 pci_disable_msi(oct->pci_dev); 460 461 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 462 lio_get_state_string(&oct->status)); 463 464 /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */ 465 /* making it a common function for all OCTEON models */ 466 cleanup_aer_uncorrect_error_status(oct->pci_dev); 467 } 468 469 /** 470 * \brief called when PCI error is detected 471 * @param pdev Pointer to PCI device 472 * @param state The current pci connection state 473 * 474 * This function is called after a PCI bus error affecting 475 * this device has been detected. 476 */ 477 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 478 pci_channel_state_t state) 479 { 480 struct octeon_device *oct = pci_get_drvdata(pdev); 481 482 /* Non-correctable Non-fatal errors */ 483 if (state == pci_channel_io_normal) { 484 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 485 cleanup_aer_uncorrect_error_status(oct->pci_dev); 486 return PCI_ERS_RESULT_CAN_RECOVER; 487 } 488 489 /* Non-correctable Fatal errors */ 490 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 491 stop_pci_io(oct); 492 493 /* Always return a DISCONNECT. There is no support for recovery but only 494 * for a clean shutdown. 495 */ 496 return PCI_ERS_RESULT_DISCONNECT; 497 } 498 499 /** 500 * \brief mmio handler 501 * @param pdev Pointer to PCI device 502 */ 503 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev) 504 { 505 /* We should never hit this since we never ask for a reset for a Fatal 506 * Error. We always return DISCONNECT in io_error above. 507 * But play safe and return RECOVERED for now. 508 */ 509 return PCI_ERS_RESULT_RECOVERED; 510 } 511 512 /** 513 * \brief called after the pci bus has been reset. 514 * @param pdev Pointer to PCI device 515 * 516 * Restart the card from scratch, as if from a cold-boot. Implementation 517 * resembles the first-half of the octeon_resume routine. 518 */ 519 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev) 520 { 521 /* We should never hit this since we never ask for a reset for a Fatal 522 * Error. We always return DISCONNECT in io_error above. 523 * But play safe and return RECOVERED for now. 524 */ 525 return PCI_ERS_RESULT_RECOVERED; 526 } 527 528 /** 529 * \brief called when traffic can start flowing again. 530 * @param pdev Pointer to PCI device 531 * 532 * This callback is called when the error recovery driver tells us that 533 * its OK to resume normal operation. Implementation resembles the 534 * second-half of the octeon_resume routine. 535 */ 536 static void liquidio_pcie_resume(struct pci_dev *pdev) 537 { 538 /* Nothing to be done here. 
*/ 539 } 540 541 #ifdef CONFIG_PM 542 /** 543 * \brief called when suspending 544 * @param pdev Pointer to PCI device 545 * @param state state to suspend to 546 */ 547 static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) 548 { 549 return 0; 550 } 551 552 /** 553 * \brief called when resuming 554 * @param pdev Pointer to PCI device 555 */ 556 static int liquidio_resume(struct pci_dev *pdev) 557 { 558 return 0; 559 } 560 #endif 561 562 /* For PCI-E Advanced Error Recovery (AER) Interface */ 563 static const struct pci_error_handlers liquidio_err_handler = { 564 .error_detected = liquidio_pcie_error_detected, 565 .mmio_enabled = liquidio_pcie_mmio_enabled, 566 .slot_reset = liquidio_pcie_slot_reset, 567 .resume = liquidio_pcie_resume, 568 }; 569 570 static const struct pci_device_id liquidio_pci_tbl[] = { 571 { /* 68xx */ 572 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 573 }, 574 { /* 66xx */ 575 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 576 }, 577 { 578 0, 0, 0, 0, 0, 0, 0 579 } 580 }; 581 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); 582 583 static struct pci_driver liquidio_pci_driver = { 584 .name = "LiquidIO", 585 .id_table = liquidio_pci_tbl, 586 .probe = liquidio_probe, 587 .remove = liquidio_remove, 588 .err_handler = &liquidio_err_handler, /* For AER */ 589 590 #ifdef CONFIG_PM 591 .suspend = liquidio_suspend, 592 .resume = liquidio_resume, 593 #endif 594 595 }; 596 597 /** 598 * \brief register PCI driver 599 */ 600 static int liquidio_init_pci(void) 601 { 602 return pci_register_driver(&liquidio_pci_driver); 603 } 604 605 /** 606 * \brief unregister PCI driver 607 */ 608 static void liquidio_deinit_pci(void) 609 { 610 pci_unregister_driver(&liquidio_pci_driver); 611 } 612 613 /** 614 * \brief check interface state 615 * @param lio per-network private data 616 * @param state_flag flag state to check 617 */ 618 static inline int ifstate_check(struct lio *lio, int state_flag) 619 { 620 return atomic_read(&lio->ifstate) & state_flag; 621 } 622 623 /** 624 * \brief set interface state 625 * @param lio per-network private data 626 * @param state_flag flag state to set 627 */ 628 static inline void ifstate_set(struct lio *lio, int state_flag) 629 { 630 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); 631 } 632 633 /** 634 * \brief clear interface state 635 * @param lio per-network private data 636 * @param state_flag flag state to clear 637 */ 638 static inline void ifstate_reset(struct lio *lio, int state_flag) 639 { 640 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); 641 } 642 643 /** 644 * \brief Stop Tx queues 645 * @param netdev network device 646 */ 647 static inline void txqs_stop(struct net_device *netdev) 648 { 649 if (netif_is_multiqueue(netdev)) { 650 int i; 651 652 for (i = 0; i < netdev->num_tx_queues; i++) 653 netif_stop_subqueue(netdev, i); 654 } else { 655 netif_stop_queue(netdev); 656 } 657 } 658 659 /** 660 * \brief Start Tx queues 661 * @param netdev network device 662 */ 663 static inline void txqs_start(struct net_device *netdev) 664 { 665 if (netif_is_multiqueue(netdev)) { 666 int i; 667 668 for (i = 0; i < netdev->num_tx_queues; i++) 669 netif_start_subqueue(netdev, i); 670 } else { 671 netif_start_queue(netdev); 672 } 673 } 674 675 /** 676 * \brief Wake Tx queues 677 * @param netdev network device 678 */ 679 static inline void txqs_wake(struct net_device *netdev) 680 { 681 if (netif_is_multiqueue(netdev)) { 682 int i; 683 684 for (i = 0; i < netdev->num_tx_queues; i++) 
685 netif_wake_subqueue(netdev, i); 686 } else { 687 netif_wake_queue(netdev); 688 } 689 } 690 691 /** 692 * \brief Stop Tx queue 693 * @param netdev network device 694 */ 695 static void stop_txq(struct net_device *netdev) 696 { 697 txqs_stop(netdev); 698 } 699 700 /** 701 * \brief Start Tx queue 702 * @param netdev network device 703 */ 704 static void start_txq(struct net_device *netdev) 705 { 706 struct lio *lio = GET_LIO(netdev); 707 708 if (lio->linfo.link.s.status) { 709 txqs_start(netdev); 710 return; 711 } 712 } 713 714 /** 715 * \brief Wake a queue 716 * @param netdev network device 717 * @param q which queue to wake 718 */ 719 static inline void wake_q(struct net_device *netdev, int q) 720 { 721 if (netif_is_multiqueue(netdev)) 722 netif_wake_subqueue(netdev, q); 723 else 724 netif_wake_queue(netdev); 725 } 726 727 /** 728 * \brief Stop a queue 729 * @param netdev network device 730 * @param q which queue to stop 731 */ 732 static inline void stop_q(struct net_device *netdev, int q) 733 { 734 if (netif_is_multiqueue(netdev)) 735 netif_stop_subqueue(netdev, q); 736 else 737 netif_stop_queue(netdev); 738 } 739 740 /** 741 * \brief Check Tx queue status, and take appropriate action 742 * @param lio per-network private data 743 * @returns 0 if full, number of queues woken up otherwise 744 */ 745 static inline int check_txq_status(struct lio *lio) 746 { 747 int ret_val = 0; 748 749 if (netif_is_multiqueue(lio->netdev)) { 750 int numqs = lio->netdev->num_tx_queues; 751 int q, iq = 0; 752 753 /* check each sub-queue state */ 754 for (q = 0; q < numqs; q++) { 755 iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)]; 756 if (octnet_iq_is_full(lio->oct_dev, iq)) 757 continue; 758 wake_q(lio->netdev, q); 759 ret_val++; 760 } 761 } else { 762 if (octnet_iq_is_full(lio->oct_dev, lio->txq)) 763 return 0; 764 wake_q(lio->netdev, lio->txq); 765 ret_val = 1; 766 } 767 return ret_val; 768 } 769 770 /** 771 * Remove the node at the head of the list. The list would be empty at 772 * the end of this call if there are no more nodes in the list. 
773 */ 774 static inline struct list_head *list_delete_head(struct list_head *root) 775 { 776 struct list_head *node; 777 778 if ((root->prev == root) && (root->next == root)) 779 node = NULL; 780 else 781 node = root->next; 782 783 if (node) 784 list_del(node); 785 786 return node; 787 } 788 789 /** 790 * \brief Delete gather list 791 * @param lio per-network private data 792 */ 793 static void delete_glist(struct lio *lio) 794 { 795 struct octnic_gather *g; 796 797 do { 798 g = (struct octnic_gather *) 799 list_delete_head(&lio->glist); 800 if (g) { 801 if (g->sg) 802 kfree((void *)((unsigned long)g->sg - 803 g->adjust)); 804 kfree(g); 805 } 806 } while (g); 807 } 808 809 /** 810 * \brief Setup gather list 811 * @param lio per-network private data 812 */ 813 static int setup_glist(struct lio *lio) 814 { 815 int i; 816 struct octnic_gather *g; 817 818 INIT_LIST_HEAD(&lio->glist); 819 820 for (i = 0; i < lio->tx_qsize; i++) { 821 g = kzalloc(sizeof(*g), GFP_KERNEL); 822 if (!g) 823 break; 824 825 g->sg_size = 826 ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); 827 828 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); 829 if (!g->sg) { 830 kfree(g); 831 break; 832 } 833 834 /* The gather component should be aligned on 64-bit boundary */ 835 if (((unsigned long)g->sg) & 7) { 836 g->adjust = 8 - (((unsigned long)g->sg) & 7); 837 g->sg = (struct octeon_sg_entry *) 838 ((unsigned long)g->sg + g->adjust); 839 } 840 list_add_tail(&g->list, &lio->glist); 841 } 842 843 if (i == lio->tx_qsize) 844 return 0; 845 846 delete_glist(lio); 847 return 1; 848 } 849 850 /** 851 * \brief Print link information 852 * @param netdev network device 853 */ 854 static void print_link_info(struct net_device *netdev) 855 { 856 struct lio *lio = GET_LIO(netdev); 857 858 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { 859 struct oct_link_info *linfo = &lio->linfo; 860 861 if (linfo->link.s.status) { 862 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 863 linfo->link.s.speed, 864 (linfo->link.s.duplex) ? "Full" : "Half"); 865 } else { 866 netif_info(lio, link, lio->netdev, "Link Down\n"); 867 } 868 } 869 } 870 871 /** 872 * \brief Update link status 873 * @param netdev network device 874 * @param ls link status structure 875 * 876 * Called on receipt of a link status response from the core application to 877 * update each interface's link status. 
878 */ 879 static inline void update_link_status(struct net_device *netdev, 880 union oct_link_status *ls) 881 { 882 struct lio *lio = GET_LIO(netdev); 883 884 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 885 lio->linfo.link.u64 = ls->u64; 886 887 print_link_info(netdev); 888 889 if (lio->linfo.link.s.status) { 890 netif_carrier_on(netdev); 891 /* start_txq(netdev); */ 892 txqs_wake(netdev); 893 } else { 894 netif_carrier_off(netdev); 895 stop_txq(netdev); 896 } 897 } 898 } 899 900 /** 901 * \brief Droq packet processor sceduler 902 * @param oct octeon device 903 */ 904 static 905 void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) 906 { 907 struct octeon_device_priv *oct_priv = 908 (struct octeon_device_priv *)oct->priv; 909 u64 oq_no; 910 struct octeon_droq *droq; 911 912 if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { 913 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { 914 if (!(oct->droq_intr & (1 << oq_no))) 915 continue; 916 917 droq = oct->droq[oq_no]; 918 919 if (droq->ops.poll_mode) { 920 droq->ops.napi_fn(droq); 921 oct_priv->napi_mask |= (1 << oq_no); 922 } else { 923 tasklet_schedule(&oct_priv->droq_tasklet); 924 } 925 } 926 } 927 } 928 929 /** 930 * \brief Interrupt handler for octeon 931 * @param irq unused 932 * @param dev octeon device 933 */ 934 static 935 irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev) 936 { 937 struct octeon_device *oct = (struct octeon_device *)dev; 938 irqreturn_t ret; 939 940 /* Disable our interrupts for the duration of ISR */ 941 oct->fn_list.disable_interrupt(oct->chip); 942 943 ret = oct->fn_list.process_interrupt_regs(oct); 944 945 if (ret == IRQ_HANDLED) 946 liquidio_schedule_droq_pkt_handlers(oct); 947 948 /* Re-enable our interrupts */ 949 if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) 950 oct->fn_list.enable_interrupt(oct->chip); 951 952 return ret; 953 } 954 955 /** 956 * \brief Setup interrupt for octeon device 957 * @param oct octeon device 958 * 959 * Enable interrupt in Octeon device as given in the PCI interrupt mask. 960 */ 961 static int octeon_setup_interrupt(struct octeon_device *oct) 962 { 963 int irqret, err; 964 965 err = pci_enable_msi(oct->pci_dev); 966 if (err) 967 dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", 968 err); 969 else 970 oct->flags |= LIO_FLAG_MSI_ENABLED; 971 972 irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler, 973 IRQF_SHARED, "octeon", oct); 974 if (irqret) { 975 if (oct->flags & LIO_FLAG_MSI_ENABLED) 976 pci_disable_msi(oct->pci_dev); 977 dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", 978 irqret); 979 return 1; 980 } 981 982 return 0; 983 } 984 985 /** 986 * \brief PCI probe handler 987 * @param pdev PCI device structure 988 * @param ent unused 989 */ 990 static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 991 { 992 struct octeon_device *oct_dev = NULL; 993 struct handshake *hs; 994 995 oct_dev = octeon_allocate_device(pdev->device, 996 sizeof(struct octeon_device_priv)); 997 if (!oct_dev) { 998 dev_err(&pdev->dev, "Unable to allocate device\n"); 999 return -ENOMEM; 1000 } 1001 1002 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 1003 (u32)pdev->vendor, (u32)pdev->device); 1004 1005 /* Assign octeon_device for this device to the private data area. 
*/ 1006 pci_set_drvdata(pdev, oct_dev); 1007 1008 /* set linux specific device pointer */ 1009 oct_dev->pci_dev = (void *)pdev; 1010 1011 hs = &handshake[oct_dev->octeon_id]; 1012 init_completion(&hs->init); 1013 init_completion(&hs->started); 1014 hs->pci_dev = pdev; 1015 1016 if (oct_dev->octeon_id == 0) 1017 /* first LiquidIO NIC is detected */ 1018 complete(&first_stage); 1019 1020 if (octeon_device_init(oct_dev)) { 1021 liquidio_remove(pdev); 1022 return -ENOMEM; 1023 } 1024 1025 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 1026 1027 return 0; 1028 } 1029 1030 /** 1031 *\brief Destroy resources associated with octeon device 1032 * @param pdev PCI device structure 1033 * @param ent unused 1034 */ 1035 static void octeon_destroy_resources(struct octeon_device *oct) 1036 { 1037 int i; 1038 struct octeon_device_priv *oct_priv = 1039 (struct octeon_device_priv *)oct->priv; 1040 1041 struct handshake *hs; 1042 1043 switch (atomic_read(&oct->status)) { 1044 case OCT_DEV_RUNNING: 1045 case OCT_DEV_CORE_OK: 1046 1047 /* No more instructions will be forwarded. */ 1048 atomic_set(&oct->status, OCT_DEV_IN_RESET); 1049 1050 oct->app_mode = CVM_DRV_INVALID_APP; 1051 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 1052 lio_get_state_string(&oct->status)); 1053 1054 schedule_timeout_uninterruptible(HZ / 10); 1055 1056 /* fallthrough */ 1057 case OCT_DEV_HOST_OK: 1058 1059 /* fallthrough */ 1060 case OCT_DEV_CONSOLE_INIT_DONE: 1061 /* Remove any consoles */ 1062 octeon_remove_consoles(oct); 1063 1064 /* fallthrough */ 1065 case OCT_DEV_IO_QUEUES_DONE: 1066 if (wait_for_pending_requests(oct)) 1067 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1068 1069 if (lio_wait_for_instr_fetch(oct)) 1070 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1071 1072 /* Disable the input and output queues now. No more packets will 1073 * arrive from Octeon, but we should wait for all packet 1074 * processing to finish. 
1075 */ 1076 oct->fn_list.disable_io_queues(oct); 1077 1078 if (lio_wait_for_oq_pkts(oct)) 1079 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 1080 1081 /* Disable interrupts */ 1082 oct->fn_list.disable_interrupt(oct->chip); 1083 1084 /* Release the interrupt line */ 1085 free_irq(oct->pci_dev->irq, oct); 1086 1087 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1088 pci_disable_msi(oct->pci_dev); 1089 1090 /* Soft reset the octeon device before exiting */ 1091 oct->fn_list.soft_reset(oct); 1092 1093 /* Disable the device, releasing the PCI INT */ 1094 pci_disable_device(oct->pci_dev); 1095 1096 /* fallthrough */ 1097 case OCT_DEV_IN_RESET: 1098 case OCT_DEV_DROQ_INIT_DONE: 1099 /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/ 1100 mdelay(100); 1101 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 1102 if (!(oct->io_qmask.oq & (1UL << i))) 1103 continue; 1104 octeon_delete_droq(oct, i); 1105 } 1106 1107 /* Force any pending handshakes to complete */ 1108 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 1109 hs = &handshake[i]; 1110 1111 if (hs->pci_dev) { 1112 handshake[oct->octeon_id].init_ok = 0; 1113 complete(&handshake[oct->octeon_id].init); 1114 handshake[oct->octeon_id].started_ok = 0; 1115 complete(&handshake[oct->octeon_id].started); 1116 } 1117 } 1118 1119 /* fallthrough */ 1120 case OCT_DEV_RESP_LIST_INIT_DONE: 1121 octeon_delete_response_list(oct); 1122 1123 /* fallthrough */ 1124 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 1125 octeon_free_sc_buffer_pool(oct); 1126 1127 /* fallthrough */ 1128 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1129 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 1130 if (!(oct->io_qmask.iq & (1UL << i))) 1131 continue; 1132 octeon_delete_instr_queue(oct, i); 1133 } 1134 1135 /* fallthrough */ 1136 case OCT_DEV_DISPATCH_INIT_DONE: 1137 octeon_delete_dispatch_list(oct); 1138 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1139 1140 /* fallthrough */ 1141 case OCT_DEV_PCI_MAP_DONE: 1142 octeon_unmap_pci_barx(oct, 0); 1143 octeon_unmap_pci_barx(oct, 1); 1144 1145 /* fallthrough */ 1146 case OCT_DEV_BEGIN_STATE: 1147 /* Nothing to be done here either */ 1148 break; 1149 } /* end switch(oct->status) */ 1150 1151 tasklet_kill(&oct_priv->droq_tasklet); 1152 } 1153 1154 /** 1155 * \brief Send Rx control command 1156 * @param lio per-network private data 1157 * @param start_stop whether to start or stop 1158 */ 1159 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1160 { 1161 struct octnic_ctrl_pkt nctrl; 1162 struct octnic_ctrl_params nparams; 1163 1164 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1165 1166 nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL; 1167 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 1168 nctrl.ncmd.s.param2 = start_stop; 1169 nctrl.netpndev = (u64)lio->netdev; 1170 1171 nparams.resp_order = OCTEON_RESP_NORESPONSE; 1172 1173 if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0) 1174 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1175 } 1176 1177 /** 1178 * \brief Destroy NIC device interface 1179 * @param oct octeon device 1180 * @param ifidx which interface to destroy 1181 * 1182 * Cleanup associated with each interface for an Octeon device when NIC 1183 * module is being unloaded or if initialization fails during load. 
1184 */ 1185 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1186 { 1187 struct net_device *netdev = oct->props[ifidx].netdev; 1188 struct lio *lio; 1189 1190 if (!netdev) { 1191 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1192 __func__, ifidx); 1193 return; 1194 } 1195 1196 lio = GET_LIO(netdev); 1197 1198 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1199 1200 send_rx_ctrl_cmd(lio, 0); 1201 1202 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1203 txqs_stop(netdev); 1204 1205 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1206 unregister_netdev(netdev); 1207 1208 delete_glist(lio); 1209 1210 free_netdev(netdev); 1211 1212 oct->props[ifidx].netdev = NULL; 1213 } 1214 1215 /** 1216 * \brief Stop complete NIC functionality 1217 * @param oct octeon device 1218 */ 1219 static int liquidio_stop_nic_module(struct octeon_device *oct) 1220 { 1221 int i, j; 1222 struct lio *lio; 1223 1224 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1225 if (!oct->ifcount) { 1226 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1227 return 1; 1228 } 1229 1230 for (i = 0; i < oct->ifcount; i++) { 1231 lio = GET_LIO(oct->props[i].netdev); 1232 for (j = 0; j < lio->linfo.num_rxpciq; j++) 1233 octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]); 1234 } 1235 1236 for (i = 0; i < oct->ifcount; i++) 1237 liquidio_destroy_nic_device(oct, i); 1238 1239 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1240 return 0; 1241 } 1242 1243 /** 1244 * \brief Cleans up resources at unload time 1245 * @param pdev PCI device structure 1246 */ 1247 static void liquidio_remove(struct pci_dev *pdev) 1248 { 1249 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1250 1251 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1252 1253 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1254 liquidio_stop_nic_module(oct_dev); 1255 1256 /* Reset the octeon device and cleanup all memory allocated for 1257 * the octeon device by driver. 1258 */ 1259 octeon_destroy_resources(oct_dev); 1260 1261 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1262 1263 /* This octeon device has been removed. Update the global 1264 * data structure to reflect this. Free the device structure. 1265 */ 1266 octeon_free_device_mem(oct_dev); 1267 } 1268 1269 /** 1270 * \brief Identify the Octeon device and to map the BAR address space 1271 * @param oct octeon device 1272 */ 1273 static int octeon_chip_specific_setup(struct octeon_device *oct) 1274 { 1275 u32 dev_id, rev_id; 1276 int ret = 1; 1277 1278 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1279 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1280 oct->rev_id = rev_id & 0xff; 1281 1282 switch (dev_id) { 1283 case OCTEON_CN68XX_PCIID: 1284 oct->chip_id = OCTEON_CN68XX; 1285 ret = lio_setup_cn68xx_octeon_device(oct); 1286 break; 1287 1288 case OCTEON_CN66XX_PCIID: 1289 oct->chip_id = OCTEON_CN66XX; 1290 ret = lio_setup_cn66xx_octeon_device(oct); 1291 break; 1292 default: 1293 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1294 dev_id); 1295 } 1296 1297 if (!ret) 1298 dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n", 1299 OCTEON_MAJOR_REV(oct), 1300 OCTEON_MINOR_REV(oct), 1301 octeon_get_conf(oct)->card_name); 1302 1303 return ret; 1304 } 1305 1306 /** 1307 * \brief PCI initialization for each Octeon device. 
1308 * @param oct octeon device 1309 */ 1310 static int octeon_pci_os_setup(struct octeon_device *oct) 1311 { 1312 /* setup PCI stuff first */ 1313 if (pci_enable_device(oct->pci_dev)) { 1314 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1315 return 1; 1316 } 1317 1318 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1319 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1320 return 1; 1321 } 1322 1323 /* Enable PCI DMA Master. */ 1324 pci_set_master(oct->pci_dev); 1325 1326 return 0; 1327 } 1328 1329 /** 1330 * \brief Check Tx queue state for a given network buffer 1331 * @param lio per-network private data 1332 * @param skb network buffer 1333 */ 1334 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) 1335 { 1336 int q = 0, iq = 0; 1337 1338 if (netif_is_multiqueue(lio->netdev)) { 1339 q = skb->queue_mapping; 1340 iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))]; 1341 } else { 1342 iq = lio->txq; 1343 } 1344 1345 if (octnet_iq_is_full(lio->oct_dev, iq)) 1346 return 0; 1347 wake_q(lio->netdev, q); 1348 return 1; 1349 } 1350 1351 /** 1352 * \brief Unmap and free network buffer 1353 * @param buf buffer 1354 */ 1355 static void free_netbuf(void *buf) 1356 { 1357 struct sk_buff *skb; 1358 struct octnet_buf_free_info *finfo; 1359 struct lio *lio; 1360 1361 finfo = (struct octnet_buf_free_info *)buf; 1362 skb = finfo->skb; 1363 lio = finfo->lio; 1364 1365 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1366 DMA_TO_DEVICE); 1367 1368 check_txq_state(lio, skb); 1369 1370 recv_buffer_free((struct sk_buff *)skb); 1371 } 1372 1373 /** 1374 * \brief Unmap and free gather buffer 1375 * @param buf buffer 1376 */ 1377 static void free_netsgbuf(void *buf) 1378 { 1379 struct octnet_buf_free_info *finfo; 1380 struct sk_buff *skb; 1381 struct lio *lio; 1382 struct octnic_gather *g; 1383 int i, frags; 1384 1385 finfo = (struct octnet_buf_free_info *)buf; 1386 skb = finfo->skb; 1387 lio = finfo->lio; 1388 g = finfo->g; 1389 frags = skb_shinfo(skb)->nr_frags; 1390 1391 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1392 g->sg[0].ptr[0], (skb->len - skb->data_len), 1393 DMA_TO_DEVICE); 1394 1395 i = 1; 1396 while (frags--) { 1397 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1398 1399 pci_unmap_page((lio->oct_dev)->pci_dev, 1400 g->sg[(i >> 2)].ptr[(i & 3)], 1401 frag->size, DMA_TO_DEVICE); 1402 i++; 1403 } 1404 1405 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1406 finfo->dptr, g->sg_size, 1407 DMA_TO_DEVICE); 1408 1409 spin_lock(&lio->lock); 1410 list_add_tail(&g->list, &lio->glist); 1411 spin_unlock(&lio->lock); 1412 1413 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1414 1415 recv_buffer_free((struct sk_buff *)skb); 1416 } 1417 1418 /** 1419 * \brief Unmap and free gather buffer with response 1420 * @param buf buffer 1421 */ 1422 static void free_netsgbuf_with_resp(void *buf) 1423 { 1424 struct octeon_soft_command *sc; 1425 struct octnet_buf_free_info *finfo; 1426 struct sk_buff *skb; 1427 struct lio *lio; 1428 struct octnic_gather *g; 1429 int i, frags; 1430 1431 sc = (struct octeon_soft_command *)buf; 1432 skb = (struct sk_buff *)sc->callback_arg; 1433 finfo = (struct octnet_buf_free_info *)&skb->cb; 1434 1435 lio = finfo->lio; 1436 g = finfo->g; 1437 frags = skb_shinfo(skb)->nr_frags; 1438 1439 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1440 g->sg[0].ptr[0], (skb->len - skb->data_len), 1441 DMA_TO_DEVICE); 1442 1443 i = 1; 1444 while (frags--) { 1445 struct 
skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1446 1447 pci_unmap_page((lio->oct_dev)->pci_dev, 1448 g->sg[(i >> 2)].ptr[(i & 3)], 1449 frag->size, DMA_TO_DEVICE); 1450 i++; 1451 } 1452 1453 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1454 finfo->dptr, g->sg_size, 1455 DMA_TO_DEVICE); 1456 1457 spin_lock(&lio->lock); 1458 list_add_tail(&g->list, &lio->glist); 1459 spin_unlock(&lio->lock); 1460 1461 /* Don't free the skb yet */ 1462 1463 check_txq_state(lio, skb); 1464 } 1465 1466 /** 1467 * \brief Adjust ptp frequency 1468 * @param ptp PTP clock info 1469 * @param ppb how much to adjust by, in parts-per-billion 1470 */ 1471 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 1472 { 1473 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1474 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1475 u64 comp, delta; 1476 unsigned long flags; 1477 bool neg_adj = false; 1478 1479 if (ppb < 0) { 1480 neg_adj = true; 1481 ppb = -ppb; 1482 } 1483 1484 /* The hardware adds the clock compensation value to the 1485 * PTP clock on every coprocessor clock cycle, so we 1486 * compute the delta in terms of coprocessor clocks. 1487 */ 1488 delta = (u64)ppb << 32; 1489 do_div(delta, oct->coproc_clock_rate); 1490 1491 spin_lock_irqsave(&lio->ptp_lock, flags); 1492 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); 1493 if (neg_adj) 1494 comp -= delta; 1495 else 1496 comp += delta; 1497 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1498 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1499 1500 return 0; 1501 } 1502 1503 /** 1504 * \brief Adjust ptp time 1505 * @param ptp PTP clock info 1506 * @param delta how much to adjust by, in nanosecs 1507 */ 1508 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 1509 { 1510 unsigned long flags; 1511 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1512 1513 spin_lock_irqsave(&lio->ptp_lock, flags); 1514 lio->ptp_adjust += delta; 1515 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1516 1517 return 0; 1518 } 1519 1520 /** 1521 * \brief Get hardware clock time, including any adjustment 1522 * @param ptp PTP clock info 1523 * @param ts timespec 1524 */ 1525 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, 1526 struct timespec64 *ts) 1527 { 1528 u64 ns; 1529 u32 remainder; 1530 unsigned long flags; 1531 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1532 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1533 1534 spin_lock_irqsave(&lio->ptp_lock, flags); 1535 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); 1536 ns += lio->ptp_adjust; 1537 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1538 1539 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); 1540 ts->tv_nsec = remainder; 1541 1542 return 0; 1543 } 1544 1545 /** 1546 * \brief Set hardware clock time. 
Reset adjustment 1547 * @param ptp PTP clock info 1548 * @param ts timespec 1549 */ 1550 static int liquidio_ptp_settime(struct ptp_clock_info *ptp, 1551 const struct timespec64 *ts) 1552 { 1553 u64 ns; 1554 unsigned long flags; 1555 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1556 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1557 1558 ns = timespec_to_ns(ts); 1559 1560 spin_lock_irqsave(&lio->ptp_lock, flags); 1561 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1562 lio->ptp_adjust = 0; 1563 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1564 1565 return 0; 1566 } 1567 1568 /** 1569 * \brief Check if PTP is enabled 1570 * @param ptp PTP clock info 1571 * @param rq request 1572 * @param on is it on 1573 */ 1574 static int liquidio_ptp_enable(struct ptp_clock_info *ptp, 1575 struct ptp_clock_request *rq, int on) 1576 { 1577 return -EOPNOTSUPP; 1578 } 1579 1580 /** 1581 * \brief Open PTP clock source 1582 * @param netdev network device 1583 */ 1584 static void oct_ptp_open(struct net_device *netdev) 1585 { 1586 struct lio *lio = GET_LIO(netdev); 1587 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1588 1589 spin_lock_init(&lio->ptp_lock); 1590 1591 snprintf(lio->ptp_info.name, 16, "%s", netdev->name); 1592 lio->ptp_info.owner = THIS_MODULE; 1593 lio->ptp_info.max_adj = 250000000; 1594 lio->ptp_info.n_alarm = 0; 1595 lio->ptp_info.n_ext_ts = 0; 1596 lio->ptp_info.n_per_out = 0; 1597 lio->ptp_info.pps = 0; 1598 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq; 1599 lio->ptp_info.adjtime = liquidio_ptp_adjtime; 1600 lio->ptp_info.gettime64 = liquidio_ptp_gettime; 1601 lio->ptp_info.settime64 = liquidio_ptp_settime; 1602 lio->ptp_info.enable = liquidio_ptp_enable; 1603 1604 lio->ptp_adjust = 0; 1605 1606 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 1607 &oct->pci_dev->dev); 1608 1609 if (IS_ERR(lio->ptp_clock)) 1610 lio->ptp_clock = NULL; 1611 } 1612 1613 /** 1614 * \brief Init PTP clock 1615 * @param oct octeon device 1616 */ 1617 static void liquidio_ptp_init(struct octeon_device *oct) 1618 { 1619 u64 clock_comp, cfg; 1620 1621 clock_comp = (u64)NSEC_PER_SEC << 32; 1622 do_div(clock_comp, oct->coproc_clock_rate); 1623 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1624 1625 /* Enable */ 1626 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 1627 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 1628 } 1629 1630 /** 1631 * \brief Load firmware to device 1632 * @param oct octeon device 1633 * 1634 * Maps device to firmware filename, requests firmware, and downloads it 1635 */ 1636 static int load_firmware(struct octeon_device *oct) 1637 { 1638 int ret = 0; 1639 const struct firmware *fw; 1640 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 1641 char *tmp_fw_type; 1642 1643 if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, 1644 sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) { 1645 dev_info(&oct->pci_dev->dev, "Skipping firmware load\n"); 1646 return ret; 1647 } 1648 1649 if (fw_type[0] == '\0') 1650 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 1651 else 1652 tmp_fw_type = fw_type; 1653 1654 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 1655 octeon_get_conf(oct)->card_name, tmp_fw_type, 1656 LIO_FW_NAME_SUFFIX); 1657 1658 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 1659 if (ret) { 1660 dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n",
			fw_name);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val = 0;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val == -1)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}
	/* tasklet creation for the droq */

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count,
	       oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void if_cfg_callback(struct octeon_device *oct,
			    u32 status,
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_resp *resp;
	struct liquidio_if_cfg_context *ctx;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
			CVM_CAST64(resp->status));
	ACCESS_ONCE(ctx->cond) = 1;

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Select queue based on hash
 * @param dev Net device
 * @param skb sk_buff structure
 * @returns selected queue number
 */
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
		    void *accel_priv, select_queue_fallback_t fallback)
{
	int qindex;
	struct lio *lio;

	lio = GET_LIO(dev);
	/* select queue on chosen queue_mapping or core */
	qindex = skb_rx_queue_recorded(skb) ?
		 skb_get_rx_queue(skb) : smp_processor_id();
	return (u16)(qindex & (lio->linfo.num_txpciq - 1));
}

/** Routine to push packets arriving on the Octeon interface up to the
 * network layer.
 * @param octeon_id - octeon device id.
 * @param skbuff    - skbuff struct to be passed to network layer.
 * @param len       - size of total data received.
1762 * @param rh - Control header associated with the packet 1763 * @param param - additional control data with the packet 1764 */ 1765 static void 1766 liquidio_push_packet(u32 octeon_id, 1767 void *skbuff, 1768 u32 len, 1769 union octeon_rh *rh, 1770 void *param) 1771 { 1772 struct napi_struct *napi = param; 1773 struct octeon_device *oct = lio_get_device(octeon_id); 1774 struct sk_buff *skb = (struct sk_buff *)skbuff; 1775 struct skb_shared_hwtstamps *shhwtstamps; 1776 u64 ns; 1777 struct net_device *netdev = 1778 (struct net_device *)oct->props[rh->r_dh.link].netdev; 1779 struct octeon_droq *droq = container_of(param, struct octeon_droq, 1780 napi); 1781 if (netdev) { 1782 int packet_was_received; 1783 struct lio *lio = GET_LIO(netdev); 1784 1785 /* Do not proceed if the interface is not in RUNNING state. */ 1786 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { 1787 recv_buffer_free(skb); 1788 droq->stats.rx_dropped++; 1789 return; 1790 } 1791 1792 skb->dev = netdev; 1793 1794 if (rh->r_dh.has_hwtstamp) { 1795 /* timestamp is included from the hardware at the 1796 * beginning of the packet. 1797 */ 1798 if (ifstate_check(lio, 1799 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { 1800 /* Nanoseconds are in the first 64-bits 1801 * of the packet. 1802 */ 1803 memcpy(&ns, (skb->data), sizeof(ns)); 1804 shhwtstamps = skb_hwtstamps(skb); 1805 shhwtstamps->hwtstamp = 1806 ns_to_ktime(ns + lio->ptp_adjust); 1807 } 1808 skb_pull(skb, sizeof(ns)); 1809 } 1810 1811 skb->protocol = eth_type_trans(skb, skb->dev); 1812 1813 if ((netdev->features & NETIF_F_RXCSUM) && 1814 (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED)) 1815 /* checksum has already been verified */ 1816 skb->ip_summed = CHECKSUM_UNNECESSARY; 1817 else 1818 skb->ip_summed = CHECKSUM_NONE; 1819 1820 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; 1821 1822 if (packet_was_received) { 1823 droq->stats.rx_bytes_received += len; 1824 droq->stats.rx_pkts_received++; 1825 netdev->last_rx = jiffies; 1826 } else { 1827 droq->stats.rx_dropped++; 1828 netif_info(lio, rx_err, lio->netdev, 1829 "droq:%d error rx_dropped:%llu\n", 1830 droq->q_no, droq->stats.rx_dropped); 1831 } 1832 1833 } else { 1834 recv_buffer_free(skb); 1835 } 1836 } 1837 1838 /** 1839 * \brief wrapper for calling napi_schedule 1840 * @param param parameters to pass to napi_schedule 1841 * 1842 * Used when scheduling on different CPUs 1843 */ 1844 static void napi_schedule_wrapper(void *param) 1845 { 1846 struct napi_struct *napi = param; 1847 1848 napi_schedule(napi); 1849 } 1850 1851 /** 1852 * \brief callback when receive interrupt occurs and we are in NAPI mode 1853 * @param arg pointer to octeon output queue 1854 */ 1855 static void liquidio_napi_drv_callback(void *arg) 1856 { 1857 struct octeon_droq *droq = arg; 1858 int this_cpu = smp_processor_id(); 1859 1860 if (droq->cpu_id == this_cpu) { 1861 napi_schedule(&droq->napi); 1862 } else { 1863 struct call_single_data *csd = &droq->csd; 1864 1865 csd->func = napi_schedule_wrapper; 1866 csd->info = &droq->napi; 1867 csd->flags = 0; 1868 1869 smp_call_function_single_async(droq->cpu_id, csd); 1870 } 1871 } 1872 1873 /** 1874 * \brief Main NAPI poll function 1875 * @param droq octeon output queue 1876 * @param budget maximum number of items to process 1877 */ 1878 static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget) 1879 { 1880 int work_done; 1881 struct lio *lio = GET_LIO(droq->napi.dev); 1882 struct octeon_device *oct = lio->oct_dev; 1883 1884 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, 1885 
POLL_EVENT_PROCESS_PKTS, 1886 budget); 1887 if (work_done < 0) { 1888 netif_info(lio, rx_err, lio->netdev, 1889 "Receive work_done < 0, rxq:%d\n", droq->q_no); 1890 goto octnet_napi_finish; 1891 } 1892 1893 if (work_done > budget) 1894 dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n", 1895 __func__, work_done, budget); 1896 1897 return work_done; 1898 1899 octnet_napi_finish: 1900 napi_complete(&droq->napi); 1901 octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR, 1902 0); 1903 return 0; 1904 } 1905 1906 /** 1907 * \brief Entry point for NAPI polling 1908 * @param napi NAPI structure 1909 * @param budget maximum number of items to process 1910 */ 1911 static int liquidio_napi_poll(struct napi_struct *napi, int budget) 1912 { 1913 struct octeon_droq *droq; 1914 int work_done; 1915 1916 droq = container_of(napi, struct octeon_droq, napi); 1917 1918 work_done = liquidio_napi_do_rx(droq, budget); 1919 1920 if (work_done < budget) { 1921 napi_complete(napi); 1922 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, 1923 POLL_EVENT_ENABLE_INTR, 0); 1924 return 0; 1925 } 1926 1927 return work_done; 1928 } 1929 1930 /** 1931 * \brief Setup input and output queues 1932 * @param octeon_dev octeon device 1933 * @param net_device Net device 1934 * 1935 * Note: Queues are with respect to the octeon device. Thus 1936 * an input queue is for egress packets, and output queues 1937 * are for ingress packets. 1938 */ 1939 static inline int setup_io_queues(struct octeon_device *octeon_dev, 1940 struct net_device *net_device) 1941 { 1942 static int first_time = 1; 1943 static struct octeon_droq_ops droq_ops; 1944 static int cpu_id; 1945 static int cpu_id_modulus; 1946 struct octeon_droq *droq; 1947 struct napi_struct *napi; 1948 int q, q_no, retval = 0; 1949 struct lio *lio; 1950 int num_tx_descs; 1951 1952 lio = GET_LIO(net_device); 1953 if (first_time) { 1954 first_time = 0; 1955 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); 1956 1957 droq_ops.fptr = liquidio_push_packet; 1958 1959 droq_ops.poll_mode = 1; 1960 droq_ops.napi_fn = liquidio_napi_drv_callback; 1961 cpu_id = 0; 1962 cpu_id_modulus = num_present_cpus(); 1963 } 1964 1965 /* set up DROQs. */ 1966 for (q = 0; q < lio->linfo.num_rxpciq; q++) { 1967 q_no = lio->linfo.rxpciq[q]; 1968 1969 retval = octeon_setup_droq(octeon_dev, q_no, 1970 CFG_GET_NUM_RX_DESCS_NIC_IF 1971 (octeon_get_conf(octeon_dev), 1972 lio->ifidx), 1973 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF 1974 (octeon_get_conf(octeon_dev), 1975 lio->ifidx), NULL); 1976 if (retval) { 1977 dev_err(&octeon_dev->pci_dev->dev, 1978 " %s : Runtime DROQ(RxQ) creation failed.\n", 1979 __func__); 1980 return 1; 1981 } 1982 1983 droq = octeon_dev->droq[q_no]; 1984 napi = &droq->napi; 1985 netif_napi_add(net_device, napi, liquidio_napi_poll, 64); 1986 1987 /* designate a CPU for this droq */ 1988 droq->cpu_id = cpu_id; 1989 cpu_id++; 1990 if (cpu_id >= cpu_id_modulus) 1991 cpu_id = 0; 1992 1993 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); 1994 } 1995 1996 /* set up IQs. 
*/ 1997 for (q = 0; q < lio->linfo.num_txpciq; q++) { 1998 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf 1999 (octeon_dev), 2000 lio->ifidx); 2001 retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q], 2002 num_tx_descs, 2003 netdev_get_tx_queue(net_device, q)); 2004 if (retval) { 2005 dev_err(&octeon_dev->pci_dev->dev, 2006 " %s : Runtime IQ(TxQ) creation failed.\n", 2007 __func__); 2008 return 1; 2009 } 2010 } 2011 2012 return 0; 2013 } 2014 2015 /** 2016 * \brief Poll routine for checking transmit queue status 2017 * @param work work_struct data structure 2018 */ 2019 static void octnet_poll_check_txq_status(struct work_struct *work) 2020 { 2021 struct cavium_wk *wk = (struct cavium_wk *)work; 2022 struct lio *lio = (struct lio *)wk->ctxptr; 2023 2024 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 2025 return; 2026 2027 check_txq_status(lio); 2028 queue_delayed_work(lio->txq_status_wq.wq, 2029 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2030 } 2031 2032 /** 2033 * \brief Sets up the txq poll check 2034 * @param netdev network device 2035 */ 2036 static inline void setup_tx_poll_fn(struct net_device *netdev) 2037 { 2038 struct lio *lio = GET_LIO(netdev); 2039 struct octeon_device *oct = lio->oct_dev; 2040 2041 lio->txq_status_wq.wq = create_workqueue("txq-status"); 2042 if (!lio->txq_status_wq.wq) { 2043 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 2044 return; 2045 } 2046 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 2047 octnet_poll_check_txq_status); 2048 lio->txq_status_wq.wk.ctxptr = lio; 2049 queue_delayed_work(lio->txq_status_wq.wq, 2050 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 2051 } 2052 2053 /** 2054 * \brief Net device open for LiquidIO 2055 * @param netdev network device 2056 */ 2057 static int liquidio_open(struct net_device *netdev) 2058 { 2059 struct lio *lio = GET_LIO(netdev); 2060 struct octeon_device *oct = lio->oct_dev; 2061 struct napi_struct *napi, *n; 2062 2063 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 2064 napi_enable(napi); 2065 2066 oct_ptp_open(netdev); 2067 2068 ifstate_set(lio, LIO_IFSTATE_RUNNING); 2069 setup_tx_poll_fn(netdev); 2070 start_txq(netdev); 2071 2072 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 2073 try_module_get(THIS_MODULE); 2074 2075 /* tell Octeon to start forwarding packets to host */ 2076 send_rx_ctrl_cmd(lio, 1); 2077 2078 /* Ready for link status updates */ 2079 lio->intf_open = 1; 2080 2081 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 2082 netdev->name); 2083 2084 return 0; 2085 } 2086 2087 /** 2088 * \brief Net device stop for LiquidIO 2089 * @param netdev network device 2090 */ 2091 static int liquidio_stop(struct net_device *netdev) 2092 { 2093 struct napi_struct *napi, *n; 2094 struct lio *lio = GET_LIO(netdev); 2095 struct octeon_device *oct = lio->oct_dev; 2096 2097 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 2098 /* Inform that netif carrier is down */ 2099 lio->intf_open = 0; 2100 lio->linfo.link.s.status = 0; 2101 2102 netif_carrier_off(netdev); 2103 2104 /* tell Octeon to stop forwarding packets to host */ 2105 send_rx_ctrl_cmd(lio, 0); 2106 2107 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 2108 flush_workqueue(lio->txq_status_wq.wq); 2109 destroy_workqueue(lio->txq_status_wq.wq); 2110 2111 if (lio->ptp_clock) { 2112 ptp_clock_unregister(lio->ptp_clock); 2113 lio->ptp_clock = NULL; 2114 } 2115 2116 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 2117 2118 /* This is a hack that 
allows DHCP to continue working. */
	set_bit(__LINK_STATE_START, &lio->netdev->state);

	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		napi_disable(napi);

	txqs_stop(netdev);

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
	module_put(THIS_MODULE);

	return 0;
}

void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		/* If command is successful, change the MACADDR. */
		netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
			   CVM_CAST64(nctrl->udd[0]));
		dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
			 netdev->name, CVM_CAST64(nctrl->udd[0]));
		memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
		break;

	case OCTNET_CMD_CHANGE_MTU:
		/* If command is successful, change the MTU. */
		netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
			   netdev->mtu, nctrl->ncmd.s.param2);
		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
			 netdev->name, netdev->mtu,
			 nctrl->ncmd.s.param2);
		netdev->mtu = nctrl->ncmd.s.param2;
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}

/**
 * \brief Convert net device flags to an octnet_ifflags mask
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
2204 */ 2205 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 2206 { 2207 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 2208 2209 if (netdev->flags & IFF_PROMISC) 2210 f |= OCTNET_IFFLAG_PROMISC; 2211 2212 if (netdev->flags & IFF_ALLMULTI) 2213 f |= OCTNET_IFFLAG_ALLMULTI; 2214 2215 if (netdev->flags & IFF_MULTICAST) { 2216 f |= OCTNET_IFFLAG_MULTICAST; 2217 2218 /* Accept all multicast addresses if there are more than we 2219 * can handle 2220 */ 2221 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 2222 f |= OCTNET_IFFLAG_ALLMULTI; 2223 } 2224 2225 if (netdev->flags & IFF_BROADCAST) 2226 f |= OCTNET_IFFLAG_BROADCAST; 2227 2228 return f; 2229 } 2230 2231 /** 2232 * \brief Net device set_multicast_list 2233 * @param netdev network device 2234 */ 2235 static void liquidio_set_mcast_list(struct net_device *netdev) 2236 { 2237 struct lio *lio = GET_LIO(netdev); 2238 struct octeon_device *oct = lio->oct_dev; 2239 struct octnic_ctrl_pkt nctrl; 2240 struct octnic_ctrl_params nparams; 2241 struct netdev_hw_addr *ha; 2242 u64 *mc; 2243 int ret, i; 2244 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 2245 2246 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2247 2248 /* Create a ctrl pkt command to be sent to core app. */ 2249 nctrl.ncmd.u64 = 0; 2250 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 2251 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2252 nctrl.ncmd.s.param2 = get_new_flags(netdev); 2253 nctrl.ncmd.s.param3 = mc_count; 2254 nctrl.ncmd.s.more = mc_count; 2255 nctrl.netpndev = (u64)netdev; 2256 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2257 2258 /* copy all the addresses into the udd */ 2259 i = 0; 2260 mc = &nctrl.udd[0]; 2261 netdev_for_each_mc_addr(ha, netdev) { 2262 *mc = 0; 2263 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 2264 /* no need to swap bytes */ 2265 2266 if (++mc > &nctrl.udd[mc_count]) 2267 break; 2268 } 2269 2270 /* Apparently, any activity in this call from the kernel has to 2271 * be atomic. So we won't wait for response. 2272 */ 2273 nctrl.wait_time = 0; 2274 2275 nparams.resp_order = OCTEON_RESP_NORESPONSE; 2276 2277 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2278 if (ret < 0) { 2279 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 2280 ret); 2281 } 2282 } 2283 2284 /** 2285 * \brief Net device set_mac_address 2286 * @param netdev network device 2287 */ 2288 static int liquidio_set_mac(struct net_device *netdev, void *p) 2289 { 2290 int ret = 0; 2291 struct lio *lio = GET_LIO(netdev); 2292 struct octeon_device *oct = lio->oct_dev; 2293 struct sockaddr *addr = (struct sockaddr *)p; 2294 struct octnic_ctrl_pkt nctrl; 2295 struct octnic_ctrl_params nparams; 2296 2297 if ((!is_valid_ether_addr(addr->sa_data)) || 2298 (ifstate_check(lio, LIO_IFSTATE_RUNNING))) 2299 return -EADDRNOTAVAIL; 2300 2301 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2302 2303 nctrl.ncmd.u64 = 0; 2304 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2305 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2306 nctrl.ncmd.s.param2 = 0; 2307 nctrl.ncmd.s.more = 1; 2308 nctrl.netpndev = (u64)netdev; 2309 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2310 nctrl.wait_time = 100; 2311 2312 nctrl.udd[0] = 0; 2313 /* The MAC Address is presented in network byte order. 
*/ 2314 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2315 2316 nparams.resp_order = OCTEON_RESP_ORDERED; 2317 2318 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2319 if (ret < 0) { 2320 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2321 return -ENOMEM; 2322 } 2323 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2324 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2325 2326 return 0; 2327 } 2328 2329 /** 2330 * \brief Net device get_stats 2331 * @param netdev network device 2332 */ 2333 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 2334 { 2335 struct lio *lio = GET_LIO(netdev); 2336 struct net_device_stats *stats = &netdev->stats; 2337 struct octeon_device *oct; 2338 u64 pkts = 0, drop = 0, bytes = 0; 2339 struct oct_droq_stats *oq_stats; 2340 struct oct_iq_stats *iq_stats; 2341 int i, iq_no, oq_no; 2342 2343 oct = lio->oct_dev; 2344 2345 for (i = 0; i < lio->linfo.num_txpciq; i++) { 2346 iq_no = lio->linfo.txpciq[i]; 2347 iq_stats = &oct->instr_queue[iq_no]->stats; 2348 pkts += iq_stats->tx_done; 2349 drop += iq_stats->tx_dropped; 2350 bytes += iq_stats->tx_tot_bytes; 2351 } 2352 2353 stats->tx_packets = pkts; 2354 stats->tx_bytes = bytes; 2355 stats->tx_dropped = drop; 2356 2357 pkts = 0; 2358 drop = 0; 2359 bytes = 0; 2360 2361 for (i = 0; i < lio->linfo.num_rxpciq; i++) { 2362 oq_no = lio->linfo.rxpciq[i]; 2363 oq_stats = &oct->droq[oq_no]->stats; 2364 pkts += oq_stats->rx_pkts_received; 2365 drop += (oq_stats->rx_dropped + 2366 oq_stats->dropped_nodispatch + 2367 oq_stats->dropped_toomany + 2368 oq_stats->dropped_nomem); 2369 bytes += oq_stats->rx_bytes_received; 2370 } 2371 2372 stats->rx_bytes = bytes; 2373 stats->rx_packets = pkts; 2374 stats->rx_dropped = drop; 2375 2376 return stats; 2377 } 2378 2379 /** 2380 * \brief Net device change_mtu 2381 * @param netdev network device 2382 */ 2383 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 2384 { 2385 struct lio *lio = GET_LIO(netdev); 2386 struct octeon_device *oct = lio->oct_dev; 2387 struct octnic_ctrl_pkt nctrl; 2388 struct octnic_ctrl_params nparams; 2389 int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE; 2390 int ret = 0; 2391 2392 /* Limit the MTU to make sure the ethernet packets are between 64 bytes 2393 * and 65535 bytes 2394 */ 2395 if ((max_frm_size < OCTNET_MIN_FRM_SIZE) || 2396 (max_frm_size > OCTNET_MAX_FRM_SIZE)) { 2397 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu); 2398 dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n", 2399 (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE), 2400 (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)); 2401 return -EINVAL; 2402 } 2403 2404 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2405 2406 nctrl.ncmd.u64 = 0; 2407 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; 2408 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2409 nctrl.ncmd.s.param2 = new_mtu; 2410 nctrl.wait_time = 100; 2411 nctrl.netpndev = (u64)netdev; 2412 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2413 2414 nparams.resp_order = OCTEON_RESP_ORDERED; 2415 2416 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2417 if (ret < 0) { 2418 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); 2419 return -1; 2420 } 2421 2422 lio->mtu = new_mtu; 2423 2424 return 0; 2425 } 2426 2427 /** 2428 * \brief Handler for SIOCSHWTSTAMP ioctl 2429 * @param netdev network device 2430 * @param ifr interface request 2431 * @param cmd command 2432 */ 2433 static int hwtstamp_ioctl(struct net_device 
*netdev, struct ifreq *ifr, int cmd) 2434 { 2435 struct hwtstamp_config conf; 2436 struct lio *lio = GET_LIO(netdev); 2437 2438 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2439 return -EFAULT; 2440 2441 if (conf.flags) 2442 return -EINVAL; 2443 2444 switch (conf.tx_type) { 2445 case HWTSTAMP_TX_ON: 2446 case HWTSTAMP_TX_OFF: 2447 break; 2448 default: 2449 return -ERANGE; 2450 } 2451 2452 switch (conf.rx_filter) { 2453 case HWTSTAMP_FILTER_NONE: 2454 break; 2455 case HWTSTAMP_FILTER_ALL: 2456 case HWTSTAMP_FILTER_SOME: 2457 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2458 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2459 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2460 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2461 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2462 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2463 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2464 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2465 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2466 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2467 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2468 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2469 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2470 break; 2471 default: 2472 return -ERANGE; 2473 } 2474 2475 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2476 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2477 2478 else 2479 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2480 2481 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2482 } 2483 2484 /** 2485 * \brief ioctl handler 2486 * @param netdev network device 2487 * @param ifr interface request 2488 * @param cmd command 2489 */ 2490 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2491 { 2492 switch (cmd) { 2493 case SIOCSHWTSTAMP: 2494 return hwtstamp_ioctl(netdev, ifr, cmd); 2495 default: 2496 return -EOPNOTSUPP; 2497 } 2498 } 2499 2500 /** 2501 * \brief handle a Tx timestamp response 2502 * @param status response status 2503 * @param buf pointer to skb 2504 */ 2505 static void handle_timestamp(struct octeon_device *oct, 2506 u32 status, 2507 void *buf) 2508 { 2509 struct octnet_buf_free_info *finfo; 2510 struct octeon_soft_command *sc; 2511 struct oct_timestamp_resp *resp; 2512 struct lio *lio; 2513 struct sk_buff *skb = (struct sk_buff *)buf; 2514 2515 finfo = (struct octnet_buf_free_info *)skb->cb; 2516 lio = finfo->lio; 2517 sc = finfo->sc; 2518 oct = lio->oct_dev; 2519 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2520 2521 if (status != OCTEON_REQUEST_DONE) { 2522 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	recv_buffer_free(skb);
}

/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 * @param xmit_more non-zero if more packets follow, so the doorbell is not rung yet
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct octeon_instr_ih *ih;
	struct octeon_instr_rdp *rdp;
	struct lio *lio;
	int ring_doorbell;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
	rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;

	ring_doorbell = !xmit_more;
	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, ih->dlengsz, ndata->reqtype);

	if (retval) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

static inline int is_ipv4(struct sk_buff *skb)
{
	return (skb->protocol == htons(ETH_P_IP)) &&
	       (ip_hdr(skb)->version == 4);
}

static inline int is_vlan(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_8021Q);
}

static inline int is_ip_fragmented(struct sk_buff *skb)
{
	/* The Don't fragment and Reserved flag fields are ignored.
	 * IP is fragmented if
	 * - the More fragments bit is set (indicating this IP is a fragment
	 *   with more to follow; the current offset could be 0).
	 * - the offset field is non-zero.
	 */
	return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
}

static inline int is_ipv6(struct sk_buff *skb)
{
	return (skb->protocol == htons(ETH_P_IPV6)) &&
	       (ipv6_hdr(skb)->version == 6);
}

static inline int is_with_extn_hdr(struct sk_buff *skb)
{
	return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
	       (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
}

static inline int is_tcpudp(struct sk_buff *skb)
{
	return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
	       (ip_hdr(skb)->protocol == IPPROTO_UDP);
}

static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
{
	u32 tag;
	struct iphdr *iphdr = ip_hdr(skb);

	tag = crc32(0, &iphdr->protocol, 1);
	tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
	tag = crc32(tag, skb_transport_header(skb), 4);
	return tag;
}

static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
{
	u32 tag;
	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);

	tag = crc32(0, &ipv6hdr->nexthdr, 1);
	tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
	tag = crc32(tag, skb_transport_header(skb), 4);
	return tag;
}
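/* Illustrative sketch (not compiled into the driver): the two tag helpers
 * above depend on field adjacency so that one crc32() call can cover
 * several 5-tuple fields at once.  Written out field by field, an
 * equivalent IPv4 computation for a TCP packet would look like:
 *
 *	struct iphdr *iph = ip_hdr(skb);
 *	struct tcphdr *th = tcp_hdr(skb);
 *	u32 tag;
 *
 *	tag = crc32(0, &iph->protocol, 1);
 *	tag = crc32(tag, (u8 *)&iph->saddr, sizeof(iph->saddr));
 *	tag = crc32(tag, (u8 *)&iph->daddr, sizeof(iph->daddr));
 *	tag = crc32(tag, (u8 *)&th->source, sizeof(th->source));
 *	tag = crc32(tag, (u8 *)&th->dest, sizeof(th->dest));
 *
 * This produces the same value as get_ipv4_5tuple_tag() because saddr and
 * daddr are contiguous in struct iphdr (8 bytes), the source and
 * destination ports are the first 4 bytes of the transport header, and
 * chained crc32() calls over consecutive buffers equal a single crc32()
 * over their concatenation.
 */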
/** \brief Transmit network packets to the Octeon interface
 * @param skb sk_buff struct to be transmitted
 * @param netdev pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *          (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	int cpu = 0, status = 0;
	int q_idx = 0, iq_no = 0;
	int xmit_more;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	if (netif_is_multiqueue(netdev)) {
		cpu = skb->queue_mapping;
		q_idx = (cpu & (lio->linfo.num_txpciq - 1));
		iq_no = lio->linfo.txpciq[q_idx];
	} else {
		iq_no = lio->txq;
	}

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.status) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.status);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI.
*/ 2714 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2715 2716 ndata.buf = (void *)finfo; 2717 2718 ndata.q_no = iq_no; 2719 2720 if (netif_is_multiqueue(netdev)) { 2721 if (octnet_iq_is_full(oct, ndata.q_no)) { 2722 /* defer sending if queue is full */ 2723 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2724 ndata.q_no); 2725 stats->tx_iq_busy++; 2726 return NETDEV_TX_BUSY; 2727 } 2728 } else { 2729 if (octnet_iq_is_full(oct, lio->txq)) { 2730 /* defer sending if queue is full */ 2731 stats->tx_iq_busy++; 2732 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2733 ndata.q_no); 2734 return NETDEV_TX_BUSY; 2735 } 2736 } 2737 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2738 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no ); 2739 */ 2740 2741 ndata.datasize = skb->len; 2742 2743 cmdsetup.u64 = 0; 2744 cmdsetup.s.ifidx = lio->linfo.ifidx; 2745 2746 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2747 if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) { 2748 tag = get_ipv4_5tuple_tag(skb); 2749 2750 cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; 2751 2752 if (ip_hdr(skb)->ihl > 5) 2753 cmdsetup.s.ipv4opts_ipv6exthdr = 2754 OCT_PKT_PARAM_IPV4OPTS; 2755 2756 } else if (is_ipv6(skb)) { 2757 tag = get_ipv6_5tuple_tag(skb); 2758 2759 cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; 2760 2761 if (is_with_extn_hdr(skb)) 2762 cmdsetup.s.ipv4opts_ipv6exthdr = 2763 OCT_PKT_PARAM_IPV6EXTHDR; 2764 2765 } else if (is_vlan(skb)) { 2766 if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto 2767 == htons(ETH_P_IP) && 2768 !is_ip_fragmented(skb) && is_tcpudp(skb)) { 2769 tag = get_ipv4_5tuple_tag(skb); 2770 2771 cmdsetup.s.cksum_offset = 2772 sizeof(struct vlan_ethhdr) + 1; 2773 2774 if (ip_hdr(skb)->ihl > 5) 2775 cmdsetup.s.ipv4opts_ipv6exthdr = 2776 OCT_PKT_PARAM_IPV4OPTS; 2777 2778 } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto 2779 == htons(ETH_P_IPV6)) { 2780 tag = get_ipv6_5tuple_tag(skb); 2781 2782 cmdsetup.s.cksum_offset = 2783 sizeof(struct vlan_ethhdr) + 1; 2784 2785 if (is_with_extn_hdr(skb)) 2786 cmdsetup.s.ipv4opts_ipv6exthdr = 2787 OCT_PKT_PARAM_IPV6EXTHDR; 2788 } 2789 } 2790 } 2791 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2792 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2793 cmdsetup.s.timestamp = 1; 2794 } 2795 2796 if (skb_shinfo(skb)->nr_frags == 0) { 2797 cmdsetup.s.u.datasize = skb->len; 2798 octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); 2799 /* Offload checksum calculation for TCP/UDP packets */ 2800 ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, 2801 skb->data, 2802 skb->len, 2803 DMA_TO_DEVICE); 2804 if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { 2805 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2806 __func__); 2807 return NETDEV_TX_BUSY; 2808 } 2809 2810 finfo->dptr = ndata.cmd.dptr; 2811 2812 ndata.reqtype = REQTYPE_NORESP_NET; 2813 2814 } else { 2815 int i, frags; 2816 struct skb_frag_struct *frag; 2817 struct octnic_gather *g; 2818 2819 spin_lock(&lio->lock); 2820 g = (struct octnic_gather *)list_delete_head(&lio->glist); 2821 spin_unlock(&lio->lock); 2822 2823 if (!g) { 2824 netif_info(lio, tx_err, lio->netdev, 2825 "Transmit scatter gather: glist null!\n"); 2826 goto lio_xmit_failed; 2827 } 2828 2829 cmdsetup.s.gather = 1; 2830 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2831 octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); 2832 2833 memset(g->sg, 0, g->sg_size); 2834 2835 g->sg[0].ptr[0] = 
dma_map_single(&oct->pci_dev->dev, 2836 skb->data, 2837 (skb->len - skb->data_len), 2838 DMA_TO_DEVICE); 2839 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2840 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2841 __func__); 2842 return NETDEV_TX_BUSY; 2843 } 2844 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2845 2846 frags = skb_shinfo(skb)->nr_frags; 2847 i = 1; 2848 while (frags--) { 2849 frag = &skb_shinfo(skb)->frags[i - 1]; 2850 2851 g->sg[(i >> 2)].ptr[(i & 3)] = 2852 dma_map_page(&oct->pci_dev->dev, 2853 frag->page.p, 2854 frag->page_offset, 2855 frag->size, 2856 DMA_TO_DEVICE); 2857 2858 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 2859 i++; 2860 } 2861 2862 ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, 2863 g->sg, g->sg_size, 2864 DMA_TO_DEVICE); 2865 if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { 2866 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2867 __func__); 2868 dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], 2869 skb->len - skb->data_len, 2870 DMA_TO_DEVICE); 2871 return NETDEV_TX_BUSY; 2872 } 2873 2874 finfo->dptr = ndata.cmd.dptr; 2875 finfo->g = g; 2876 2877 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2878 } 2879 2880 if (skb_shinfo(skb)->gso_size) { 2881 struct octeon_instr_irh *irh = 2882 (struct octeon_instr_irh *)&ndata.cmd.irh; 2883 union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0]; 2884 2885 irh->len = 1; /* to indicate that ossp[0] contains tx_info */ 2886 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2887 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2888 } 2889 2890 xmit_more = skb->xmit_more; 2891 2892 if (unlikely(cmdsetup.s.timestamp)) 2893 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2894 else 2895 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2896 if (status == IQ_SEND_FAILED) 2897 goto lio_xmit_failed; 2898 2899 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2900 2901 if (status == IQ_SEND_STOP) 2902 stop_q(lio->netdev, q_idx); 2903 2904 netdev->trans_start = jiffies; 2905 2906 stats->tx_done++; 2907 stats->tx_tot_bytes += skb->len; 2908 2909 return NETDEV_TX_OK; 2910 2911 lio_xmit_failed: 2912 stats->tx_dropped++; 2913 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2914 iq_no, stats->tx_dropped); 2915 dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr, 2916 ndata.datasize, DMA_TO_DEVICE); 2917 recv_buffer_free(skb); 2918 return NETDEV_TX_OK; 2919 } 2920 2921 /** \brief Network device Tx timeout 2922 * @param netdev pointer to network device 2923 */ 2924 static void liquidio_tx_timeout(struct net_device *netdev) 2925 { 2926 struct lio *lio; 2927 2928 lio = GET_LIO(netdev); 2929 2930 netif_info(lio, tx_err, lio->netdev, 2931 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2932 netdev->stats.tx_dropped); 2933 netdev->trans_start = jiffies; 2934 txqs_wake(netdev); 2935 } 2936 2937 int liquidio_set_feature(struct net_device *netdev, int cmd) 2938 { 2939 struct lio *lio = GET_LIO(netdev); 2940 struct octeon_device *oct = lio->oct_dev; 2941 struct octnic_ctrl_pkt nctrl; 2942 struct octnic_ctrl_params nparams; 2943 int ret = 0; 2944 2945 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2946 2947 nctrl.ncmd.u64 = 0; 2948 nctrl.ncmd.s.cmd = cmd; 2949 nctrl.ncmd.s.param1 = lio->linfo.ifidx; 2950 nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6; 2951 nctrl.wait_time = 100; 2952 nctrl.netpndev = (u64)netdev; 2953 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2954 2955 
nparams.resp_order = OCTEON_RESP_NORESPONSE; 2956 2957 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); 2958 if (ret < 0) { 2959 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", 2960 ret); 2961 } 2962 return ret; 2963 } 2964 2965 /** \brief Net device fix features 2966 * @param netdev pointer to network device 2967 * @param request features requested 2968 * @returns updated features list 2969 */ 2970 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2971 netdev_features_t request) 2972 { 2973 struct lio *lio = netdev_priv(netdev); 2974 2975 if ((request & NETIF_F_RXCSUM) && 2976 !(lio->dev_capability & NETIF_F_RXCSUM)) 2977 request &= ~NETIF_F_RXCSUM; 2978 2979 if ((request & NETIF_F_HW_CSUM) && 2980 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2981 request &= ~NETIF_F_HW_CSUM; 2982 2983 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2984 request &= ~NETIF_F_TSO; 2985 2986 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2987 request &= ~NETIF_F_TSO6; 2988 2989 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2990 request &= ~NETIF_F_LRO; 2991 2992 /*Disable LRO if RXCSUM is off */ 2993 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2994 (lio->dev_capability & NETIF_F_LRO)) 2995 request &= ~NETIF_F_LRO; 2996 2997 return request; 2998 } 2999 3000 /** \brief Net device set features 3001 * @param netdev pointer to network device 3002 * @param features features to enable/disable 3003 */ 3004 static int liquidio_set_features(struct net_device *netdev, 3005 netdev_features_t features) 3006 { 3007 struct lio *lio = netdev_priv(netdev); 3008 3009 if (!((netdev->features ^ features) & NETIF_F_LRO)) 3010 return 0; 3011 3012 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 3013 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); 3014 else if (!(features & NETIF_F_LRO) && 3015 (lio->dev_capability & NETIF_F_LRO)) 3016 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE); 3017 3018 return 0; 3019 } 3020 3021 static struct net_device_ops lionetdevops = { 3022 .ndo_open = liquidio_open, 3023 .ndo_stop = liquidio_stop, 3024 .ndo_start_xmit = liquidio_xmit, 3025 .ndo_get_stats = liquidio_get_stats, 3026 .ndo_set_mac_address = liquidio_set_mac, 3027 .ndo_set_rx_mode = liquidio_set_mcast_list, 3028 .ndo_tx_timeout = liquidio_tx_timeout, 3029 .ndo_change_mtu = liquidio_change_mtu, 3030 .ndo_do_ioctl = liquidio_ioctl, 3031 .ndo_fix_features = liquidio_fix_features, 3032 .ndo_set_features = liquidio_set_features, 3033 }; 3034 3035 /** \brief Entry point for the liquidio module 3036 */ 3037 static int __init liquidio_init(void) 3038 { 3039 int i; 3040 struct handshake *hs; 3041 3042 init_completion(&first_stage); 3043 3044 octeon_init_device_list(conf_type); 3045 3046 if (liquidio_init_pci()) 3047 return -EINVAL; 3048 3049 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3050 3051 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3052 hs = &handshake[i]; 3053 if (hs->pci_dev) { 3054 wait_for_completion(&hs->init); 3055 if (!hs->init_ok) { 3056 /* init handshake failed */ 3057 dev_err(&hs->pci_dev->dev, 3058 "Failed to init device\n"); 3059 liquidio_deinit_pci(); 3060 return -EIO; 3061 } 3062 } 3063 } 3064 3065 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3066 hs = &handshake[i]; 3067 if (hs->pci_dev) { 3068 wait_for_completion_timeout(&hs->started, 3069 msecs_to_jiffies(30000)); 3070 if (!hs->started_ok) { 3071 /* starter handshake 
failed */ 3072 dev_err(&hs->pci_dev->dev, 3073 "Firmware failed to start\n"); 3074 liquidio_deinit_pci(); 3075 return -EIO; 3076 } 3077 } 3078 } 3079 3080 return 0; 3081 } 3082 3083 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3084 { 3085 struct octeon_device *oct = (struct octeon_device *)buf; 3086 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3087 int ifidx = 0; 3088 union oct_link_status *ls; 3089 int i; 3090 3091 if ((recv_pkt->buffer_size[0] != sizeof(*ls)) || 3092 (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) { 3093 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3094 recv_pkt->buffer_size[0], 3095 recv_pkt->rh.r_nic_info.ifidx); 3096 goto nic_info_err; 3097 } 3098 3099 ifidx = recv_pkt->rh.r_nic_info.ifidx; 3100 ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); 3101 3102 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3103 3104 update_link_status(oct->props[ifidx].netdev, ls); 3105 3106 nic_info_err: 3107 for (i = 0; i < recv_pkt->buffer_count; i++) 3108 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3109 octeon_free_recv_info(recv_info); 3110 return 0; 3111 } 3112 3113 /** 3114 * \brief Setup network interfaces 3115 * @param octeon_dev octeon device 3116 * 3117 * Called during init time for each device. It assumes the NIC 3118 * is already up and running. The link information for each 3119 * interface is passed in link_info. 3120 */ 3121 static int setup_nic_devices(struct octeon_device *octeon_dev) 3122 { 3123 struct lio *lio = NULL; 3124 struct net_device *netdev; 3125 u8 mac[6], i, j; 3126 struct octeon_soft_command *sc; 3127 struct liquidio_if_cfg_context *ctx; 3128 struct liquidio_if_cfg_resp *resp; 3129 struct octdev_props *props; 3130 int retval, num_iqueues, num_oqueues, q_no; 3131 u64 q_mask; 3132 int num_cpus = num_online_cpus(); 3133 union oct_nic_if_cfg if_cfg; 3134 unsigned int base_queue; 3135 unsigned int gmx_port_id; 3136 u32 resp_size, ctx_size; 3137 3138 /* This is to handle link status changes */ 3139 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3140 OPCODE_NIC_INFO, 3141 lio_nic_info, octeon_dev); 3142 3143 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3144 * They are handled directly. 
3145 */ 3146 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3147 free_netbuf); 3148 3149 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3150 free_netsgbuf); 3151 3152 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3153 free_netsgbuf_with_resp); 3154 3155 for (i = 0; i < octeon_dev->ifcount; i++) { 3156 resp_size = sizeof(struct liquidio_if_cfg_resp); 3157 ctx_size = sizeof(struct liquidio_if_cfg_context); 3158 sc = (struct octeon_soft_command *) 3159 octeon_alloc_soft_command(octeon_dev, 0, 3160 resp_size, ctx_size); 3161 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3162 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 3163 3164 num_iqueues = 3165 CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i); 3166 num_oqueues = 3167 CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i); 3168 base_queue = 3169 CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i); 3170 gmx_port_id = 3171 CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i); 3172 if (num_iqueues > num_cpus) 3173 num_iqueues = num_cpus; 3174 if (num_oqueues > num_cpus) 3175 num_oqueues = num_cpus; 3176 dev_dbg(&octeon_dev->pci_dev->dev, 3177 "requesting config for interface %d, iqs %d, oqs %d\n", 3178 i, num_iqueues, num_oqueues); 3179 ACCESS_ONCE(ctx->cond) = 0; 3180 ctx->octeon_id = lio_get_device_id(octeon_dev); 3181 init_waitqueue_head(&ctx->wc); 3182 3183 if_cfg.u64 = 0; 3184 if_cfg.s.num_iqueues = num_iqueues; 3185 if_cfg.s.num_oqueues = num_oqueues; 3186 if_cfg.s.base_queue = base_queue; 3187 if_cfg.s.gmx_port_id = gmx_port_id; 3188 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3189 OPCODE_NIC_IF_CFG, i, 3190 if_cfg.u64, 0); 3191 3192 sc->callback = if_cfg_callback; 3193 sc->callback_arg = sc; 3194 sc->wait_time = 1000; 3195 3196 retval = octeon_send_soft_command(octeon_dev, sc); 3197 if (retval) { 3198 dev_err(&octeon_dev->pci_dev->dev, 3199 "iq/oq config failed status: %x\n", 3200 retval); 3201 /* Soft instr is freed by driver in case of failure. */ 3202 goto setup_nic_dev_fail; 3203 } 3204 3205 /* Sleep on a wait queue till the cond flag indicates that the 3206 * response arrived or timed-out. 3207 */ 3208 sleep_cond(&ctx->wc, &ctx->cond); 3209 retval = resp->status; 3210 if (retval) { 3211 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3212 goto setup_nic_dev_fail; 3213 } 3214 3215 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3216 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3217 3218 num_iqueues = hweight64(resp->cfg_info.iqmask); 3219 num_oqueues = hweight64(resp->cfg_info.oqmask); 3220 3221 if (!(num_iqueues) || !(num_oqueues)) { 3222 dev_err(&octeon_dev->pci_dev->dev, 3223 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3224 resp->cfg_info.iqmask, 3225 resp->cfg_info.oqmask); 3226 goto setup_nic_dev_fail; 3227 } 3228 dev_dbg(&octeon_dev->pci_dev->dev, 3229 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 3230 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3231 num_iqueues, num_oqueues); 3232 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 3233 3234 if (!netdev) { 3235 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3236 goto setup_nic_dev_fail; 3237 } 3238 3239 props = &octeon_dev->props[i]; 3240 props->netdev = netdev; 3241 3242 if (num_iqueues > 1) 3243 lionetdevops.ndo_select_queue = select_q; 3244 3245 /* Associate the routines that will handle different 3246 * netdev tasks. 
3247 */ 3248 netdev->netdev_ops = &lionetdevops; 3249 3250 lio = GET_LIO(netdev); 3251 3252 memset(lio, 0, sizeof(struct lio)); 3253 3254 lio->linfo.ifidx = resp->cfg_info.ifidx; 3255 lio->ifidx = resp->cfg_info.ifidx; 3256 3257 lio->linfo.num_rxpciq = num_oqueues; 3258 lio->linfo.num_txpciq = num_iqueues; 3259 q_mask = resp->cfg_info.oqmask; 3260 /* q_mask is 0-based and already verified mask is nonzero */ 3261 for (j = 0; j < num_oqueues; j++) { 3262 q_no = __ffs64(q_mask); 3263 q_mask &= (~(1UL << q_no)); 3264 lio->linfo.rxpciq[j] = q_no; 3265 } 3266 q_mask = resp->cfg_info.iqmask; 3267 for (j = 0; j < num_iqueues; j++) { 3268 q_no = __ffs64(q_mask); 3269 q_mask &= (~(1UL << q_no)); 3270 lio->linfo.txpciq[j] = q_no; 3271 } 3272 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3273 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3274 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3275 3276 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3277 3278 lio->dev_capability = NETIF_F_HIGHDMA 3279 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 3280 | NETIF_F_SG | NETIF_F_RXCSUM 3281 | NETIF_F_TSO | NETIF_F_TSO6 3282 | NETIF_F_LRO; 3283 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3284 3285 netdev->features = lio->dev_capability; 3286 netdev->vlan_features = lio->dev_capability; 3287 3288 netdev->hw_features = lio->dev_capability; 3289 3290 /* Point to the properties for octeon device to which this 3291 * interface belongs. 3292 */ 3293 lio->oct_dev = octeon_dev; 3294 lio->octprops = props; 3295 lio->netdev = netdev; 3296 spin_lock_init(&lio->lock); 3297 3298 dev_dbg(&octeon_dev->pci_dev->dev, 3299 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3300 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3301 3302 /* 64-bit swap required on LE machines */ 3303 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3304 for (j = 0; j < 6; j++) 3305 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3306 3307 /* Copy MAC Address to OS network device structure */ 3308 3309 ether_addr_copy(netdev->dev_addr, mac); 3310 3311 if (setup_io_queues(octeon_dev, netdev)) { 3312 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3313 goto setup_nic_dev_fail; 3314 } 3315 3316 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3317 3318 /* By default all interfaces on a single Octeon uses the same 3319 * tx and rx queues 3320 */ 3321 lio->txq = lio->linfo.txpciq[0]; 3322 lio->rxq = lio->linfo.rxpciq[0]; 3323 3324 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3325 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3326 3327 if (setup_glist(lio)) { 3328 dev_err(&octeon_dev->pci_dev->dev, 3329 "Gather list allocation failed\n"); 3330 goto setup_nic_dev_fail; 3331 } 3332 3333 /* Register ethtool support */ 3334 liquidio_set_ethtool_ops(netdev); 3335 3336 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); 3337 3338 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3339 liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE); 3340 3341 /* Register the network device with the OS */ 3342 if (register_netdev(netdev)) { 3343 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3344 goto setup_nic_dev_fail; 3345 } 3346 3347 dev_dbg(&octeon_dev->pci_dev->dev, 3348 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3349 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3350 netif_carrier_off(netdev); 3351 3352 if (lio->linfo.link.s.status) { 3353 netif_carrier_on(netdev); 3354 start_txq(netdev); 3355 } else { 3356 netif_carrier_off(netdev); 3357 } 3358 3359 ifstate_set(lio, 
LIO_IFSTATE_REGISTERED); 3360 3361 dev_dbg(&octeon_dev->pci_dev->dev, 3362 "NIC ifidx:%d Setup successful\n", i); 3363 3364 octeon_free_soft_command(octeon_dev, sc); 3365 } 3366 3367 return 0; 3368 3369 setup_nic_dev_fail: 3370 3371 octeon_free_soft_command(octeon_dev, sc); 3372 3373 while (i--) { 3374 dev_err(&octeon_dev->pci_dev->dev, 3375 "NIC ifidx:%d Setup failed\n", i); 3376 liquidio_destroy_nic_device(octeon_dev, i); 3377 } 3378 return -ENODEV; 3379 } 3380 3381 /** 3382 * \brief initialize the NIC 3383 * @param oct octeon device 3384 * 3385 * This initialization routine is called once the Octeon device application is 3386 * up and running 3387 */ 3388 static int liquidio_init_nic_module(struct octeon_device *oct) 3389 { 3390 struct oct_intrmod_cfg *intrmod_cfg; 3391 int retval = 0; 3392 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3393 3394 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3395 3396 /* only default iq and oq were initialized 3397 * initialize the rest as well 3398 */ 3399 /* run port_config command for each port */ 3400 oct->ifcount = num_nic_ports; 3401 3402 memset(oct->props, 0, 3403 sizeof(struct octdev_props) * num_nic_ports); 3404 3405 retval = setup_nic_devices(oct); 3406 if (retval) { 3407 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3408 goto octnet_init_failure; 3409 } 3410 3411 liquidio_ptp_init(oct); 3412 3413 /* Initialize interrupt moderation params */ 3414 intrmod_cfg = &((struct octeon_device *)oct)->intrmod; 3415 intrmod_cfg->intrmod_enable = 1; 3416 intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; 3417 intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; 3418 intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; 3419 intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER; 3420 intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER; 3421 intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER; 3422 intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER; 3423 3424 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3425 3426 return retval; 3427 3428 octnet_init_failure: 3429 3430 oct->ifcount = 0; 3431 3432 return retval; 3433 } 3434 3435 /** 3436 * \brief starter callback that invokes the remaining initialization work after 3437 * the NIC is up and running. 3438 * @param octptr work struct work_struct 3439 */ 3440 static void nic_starter(struct work_struct *work) 3441 { 3442 struct octeon_device *oct; 3443 struct cavium_wk *wk = (struct cavium_wk *)work; 3444 3445 oct = (struct octeon_device *)wk->ctxptr; 3446 3447 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3448 return; 3449 3450 /* If the status of the device is CORE_OK, the core 3451 * application has reported its application type. Call 3452 * any registered handlers now and move to the RUNNING 3453 * state. 3454 */ 3455 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { 3456 schedule_delayed_work(&oct->nic_poll_work.work, 3457 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3458 return; 3459 } 3460 3461 atomic_set(&oct->status, OCT_DEV_RUNNING); 3462 3463 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { 3464 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); 3465 3466 if (liquidio_init_nic_module(oct)) 3467 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); 3468 else 3469 handshake[oct->octeon_id].started_ok = 1; 3470 } else { 3471 dev_err(&oct->pci_dev->dev, 3472 "Unexpected application running on NIC (%d). 
Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;

	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* Do a soft reset of the Octeon device. */
	if (octeon_dev->fn_list.soft_reset(octeon_dev))
		return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	octeon_set_io_queues_off(octeon_dev);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		/* On error, release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_iqs; j++)
			octeon_delete_instr_queue(octeon_dev, j);
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		/* Release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_oqs; j++)
			octeon_delete_droq(octeon_dev, j);
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	/* The input and output queue registers were setup earlier (the queues
	 * were not enabled). Any additional registers that need to be
	 * programmed should be done now.
3570 */ 3571 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 3572 if (ret) { 3573 dev_err(&octeon_dev->pci_dev->dev, 3574 "Failed to configure device registers\n"); 3575 return ret; 3576 } 3577 3578 /* Initialize the tasklet that handles output queue packet processing.*/ 3579 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); 3580 tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh, 3581 (unsigned long)octeon_dev); 3582 3583 /* Setup the interrupt handler and record the INT SUM register address 3584 */ 3585 octeon_setup_interrupt(octeon_dev); 3586 3587 /* Enable Octeon device interrupts */ 3588 octeon_dev->fn_list.enable_interrupt(octeon_dev->chip); 3589 3590 /* Enable the input and output queues for this Octeon device */ 3591 octeon_dev->fn_list.enable_io_queues(octeon_dev); 3592 3593 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); 3594 3595 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); 3596 3597 if (ddr_timeout == 0) { 3598 dev_info(&octeon_dev->pci_dev->dev, 3599 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); 3600 } 3601 3602 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); 3603 3604 /* Wait for the octeon to initialize DDR after the soft-reset. */ 3605 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); 3606 if (ret) { 3607 dev_err(&octeon_dev->pci_dev->dev, 3608 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", 3609 ret); 3610 return 1; 3611 } 3612 3613 if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) { 3614 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); 3615 return 1; 3616 } 3617 3618 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); 3619 ret = octeon_init_consoles(octeon_dev); 3620 if (ret) { 3621 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); 3622 return 1; 3623 } 3624 ret = octeon_add_console(octeon_dev, 0); 3625 if (ret) { 3626 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); 3627 return 1; 3628 } 3629 3630 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); 3631 3632 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); 3633 ret = load_firmware(octeon_dev); 3634 if (ret) { 3635 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); 3636 return 1; 3637 } 3638 3639 handshake[octeon_dev->octeon_id].init_ok = 1; 3640 complete(&handshake[octeon_dev->octeon_id].init); 3641 3642 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); 3643 3644 /* Send Credit for Octeon Output queues. Credits are always sent after 3645 * the output queue is enabled. 3646 */ 3647 for (j = 0; j < octeon_dev->num_oqs; j++) 3648 writel(octeon_dev->droq[j]->max_count, 3649 octeon_dev->droq[j]->pkts_credit_reg); 3650 3651 /* Packets can start arriving on the output queues from this point. */ 3652 3653 return 0; 3654 } 3655 3656 /** 3657 * \brief Exits the module 3658 */ 3659 static void __exit liquidio_exit(void) 3660 { 3661 liquidio_deinit_pci(); 3662 3663 pr_info("LiquidIO network module is now unloaded\n"); 3664 } 3665 3666 module_init(liquidio_init); 3667 module_exit(liquidio_exit); 3668
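/* Illustrative sketch (assumptions noted, not compiled into the driver):
 * module load is a two-stage handshake built on struct completion.  Per
 * device, octeon_device_init() sets init_ok and completes ->init once the
 * firmware image has been loaded, nic_starter() sets started_ok and
 * completes ->started once the NIC application reports in, and
 * liquidio_init() blocks on both stages before declaring the module loaded:
 *
 *	struct handshake *hs = &handshake[i];
 *
 *	wait_for_completion(&hs->init);
 *	if (!hs->init_ok)
 *		return -EIO;		// firmware load failed
 *
 *	wait_for_completion_timeout(&hs->started, msecs_to_jiffies(30000));
 *	if (!hs->started_ok)
 *		return -EIO;		// NIC application never came up
 *
 * The completion objects themselves are assumed to be initialized with
 * init_completion() in the PCI probe path, which lies outside this section.
 */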