1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <linux/firmware.h> 22 #include <net/vxlan.h> 23 #include <linux/kthread.h> 24 #include "liquidio_common.h" 25 #include "octeon_droq.h" 26 #include "octeon_iq.h" 27 #include "response_manager.h" 28 #include "octeon_device.h" 29 #include "octeon_nic.h" 30 #include "octeon_main.h" 31 #include "octeon_network.h" 32 #include "cn66xx_regs.h" 33 #include "cn66xx_device.h" 34 #include "cn68xx_device.h" 35 #include "cn23xx_pf_device.h" 36 #include "liquidio_image.h" 37 #include "lio_vf_rep.h" 38 39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); 41 MODULE_LICENSE("GPL"); 42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME 43 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME 45 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME 47 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME 49 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 50 51 static int ddr_timeout = 10000; 52 module_param(ddr_timeout, int, 0644); 53 MODULE_PARM_DESC(ddr_timeout, 54 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); 55 56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 57 58 static int debug = -1; 59 module_param(debug, int, 0644); 60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 61 62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; 63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); 64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\"."); 65 66 static u32 console_bitmask; 67 module_param(console_bitmask, int, 0644); 68 MODULE_PARM_DESC(console_bitmask, 69 "Bitmask indicating which consoles have debug output redirected to syslog."); 70 71 /** 72 * octeon_console_debug_enabled - determines if a given console has debug enabled. 73 * @console: console to check 74 * Return: 1 = enabled. 0 otherwise 75 */ 76 static int octeon_console_debug_enabled(u32 console) 77 { 78 return (console_bitmask >> (console)) & 0x1; 79 } 80 81 /* Polling interval for determining when NIC application is alive */ 82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 83 84 /* runtime link query interval */ 85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 86 /* update localtime to octeon firmware every 60 seconds. 87 * make firmware to use same time reference, so that it will be easy to 88 * correlate firmware logged events/errors with host events, for debugging. 89 */ 90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 91 92 /* time to wait for possible in-flight requests in milliseconds */ 93 #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) 94 95 struct oct_link_status_resp { 96 u64 rh; 97 struct oct_link_info link_info; 98 u64 status; 99 }; 100 101 struct oct_timestamp_resp { 102 u64 rh; 103 u64 timestamp; 104 u64 status; 105 }; 106 107 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) 108 109 union tx_info { 110 u64 u64; 111 struct { 112 #ifdef __BIG_ENDIAN_BITFIELD 113 u16 gso_size; 114 u16 gso_segs; 115 u32 reserved; 116 #else 117 u32 reserved; 118 u16 gso_segs; 119 u16 gso_size; 120 #endif 121 } s; 122 }; 123 124 /* Octeon device properties to be used by the NIC module. 125 * Each octeon device in the system will be represented 126 * by this structure in the NIC module. 127 */ 128 129 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 130 #define OCTNIC_GSO_MAX_SIZE \ 131 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 132 133 struct handshake { 134 struct completion init; 135 struct completion started; 136 struct pci_dev *pci_dev; 137 int init_ok; 138 int started_ok; 139 }; 140 141 #ifdef CONFIG_PCI_IOV 142 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); 143 #endif 144 145 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 146 char *prefix, char *suffix); 147 148 static int octeon_device_init(struct octeon_device *); 149 static int liquidio_stop(struct net_device *netdev); 150 static void liquidio_remove(struct pci_dev *pdev); 151 static int liquidio_probe(struct pci_dev *pdev, 152 const struct pci_device_id *ent); 153 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 154 int linkstate); 155 156 static struct handshake handshake[MAX_OCTEON_DEVICES]; 157 static struct completion first_stage; 158 159 static void octeon_droq_bh(struct tasklet_struct *t) 160 { 161 int q_no; 162 int reschedule = 0; 163 struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t, 164 droq_tasklet); 165 struct octeon_device *oct = oct_priv->dev; 166 167 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { 168 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) 169 continue; 170 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 171 MAX_PACKET_BUDGET); 172 lio_enable_irq(oct->droq[q_no], NULL); 173 174 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 175 /* set time and cnt interrupt thresholds for this DROQ 176 * for NAPI 177 */ 178 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; 179 180 octeon_write_csr64( 181 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), 182 0x5700000040ULL); 183 octeon_write_csr64( 184 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); 185 } 186 } 187 188 if (reschedule) 189 tasklet_schedule(&oct_priv->droq_tasklet); 190 } 191 192 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 193 { 194 struct octeon_device_priv *oct_priv = 195 (struct octeon_device_priv *)oct->priv; 196 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 197 int i; 198 199 do { 200 pending_pkts = 0; 201 202 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 203 if (!(oct->io_qmask.oq & BIT_ULL(i))) 204 continue; 205 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 206 } 207 if (pkt_cnt > 0) { 208 pending_pkts += pkt_cnt; 209 tasklet_schedule(&oct_priv->droq_tasklet); 210 } 211 pkt_cnt = 0; 212 schedule_timeout_uninterruptible(1); 213 214 } while (retry-- && pending_pkts); 215 216 return pkt_cnt; 217 } 218 219 /** 220 * force_io_queues_off - Forces all IO queues off on a given device 221 * @oct: Pointer to Octeon device 222 */ 223 static void force_io_queues_off(struct octeon_device *oct) 224 { 225 if ((oct->chip_id == OCTEON_CN66XX) || 226 (oct->chip_id == OCTEON_CN68XX)) { 227 /* Reset the Enable bits for Input Queues. */ 228 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 229 230 /* Reset the Enable bits for Output Queues. */ 231 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); 232 } 233 } 234 235 /** 236 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc 237 * @oct: Pointer to Octeon device 238 */ 239 static inline void pcierror_quiesce_device(struct octeon_device *oct) 240 { 241 int i; 242 243 /* Disable the input and output queues now. No more packets will 244 * arrive from Octeon, but we should wait for all packet processing 245 * to finish. 246 */ 247 force_io_queues_off(oct); 248 249 /* To allow for in-flight requests */ 250 schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST); 251 252 if (wait_for_pending_requests(oct)) 253 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 254 255 /* Force all requests waiting to be fetched by OCTEON to complete. */ 256 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 257 struct octeon_instr_queue *iq; 258 259 if (!(oct->io_qmask.iq & BIT_ULL(i))) 260 continue; 261 iq = oct->instr_queue[i]; 262 263 if (atomic_read(&iq->instr_pending)) { 264 spin_lock_bh(&iq->lock); 265 iq->fill_cnt = 0; 266 iq->octeon_read_index = iq->host_write_index; 267 iq->stats.instr_processed += 268 atomic_read(&iq->instr_pending); 269 lio_process_iq_request_list(oct, iq, 0); 270 spin_unlock_bh(&iq->lock); 271 } 272 } 273 274 /* Force all pending ordered list requests to time out. */ 275 lio_process_ordered_list(oct, 1); 276 277 /* We do not need to wait for output queue packets to be processed. */ 278 } 279 280 /** 281 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status 282 * @dev: Pointer to PCI device 283 */ 284 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 285 { 286 int pos = 0x100; 287 u32 status, mask; 288 289 pr_info("%s :\n", __func__); 290 291 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 292 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 293 if (dev->error_state == pci_channel_io_normal) 294 status &= ~mask; /* Clear corresponding nonfatal bits */ 295 else 296 status &= mask; /* Clear corresponding fatal bits */ 297 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 298 } 299 300 /** 301 * stop_pci_io - Stop all PCI IO to a given device 302 * @oct: Pointer to Octeon device 303 */ 304 static void stop_pci_io(struct octeon_device *oct) 305 { 306 /* No more instructions will be forwarded. */ 307 atomic_set(&oct->status, OCT_DEV_IN_RESET); 308 309 pci_disable_device(oct->pci_dev); 310 311 /* Disable interrupts */ 312 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 313 314 pcierror_quiesce_device(oct); 315 316 /* Release the interrupt line */ 317 free_irq(oct->pci_dev->irq, oct); 318 319 if (oct->flags & LIO_FLAG_MSI_ENABLED) 320 pci_disable_msi(oct->pci_dev); 321 322 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 323 lio_get_state_string(&oct->status)); 324 325 /* making it a common function for all OCTEON models */ 326 cleanup_aer_uncorrect_error_status(oct->pci_dev); 327 } 328 329 /** 330 * liquidio_pcie_error_detected - called when PCI error is detected 331 * @pdev: Pointer to PCI device 332 * @state: The current pci connection state 333 * 334 * This function is called after a PCI bus error affecting 335 * this device has been detected. 336 */ 337 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 338 pci_channel_state_t state) 339 { 340 struct octeon_device *oct = pci_get_drvdata(pdev); 341 342 /* Non-correctable Non-fatal errors */ 343 if (state == pci_channel_io_normal) { 344 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 345 cleanup_aer_uncorrect_error_status(oct->pci_dev); 346 return PCI_ERS_RESULT_CAN_RECOVER; 347 } 348 349 /* Non-correctable Fatal errors */ 350 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 351 stop_pci_io(oct); 352 353 /* Always return a DISCONNECT. There is no support for recovery but only 354 * for a clean shutdown. 355 */ 356 return PCI_ERS_RESULT_DISCONNECT; 357 } 358 359 /** 360 * liquidio_pcie_mmio_enabled - mmio handler 361 * @pdev: Pointer to PCI device 362 */ 363 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev) 364 { 365 /* We should never hit this since we never ask for a reset for a Fatal 366 * Error. We always return DISCONNECT in io_error above. 367 * But play safe and return RECOVERED for now. 368 */ 369 return PCI_ERS_RESULT_RECOVERED; 370 } 371 372 /** 373 * liquidio_pcie_slot_reset - called after the pci bus has been reset. 374 * @pdev: Pointer to PCI device 375 * 376 * Restart the card from scratch, as if from a cold-boot. Implementation 377 * resembles the first-half of the octeon_resume routine. 378 */ 379 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev) 380 { 381 /* We should never hit this since we never ask for a reset for a Fatal 382 * Error. We always return DISCONNECT in io_error above. 383 * But play safe and return RECOVERED for now. 384 */ 385 return PCI_ERS_RESULT_RECOVERED; 386 } 387 388 /** 389 * liquidio_pcie_resume - called when traffic can start flowing again. 390 * @pdev: Pointer to PCI device 391 * 392 * This callback is called when the error recovery driver tells us that 393 * its OK to resume normal operation. Implementation resembles the 394 * second-half of the octeon_resume routine. 395 */ 396 static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev) 397 { 398 /* Nothing to be done here. */ 399 } 400 401 #define liquidio_suspend NULL 402 #define liquidio_resume NULL 403 404 /* For PCI-E Advanced Error Recovery (AER) Interface */ 405 static const struct pci_error_handlers liquidio_err_handler = { 406 .error_detected = liquidio_pcie_error_detected, 407 .mmio_enabled = liquidio_pcie_mmio_enabled, 408 .slot_reset = liquidio_pcie_slot_reset, 409 .resume = liquidio_pcie_resume, 410 }; 411 412 static const struct pci_device_id liquidio_pci_tbl[] = { 413 { /* 68xx */ 414 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 415 }, 416 { /* 66xx */ 417 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 418 }, 419 { /* 23xx pf */ 420 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 421 }, 422 { 423 0, 0, 0, 0, 0, 0, 0 424 } 425 }; 426 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); 427 428 static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume); 429 430 static struct pci_driver liquidio_pci_driver = { 431 .name = "LiquidIO", 432 .id_table = liquidio_pci_tbl, 433 .probe = liquidio_probe, 434 .remove = liquidio_remove, 435 .err_handler = &liquidio_err_handler, /* For AER */ 436 .driver.pm = &liquidio_pm_ops, 437 #ifdef CONFIG_PCI_IOV 438 .sriov_configure = liquidio_enable_sriov, 439 #endif 440 }; 441 442 /** 443 * liquidio_init_pci - register PCI driver 444 */ 445 static int liquidio_init_pci(void) 446 { 447 return pci_register_driver(&liquidio_pci_driver); 448 } 449 450 /** 451 * liquidio_deinit_pci - unregister PCI driver 452 */ 453 static void liquidio_deinit_pci(void) 454 { 455 pci_unregister_driver(&liquidio_pci_driver); 456 } 457 458 /** 459 * check_txq_status - Check Tx queue status, and take appropriate action 460 * @lio: per-network private data 461 * Return: 0 if full, number of queues woken up otherwise 462 */ 463 static inline int check_txq_status(struct lio *lio) 464 { 465 int numqs = lio->netdev->real_num_tx_queues; 466 int ret_val = 0; 467 int q, iq; 468 469 /* check each sub-queue state */ 470 for (q = 0; q < numqs; q++) { 471 iq = lio->linfo.txpciq[q % 472 lio->oct_dev->num_iqs].s.q_no; 473 if (octnet_iq_is_full(lio->oct_dev, iq)) 474 continue; 475 if (__netif_subqueue_stopped(lio->netdev, q)) { 476 netif_wake_subqueue(lio->netdev, q); 477 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, 478 tx_restart, 1); 479 ret_val++; 480 } 481 } 482 483 return ret_val; 484 } 485 486 /** 487 * print_link_info - Print link information 488 * @netdev: network device 489 */ 490 static void print_link_info(struct net_device *netdev) 491 { 492 struct lio *lio = GET_LIO(netdev); 493 494 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 495 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 496 struct oct_link_info *linfo = &lio->linfo; 497 498 if (linfo->link.s.link_up) { 499 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 500 linfo->link.s.speed, 501 (linfo->link.s.duplex) ? "Full" : "Half"); 502 } else { 503 netif_info(lio, link, lio->netdev, "Link Down\n"); 504 } 505 } 506 } 507 508 /** 509 * octnet_link_status_change - Routine to notify MTU change 510 * @work: work_struct data structure 511 */ 512 static void octnet_link_status_change(struct work_struct *work) 513 { 514 struct cavium_wk *wk = (struct cavium_wk *)work; 515 struct lio *lio = (struct lio *)wk->ctxptr; 516 517 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 518 * this API is invoked only when new max-MTU of the interface is 519 * less than current MTU. 520 */ 521 rtnl_lock(); 522 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 523 rtnl_unlock(); 524 } 525 526 /** 527 * setup_link_status_change_wq - Sets up the mtu status change work 528 * @netdev: network device 529 */ 530 static inline int setup_link_status_change_wq(struct net_device *netdev) 531 { 532 struct lio *lio = GET_LIO(netdev); 533 struct octeon_device *oct = lio->oct_dev; 534 535 lio->link_status_wq.wq = alloc_workqueue("link-status", 536 WQ_MEM_RECLAIM, 0); 537 if (!lio->link_status_wq.wq) { 538 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 539 return -1; 540 } 541 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 542 octnet_link_status_change); 543 lio->link_status_wq.wk.ctxptr = lio; 544 545 return 0; 546 } 547 548 static inline void cleanup_link_status_change_wq(struct net_device *netdev) 549 { 550 struct lio *lio = GET_LIO(netdev); 551 552 if (lio->link_status_wq.wq) { 553 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 554 destroy_workqueue(lio->link_status_wq.wq); 555 } 556 } 557 558 /** 559 * update_link_status - Update link status 560 * @netdev: network device 561 * @ls: link status structure 562 * 563 * Called on receipt of a link status response from the core application to 564 * update each interface's link status. 565 */ 566 static inline void update_link_status(struct net_device *netdev, 567 union oct_link_status *ls) 568 { 569 struct lio *lio = GET_LIO(netdev); 570 int changed = (lio->linfo.link.u64 != ls->u64); 571 int current_max_mtu = lio->linfo.link.s.mtu; 572 struct octeon_device *oct = lio->oct_dev; 573 574 dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n", 575 __func__, lio->linfo.link.u64, ls->u64); 576 lio->linfo.link.u64 = ls->u64; 577 578 if ((lio->intf_open) && (changed)) { 579 print_link_info(netdev); 580 lio->link_changes++; 581 582 if (lio->linfo.link.s.link_up) { 583 dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__); 584 netif_carrier_on(netdev); 585 wake_txqs(netdev); 586 } else { 587 dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__); 588 netif_carrier_off(netdev); 589 stop_txqs(netdev); 590 } 591 if (lio->linfo.link.s.mtu != current_max_mtu) { 592 netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n", 593 current_max_mtu, lio->linfo.link.s.mtu); 594 netdev->max_mtu = lio->linfo.link.s.mtu; 595 } 596 if (lio->linfo.link.s.mtu < netdev->mtu) { 597 dev_warn(&oct->pci_dev->dev, 598 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 599 netdev->mtu, lio->linfo.link.s.mtu); 600 queue_delayed_work(lio->link_status_wq.wq, 601 &lio->link_status_wq.wk.work, 0); 602 } 603 } 604 } 605 606 /** 607 * lio_sync_octeon_time - send latest localtime to octeon firmware so that 608 * firmware will correct it's time, in case there is a time skew 609 * 610 * @work: work scheduled to send time update to octeon firmware 611 **/ 612 static void lio_sync_octeon_time(struct work_struct *work) 613 { 614 struct cavium_wk *wk = (struct cavium_wk *)work; 615 struct lio *lio = (struct lio *)wk->ctxptr; 616 struct octeon_device *oct = lio->oct_dev; 617 struct octeon_soft_command *sc; 618 struct timespec64 ts; 619 struct lio_time *lt; 620 int ret; 621 622 sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0); 623 if (!sc) { 624 dev_err(&oct->pci_dev->dev, 625 "Failed to sync time to octeon: soft command allocation failed\n"); 626 return; 627 } 628 629 lt = (struct lio_time *)sc->virtdptr; 630 631 /* Get time of the day */ 632 ktime_get_real_ts64(&ts); 633 lt->sec = ts.tv_sec; 634 lt->nsec = ts.tv_nsec; 635 octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); 636 637 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 638 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 639 OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); 640 641 init_completion(&sc->complete); 642 sc->sc_status = OCTEON_REQUEST_PENDING; 643 644 ret = octeon_send_soft_command(oct, sc); 645 if (ret == IQ_SEND_FAILED) { 646 dev_err(&oct->pci_dev->dev, 647 "Failed to sync time to octeon: failed to send soft command\n"); 648 octeon_free_soft_command(oct, sc); 649 } else { 650 WRITE_ONCE(sc->caller_is_done, true); 651 } 652 653 queue_delayed_work(lio->sync_octeon_time_wq.wq, 654 &lio->sync_octeon_time_wq.wk.work, 655 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 656 } 657 658 /** 659 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware 660 * 661 * @netdev: network device which should send time update to firmware 662 **/ 663 static inline int setup_sync_octeon_time_wq(struct net_device *netdev) 664 { 665 struct lio *lio = GET_LIO(netdev); 666 struct octeon_device *oct = lio->oct_dev; 667 668 lio->sync_octeon_time_wq.wq = 669 alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0); 670 if (!lio->sync_octeon_time_wq.wq) { 671 dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n"); 672 return -1; 673 } 674 INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, 675 lio_sync_octeon_time); 676 lio->sync_octeon_time_wq.wk.ctxptr = lio; 677 queue_delayed_work(lio->sync_octeon_time_wq.wq, 678 &lio->sync_octeon_time_wq.wk.work, 679 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 680 681 return 0; 682 } 683 684 /** 685 * cleanup_sync_octeon_time_wq - destroy wq 686 * 687 * @netdev: network device which should send time update to firmware 688 * 689 * Stop scheduling and destroy the work created to periodically update local 690 * time to octeon firmware. 691 **/ 692 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev) 693 { 694 struct lio *lio = GET_LIO(netdev); 695 struct cavium_wq *time_wq = &lio->sync_octeon_time_wq; 696 697 if (time_wq->wq) { 698 cancel_delayed_work_sync(&time_wq->wk.work); 699 destroy_workqueue(time_wq->wq); 700 } 701 } 702 703 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) 704 { 705 struct octeon_device *other_oct; 706 707 other_oct = lio_get_device(oct->octeon_id + 1); 708 709 if (other_oct && other_oct->pci_dev) { 710 int oct_busnum, other_oct_busnum; 711 712 oct_busnum = oct->pci_dev->bus->number; 713 other_oct_busnum = other_oct->pci_dev->bus->number; 714 715 if (oct_busnum == other_oct_busnum) { 716 int oct_slot, other_oct_slot; 717 718 oct_slot = PCI_SLOT(oct->pci_dev->devfn); 719 other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn); 720 721 if (oct_slot == other_oct_slot) 722 return other_oct; 723 } 724 } 725 726 return NULL; 727 } 728 729 static void disable_all_vf_links(struct octeon_device *oct) 730 { 731 struct net_device *netdev; 732 int max_vfs, vf, i; 733 734 if (!oct) 735 return; 736 737 max_vfs = oct->sriov_info.max_vfs; 738 739 for (i = 0; i < oct->ifcount; i++) { 740 netdev = oct->props[i].netdev; 741 if (!netdev) 742 continue; 743 744 for (vf = 0; vf < max_vfs; vf++) 745 liquidio_set_vf_link_state(netdev, vf, 746 IFLA_VF_LINK_STATE_DISABLE); 747 } 748 } 749 750 static int liquidio_watchdog(void *param) 751 { 752 bool err_msg_was_printed[LIO_MAX_CORES]; 753 u16 mask_of_crashed_or_stuck_cores = 0; 754 bool all_vf_links_are_disabled = false; 755 struct octeon_device *oct = param; 756 struct octeon_device *other_oct; 757 #ifdef CONFIG_MODULE_UNLOAD 758 long refcount, vfs_referencing_pf; 759 u64 vfs_mask1, vfs_mask2; 760 #endif 761 int core; 762 763 memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed)); 764 765 while (!kthread_should_stop()) { 766 /* sleep for a couple of seconds so that we don't hog the CPU */ 767 set_current_state(TASK_INTERRUPTIBLE); 768 schedule_timeout(msecs_to_jiffies(2000)); 769 770 mask_of_crashed_or_stuck_cores = 771 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); 772 773 if (!mask_of_crashed_or_stuck_cores) 774 continue; 775 776 WRITE_ONCE(oct->cores_crashed, true); 777 other_oct = get_other_octeon_device(oct); 778 if (other_oct) 779 WRITE_ONCE(other_oct->cores_crashed, true); 780 781 for (core = 0; core < LIO_MAX_CORES; core++) { 782 bool core_crashed_or_got_stuck; 783 784 core_crashed_or_got_stuck = 785 (mask_of_crashed_or_stuck_cores 786 >> core) & 1; 787 788 if (core_crashed_or_got_stuck && 789 !err_msg_was_printed[core]) { 790 dev_err(&oct->pci_dev->dev, 791 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n", 792 core); 793 err_msg_was_printed[core] = true; 794 } 795 } 796 797 if (all_vf_links_are_disabled) 798 continue; 799 800 disable_all_vf_links(oct); 801 disable_all_vf_links(other_oct); 802 all_vf_links_are_disabled = true; 803 804 #ifdef CONFIG_MODULE_UNLOAD 805 vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask); 806 vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask); 807 808 vfs_referencing_pf = hweight64(vfs_mask1); 809 vfs_referencing_pf += hweight64(vfs_mask2); 810 811 refcount = module_refcount(THIS_MODULE); 812 if (refcount >= vfs_referencing_pf) { 813 while (vfs_referencing_pf) { 814 module_put(THIS_MODULE); 815 vfs_referencing_pf--; 816 } 817 } 818 #endif 819 } 820 821 return 0; 822 } 823 824 /** 825 * liquidio_probe - PCI probe handler 826 * @pdev: PCI device structure 827 * @ent: unused 828 */ 829 static int 830 liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent) 831 { 832 struct octeon_device *oct_dev = NULL; 833 struct handshake *hs; 834 835 oct_dev = octeon_allocate_device(pdev->device, 836 sizeof(struct octeon_device_priv)); 837 if (!oct_dev) { 838 dev_err(&pdev->dev, "Unable to allocate device\n"); 839 return -ENOMEM; 840 } 841 842 if (pdev->device == OCTEON_CN23XX_PF_VID) 843 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 844 845 /* Enable PTP for 6XXX Device */ 846 if (((pdev->device == OCTEON_CN66XX) || 847 (pdev->device == OCTEON_CN68XX))) 848 oct_dev->ptp_enable = true; 849 else 850 oct_dev->ptp_enable = false; 851 852 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 853 (u32)pdev->vendor, (u32)pdev->device); 854 855 /* Assign octeon_device for this device to the private data area. */ 856 pci_set_drvdata(pdev, oct_dev); 857 858 /* set linux specific device pointer */ 859 oct_dev->pci_dev = (void *)pdev; 860 861 oct_dev->subsystem_id = pdev->subsystem_vendor | 862 (pdev->subsystem_device << 16); 863 864 hs = &handshake[oct_dev->octeon_id]; 865 init_completion(&hs->init); 866 init_completion(&hs->started); 867 hs->pci_dev = pdev; 868 869 if (oct_dev->octeon_id == 0) 870 /* first LiquidIO NIC is detected */ 871 complete(&first_stage); 872 873 if (octeon_device_init(oct_dev)) { 874 complete(&hs->init); 875 liquidio_remove(pdev); 876 return -ENOMEM; 877 } 878 879 if (OCTEON_CN23XX_PF(oct_dev)) { 880 u8 bus, device, function; 881 882 if (atomic_read(oct_dev->adapter_refcount) == 1) { 883 /* Each NIC gets one watchdog kernel thread. The first 884 * PF (of each NIC) that gets pci_driver->probe()'d 885 * creates that thread. 886 */ 887 bus = pdev->bus->number; 888 device = PCI_SLOT(pdev->devfn); 889 function = PCI_FUNC(pdev->devfn); 890 oct_dev->watchdog_task = kthread_run(liquidio_watchdog, 891 oct_dev, 892 "liowd/%02hhx:%02hhx.%hhx", 893 bus, device, function); 894 if (IS_ERR(oct_dev->watchdog_task)) { 895 oct_dev->watchdog_task = NULL; 896 dev_err(&oct_dev->pci_dev->dev, 897 "failed to create kernel_thread\n"); 898 liquidio_remove(pdev); 899 return -1; 900 } 901 } 902 } 903 904 oct_dev->rx_pause = 1; 905 oct_dev->tx_pause = 1; 906 907 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 908 909 return 0; 910 } 911 912 static bool fw_type_is_auto(void) 913 { 914 return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO, 915 sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0; 916 } 917 918 /** 919 * octeon_pci_flr - PCI FLR for each Octeon device. 920 * @oct: octeon device 921 */ 922 static void octeon_pci_flr(struct octeon_device *oct) 923 { 924 int rc; 925 926 pci_save_state(oct->pci_dev); 927 928 pci_cfg_access_lock(oct->pci_dev); 929 930 /* Quiesce the device completely */ 931 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 932 PCI_COMMAND_INTX_DISABLE); 933 934 rc = __pci_reset_function_locked(oct->pci_dev); 935 936 if (rc != 0) 937 dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n", 938 rc, oct->pf_num); 939 940 pci_cfg_access_unlock(oct->pci_dev); 941 942 pci_restore_state(oct->pci_dev); 943 } 944 945 /** 946 * octeon_destroy_resources - Destroy resources associated with octeon device 947 * @oct: octeon device 948 */ 949 static void octeon_destroy_resources(struct octeon_device *oct) 950 { 951 int i, refcount; 952 struct msix_entry *msix_entries; 953 struct octeon_device_priv *oct_priv = 954 (struct octeon_device_priv *)oct->priv; 955 956 struct handshake *hs; 957 958 switch (atomic_read(&oct->status)) { 959 case OCT_DEV_RUNNING: 960 case OCT_DEV_CORE_OK: 961 962 /* No more instructions will be forwarded. */ 963 atomic_set(&oct->status, OCT_DEV_IN_RESET); 964 965 oct->app_mode = CVM_DRV_INVALID_APP; 966 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 967 lio_get_state_string(&oct->status)); 968 969 schedule_timeout_uninterruptible(HZ / 10); 970 971 fallthrough; 972 case OCT_DEV_HOST_OK: 973 974 case OCT_DEV_CONSOLE_INIT_DONE: 975 /* Remove any consoles */ 976 octeon_remove_consoles(oct); 977 978 fallthrough; 979 case OCT_DEV_IO_QUEUES_DONE: 980 if (lio_wait_for_instr_fetch(oct)) 981 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 982 983 if (wait_for_pending_requests(oct)) 984 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 985 986 /* Disable the input and output queues now. No more packets will 987 * arrive from Octeon, but we should wait for all packet 988 * processing to finish. 989 */ 990 oct->fn_list.disable_io_queues(oct); 991 992 if (lio_wait_for_oq_pkts(oct)) 993 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 994 995 /* Force all requests waiting to be fetched by OCTEON to 996 * complete. 997 */ 998 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 999 struct octeon_instr_queue *iq; 1000 1001 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1002 continue; 1003 iq = oct->instr_queue[i]; 1004 1005 if (atomic_read(&iq->instr_pending)) { 1006 spin_lock_bh(&iq->lock); 1007 iq->fill_cnt = 0; 1008 iq->octeon_read_index = iq->host_write_index; 1009 iq->stats.instr_processed += 1010 atomic_read(&iq->instr_pending); 1011 lio_process_iq_request_list(oct, iq, 0); 1012 spin_unlock_bh(&iq->lock); 1013 } 1014 } 1015 1016 lio_process_ordered_list(oct, 1); 1017 octeon_free_sc_done_list(oct); 1018 octeon_free_sc_zombie_list(oct); 1019 1020 fallthrough; 1021 case OCT_DEV_INTR_SET_DONE: 1022 /* Disable interrupts */ 1023 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 1024 1025 if (oct->msix_on) { 1026 msix_entries = (struct msix_entry *)oct->msix_entries; 1027 for (i = 0; i < oct->num_msix_irqs - 1; i++) { 1028 if (oct->ioq_vector[i].vector) { 1029 /* clear the affinity_cpumask */ 1030 irq_set_affinity_hint( 1031 msix_entries[i].vector, 1032 NULL); 1033 free_irq(msix_entries[i].vector, 1034 &oct->ioq_vector[i]); 1035 oct->ioq_vector[i].vector = 0; 1036 } 1037 } 1038 /* non-iov vector's argument is oct struct */ 1039 free_irq(msix_entries[i].vector, oct); 1040 1041 pci_disable_msix(oct->pci_dev); 1042 kfree(oct->msix_entries); 1043 oct->msix_entries = NULL; 1044 } else { 1045 /* Release the interrupt line */ 1046 free_irq(oct->pci_dev->irq, oct); 1047 1048 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1049 pci_disable_msi(oct->pci_dev); 1050 } 1051 1052 kfree(oct->irq_name_storage); 1053 oct->irq_name_storage = NULL; 1054 1055 fallthrough; 1056 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 1057 if (OCTEON_CN23XX_PF(oct)) 1058 octeon_free_ioq_vector(oct); 1059 1060 fallthrough; 1061 case OCT_DEV_MBOX_SETUP_DONE: 1062 if (OCTEON_CN23XX_PF(oct)) 1063 oct->fn_list.free_mbox(oct); 1064 1065 fallthrough; 1066 case OCT_DEV_IN_RESET: 1067 case OCT_DEV_DROQ_INIT_DONE: 1068 /* Wait for any pending operations */ 1069 mdelay(100); 1070 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1071 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1072 continue; 1073 octeon_delete_droq(oct, i); 1074 } 1075 1076 /* Force any pending handshakes to complete */ 1077 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 1078 hs = &handshake[i]; 1079 1080 if (hs->pci_dev) { 1081 handshake[oct->octeon_id].init_ok = 0; 1082 complete(&handshake[oct->octeon_id].init); 1083 handshake[oct->octeon_id].started_ok = 0; 1084 complete(&handshake[oct->octeon_id].started); 1085 } 1086 } 1087 1088 fallthrough; 1089 case OCT_DEV_RESP_LIST_INIT_DONE: 1090 octeon_delete_response_list(oct); 1091 1092 fallthrough; 1093 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1094 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1095 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1096 continue; 1097 octeon_delete_instr_queue(oct, i); 1098 } 1099 #ifdef CONFIG_PCI_IOV 1100 if (oct->sriov_info.sriov_enabled) 1101 pci_disable_sriov(oct->pci_dev); 1102 #endif 1103 fallthrough; 1104 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 1105 octeon_free_sc_buffer_pool(oct); 1106 1107 fallthrough; 1108 case OCT_DEV_DISPATCH_INIT_DONE: 1109 octeon_delete_dispatch_list(oct); 1110 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1111 1112 fallthrough; 1113 case OCT_DEV_PCI_MAP_DONE: 1114 refcount = octeon_deregister_device(oct); 1115 1116 /* Soft reset the octeon device before exiting. 1117 * However, if fw was loaded from card (i.e. autoboot), 1118 * perform an FLR instead. 1119 * Implementation note: only soft-reset the device 1120 * if it is a CN6XXX OR the LAST CN23XX device. 1121 */ 1122 if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED) 1123 octeon_pci_flr(oct); 1124 else if (OCTEON_CN6XXX(oct) || !refcount) 1125 oct->fn_list.soft_reset(oct); 1126 1127 octeon_unmap_pci_barx(oct, 0); 1128 octeon_unmap_pci_barx(oct, 1); 1129 1130 fallthrough; 1131 case OCT_DEV_PCI_ENABLE_DONE: 1132 /* Disable the device, releasing the PCI INT */ 1133 pci_disable_device(oct->pci_dev); 1134 1135 fallthrough; 1136 case OCT_DEV_BEGIN_STATE: 1137 /* Nothing to be done here either */ 1138 break; 1139 } /* end switch (oct->status) */ 1140 1141 tasklet_kill(&oct_priv->droq_tasklet); 1142 } 1143 1144 /** 1145 * send_rx_ctrl_cmd - Send Rx control command 1146 * @lio: per-network private data 1147 * @start_stop: whether to start or stop 1148 */ 1149 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1150 { 1151 struct octeon_soft_command *sc; 1152 union octnet_cmd *ncmd; 1153 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1154 int retval; 1155 1156 if (oct->props[lio->ifidx].rx_on == start_stop) 1157 return 0; 1158 1159 sc = (struct octeon_soft_command *) 1160 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1161 16, 0); 1162 if (!sc) { 1163 netif_info(lio, rx_err, lio->netdev, 1164 "Failed to allocate octeon_soft_command struct\n"); 1165 return -ENOMEM; 1166 } 1167 1168 ncmd = (union octnet_cmd *)sc->virtdptr; 1169 1170 ncmd->u64 = 0; 1171 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1172 ncmd->s.param1 = start_stop; 1173 1174 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1175 1176 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1177 1178 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1179 OPCODE_NIC_CMD, 0, 0, 0); 1180 1181 init_completion(&sc->complete); 1182 sc->sc_status = OCTEON_REQUEST_PENDING; 1183 1184 retval = octeon_send_soft_command(oct, sc); 1185 if (retval == IQ_SEND_FAILED) { 1186 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1187 octeon_free_soft_command(oct, sc); 1188 } else { 1189 /* Sleep on a wait queue till the cond flag indicates that the 1190 * response arrived or timed-out. 1191 */ 1192 retval = wait_for_sc_completion_timeout(oct, sc, 0); 1193 if (retval) 1194 return retval; 1195 1196 oct->props[lio->ifidx].rx_on = start_stop; 1197 WRITE_ONCE(sc->caller_is_done, true); 1198 } 1199 1200 return retval; 1201 } 1202 1203 /** 1204 * liquidio_destroy_nic_device - Destroy NIC device interface 1205 * @oct: octeon device 1206 * @ifidx: which interface to destroy 1207 * 1208 * Cleanup associated with each interface for an Octeon device when NIC 1209 * module is being unloaded or if initialization fails during load. 1210 */ 1211 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1212 { 1213 struct net_device *netdev = oct->props[ifidx].netdev; 1214 struct octeon_device_priv *oct_priv = 1215 (struct octeon_device_priv *)oct->priv; 1216 struct napi_struct *napi, *n; 1217 struct lio *lio; 1218 1219 if (!netdev) { 1220 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1221 __func__, ifidx); 1222 return; 1223 } 1224 1225 lio = GET_LIO(netdev); 1226 1227 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1228 1229 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1230 liquidio_stop(netdev); 1231 1232 if (oct->props[lio->ifidx].napi_enabled == 1) { 1233 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1234 napi_disable(napi); 1235 1236 oct->props[lio->ifidx].napi_enabled = 0; 1237 1238 if (OCTEON_CN23XX_PF(oct)) 1239 oct->droq[0]->ops.poll_mode = 0; 1240 } 1241 1242 /* Delete NAPI */ 1243 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1244 netif_napi_del(napi); 1245 1246 tasklet_enable(&oct_priv->droq_tasklet); 1247 1248 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1249 unregister_netdev(netdev); 1250 1251 cleanup_sync_octeon_time_wq(netdev); 1252 cleanup_link_status_change_wq(netdev); 1253 1254 cleanup_rx_oom_poll_fn(netdev); 1255 1256 lio_delete_glists(lio); 1257 1258 free_netdev(netdev); 1259 1260 oct->props[ifidx].gmxport = -1; 1261 1262 oct->props[ifidx].netdev = NULL; 1263 } 1264 1265 /** 1266 * liquidio_stop_nic_module - Stop complete NIC functionality 1267 * @oct: octeon device 1268 */ 1269 static int liquidio_stop_nic_module(struct octeon_device *oct) 1270 { 1271 int i, j; 1272 struct lio *lio; 1273 1274 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1275 device_lock(&oct->pci_dev->dev); 1276 if (oct->devlink) { 1277 devlink_unregister(oct->devlink); 1278 devlink_free(oct->devlink); 1279 oct->devlink = NULL; 1280 } 1281 device_unlock(&oct->pci_dev->dev); 1282 1283 if (!oct->ifcount) { 1284 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1285 return 1; 1286 } 1287 1288 spin_lock_bh(&oct->cmd_resp_wqlock); 1289 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1290 spin_unlock_bh(&oct->cmd_resp_wqlock); 1291 1292 lio_vf_rep_destroy(oct); 1293 1294 for (i = 0; i < oct->ifcount; i++) { 1295 lio = GET_LIO(oct->props[i].netdev); 1296 for (j = 0; j < oct->num_oqs; j++) 1297 octeon_unregister_droq_ops(oct, 1298 lio->linfo.rxpciq[j].s.q_no); 1299 } 1300 1301 for (i = 0; i < oct->ifcount; i++) 1302 liquidio_destroy_nic_device(oct, i); 1303 1304 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1305 return 0; 1306 } 1307 1308 /** 1309 * liquidio_remove - Cleans up resources at unload time 1310 * @pdev: PCI device structure 1311 */ 1312 static void liquidio_remove(struct pci_dev *pdev) 1313 { 1314 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1315 1316 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1317 1318 if (oct_dev->watchdog_task) 1319 kthread_stop(oct_dev->watchdog_task); 1320 1321 if (!oct_dev->octeon_id && 1322 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) 1323 lio_vf_rep_modexit(); 1324 1325 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1326 liquidio_stop_nic_module(oct_dev); 1327 1328 /* Reset the octeon device and cleanup all memory allocated for 1329 * the octeon device by driver. 1330 */ 1331 octeon_destroy_resources(oct_dev); 1332 1333 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1334 1335 /* This octeon device has been removed. Update the global 1336 * data structure to reflect this. Free the device structure. 1337 */ 1338 octeon_free_device_mem(oct_dev); 1339 } 1340 1341 /** 1342 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space 1343 * @oct: octeon device 1344 */ 1345 static int octeon_chip_specific_setup(struct octeon_device *oct) 1346 { 1347 u32 dev_id, rev_id; 1348 int ret = 1; 1349 1350 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1351 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1352 oct->rev_id = rev_id & 0xff; 1353 1354 switch (dev_id) { 1355 case OCTEON_CN68XX_PCIID: 1356 oct->chip_id = OCTEON_CN68XX; 1357 ret = lio_setup_cn68xx_octeon_device(oct); 1358 break; 1359 1360 case OCTEON_CN66XX_PCIID: 1361 oct->chip_id = OCTEON_CN66XX; 1362 ret = lio_setup_cn66xx_octeon_device(oct); 1363 break; 1364 1365 case OCTEON_CN23XX_PCIID_PF: 1366 oct->chip_id = OCTEON_CN23XX_PF_VID; 1367 ret = setup_cn23xx_octeon_pf_device(oct); 1368 if (ret) 1369 break; 1370 #ifdef CONFIG_PCI_IOV 1371 if (!ret) 1372 pci_sriov_set_totalvfs(oct->pci_dev, 1373 oct->sriov_info.max_vfs); 1374 #endif 1375 break; 1376 1377 default: 1378 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1379 dev_id); 1380 } 1381 1382 return ret; 1383 } 1384 1385 /** 1386 * octeon_pci_os_setup - PCI initialization for each Octeon device. 1387 * @oct: octeon device 1388 */ 1389 static int octeon_pci_os_setup(struct octeon_device *oct) 1390 { 1391 /* setup PCI stuff first */ 1392 if (pci_enable_device(oct->pci_dev)) { 1393 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1394 return 1; 1395 } 1396 1397 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1398 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1399 pci_disable_device(oct->pci_dev); 1400 return 1; 1401 } 1402 1403 /* Enable PCI DMA Master. */ 1404 pci_set_master(oct->pci_dev); 1405 1406 return 0; 1407 } 1408 1409 /** 1410 * free_netbuf - Unmap and free network buffer 1411 * @buf: buffer 1412 */ 1413 static void free_netbuf(void *buf) 1414 { 1415 struct sk_buff *skb; 1416 struct octnet_buf_free_info *finfo; 1417 struct lio *lio; 1418 1419 finfo = (struct octnet_buf_free_info *)buf; 1420 skb = finfo->skb; 1421 lio = finfo->lio; 1422 1423 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1424 DMA_TO_DEVICE); 1425 1426 tx_buffer_free(skb); 1427 } 1428 1429 /** 1430 * free_netsgbuf - Unmap and free gather buffer 1431 * @buf: buffer 1432 */ 1433 static void free_netsgbuf(void *buf) 1434 { 1435 struct octnet_buf_free_info *finfo; 1436 struct sk_buff *skb; 1437 struct lio *lio; 1438 struct octnic_gather *g; 1439 int i, frags, iq; 1440 1441 finfo = (struct octnet_buf_free_info *)buf; 1442 skb = finfo->skb; 1443 lio = finfo->lio; 1444 g = finfo->g; 1445 frags = skb_shinfo(skb)->nr_frags; 1446 1447 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1448 g->sg[0].ptr[0], (skb->len - skb->data_len), 1449 DMA_TO_DEVICE); 1450 1451 i = 1; 1452 while (frags--) { 1453 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 1454 1455 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 1456 g->sg[(i >> 2)].ptr[(i & 3)], 1457 skb_frag_size(frag), DMA_TO_DEVICE); 1458 i++; 1459 } 1460 1461 iq = skb_iq(lio->oct_dev, skb); 1462 spin_lock(&lio->glist_lock[iq]); 1463 list_add_tail(&g->list, &lio->glist[iq]); 1464 spin_unlock(&lio->glist_lock[iq]); 1465 1466 tx_buffer_free(skb); 1467 } 1468 1469 /** 1470 * free_netsgbuf_with_resp - Unmap and free gather buffer with response 1471 * @buf: buffer 1472 */ 1473 static void free_netsgbuf_with_resp(void *buf) 1474 { 1475 struct octeon_soft_command *sc; 1476 struct octnet_buf_free_info *finfo; 1477 struct sk_buff *skb; 1478 struct lio *lio; 1479 struct octnic_gather *g; 1480 int i, frags, iq; 1481 1482 sc = (struct octeon_soft_command *)buf; 1483 skb = (struct sk_buff *)sc->callback_arg; 1484 finfo = (struct octnet_buf_free_info *)&skb->cb; 1485 1486 lio = finfo->lio; 1487 g = finfo->g; 1488 frags = skb_shinfo(skb)->nr_frags; 1489 1490 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1491 g->sg[0].ptr[0], (skb->len - skb->data_len), 1492 DMA_TO_DEVICE); 1493 1494 i = 1; 1495 while (frags--) { 1496 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 1497 1498 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 1499 g->sg[(i >> 2)].ptr[(i & 3)], 1500 skb_frag_size(frag), DMA_TO_DEVICE); 1501 i++; 1502 } 1503 1504 iq = skb_iq(lio->oct_dev, skb); 1505 1506 spin_lock(&lio->glist_lock[iq]); 1507 list_add_tail(&g->list, &lio->glist[iq]); 1508 spin_unlock(&lio->glist_lock[iq]); 1509 1510 /* Don't free the skb yet */ 1511 } 1512 1513 /** 1514 * liquidio_ptp_adjfine - Adjust ptp frequency 1515 * @ptp: PTP clock info 1516 * @scaled_ppm: how much to adjust by, in scaled parts-per-million 1517 * 1518 * Scaled parts per million is ppm with a 16-bit binary fractional field. 1519 */ 1520 static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 1521 { 1522 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1523 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1524 s32 ppb = scaled_ppm_to_ppb(scaled_ppm); 1525 u64 comp, delta; 1526 unsigned long flags; 1527 bool neg_adj = false; 1528 1529 if (ppb < 0) { 1530 neg_adj = true; 1531 ppb = -ppb; 1532 } 1533 1534 /* The hardware adds the clock compensation value to the 1535 * PTP clock on every coprocessor clock cycle, so we 1536 * compute the delta in terms of coprocessor clocks. 1537 */ 1538 delta = (u64)ppb << 32; 1539 do_div(delta, oct->coproc_clock_rate); 1540 1541 spin_lock_irqsave(&lio->ptp_lock, flags); 1542 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); 1543 if (neg_adj) 1544 comp -= delta; 1545 else 1546 comp += delta; 1547 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1548 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1549 1550 return 0; 1551 } 1552 1553 /** 1554 * liquidio_ptp_adjtime - Adjust ptp time 1555 * @ptp: PTP clock info 1556 * @delta: how much to adjust by, in nanosecs 1557 */ 1558 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 1559 { 1560 unsigned long flags; 1561 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1562 1563 spin_lock_irqsave(&lio->ptp_lock, flags); 1564 lio->ptp_adjust += delta; 1565 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1566 1567 return 0; 1568 } 1569 1570 /** 1571 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment 1572 * @ptp: PTP clock info 1573 * @ts: timespec 1574 */ 1575 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, 1576 struct timespec64 *ts) 1577 { 1578 u64 ns; 1579 unsigned long flags; 1580 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1581 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1582 1583 spin_lock_irqsave(&lio->ptp_lock, flags); 1584 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); 1585 ns += lio->ptp_adjust; 1586 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1587 1588 *ts = ns_to_timespec64(ns); 1589 1590 return 0; 1591 } 1592 1593 /** 1594 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment 1595 * @ptp: PTP clock info 1596 * @ts: timespec 1597 */ 1598 static int liquidio_ptp_settime(struct ptp_clock_info *ptp, 1599 const struct timespec64 *ts) 1600 { 1601 u64 ns; 1602 unsigned long flags; 1603 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1604 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1605 1606 ns = timespec64_to_ns(ts); 1607 1608 spin_lock_irqsave(&lio->ptp_lock, flags); 1609 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1610 lio->ptp_adjust = 0; 1611 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1612 1613 return 0; 1614 } 1615 1616 /** 1617 * liquidio_ptp_enable - Check if PTP is enabled 1618 * @ptp: PTP clock info 1619 * @rq: request 1620 * @on: is it on 1621 */ 1622 static int 1623 liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp, 1624 struct ptp_clock_request __maybe_unused *rq, 1625 int __maybe_unused on) 1626 { 1627 return -EOPNOTSUPP; 1628 } 1629 1630 /** 1631 * oct_ptp_open - Open PTP clock source 1632 * @netdev: network device 1633 */ 1634 static void oct_ptp_open(struct net_device *netdev) 1635 { 1636 struct lio *lio = GET_LIO(netdev); 1637 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1638 1639 spin_lock_init(&lio->ptp_lock); 1640 1641 snprintf(lio->ptp_info.name, 16, "%s", netdev->name); 1642 lio->ptp_info.owner = THIS_MODULE; 1643 lio->ptp_info.max_adj = 250000000; 1644 lio->ptp_info.n_alarm = 0; 1645 lio->ptp_info.n_ext_ts = 0; 1646 lio->ptp_info.n_per_out = 0; 1647 lio->ptp_info.pps = 0; 1648 lio->ptp_info.adjfine = liquidio_ptp_adjfine; 1649 lio->ptp_info.adjtime = liquidio_ptp_adjtime; 1650 lio->ptp_info.gettime64 = liquidio_ptp_gettime; 1651 lio->ptp_info.settime64 = liquidio_ptp_settime; 1652 lio->ptp_info.enable = liquidio_ptp_enable; 1653 1654 lio->ptp_adjust = 0; 1655 1656 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 1657 &oct->pci_dev->dev); 1658 1659 if (IS_ERR(lio->ptp_clock)) 1660 lio->ptp_clock = NULL; 1661 } 1662 1663 /** 1664 * liquidio_ptp_init - Init PTP clock 1665 * @oct: octeon device 1666 */ 1667 static void liquidio_ptp_init(struct octeon_device *oct) 1668 { 1669 u64 clock_comp, cfg; 1670 1671 clock_comp = (u64)NSEC_PER_SEC << 32; 1672 do_div(clock_comp, oct->coproc_clock_rate); 1673 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1674 1675 /* Enable */ 1676 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 1677 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 1678 } 1679 1680 /** 1681 * load_firmware - Load firmware to device 1682 * @oct: octeon device 1683 * 1684 * Maps device to firmware filename, requests firmware, and downloads it 1685 */ 1686 static int load_firmware(struct octeon_device *oct) 1687 { 1688 int ret = 0; 1689 const struct firmware *fw; 1690 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 1691 char *tmp_fw_type; 1692 1693 if (fw_type_is_auto()) { 1694 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 1695 strncpy(fw_type, tmp_fw_type, sizeof(fw_type)); 1696 } else { 1697 tmp_fw_type = fw_type; 1698 } 1699 1700 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 1701 octeon_get_conf(oct)->card_name, tmp_fw_type, 1702 LIO_FW_NAME_SUFFIX); 1703 1704 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 1705 if (ret) { 1706 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n", 1707 fw_name); 1708 release_firmware(fw); 1709 return ret; 1710 } 1711 1712 ret = octeon_download_firmware(oct, fw->data, fw->size); 1713 1714 release_firmware(fw); 1715 1716 return ret; 1717 } 1718 1719 /** 1720 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status 1721 * @work: work_struct data structure 1722 */ 1723 static void octnet_poll_check_txq_status(struct work_struct *work) 1724 { 1725 struct cavium_wk *wk = (struct cavium_wk *)work; 1726 struct lio *lio = (struct lio *)wk->ctxptr; 1727 1728 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 1729 return; 1730 1731 check_txq_status(lio); 1732 queue_delayed_work(lio->txq_status_wq.wq, 1733 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1734 } 1735 1736 /** 1737 * setup_tx_poll_fn - Sets up the txq poll check 1738 * @netdev: network device 1739 */ 1740 static inline int setup_tx_poll_fn(struct net_device *netdev) 1741 { 1742 struct lio *lio = GET_LIO(netdev); 1743 struct octeon_device *oct = lio->oct_dev; 1744 1745 lio->txq_status_wq.wq = alloc_workqueue("txq-status", 1746 WQ_MEM_RECLAIM, 0); 1747 if (!lio->txq_status_wq.wq) { 1748 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 1749 return -1; 1750 } 1751 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 1752 octnet_poll_check_txq_status); 1753 lio->txq_status_wq.wk.ctxptr = lio; 1754 queue_delayed_work(lio->txq_status_wq.wq, 1755 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1756 return 0; 1757 } 1758 1759 static inline void cleanup_tx_poll_fn(struct net_device *netdev) 1760 { 1761 struct lio *lio = GET_LIO(netdev); 1762 1763 if (lio->txq_status_wq.wq) { 1764 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 1765 destroy_workqueue(lio->txq_status_wq.wq); 1766 } 1767 } 1768 1769 /** 1770 * liquidio_open - Net device open for LiquidIO 1771 * @netdev: network device 1772 */ 1773 static int liquidio_open(struct net_device *netdev) 1774 { 1775 struct lio *lio = GET_LIO(netdev); 1776 struct octeon_device *oct = lio->oct_dev; 1777 struct octeon_device_priv *oct_priv = 1778 (struct octeon_device_priv *)oct->priv; 1779 struct napi_struct *napi, *n; 1780 int ret = 0; 1781 1782 if (oct->props[lio->ifidx].napi_enabled == 0) { 1783 tasklet_disable(&oct_priv->droq_tasklet); 1784 1785 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1786 napi_enable(napi); 1787 1788 oct->props[lio->ifidx].napi_enabled = 1; 1789 1790 if (OCTEON_CN23XX_PF(oct)) 1791 oct->droq[0]->ops.poll_mode = 1; 1792 } 1793 1794 if (oct->ptp_enable) 1795 oct_ptp_open(netdev); 1796 1797 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1798 1799 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { 1800 ret = setup_tx_poll_fn(netdev); 1801 if (ret) 1802 goto err_poll; 1803 } 1804 1805 netif_tx_start_all_queues(netdev); 1806 1807 /* Ready for link status updates */ 1808 lio->intf_open = 1; 1809 1810 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1811 1812 /* tell Octeon to start forwarding packets to host */ 1813 ret = send_rx_ctrl_cmd(lio, 1); 1814 if (ret) 1815 goto err_rx_ctrl; 1816 1817 /* start periodical statistics fetch */ 1818 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 1819 lio->stats_wk.ctxptr = lio; 1820 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 1821 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 1822 1823 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 1824 netdev->name); 1825 1826 return 0; 1827 1828 err_rx_ctrl: 1829 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) 1830 cleanup_tx_poll_fn(netdev); 1831 err_poll: 1832 if (lio->ptp_clock) { 1833 ptp_clock_unregister(lio->ptp_clock); 1834 lio->ptp_clock = NULL; 1835 } 1836 1837 if (oct->props[lio->ifidx].napi_enabled == 1) { 1838 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1839 napi_disable(napi); 1840 1841 oct->props[lio->ifidx].napi_enabled = 0; 1842 1843 if (OCTEON_CN23XX_PF(oct)) 1844 oct->droq[0]->ops.poll_mode = 0; 1845 } 1846 1847 return ret; 1848 } 1849 1850 /** 1851 * liquidio_stop - Net device stop for LiquidIO 1852 * @netdev: network device 1853 */ 1854 static int liquidio_stop(struct net_device *netdev) 1855 { 1856 struct lio *lio = GET_LIO(netdev); 1857 struct octeon_device *oct = lio->oct_dev; 1858 struct octeon_device_priv *oct_priv = 1859 (struct octeon_device_priv *)oct->priv; 1860 struct napi_struct *napi, *n; 1861 int ret = 0; 1862 1863 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1864 1865 /* Stop any link updates */ 1866 lio->intf_open = 0; 1867 1868 stop_txqs(netdev); 1869 1870 /* Inform that netif carrier is down */ 1871 netif_carrier_off(netdev); 1872 netif_tx_disable(netdev); 1873 1874 lio->linfo.link.s.link_up = 0; 1875 lio->link_changes++; 1876 1877 /* Tell Octeon that nic interface is down. */ 1878 ret = send_rx_ctrl_cmd(lio, 0); 1879 if (ret) 1880 return ret; 1881 1882 if (OCTEON_CN23XX_PF(oct)) { 1883 if (!oct->msix_on) 1884 cleanup_tx_poll_fn(netdev); 1885 } else { 1886 cleanup_tx_poll_fn(netdev); 1887 } 1888 1889 cancel_delayed_work_sync(&lio->stats_wk.work); 1890 1891 if (lio->ptp_clock) { 1892 ptp_clock_unregister(lio->ptp_clock); 1893 lio->ptp_clock = NULL; 1894 } 1895 1896 /* Wait for any pending Rx descriptors */ 1897 if (lio_wait_for_clean_oq(oct)) 1898 netif_info(lio, rx_err, lio->netdev, 1899 "Proceeding with stop interface after partial RX desc processing\n"); 1900 1901 if (oct->props[lio->ifidx].napi_enabled == 1) { 1902 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1903 napi_disable(napi); 1904 1905 oct->props[lio->ifidx].napi_enabled = 0; 1906 1907 if (OCTEON_CN23XX_PF(oct)) 1908 oct->droq[0]->ops.poll_mode = 0; 1909 1910 tasklet_enable(&oct_priv->droq_tasklet); 1911 } 1912 1913 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1914 1915 return ret; 1916 } 1917 1918 /** 1919 * get_new_flags - Converts a mask based on net device flags 1920 * @netdev: network device 1921 * 1922 * This routine generates a octnet_ifflags mask from the net device flags 1923 * received from the OS. 1924 */ 1925 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 1926 { 1927 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1928 1929 if (netdev->flags & IFF_PROMISC) 1930 f |= OCTNET_IFFLAG_PROMISC; 1931 1932 if (netdev->flags & IFF_ALLMULTI) 1933 f |= OCTNET_IFFLAG_ALLMULTI; 1934 1935 if (netdev->flags & IFF_MULTICAST) { 1936 f |= OCTNET_IFFLAG_MULTICAST; 1937 1938 /* Accept all multicast addresses if there are more than we 1939 * can handle 1940 */ 1941 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1942 f |= OCTNET_IFFLAG_ALLMULTI; 1943 } 1944 1945 if (netdev->flags & IFF_BROADCAST) 1946 f |= OCTNET_IFFLAG_BROADCAST; 1947 1948 return f; 1949 } 1950 1951 /** 1952 * liquidio_set_mcast_list - Net device set_multicast_list 1953 * @netdev: network device 1954 */ 1955 static void liquidio_set_mcast_list(struct net_device *netdev) 1956 { 1957 struct lio *lio = GET_LIO(netdev); 1958 struct octeon_device *oct = lio->oct_dev; 1959 struct octnic_ctrl_pkt nctrl; 1960 struct netdev_hw_addr *ha; 1961 u64 *mc; 1962 int ret; 1963 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1964 1965 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1966 1967 /* Create a ctrl pkt command to be sent to core app. */ 1968 nctrl.ncmd.u64 = 0; 1969 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1970 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1971 nctrl.ncmd.s.param2 = mc_count; 1972 nctrl.ncmd.s.more = mc_count; 1973 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1974 nctrl.netpndev = (u64)netdev; 1975 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1976 1977 /* copy all the addresses into the udd */ 1978 mc = &nctrl.udd[0]; 1979 netdev_for_each_mc_addr(ha, netdev) { 1980 *mc = 0; 1981 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 1982 /* no need to swap bytes */ 1983 1984 if (++mc > &nctrl.udd[mc_count]) 1985 break; 1986 } 1987 1988 /* Apparently, any activity in this call from the kernel has to 1989 * be atomic. So we won't wait for response. 1990 */ 1991 1992 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1993 if (ret) { 1994 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1995 ret); 1996 } 1997 } 1998 1999 /** 2000 * liquidio_set_mac - Net device set_mac_address 2001 * @netdev: network device 2002 * @p: pointer to sockaddr 2003 */ 2004 static int liquidio_set_mac(struct net_device *netdev, void *p) 2005 { 2006 int ret = 0; 2007 struct lio *lio = GET_LIO(netdev); 2008 struct octeon_device *oct = lio->oct_dev; 2009 struct sockaddr *addr = (struct sockaddr *)p; 2010 struct octnic_ctrl_pkt nctrl; 2011 2012 if (!is_valid_ether_addr(addr->sa_data)) 2013 return -EADDRNOTAVAIL; 2014 2015 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2016 2017 nctrl.ncmd.u64 = 0; 2018 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2019 nctrl.ncmd.s.param1 = 0; 2020 nctrl.ncmd.s.more = 1; 2021 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2022 nctrl.netpndev = (u64)netdev; 2023 2024 nctrl.udd[0] = 0; 2025 /* The MAC Address is presented in network byte order. */ 2026 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2027 2028 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2029 if (ret < 0) { 2030 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2031 return -ENOMEM; 2032 } 2033 2034 if (nctrl.sc_status) { 2035 dev_err(&oct->pci_dev->dev, 2036 "%s: MAC Address change failed. sc return=%x\n", 2037 __func__, nctrl.sc_status); 2038 return -EIO; 2039 } 2040 2041 eth_hw_addr_set(netdev, addr->sa_data); 2042 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2043 2044 return 0; 2045 } 2046 2047 static void 2048 liquidio_get_stats64(struct net_device *netdev, 2049 struct rtnl_link_stats64 *lstats) 2050 { 2051 struct lio *lio = GET_LIO(netdev); 2052 struct octeon_device *oct; 2053 u64 pkts = 0, drop = 0, bytes = 0; 2054 struct oct_droq_stats *oq_stats; 2055 struct oct_iq_stats *iq_stats; 2056 int i, iq_no, oq_no; 2057 2058 oct = lio->oct_dev; 2059 2060 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 2061 return; 2062 2063 for (i = 0; i < oct->num_iqs; i++) { 2064 iq_no = lio->linfo.txpciq[i].s.q_no; 2065 iq_stats = &oct->instr_queue[iq_no]->stats; 2066 pkts += iq_stats->tx_done; 2067 drop += iq_stats->tx_dropped; 2068 bytes += iq_stats->tx_tot_bytes; 2069 } 2070 2071 lstats->tx_packets = pkts; 2072 lstats->tx_bytes = bytes; 2073 lstats->tx_dropped = drop; 2074 2075 pkts = 0; 2076 drop = 0; 2077 bytes = 0; 2078 2079 for (i = 0; i < oct->num_oqs; i++) { 2080 oq_no = lio->linfo.rxpciq[i].s.q_no; 2081 oq_stats = &oct->droq[oq_no]->stats; 2082 pkts += oq_stats->rx_pkts_received; 2083 drop += (oq_stats->rx_dropped + 2084 oq_stats->dropped_nodispatch + 2085 oq_stats->dropped_toomany + 2086 oq_stats->dropped_nomem); 2087 bytes += oq_stats->rx_bytes_received; 2088 } 2089 2090 lstats->rx_bytes = bytes; 2091 lstats->rx_packets = pkts; 2092 lstats->rx_dropped = drop; 2093 2094 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 2095 lstats->collisions = oct->link_stats.fromhost.total_collisions; 2096 2097 /* detailed rx_errors: */ 2098 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 2099 /* recved pkt with crc error */ 2100 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 2101 /* recv'd frame alignment error */ 2102 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 2103 /* recv'r fifo overrun */ 2104 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; 2105 2106 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + 2107 lstats->rx_frame_errors + lstats->rx_fifo_errors; 2108 2109 /* detailed tx_errors */ 2110 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 2111 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 2112 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; 2113 2114 lstats->tx_errors = lstats->tx_aborted_errors + 2115 lstats->tx_carrier_errors + 2116 lstats->tx_fifo_errors; 2117 } 2118 2119 /** 2120 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl 2121 * @netdev: network device 2122 * @ifr: interface request 2123 */ 2124 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2125 { 2126 struct hwtstamp_config conf; 2127 struct lio *lio = GET_LIO(netdev); 2128 2129 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2130 return -EFAULT; 2131 2132 switch (conf.tx_type) { 2133 case HWTSTAMP_TX_ON: 2134 case HWTSTAMP_TX_OFF: 2135 break; 2136 default: 2137 return -ERANGE; 2138 } 2139 2140 switch (conf.rx_filter) { 2141 case HWTSTAMP_FILTER_NONE: 2142 break; 2143 case HWTSTAMP_FILTER_ALL: 2144 case HWTSTAMP_FILTER_SOME: 2145 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2146 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2147 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2148 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2149 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2150 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2151 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2152 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2153 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2154 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2155 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2156 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2157 case HWTSTAMP_FILTER_NTP_ALL: 2158 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2159 break; 2160 default: 2161 return -ERANGE; 2162 } 2163 2164 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2165 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2166 2167 else 2168 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2169 2170 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2171 } 2172 2173 /** 2174 * liquidio_ioctl - ioctl handler 2175 * @netdev: network device 2176 * @ifr: interface request 2177 * @cmd: command 2178 */ 2179 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2180 { 2181 struct lio *lio = GET_LIO(netdev); 2182 2183 switch (cmd) { 2184 case SIOCSHWTSTAMP: 2185 if (lio->oct_dev->ptp_enable) 2186 return hwtstamp_ioctl(netdev, ifr); 2187 fallthrough; 2188 default: 2189 return -EOPNOTSUPP; 2190 } 2191 } 2192 2193 /** 2194 * handle_timestamp - handle a Tx timestamp response 2195 * @oct: octeon device 2196 * @status: response status 2197 * @buf: pointer to skb 2198 */ 2199 static void handle_timestamp(struct octeon_device *oct, 2200 u32 status, 2201 void *buf) 2202 { 2203 struct octnet_buf_free_info *finfo; 2204 struct octeon_soft_command *sc; 2205 struct oct_timestamp_resp *resp; 2206 struct lio *lio; 2207 struct sk_buff *skb = (struct sk_buff *)buf; 2208 2209 finfo = (struct octnet_buf_free_info *)skb->cb; 2210 lio = finfo->lio; 2211 sc = finfo->sc; 2212 oct = lio->oct_dev; 2213 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2214 2215 if (status != OCTEON_REQUEST_DONE) { 2216 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 2217 CVM_CAST64(status)); 2218 resp->timestamp = 0; 2219 } 2220 2221 octeon_swap_8B_data(&resp->timestamp, 1); 2222 2223 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 2224 struct skb_shared_hwtstamps ts; 2225 u64 ns = resp->timestamp; 2226 2227 netif_info(lio, tx_done, lio->netdev, 2228 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2229 skb, (unsigned long long)ns); 2230 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2231 skb_tstamp_tx(skb, &ts); 2232 } 2233 2234 octeon_free_soft_command(oct, sc); 2235 tx_buffer_free(skb); 2236 } 2237 2238 /** 2239 * send_nic_timestamp_pkt - Send a data packet that will be timestamped 2240 * @oct: octeon device 2241 * @ndata: pointer to network data 2242 * @finfo: pointer to private network data 2243 * @xmit_more: more is coming 2244 */ 2245 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 2246 struct octnic_data_pkt *ndata, 2247 struct octnet_buf_free_info *finfo, 2248 int xmit_more) 2249 { 2250 int retval; 2251 struct octeon_soft_command *sc; 2252 struct lio *lio; 2253 int ring_doorbell; 2254 u32 len; 2255 2256 lio = finfo->lio; 2257 2258 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2259 sizeof(struct oct_timestamp_resp)); 2260 finfo->sc = sc; 2261 2262 if (!sc) { 2263 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2264 return IQ_SEND_FAILED; 2265 } 2266 2267 if (ndata->reqtype == REQTYPE_NORESP_NET) 2268 ndata->reqtype = REQTYPE_RESP_NET; 2269 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2270 ndata->reqtype = REQTYPE_RESP_NET_SG; 2271 2272 sc->callback = handle_timestamp; 2273 sc->callback_arg = finfo->skb; 2274 sc->iq_no = ndata->q_no; 2275 2276 if (OCTEON_CN23XX_PF(oct)) 2277 len = (u32)((struct octeon_instr_ih3 *) 2278 (&sc->cmd.cmd3.ih3))->dlengsz; 2279 else 2280 len = (u32)((struct octeon_instr_ih2 *) 2281 (&sc->cmd.cmd2.ih2))->dlengsz; 2282 2283 ring_doorbell = !xmit_more; 2284 2285 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2286 sc, len, ndata->reqtype); 2287 2288 if (retval == IQ_SEND_FAILED) { 2289 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2290 retval); 2291 octeon_free_soft_command(oct, sc); 2292 } else { 2293 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2294 } 2295 2296 return retval; 2297 } 2298 2299 /** 2300 * liquidio_xmit - Transmit networks packets to the Octeon interface 2301 * @skb: skbuff struct to be passed to network layer. 2302 * @netdev: pointer to network device 2303 * 2304 * Return: whether the packet was transmitted to the device okay or not 2305 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2306 */ 2307 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2308 { 2309 struct lio *lio; 2310 struct octnet_buf_free_info *finfo; 2311 union octnic_cmd_setup cmdsetup; 2312 struct octnic_data_pkt ndata; 2313 struct octeon_device *oct; 2314 struct oct_iq_stats *stats; 2315 struct octeon_instr_irh *irh; 2316 union tx_info *tx_info; 2317 int status = 0; 2318 int q_idx = 0, iq_no = 0; 2319 int j, xmit_more = 0; 2320 u64 dptr = 0; 2321 u32 tag = 0; 2322 2323 lio = GET_LIO(netdev); 2324 oct = lio->oct_dev; 2325 2326 q_idx = skb_iq(oct, skb); 2327 tag = q_idx; 2328 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2329 2330 stats = &oct->instr_queue[iq_no]->stats; 2331 2332 /* Check for all conditions in which the current packet cannot be 2333 * transmitted. 2334 */ 2335 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2336 (!lio->linfo.link.s.link_up) || 2337 (skb->len <= 0)) { 2338 netif_info(lio, tx_err, lio->netdev, 2339 "Transmit failed link_status : %d\n", 2340 lio->linfo.link.s.link_up); 2341 goto lio_xmit_failed; 2342 } 2343 2344 /* Use space in skb->cb to store info used to unmap and 2345 * free the buffers. 2346 */ 2347 finfo = (struct octnet_buf_free_info *)skb->cb; 2348 finfo->lio = lio; 2349 finfo->skb = skb; 2350 finfo->sc = NULL; 2351 2352 /* Prepare the attributes for the data to be passed to OSI. */ 2353 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2354 2355 ndata.buf = (void *)finfo; 2356 2357 ndata.q_no = iq_no; 2358 2359 if (octnet_iq_is_full(oct, ndata.q_no)) { 2360 /* defer sending if queue is full */ 2361 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2362 ndata.q_no); 2363 stats->tx_iq_busy++; 2364 return NETDEV_TX_BUSY; 2365 } 2366 2367 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2368 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 2369 */ 2370 2371 ndata.datasize = skb->len; 2372 2373 cmdsetup.u64 = 0; 2374 cmdsetup.s.iq_no = iq_no; 2375 2376 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2377 if (skb->encapsulation) { 2378 cmdsetup.s.tnl_csum = 1; 2379 stats->tx_vxlan++; 2380 } else { 2381 cmdsetup.s.transport_csum = 1; 2382 } 2383 } 2384 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2385 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2386 cmdsetup.s.timestamp = 1; 2387 } 2388 2389 if (skb_shinfo(skb)->nr_frags == 0) { 2390 cmdsetup.s.u.datasize = skb->len; 2391 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2392 2393 /* Offload checksum calculation for TCP/UDP packets */ 2394 dptr = dma_map_single(&oct->pci_dev->dev, 2395 skb->data, 2396 skb->len, 2397 DMA_TO_DEVICE); 2398 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2399 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2400 __func__); 2401 stats->tx_dmamap_fail++; 2402 return NETDEV_TX_BUSY; 2403 } 2404 2405 if (OCTEON_CN23XX_PF(oct)) 2406 ndata.cmd.cmd3.dptr = dptr; 2407 else 2408 ndata.cmd.cmd2.dptr = dptr; 2409 finfo->dptr = dptr; 2410 ndata.reqtype = REQTYPE_NORESP_NET; 2411 2412 } else { 2413 int i, frags; 2414 skb_frag_t *frag; 2415 struct octnic_gather *g; 2416 2417 spin_lock(&lio->glist_lock[q_idx]); 2418 g = (struct octnic_gather *) 2419 lio_list_delete_head(&lio->glist[q_idx]); 2420 spin_unlock(&lio->glist_lock[q_idx]); 2421 2422 if (!g) { 2423 netif_info(lio, tx_err, lio->netdev, 2424 "Transmit scatter gather: glist null!\n"); 2425 goto lio_xmit_failed; 2426 } 2427 2428 cmdsetup.s.gather = 1; 2429 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2430 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2431 2432 memset(g->sg, 0, g->sg_size); 2433 2434 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2435 skb->data, 2436 (skb->len - skb->data_len), 2437 DMA_TO_DEVICE); 2438 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2439 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2440 __func__); 2441 stats->tx_dmamap_fail++; 2442 return NETDEV_TX_BUSY; 2443 } 2444 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2445 2446 frags = skb_shinfo(skb)->nr_frags; 2447 i = 1; 2448 while (frags--) { 2449 frag = &skb_shinfo(skb)->frags[i - 1]; 2450 2451 g->sg[(i >> 2)].ptr[(i & 3)] = 2452 skb_frag_dma_map(&oct->pci_dev->dev, 2453 frag, 0, skb_frag_size(frag), 2454 DMA_TO_DEVICE); 2455 2456 if (dma_mapping_error(&oct->pci_dev->dev, 2457 g->sg[i >> 2].ptr[i & 3])) { 2458 dma_unmap_single(&oct->pci_dev->dev, 2459 g->sg[0].ptr[0], 2460 skb->len - skb->data_len, 2461 DMA_TO_DEVICE); 2462 for (j = 1; j < i; j++) { 2463 frag = &skb_shinfo(skb)->frags[j - 1]; 2464 dma_unmap_page(&oct->pci_dev->dev, 2465 g->sg[j >> 2].ptr[j & 3], 2466 skb_frag_size(frag), 2467 DMA_TO_DEVICE); 2468 } 2469 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2470 __func__); 2471 return NETDEV_TX_BUSY; 2472 } 2473 2474 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 2475 (i & 3)); 2476 i++; 2477 } 2478 2479 dptr = g->sg_dma_ptr; 2480 2481 if (OCTEON_CN23XX_PF(oct)) 2482 ndata.cmd.cmd3.dptr = dptr; 2483 else 2484 ndata.cmd.cmd2.dptr = dptr; 2485 finfo->dptr = dptr; 2486 finfo->g = g; 2487 2488 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2489 } 2490 2491 if (OCTEON_CN23XX_PF(oct)) { 2492 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2493 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2494 } else { 2495 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 2496 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 2497 } 2498 2499 if (skb_shinfo(skb)->gso_size) { 2500 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2501 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2502 stats->tx_gso++; 2503 } 2504 2505 /* HW insert VLAN tag */ 2506 if (skb_vlan_tag_present(skb)) { 2507 irh->priority = skb_vlan_tag_get(skb) >> 13; 2508 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 2509 } 2510 2511 xmit_more = netdev_xmit_more(); 2512 2513 if (unlikely(cmdsetup.s.timestamp)) 2514 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2515 else 2516 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2517 if (status == IQ_SEND_FAILED) 2518 goto lio_xmit_failed; 2519 2520 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2521 2522 if (status == IQ_SEND_STOP) 2523 netif_stop_subqueue(netdev, q_idx); 2524 2525 netif_trans_update(netdev); 2526 2527 if (tx_info->s.gso_segs) 2528 stats->tx_done += tx_info->s.gso_segs; 2529 else 2530 stats->tx_done++; 2531 stats->tx_tot_bytes += ndata.datasize; 2532 2533 return NETDEV_TX_OK; 2534 2535 lio_xmit_failed: 2536 stats->tx_dropped++; 2537 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2538 iq_no, stats->tx_dropped); 2539 if (dptr) 2540 dma_unmap_single(&oct->pci_dev->dev, dptr, 2541 ndata.datasize, DMA_TO_DEVICE); 2542 2543 octeon_ring_doorbell_locked(oct, iq_no); 2544 2545 tx_buffer_free(skb); 2546 return NETDEV_TX_OK; 2547 } 2548 2549 /** 2550 * liquidio_tx_timeout - Network device Tx timeout 2551 * @netdev: pointer to network device 2552 * @txqueue: index of the hung transmit queue 2553 */ 2554 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2555 { 2556 struct lio *lio; 2557 2558 lio = GET_LIO(netdev); 2559 2560 netif_info(lio, tx_err, lio->netdev, 2561 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2562 netdev->stats.tx_dropped); 2563 netif_trans_update(netdev); 2564 wake_txqs(netdev); 2565 } 2566 2567 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 2568 __be16 proto __attribute__((unused)), 2569 u16 vid) 2570 { 2571 struct lio *lio = GET_LIO(netdev); 2572 struct octeon_device *oct = lio->oct_dev; 2573 struct octnic_ctrl_pkt nctrl; 2574 int ret = 0; 2575 2576 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2577 2578 nctrl.ncmd.u64 = 0; 2579 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2580 nctrl.ncmd.s.param1 = vid; 2581 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2582 nctrl.netpndev = (u64)netdev; 2583 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2584 2585 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2586 if (ret) { 2587 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2588 ret); 2589 if (ret > 0) 2590 ret = -EIO; 2591 } 2592 2593 return ret; 2594 } 2595 2596 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2597 __be16 proto __attribute__((unused)), 2598 u16 vid) 2599 { 2600 struct lio *lio = GET_LIO(netdev); 2601 struct octeon_device *oct = lio->oct_dev; 2602 struct octnic_ctrl_pkt nctrl; 2603 int ret = 0; 2604 2605 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2606 2607 nctrl.ncmd.u64 = 0; 2608 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2609 nctrl.ncmd.s.param1 = vid; 2610 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2611 nctrl.netpndev = (u64)netdev; 2612 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2613 2614 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2615 if (ret) { 2616 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 2617 ret); 2618 if (ret > 0) 2619 ret = -EIO; 2620 } 2621 return ret; 2622 } 2623 2624 /** 2625 * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload 2626 * @netdev: pointer to network device 2627 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL 2628 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE 2629 * Returns: SUCCESS or FAILURE 2630 */ 2631 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2632 u8 rx_cmd) 2633 { 2634 struct lio *lio = GET_LIO(netdev); 2635 struct octeon_device *oct = lio->oct_dev; 2636 struct octnic_ctrl_pkt nctrl; 2637 int ret = 0; 2638 2639 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2640 2641 nctrl.ncmd.u64 = 0; 2642 nctrl.ncmd.s.cmd = command; 2643 nctrl.ncmd.s.param1 = rx_cmd; 2644 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2645 nctrl.netpndev = (u64)netdev; 2646 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2647 2648 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2649 if (ret) { 2650 dev_err(&oct->pci_dev->dev, 2651 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 2652 ret); 2653 if (ret > 0) 2654 ret = -EIO; 2655 } 2656 return ret; 2657 } 2658 2659 /** 2660 * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware 2661 * @netdev: pointer to network device 2662 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG 2663 * @vxlan_port: VxLAN port to be added or deleted 2664 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD, 2665 * OCTNET_CMD_VXLAN_PORT_DEL 2666 * Return: SUCCESS or FAILURE 2667 */ 2668 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2669 u16 vxlan_port, u8 vxlan_cmd_bit) 2670 { 2671 struct lio *lio = GET_LIO(netdev); 2672 struct octeon_device *oct = lio->oct_dev; 2673 struct octnic_ctrl_pkt nctrl; 2674 int ret = 0; 2675 2676 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2677 2678 nctrl.ncmd.u64 = 0; 2679 nctrl.ncmd.s.cmd = command; 2680 nctrl.ncmd.s.more = vxlan_cmd_bit; 2681 nctrl.ncmd.s.param1 = vxlan_port; 2682 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2683 nctrl.netpndev = (u64)netdev; 2684 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2685 2686 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2687 if (ret) { 2688 dev_err(&oct->pci_dev->dev, 2689 "VxLAN port add/delete failed in core (ret:0x%x)\n", 2690 ret); 2691 if (ret > 0) 2692 ret = -EIO; 2693 } 2694 return ret; 2695 } 2696 2697 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 2698 unsigned int table, unsigned int entry, 2699 struct udp_tunnel_info *ti) 2700 { 2701 return liquidio_vxlan_port_command(netdev, 2702 OCTNET_CMD_VXLAN_PORT_CONFIG, 2703 htons(ti->port), 2704 OCTNET_CMD_VXLAN_PORT_ADD); 2705 } 2706 2707 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 2708 unsigned int table, 2709 unsigned int entry, 2710 struct udp_tunnel_info *ti) 2711 { 2712 return liquidio_vxlan_port_command(netdev, 2713 OCTNET_CMD_VXLAN_PORT_CONFIG, 2714 htons(ti->port), 2715 OCTNET_CMD_VXLAN_PORT_DEL); 2716 } 2717 2718 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 2719 .set_port = liquidio_udp_tunnel_set_port, 2720 .unset_port = liquidio_udp_tunnel_unset_port, 2721 .tables = { 2722 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 2723 }, 2724 }; 2725 2726 /** 2727 * liquidio_fix_features - Net device fix features 2728 * @netdev: pointer to network device 2729 * @request: features requested 2730 * Return: updated features list 2731 */ 2732 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2733 netdev_features_t request) 2734 { 2735 struct lio *lio = netdev_priv(netdev); 2736 2737 if ((request & NETIF_F_RXCSUM) && 2738 !(lio->dev_capability & NETIF_F_RXCSUM)) 2739 request &= ~NETIF_F_RXCSUM; 2740 2741 if ((request & NETIF_F_HW_CSUM) && 2742 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2743 request &= ~NETIF_F_HW_CSUM; 2744 2745 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2746 request &= ~NETIF_F_TSO; 2747 2748 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2749 request &= ~NETIF_F_TSO6; 2750 2751 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2752 request &= ~NETIF_F_LRO; 2753 2754 /*Disable LRO if RXCSUM is off */ 2755 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2756 (lio->dev_capability & NETIF_F_LRO)) 2757 request &= ~NETIF_F_LRO; 2758 2759 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && 2760 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) 2761 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2762 2763 return request; 2764 } 2765 2766 /** 2767 * liquidio_set_features - Net device set features 2768 * @netdev: pointer to network device 2769 * @features: features to enable/disable 2770 */ 2771 static int liquidio_set_features(struct net_device *netdev, 2772 netdev_features_t features) 2773 { 2774 struct lio *lio = netdev_priv(netdev); 2775 2776 if ((features & NETIF_F_LRO) && 2777 (lio->dev_capability & NETIF_F_LRO) && 2778 !(netdev->features & NETIF_F_LRO)) 2779 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2780 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2781 else if (!(features & NETIF_F_LRO) && 2782 (lio->dev_capability & NETIF_F_LRO) && 2783 (netdev->features & NETIF_F_LRO)) 2784 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2785 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2786 2787 /* Sending command to firmware to enable/disable RX checksum 2788 * offload settings using ethtool 2789 */ 2790 if (!(netdev->features & NETIF_F_RXCSUM) && 2791 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2792 (features & NETIF_F_RXCSUM)) 2793 liquidio_set_rxcsum_command(netdev, 2794 OCTNET_CMD_TNL_RX_CSUM_CTL, 2795 OCTNET_CMD_RXCSUM_ENABLE); 2796 else if ((netdev->features & NETIF_F_RXCSUM) && 2797 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2798 !(features & NETIF_F_RXCSUM)) 2799 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2800 OCTNET_CMD_RXCSUM_DISABLE); 2801 2802 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2803 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2804 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2805 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2806 OCTNET_CMD_VLAN_FILTER_ENABLE); 2807 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2808 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2809 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2810 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2811 OCTNET_CMD_VLAN_FILTER_DISABLE); 2812 2813 return 0; 2814 } 2815 2816 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 2817 u8 *mac, bool is_admin_assigned) 2818 { 2819 struct lio *lio = GET_LIO(netdev); 2820 struct octeon_device *oct = lio->oct_dev; 2821 struct octnic_ctrl_pkt nctrl; 2822 int ret = 0; 2823 2824 if (!is_valid_ether_addr(mac)) 2825 return -EINVAL; 2826 2827 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 2828 return -EINVAL; 2829 2830 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2831 2832 nctrl.ncmd.u64 = 0; 2833 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2834 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 2835 nctrl.ncmd.s.param1 = vfidx + 1; 2836 nctrl.ncmd.s.more = 1; 2837 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2838 nctrl.netpndev = (u64)netdev; 2839 if (is_admin_assigned) { 2840 nctrl.ncmd.s.param2 = true; 2841 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2842 } 2843 2844 nctrl.udd[0] = 0; 2845 /* The MAC Address is presented in network byte order. */ 2846 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 2847 2848 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 2849 2850 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2851 if (ret > 0) 2852 ret = -EIO; 2853 2854 return ret; 2855 } 2856 2857 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 2858 { 2859 struct lio *lio = GET_LIO(netdev); 2860 struct octeon_device *oct = lio->oct_dev; 2861 int retval; 2862 2863 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2864 return -EINVAL; 2865 2866 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 2867 if (!retval) 2868 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 2869 2870 return retval; 2871 } 2872 2873 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, 2874 bool enable) 2875 { 2876 struct lio *lio = GET_LIO(netdev); 2877 struct octeon_device *oct = lio->oct_dev; 2878 struct octnic_ctrl_pkt nctrl; 2879 int retval; 2880 2881 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { 2882 netif_info(lio, drv, lio->netdev, 2883 "firmware does not support spoofchk\n"); 2884 return -EOPNOTSUPP; 2885 } 2886 2887 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 2888 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 2889 return -EINVAL; 2890 } 2891 2892 if (enable) { 2893 if (oct->sriov_info.vf_spoofchk[vfidx]) 2894 return 0; 2895 } else { 2896 /* Clear */ 2897 if (!oct->sriov_info.vf_spoofchk[vfidx]) 2898 return 0; 2899 } 2900 2901 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2902 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; 2903 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; 2904 nctrl.ncmd.s.param1 = 2905 vfidx + 1; /* vfidx is 0 based, 2906 * but vf_num (param1) is 1 based 2907 */ 2908 nctrl.ncmd.s.param2 = enable; 2909 nctrl.ncmd.s.more = 0; 2910 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2911 nctrl.cb_fn = NULL; 2912 2913 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2914 2915 if (retval) { 2916 netif_info(lio, drv, lio->netdev, 2917 "Failed to set VF %d spoofchk %s\n", vfidx, 2918 enable ? "on" : "off"); 2919 return -1; 2920 } 2921 2922 oct->sriov_info.vf_spoofchk[vfidx] = enable; 2923 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, 2924 enable ? "on" : "off"); 2925 2926 return 0; 2927 } 2928 2929 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 2930 u16 vlan, u8 qos, __be16 vlan_proto) 2931 { 2932 struct lio *lio = GET_LIO(netdev); 2933 struct octeon_device *oct = lio->oct_dev; 2934 struct octnic_ctrl_pkt nctrl; 2935 u16 vlantci; 2936 int ret = 0; 2937 2938 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2939 return -EINVAL; 2940 2941 if (vlan_proto != htons(ETH_P_8021Q)) 2942 return -EPROTONOSUPPORT; 2943 2944 if (vlan >= VLAN_N_VID || qos > 7) 2945 return -EINVAL; 2946 2947 if (vlan) 2948 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 2949 else 2950 vlantci = 0; 2951 2952 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 2953 return 0; 2954 2955 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2956 2957 if (vlan) 2958 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2959 else 2960 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2961 2962 nctrl.ncmd.s.param1 = vlantci; 2963 nctrl.ncmd.s.param2 = 2964 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 2965 nctrl.ncmd.s.more = 0; 2966 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2967 nctrl.cb_fn = NULL; 2968 2969 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2970 if (ret) { 2971 if (ret > 0) 2972 ret = -EIO; 2973 return ret; 2974 } 2975 2976 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 2977 2978 return ret; 2979 } 2980 2981 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 2982 struct ifla_vf_info *ivi) 2983 { 2984 struct lio *lio = GET_LIO(netdev); 2985 struct octeon_device *oct = lio->oct_dev; 2986 u8 *macaddr; 2987 2988 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2989 return -EINVAL; 2990 2991 memset(ivi, 0, sizeof(struct ifla_vf_info)); 2992 2993 ivi->vf = vfidx; 2994 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 2995 ether_addr_copy(&ivi->mac[0], macaddr); 2996 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 2997 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 2998 if (oct->sriov_info.trusted_vf.active && 2999 oct->sriov_info.trusted_vf.id == vfidx) 3000 ivi->trusted = true; 3001 else 3002 ivi->trusted = false; 3003 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 3004 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; 3005 ivi->max_tx_rate = lio->linfo.link.s.speed; 3006 ivi->min_tx_rate = 0; 3007 3008 return 0; 3009 } 3010 3011 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) 3012 { 3013 struct octeon_device *oct = lio->oct_dev; 3014 struct octeon_soft_command *sc; 3015 int retval; 3016 3017 sc = octeon_alloc_soft_command(oct, 0, 16, 0); 3018 if (!sc) 3019 return -ENOMEM; 3020 3021 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 3022 3023 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3024 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 3025 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, 3026 trusted); 3027 3028 init_completion(&sc->complete); 3029 sc->sc_status = OCTEON_REQUEST_PENDING; 3030 3031 retval = octeon_send_soft_command(oct, sc); 3032 if (retval == IQ_SEND_FAILED) { 3033 octeon_free_soft_command(oct, sc); 3034 retval = -1; 3035 } else { 3036 /* Wait for response or timeout */ 3037 retval = wait_for_sc_completion_timeout(oct, sc, 0); 3038 if (retval) 3039 return (retval); 3040 3041 WRITE_ONCE(sc->caller_is_done, true); 3042 } 3043 3044 return retval; 3045 } 3046 3047 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, 3048 bool setting) 3049 { 3050 struct lio *lio = GET_LIO(netdev); 3051 struct octeon_device *oct = lio->oct_dev; 3052 3053 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { 3054 /* trusted vf is not supported by firmware older than 1.7.1 */ 3055 return -EOPNOTSUPP; 3056 } 3057 3058 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 3059 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 3060 return -EINVAL; 3061 } 3062 3063 if (setting) { 3064 /* Set */ 3065 3066 if (oct->sriov_info.trusted_vf.active && 3067 oct->sriov_info.trusted_vf.id == vfidx) 3068 return 0; 3069 3070 if (oct->sriov_info.trusted_vf.active) { 3071 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); 3072 return -EPERM; 3073 } 3074 } else { 3075 /* Clear */ 3076 3077 if (!oct->sriov_info.trusted_vf.active) 3078 return 0; 3079 } 3080 3081 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { 3082 if (setting) { 3083 oct->sriov_info.trusted_vf.id = vfidx; 3084 oct->sriov_info.trusted_vf.active = true; 3085 } else { 3086 oct->sriov_info.trusted_vf.active = false; 3087 } 3088 3089 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, 3090 setting ? "" : "not "); 3091 } else { 3092 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); 3093 return -1; 3094 } 3095 3096 return 0; 3097 } 3098 3099 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3100 int linkstate) 3101 { 3102 struct lio *lio = GET_LIO(netdev); 3103 struct octeon_device *oct = lio->oct_dev; 3104 struct octnic_ctrl_pkt nctrl; 3105 int ret = 0; 3106 3107 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3108 return -EINVAL; 3109 3110 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3111 return 0; 3112 3113 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3114 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3115 nctrl.ncmd.s.param1 = 3116 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3117 nctrl.ncmd.s.param2 = linkstate; 3118 nctrl.ncmd.s.more = 0; 3119 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3120 nctrl.cb_fn = NULL; 3121 3122 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 3123 3124 if (!ret) 3125 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 3126 else if (ret > 0) 3127 ret = -EIO; 3128 3129 return ret; 3130 } 3131 3132 static int 3133 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3134 { 3135 struct lio_devlink_priv *priv; 3136 struct octeon_device *oct; 3137 3138 priv = devlink_priv(devlink); 3139 oct = priv->oct; 3140 3141 *mode = oct->eswitch_mode; 3142 3143 return 0; 3144 } 3145 3146 static int 3147 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, 3148 struct netlink_ext_ack *extack) 3149 { 3150 struct lio_devlink_priv *priv; 3151 struct octeon_device *oct; 3152 int ret = 0; 3153 3154 priv = devlink_priv(devlink); 3155 oct = priv->oct; 3156 3157 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) 3158 return -EINVAL; 3159 3160 if (oct->eswitch_mode == mode) 3161 return 0; 3162 3163 switch (mode) { 3164 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3165 oct->eswitch_mode = mode; 3166 ret = lio_vf_rep_create(oct); 3167 break; 3168 3169 case DEVLINK_ESWITCH_MODE_LEGACY: 3170 lio_vf_rep_destroy(oct); 3171 oct->eswitch_mode = mode; 3172 break; 3173 3174 default: 3175 ret = -EINVAL; 3176 } 3177 3178 return ret; 3179 } 3180 3181 static const struct devlink_ops liquidio_devlink_ops = { 3182 .eswitch_mode_get = liquidio_eswitch_mode_get, 3183 .eswitch_mode_set = liquidio_eswitch_mode_set, 3184 }; 3185 3186 static int 3187 liquidio_get_port_parent_id(struct net_device *dev, 3188 struct netdev_phys_item_id *ppid) 3189 { 3190 struct lio *lio = GET_LIO(dev); 3191 struct octeon_device *oct = lio->oct_dev; 3192 3193 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 3194 return -EOPNOTSUPP; 3195 3196 ppid->id_len = ETH_ALEN; 3197 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); 3198 3199 return 0; 3200 } 3201 3202 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, 3203 struct ifla_vf_stats *vf_stats) 3204 { 3205 struct lio *lio = GET_LIO(netdev); 3206 struct octeon_device *oct = lio->oct_dev; 3207 struct oct_vf_stats stats; 3208 int ret; 3209 3210 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3211 return -EINVAL; 3212 3213 memset(&stats, 0, sizeof(struct oct_vf_stats)); 3214 ret = cn23xx_get_vf_stats(oct, vfidx, &stats); 3215 if (!ret) { 3216 vf_stats->rx_packets = stats.rx_packets; 3217 vf_stats->tx_packets = stats.tx_packets; 3218 vf_stats->rx_bytes = stats.rx_bytes; 3219 vf_stats->tx_bytes = stats.tx_bytes; 3220 vf_stats->broadcast = stats.broadcast; 3221 vf_stats->multicast = stats.multicast; 3222 } 3223 3224 return ret; 3225 } 3226 3227 static const struct net_device_ops lionetdevops = { 3228 .ndo_open = liquidio_open, 3229 .ndo_stop = liquidio_stop, 3230 .ndo_start_xmit = liquidio_xmit, 3231 .ndo_get_stats64 = liquidio_get_stats64, 3232 .ndo_set_mac_address = liquidio_set_mac, 3233 .ndo_set_rx_mode = liquidio_set_mcast_list, 3234 .ndo_tx_timeout = liquidio_tx_timeout, 3235 3236 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3237 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3238 .ndo_change_mtu = liquidio_change_mtu, 3239 .ndo_eth_ioctl = liquidio_ioctl, 3240 .ndo_fix_features = liquidio_fix_features, 3241 .ndo_set_features = liquidio_set_features, 3242 .ndo_set_vf_mac = liquidio_set_vf_mac, 3243 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3244 .ndo_get_vf_config = liquidio_get_vf_config, 3245 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, 3246 .ndo_set_vf_trust = liquidio_set_vf_trust, 3247 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3248 .ndo_get_vf_stats = liquidio_get_vf_stats, 3249 .ndo_get_port_parent_id = liquidio_get_port_parent_id, 3250 }; 3251 3252 /** 3253 * liquidio_init - Entry point for the liquidio module 3254 */ 3255 static int __init liquidio_init(void) 3256 { 3257 int i; 3258 struct handshake *hs; 3259 3260 init_completion(&first_stage); 3261 3262 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3263 3264 if (liquidio_init_pci()) 3265 return -EINVAL; 3266 3267 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3268 3269 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3270 hs = &handshake[i]; 3271 if (hs->pci_dev) { 3272 wait_for_completion(&hs->init); 3273 if (!hs->init_ok) { 3274 /* init handshake failed */ 3275 dev_err(&hs->pci_dev->dev, 3276 "Failed to init device\n"); 3277 liquidio_deinit_pci(); 3278 return -EIO; 3279 } 3280 } 3281 } 3282 3283 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3284 hs = &handshake[i]; 3285 if (hs->pci_dev) { 3286 wait_for_completion_timeout(&hs->started, 3287 msecs_to_jiffies(30000)); 3288 if (!hs->started_ok) { 3289 /* starter handshake failed */ 3290 dev_err(&hs->pci_dev->dev, 3291 "Firmware failed to start\n"); 3292 liquidio_deinit_pci(); 3293 return -EIO; 3294 } 3295 } 3296 } 3297 3298 return 0; 3299 } 3300 3301 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3302 { 3303 struct octeon_device *oct = (struct octeon_device *)buf; 3304 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3305 int gmxport = 0; 3306 union oct_link_status *ls; 3307 int i; 3308 3309 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 3310 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3311 recv_pkt->buffer_size[0], 3312 recv_pkt->rh.r_nic_info.gmxport); 3313 goto nic_info_err; 3314 } 3315 3316 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3317 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 3318 OCT_DROQ_INFO_SIZE); 3319 3320 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3321 for (i = 0; i < oct->ifcount; i++) { 3322 if (oct->props[i].gmxport == gmxport) { 3323 update_link_status(oct->props[i].netdev, ls); 3324 break; 3325 } 3326 } 3327 3328 nic_info_err: 3329 for (i = 0; i < recv_pkt->buffer_count; i++) 3330 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3331 octeon_free_recv_info(recv_info); 3332 return 0; 3333 } 3334 3335 /** 3336 * setup_nic_devices - Setup network interfaces 3337 * @octeon_dev: octeon device 3338 * 3339 * Called during init time for each device. It assumes the NIC 3340 * is already up and running. The link information for each 3341 * interface is passed in link_info. 3342 */ 3343 static int setup_nic_devices(struct octeon_device *octeon_dev) 3344 { 3345 struct lio *lio = NULL; 3346 struct net_device *netdev; 3347 u8 mac[6], i, j, *fw_ver, *micro_ver; 3348 unsigned long micro; 3349 u32 cur_ver; 3350 struct octeon_soft_command *sc; 3351 struct liquidio_if_cfg_resp *resp; 3352 struct octdev_props *props; 3353 int retval, num_iqueues, num_oqueues; 3354 int max_num_queues = 0; 3355 union oct_nic_if_cfg if_cfg; 3356 unsigned int base_queue; 3357 unsigned int gmx_port_id; 3358 u32 resp_size, data_size; 3359 u32 ifidx_or_pfnum; 3360 struct lio_version *vdata; 3361 struct devlink *devlink; 3362 struct lio_devlink_priv *lio_devlink; 3363 3364 /* This is to handle link status changes */ 3365 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3366 OPCODE_NIC_INFO, 3367 lio_nic_info, octeon_dev); 3368 3369 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3370 * They are handled directly. 3371 */ 3372 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3373 free_netbuf); 3374 3375 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3376 free_netsgbuf); 3377 3378 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3379 free_netsgbuf_with_resp); 3380 3381 for (i = 0; i < octeon_dev->ifcount; i++) { 3382 resp_size = sizeof(struct liquidio_if_cfg_resp); 3383 data_size = sizeof(struct lio_version); 3384 sc = (struct octeon_soft_command *) 3385 octeon_alloc_soft_command(octeon_dev, data_size, 3386 resp_size, 0); 3387 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3388 vdata = (struct lio_version *)sc->virtdptr; 3389 3390 *((u64 *)vdata) = 0; 3391 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 3392 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 3393 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 3394 3395 if (OCTEON_CN23XX_PF(octeon_dev)) { 3396 num_iqueues = octeon_dev->sriov_info.num_pf_rings; 3397 num_oqueues = octeon_dev->sriov_info.num_pf_rings; 3398 base_queue = octeon_dev->sriov_info.pf_srn; 3399 3400 gmx_port_id = octeon_dev->pf_num; 3401 ifidx_or_pfnum = octeon_dev->pf_num; 3402 } else { 3403 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( 3404 octeon_get_conf(octeon_dev), i); 3405 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( 3406 octeon_get_conf(octeon_dev), i); 3407 base_queue = CFG_GET_BASE_QUE_NIC_IF( 3408 octeon_get_conf(octeon_dev), i); 3409 gmx_port_id = CFG_GET_GMXID_NIC_IF( 3410 octeon_get_conf(octeon_dev), i); 3411 ifidx_or_pfnum = i; 3412 } 3413 3414 dev_dbg(&octeon_dev->pci_dev->dev, 3415 "requesting config for interface %d, iqs %d, oqs %d\n", 3416 ifidx_or_pfnum, num_iqueues, num_oqueues); 3417 3418 if_cfg.u64 = 0; 3419 if_cfg.s.num_iqueues = num_iqueues; 3420 if_cfg.s.num_oqueues = num_oqueues; 3421 if_cfg.s.base_queue = base_queue; 3422 if_cfg.s.gmx_port_id = gmx_port_id; 3423 3424 sc->iq_no = 0; 3425 3426 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3427 OPCODE_NIC_IF_CFG, 0, 3428 if_cfg.u64, 0); 3429 3430 init_completion(&sc->complete); 3431 sc->sc_status = OCTEON_REQUEST_PENDING; 3432 3433 retval = octeon_send_soft_command(octeon_dev, sc); 3434 if (retval == IQ_SEND_FAILED) { 3435 dev_err(&octeon_dev->pci_dev->dev, 3436 "iq/oq config failed status: %x\n", 3437 retval); 3438 /* Soft instr is freed by driver in case of failure. */ 3439 octeon_free_soft_command(octeon_dev, sc); 3440 return(-EIO); 3441 } 3442 3443 /* Sleep on a wait queue till the cond flag indicates that the 3444 * response arrived or timed-out. 3445 */ 3446 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); 3447 if (retval) 3448 return retval; 3449 3450 retval = resp->status; 3451 if (retval) { 3452 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3453 WRITE_ONCE(sc->caller_is_done, true); 3454 goto setup_nic_dev_done; 3455 } 3456 snprintf(octeon_dev->fw_info.liquidio_firmware_version, 3457 32, "%s", 3458 resp->cfg_info.liquidio_firmware_version); 3459 3460 /* Verify f/w version (in case of 'auto' loading from flash) */ 3461 fw_ver = octeon_dev->fw_info.liquidio_firmware_version; 3462 if (memcmp(LIQUIDIO_BASE_VERSION, 3463 fw_ver, 3464 strlen(LIQUIDIO_BASE_VERSION))) { 3465 dev_err(&octeon_dev->pci_dev->dev, 3466 "Unmatched firmware version. Expected %s.x, got %s.\n", 3467 LIQUIDIO_BASE_VERSION, fw_ver); 3468 WRITE_ONCE(sc->caller_is_done, true); 3469 goto setup_nic_dev_done; 3470 } else if (atomic_read(octeon_dev->adapter_fw_state) == 3471 FW_IS_PRELOADED) { 3472 dev_info(&octeon_dev->pci_dev->dev, 3473 "Using auto-loaded firmware version %s.\n", 3474 fw_ver); 3475 } 3476 3477 /* extract micro version field; point past '<maj>.<min>.' */ 3478 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; 3479 if (kstrtoul(micro_ver, 10, µ) != 0) 3480 micro = 0; 3481 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; 3482 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; 3483 octeon_dev->fw_info.ver.rev = micro; 3484 3485 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3486 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3487 3488 num_iqueues = hweight64(resp->cfg_info.iqmask); 3489 num_oqueues = hweight64(resp->cfg_info.oqmask); 3490 3491 if (!(num_iqueues) || !(num_oqueues)) { 3492 dev_err(&octeon_dev->pci_dev->dev, 3493 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3494 resp->cfg_info.iqmask, 3495 resp->cfg_info.oqmask); 3496 WRITE_ONCE(sc->caller_is_done, true); 3497 goto setup_nic_dev_done; 3498 } 3499 3500 if (OCTEON_CN6XXX(octeon_dev)) { 3501 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3502 cn6xxx)); 3503 } else if (OCTEON_CN23XX_PF(octeon_dev)) { 3504 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3505 cn23xx_pf)); 3506 } 3507 3508 dev_dbg(&octeon_dev->pci_dev->dev, 3509 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", 3510 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3511 num_iqueues, num_oqueues, max_num_queues); 3512 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); 3513 3514 if (!netdev) { 3515 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3516 WRITE_ONCE(sc->caller_is_done, true); 3517 goto setup_nic_dev_done; 3518 } 3519 3520 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 3521 3522 /* Associate the routines that will handle different 3523 * netdev tasks. 3524 */ 3525 netdev->netdev_ops = &lionetdevops; 3526 3527 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3528 if (retval) { 3529 dev_err(&octeon_dev->pci_dev->dev, 3530 "setting real number rx failed\n"); 3531 WRITE_ONCE(sc->caller_is_done, true); 3532 goto setup_nic_dev_free; 3533 } 3534 3535 retval = netif_set_real_num_tx_queues(netdev, num_iqueues); 3536 if (retval) { 3537 dev_err(&octeon_dev->pci_dev->dev, 3538 "setting real number tx failed\n"); 3539 WRITE_ONCE(sc->caller_is_done, true); 3540 goto setup_nic_dev_free; 3541 } 3542 3543 lio = GET_LIO(netdev); 3544 3545 memset(lio, 0, sizeof(struct lio)); 3546 3547 lio->ifidx = ifidx_or_pfnum; 3548 3549 props = &octeon_dev->props[i]; 3550 props->gmxport = resp->cfg_info.linfo.gmxport; 3551 props->netdev = netdev; 3552 3553 lio->linfo.num_rxpciq = num_oqueues; 3554 lio->linfo.num_txpciq = num_iqueues; 3555 for (j = 0; j < num_oqueues; j++) { 3556 lio->linfo.rxpciq[j].u64 = 3557 resp->cfg_info.linfo.rxpciq[j].u64; 3558 } 3559 for (j = 0; j < num_iqueues; j++) { 3560 lio->linfo.txpciq[j].u64 = 3561 resp->cfg_info.linfo.txpciq[j].u64; 3562 } 3563 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3564 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3565 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3566 3567 WRITE_ONCE(sc->caller_is_done, true); 3568 3569 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3570 3571 if (OCTEON_CN23XX_PF(octeon_dev) || 3572 OCTEON_CN6XXX(octeon_dev)) { 3573 lio->dev_capability = NETIF_F_HIGHDMA 3574 | NETIF_F_IP_CSUM 3575 | NETIF_F_IPV6_CSUM 3576 | NETIF_F_SG | NETIF_F_RXCSUM 3577 | NETIF_F_GRO 3578 | NETIF_F_TSO | NETIF_F_TSO6 3579 | NETIF_F_LRO; 3580 } 3581 netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3582 3583 /* Copy of transmit encapsulation capabilities: 3584 * TSO, TSO6, Checksums for this device 3585 */ 3586 lio->enc_dev_capability = NETIF_F_IP_CSUM 3587 | NETIF_F_IPV6_CSUM 3588 | NETIF_F_GSO_UDP_TUNNEL 3589 | NETIF_F_HW_CSUM | NETIF_F_SG 3590 | NETIF_F_RXCSUM 3591 | NETIF_F_TSO | NETIF_F_TSO6 3592 | NETIF_F_LRO; 3593 3594 netdev->hw_enc_features = (lio->enc_dev_capability & 3595 ~NETIF_F_LRO); 3596 3597 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; 3598 3599 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; 3600 3601 netdev->vlan_features = lio->dev_capability; 3602 /* Add any unchangeable hw features */ 3603 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 3604 NETIF_F_HW_VLAN_CTAG_RX | 3605 NETIF_F_HW_VLAN_CTAG_TX; 3606 3607 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 3608 3609 netdev->hw_features = lio->dev_capability; 3610 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ 3611 netdev->hw_features = netdev->hw_features & 3612 ~NETIF_F_HW_VLAN_CTAG_RX; 3613 3614 /* MTU range: 68 - 16000 */ 3615 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3616 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3617 3618 /* Point to the properties for octeon device to which this 3619 * interface belongs. 3620 */ 3621 lio->oct_dev = octeon_dev; 3622 lio->octprops = props; 3623 lio->netdev = netdev; 3624 3625 dev_dbg(&octeon_dev->pci_dev->dev, 3626 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3627 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3628 3629 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { 3630 u8 vfmac[ETH_ALEN]; 3631 3632 eth_random_addr(vfmac); 3633 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { 3634 dev_err(&octeon_dev->pci_dev->dev, 3635 "Error setting VF%d MAC address\n", 3636 j); 3637 goto setup_nic_dev_free; 3638 } 3639 } 3640 3641 /* 64-bit swap required on LE machines */ 3642 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3643 for (j = 0; j < 6; j++) 3644 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3645 3646 /* Copy MAC Address to OS network device structure */ 3647 3648 eth_hw_addr_set(netdev, mac); 3649 3650 /* By default all interfaces on a single Octeon uses the same 3651 * tx and rx queues 3652 */ 3653 lio->txq = lio->linfo.txpciq[0].s.q_no; 3654 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 3655 if (liquidio_setup_io_queues(octeon_dev, i, 3656 lio->linfo.num_txpciq, 3657 lio->linfo.num_rxpciq)) { 3658 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3659 goto setup_nic_dev_free; 3660 } 3661 3662 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3663 3664 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3665 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3666 3667 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { 3668 dev_err(&octeon_dev->pci_dev->dev, 3669 "Gather list allocation failed\n"); 3670 goto setup_nic_dev_free; 3671 } 3672 3673 /* Register ethtool support */ 3674 liquidio_set_ethtool_ops(netdev); 3675 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) 3676 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 3677 else 3678 octeon_dev->priv_flags = 0x0; 3679 3680 if (netdev->features & NETIF_F_LRO) 3681 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3682 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3683 3684 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3685 OCTNET_CMD_VLAN_FILTER_ENABLE); 3686 3687 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3688 liquidio_set_feature(netdev, 3689 OCTNET_CMD_VERBOSE_ENABLE, 0); 3690 3691 if (setup_link_status_change_wq(netdev)) 3692 goto setup_nic_dev_free; 3693 3694 if ((octeon_dev->fw_info.app_cap_flags & 3695 LIQUIDIO_TIME_SYNC_CAP) && 3696 setup_sync_octeon_time_wq(netdev)) 3697 goto setup_nic_dev_free; 3698 3699 if (setup_rx_oom_poll_fn(netdev)) 3700 goto setup_nic_dev_free; 3701 3702 /* Register the network device with the OS */ 3703 if (register_netdev(netdev)) { 3704 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3705 goto setup_nic_dev_free; 3706 } 3707 3708 dev_dbg(&octeon_dev->pci_dev->dev, 3709 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3710 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3711 netif_carrier_off(netdev); 3712 lio->link_changes++; 3713 3714 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3715 3716 /* Sending command to firmware to enable Rx checksum offload 3717 * by default at the time of setup of Liquidio driver for 3718 * this device 3719 */ 3720 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3721 OCTNET_CMD_RXCSUM_ENABLE); 3722 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3723 OCTNET_CMD_TXCSUM_ENABLE); 3724 3725 dev_dbg(&octeon_dev->pci_dev->dev, 3726 "NIC ifidx:%d Setup successful\n", i); 3727 3728 if (octeon_dev->subsystem_id == 3729 OCTEON_CN2350_25GB_SUBSYS_ID || 3730 octeon_dev->subsystem_id == 3731 OCTEON_CN2360_25GB_SUBSYS_ID) { 3732 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, 3733 octeon_dev->fw_info.ver.min, 3734 octeon_dev->fw_info.ver.rev); 3735 3736 /* speed control unsupported in f/w older than 1.7.2 */ 3737 if (cur_ver < OCT_FW_VER(1, 7, 2)) { 3738 dev_info(&octeon_dev->pci_dev->dev, 3739 "speed setting not supported by f/w."); 3740 octeon_dev->speed_setting = 25; 3741 octeon_dev->no_speed_setting = 1; 3742 } else { 3743 liquidio_get_speed(lio); 3744 } 3745 3746 if (octeon_dev->speed_setting == 0) { 3747 octeon_dev->speed_setting = 25; 3748 octeon_dev->no_speed_setting = 1; 3749 } 3750 } else { 3751 octeon_dev->no_speed_setting = 1; 3752 octeon_dev->speed_setting = 10; 3753 } 3754 octeon_dev->speed_boot = octeon_dev->speed_setting; 3755 3756 /* don't read FEC setting if unsupported by f/w (see above) */ 3757 if (octeon_dev->speed_boot == 25 && 3758 !octeon_dev->no_speed_setting) { 3759 liquidio_get_fec(lio); 3760 octeon_dev->props[lio->ifidx].fec_boot = 3761 octeon_dev->props[lio->ifidx].fec; 3762 } 3763 } 3764 3765 device_lock(&octeon_dev->pci_dev->dev); 3766 devlink = devlink_alloc(&liquidio_devlink_ops, 3767 sizeof(struct lio_devlink_priv), 3768 &octeon_dev->pci_dev->dev); 3769 if (!devlink) { 3770 device_unlock(&octeon_dev->pci_dev->dev); 3771 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3772 goto setup_nic_dev_free; 3773 } 3774 3775 lio_devlink = devlink_priv(devlink); 3776 lio_devlink->oct = octeon_dev; 3777 3778 octeon_dev->devlink = devlink; 3779 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; 3780 devlink_register(devlink); 3781 device_unlock(&octeon_dev->pci_dev->dev); 3782 3783 return 0; 3784 3785 setup_nic_dev_free: 3786 3787 while (i--) { 3788 dev_err(&octeon_dev->pci_dev->dev, 3789 "NIC ifidx:%d Setup failed\n", i); 3790 liquidio_destroy_nic_device(octeon_dev, i); 3791 } 3792 3793 setup_nic_dev_done: 3794 3795 return -ENODEV; 3796 } 3797 3798 #ifdef CONFIG_PCI_IOV 3799 static int octeon_enable_sriov(struct octeon_device *oct) 3800 { 3801 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; 3802 struct pci_dev *vfdev; 3803 int err; 3804 u32 u; 3805 3806 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { 3807 err = pci_enable_sriov(oct->pci_dev, 3808 oct->sriov_info.num_vfs_alloced); 3809 if (err) { 3810 dev_err(&oct->pci_dev->dev, 3811 "OCTEON: Failed to enable PCI sriov: %d\n", 3812 err); 3813 oct->sriov_info.num_vfs_alloced = 0; 3814 return err; 3815 } 3816 oct->sriov_info.sriov_enabled = 1; 3817 3818 /* init lookup table that maps DPI ring number to VF pci_dev 3819 * struct pointer 3820 */ 3821 u = 0; 3822 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3823 OCTEON_CN23XX_VF_VID, NULL); 3824 while (vfdev) { 3825 if (vfdev->is_virtfn && 3826 (vfdev->physfn == oct->pci_dev)) { 3827 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = 3828 vfdev; 3829 u += oct->sriov_info.rings_per_vf; 3830 } 3831 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3832 OCTEON_CN23XX_VF_VID, vfdev); 3833 } 3834 } 3835 3836 return num_vfs_alloced; 3837 } 3838 3839 static int lio_pci_sriov_disable(struct octeon_device *oct) 3840 { 3841 int u; 3842 3843 if (pci_vfs_assigned(oct->pci_dev)) { 3844 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); 3845 return -EPERM; 3846 } 3847 3848 pci_disable_sriov(oct->pci_dev); 3849 3850 u = 0; 3851 while (u < MAX_POSSIBLE_VFS) { 3852 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; 3853 u += oct->sriov_info.rings_per_vf; 3854 } 3855 3856 oct->sriov_info.num_vfs_alloced = 0; 3857 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", 3858 oct->pf_num); 3859 3860 return 0; 3861 } 3862 3863 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) 3864 { 3865 struct octeon_device *oct = pci_get_drvdata(dev); 3866 int ret = 0; 3867 3868 if ((num_vfs == oct->sriov_info.num_vfs_alloced) && 3869 (oct->sriov_info.sriov_enabled)) { 3870 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", 3871 oct->pf_num, num_vfs); 3872 return 0; 3873 } 3874 3875 if (!num_vfs) { 3876 lio_vf_rep_destroy(oct); 3877 ret = lio_pci_sriov_disable(oct); 3878 } else if (num_vfs > oct->sriov_info.max_vfs) { 3879 dev_err(&oct->pci_dev->dev, 3880 "OCTEON: Max allowed VFs:%d user requested:%d", 3881 oct->sriov_info.max_vfs, num_vfs); 3882 ret = -EPERM; 3883 } else { 3884 oct->sriov_info.num_vfs_alloced = num_vfs; 3885 ret = octeon_enable_sriov(oct); 3886 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", 3887 oct->pf_num, num_vfs); 3888 ret = lio_vf_rep_create(oct); 3889 if (ret) 3890 dev_info(&oct->pci_dev->dev, 3891 "vf representor create failed"); 3892 } 3893 3894 return ret; 3895 } 3896 #endif 3897 3898 /** 3899 * liquidio_init_nic_module - initialize the NIC 3900 * @oct: octeon device 3901 * 3902 * This initialization routine is called once the Octeon device application is 3903 * up and running 3904 */ 3905 static int liquidio_init_nic_module(struct octeon_device *oct) 3906 { 3907 int i, retval = 0; 3908 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3909 3910 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3911 3912 /* only default iq and oq were initialized 3913 * initialize the rest as well 3914 */ 3915 /* run port_config command for each port */ 3916 oct->ifcount = num_nic_ports; 3917 3918 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); 3919 3920 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3921 oct->props[i].gmxport = -1; 3922 3923 retval = setup_nic_devices(oct); 3924 if (retval) { 3925 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3926 goto octnet_init_failure; 3927 } 3928 3929 /* Call vf_rep_modinit if the firmware is switchdev capable 3930 * and do it from the first liquidio function probed. 3931 */ 3932 if (!oct->octeon_id && 3933 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { 3934 retval = lio_vf_rep_modinit(); 3935 if (retval) { 3936 liquidio_stop_nic_module(oct); 3937 goto octnet_init_failure; 3938 } 3939 } 3940 3941 liquidio_ptp_init(oct); 3942 3943 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3944 3945 return retval; 3946 3947 octnet_init_failure: 3948 3949 oct->ifcount = 0; 3950 3951 return retval; 3952 } 3953 3954 /** 3955 * nic_starter - finish init 3956 * @work: work struct work_struct 3957 * 3958 * starter callback that invokes the remaining initialization work after the NIC is up and running. 3959 */ 3960 static void nic_starter(struct work_struct *work) 3961 { 3962 struct octeon_device *oct; 3963 struct cavium_wk *wk = (struct cavium_wk *)work; 3964 3965 oct = (struct octeon_device *)wk->ctxptr; 3966 3967 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3968 return; 3969 3970 /* If the status of the device is CORE_OK, the core 3971 * application has reported its application type. Call 3972 * any registered handlers now and move to the RUNNING 3973 * state. 3974 */ 3975 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { 3976 schedule_delayed_work(&oct->nic_poll_work.work, 3977 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3978 return; 3979 } 3980 3981 atomic_set(&oct->status, OCT_DEV_RUNNING); 3982 3983 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { 3984 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); 3985 3986 if (liquidio_init_nic_module(oct)) 3987 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); 3988 else 3989 handshake[oct->octeon_id].started_ok = 1; 3990 } else { 3991 dev_err(&oct->pci_dev->dev, 3992 "Unexpected application running on NIC (%d). Check firmware.\n", 3993 oct->app_mode); 3994 } 3995 3996 complete(&handshake[oct->octeon_id].started); 3997 } 3998 3999 static int 4000 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) 4001 { 4002 struct octeon_device *oct = (struct octeon_device *)buf; 4003 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 4004 int i, notice, vf_idx; 4005 bool cores_crashed; 4006 u64 *data, vf_num; 4007 4008 notice = recv_pkt->rh.r.ossp; 4009 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE); 4010 4011 /* the first 64-bit word of data is the vf_num */ 4012 vf_num = data[0]; 4013 octeon_swap_8B_data(&vf_num, 1); 4014 vf_idx = (int)vf_num - 1; 4015 4016 cores_crashed = READ_ONCE(oct->cores_crashed); 4017 4018 if (notice == VF_DRV_LOADED) { 4019 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { 4020 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); 4021 dev_info(&oct->pci_dev->dev, 4022 "driver for VF%d was loaded\n", vf_idx); 4023 if (!cores_crashed) 4024 try_module_get(THIS_MODULE); 4025 } 4026 } else if (notice == VF_DRV_REMOVED) { 4027 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { 4028 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); 4029 dev_info(&oct->pci_dev->dev, 4030 "driver for VF%d was removed\n", vf_idx); 4031 if (!cores_crashed) 4032 module_put(THIS_MODULE); 4033 } 4034 } else if (notice == VF_DRV_MACADDR_CHANGED) { 4035 u8 *b = (u8 *)&data[1]; 4036 4037 oct->sriov_info.vf_macaddr[vf_idx] = data[1]; 4038 dev_info(&oct->pci_dev->dev, 4039 "VF driver changed VF%d's MAC address to %pM\n", 4040 vf_idx, b + 2); 4041 } 4042 4043 for (i = 0; i < recv_pkt->buffer_count; i++) 4044 recv_buffer_free(recv_pkt->buffer_ptr[i]); 4045 octeon_free_recv_info(recv_info); 4046 4047 return 0; 4048 } 4049 4050 /** 4051 * octeon_device_init - Device initialization for each Octeon device that is probed 4052 * @octeon_dev: octeon device 4053 */ 4054 static int octeon_device_init(struct octeon_device *octeon_dev) 4055 { 4056 int j, ret; 4057 char bootcmd[] = "\n"; 4058 char *dbg_enb = NULL; 4059 enum lio_fw_state fw_state; 4060 struct octeon_device_priv *oct_priv = 4061 (struct octeon_device_priv *)octeon_dev->priv; 4062 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); 4063 4064 /* Enable access to the octeon device and make its DMA capability 4065 * known to the OS. 4066 */ 4067 if (octeon_pci_os_setup(octeon_dev)) 4068 return 1; 4069 4070 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE); 4071 4072 /* Identify the Octeon type and map the BAR address space. */ 4073 if (octeon_chip_specific_setup(octeon_dev)) { 4074 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n"); 4075 return 1; 4076 } 4077 4078 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE); 4079 4080 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', 4081 * since that is what is required for the reference to be removed 4082 * during de-initialization (see 'octeon_destroy_resources'). 4083 */ 4084 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number, 4085 PCI_SLOT(octeon_dev->pci_dev->devfn), 4086 PCI_FUNC(octeon_dev->pci_dev->devfn), 4087 true); 4088 4089 octeon_dev->app_mode = CVM_DRV_INVALID_APP; 4090 4091 /* CN23XX supports preloaded firmware if the following is true: 4092 * 4093 * The adapter indicates that firmware is currently running AND 4094 * 'fw_type' is 'auto'. 4095 * 4096 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate). 4097 */ 4098 if (OCTEON_CN23XX_PF(octeon_dev) && 4099 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) { 4100 atomic_cmpxchg(octeon_dev->adapter_fw_state, 4101 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED); 4102 } 4103 4104 /* If loading firmware, only first device of adapter needs to do so. */ 4105 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state, 4106 FW_NEEDS_TO_BE_LOADED, 4107 FW_IS_BEING_LOADED); 4108 4109 /* Here, [local variable] 'fw_state' is set to one of: 4110 * 4111 * FW_IS_PRELOADED: No firmware is to be loaded (see above) 4112 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load 4113 * firmware to the adapter. 4114 * FW_IS_BEING_LOADED: The driver's second instance will not load 4115 * firmware to the adapter. 4116 */ 4117 4118 /* Prior to f/w load, perform a soft reset of the Octeon device; 4119 * if error resetting, return w/error. 4120 */ 4121 if (fw_state == FW_NEEDS_TO_BE_LOADED) 4122 if (octeon_dev->fn_list.soft_reset(octeon_dev)) 4123 return 1; 4124 4125 /* Initialize the dispatch mechanism used to push packets arriving on 4126 * Octeon Output queues. 4127 */ 4128 if (octeon_init_dispatch_list(octeon_dev)) 4129 return 1; 4130 4131 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4132 OPCODE_NIC_CORE_DRV_ACTIVE, 4133 octeon_core_drv_init, 4134 octeon_dev); 4135 4136 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4137 OPCODE_NIC_VF_DRV_NOTICE, 4138 octeon_recv_vf_drv_notice, octeon_dev); 4139 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); 4140 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; 4141 schedule_delayed_work(&octeon_dev->nic_poll_work.work, 4142 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 4143 4144 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); 4145 4146 if (octeon_set_io_queues_off(octeon_dev)) { 4147 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n"); 4148 return 1; 4149 } 4150 4151 if (OCTEON_CN23XX_PF(octeon_dev)) { 4152 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4153 if (ret) { 4154 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n"); 4155 return ret; 4156 } 4157 } 4158 4159 /* Initialize soft command buffer pool 4160 */ 4161 if (octeon_setup_sc_buffer_pool(octeon_dev)) { 4162 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n"); 4163 return 1; 4164 } 4165 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 4166 4167 /* Setup the data structures that manage this Octeon's Input queues. */ 4168 if (octeon_setup_instr_queues(octeon_dev)) { 4169 dev_err(&octeon_dev->pci_dev->dev, 4170 "instruction queue initialization failed\n"); 4171 return 1; 4172 } 4173 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 4174 4175 /* Initialize lists to manage the requests of different types that 4176 * arrive from user & kernel applications for this octeon device. 4177 */ 4178 if (octeon_setup_response_list(octeon_dev)) { 4179 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n"); 4180 return 1; 4181 } 4182 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); 4183 4184 if (octeon_setup_output_queues(octeon_dev)) { 4185 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); 4186 return 1; 4187 } 4188 4189 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); 4190 4191 if (OCTEON_CN23XX_PF(octeon_dev)) { 4192 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) { 4193 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n"); 4194 return 1; 4195 } 4196 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); 4197 4198 if (octeon_allocate_ioq_vector 4199 (octeon_dev, 4200 octeon_dev->sriov_info.num_pf_rings)) { 4201 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); 4202 return 1; 4203 } 4204 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 4205 4206 } else { 4207 /* The input and output queue registers were setup earlier (the 4208 * queues were not enabled). Any additional registers 4209 * that need to be programmed should be done now. 4210 */ 4211 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4212 if (ret) { 4213 dev_err(&octeon_dev->pci_dev->dev, 4214 "Failed to configure device registers\n"); 4215 return ret; 4216 } 4217 } 4218 4219 /* Initialize the tasklet that handles output queue packet processing.*/ 4220 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); 4221 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh); 4222 4223 /* Setup the interrupt handler and record the INT SUM register address 4224 */ 4225 if (octeon_setup_interrupt(octeon_dev, 4226 octeon_dev->sriov_info.num_pf_rings)) 4227 return 1; 4228 4229 /* Enable Octeon device interrupts */ 4230 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); 4231 4232 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); 4233 4234 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE 4235 * the output queue is enabled. 4236 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in 4237 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 4238 * Otherwise, it is possible that the DRV_ACTIVE message will be sent 4239 * before any credits have been issued, causing the ring to be reset 4240 * (and the f/w appear to never have started). 4241 */ 4242 for (j = 0; j < octeon_dev->num_oqs; j++) 4243 writel(octeon_dev->droq[j]->max_count, 4244 octeon_dev->droq[j]->pkts_credit_reg); 4245 4246 /* Enable the input and output queues for this Octeon device */ 4247 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); 4248 if (ret) { 4249 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues"); 4250 return ret; 4251 } 4252 4253 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); 4254 4255 if (fw_state == FW_NEEDS_TO_BE_LOADED) { 4256 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); 4257 if (!ddr_timeout) { 4258 dev_info(&octeon_dev->pci_dev->dev, 4259 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); 4260 } 4261 4262 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); 4263 4264 /* Wait for the octeon to initialize DDR after the soft-reset.*/ 4265 while (!ddr_timeout) { 4266 set_current_state(TASK_INTERRUPTIBLE); 4267 if (schedule_timeout(HZ / 10)) { 4268 /* user probably pressed Control-C */ 4269 return 1; 4270 } 4271 } 4272 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); 4273 if (ret) { 4274 dev_err(&octeon_dev->pci_dev->dev, 4275 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", 4276 ret); 4277 return 1; 4278 } 4279 4280 if (octeon_wait_for_bootloader(octeon_dev, 1000)) { 4281 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); 4282 return 1; 4283 } 4284 4285 /* Divert uboot to take commands from host instead. */ 4286 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); 4287 4288 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); 4289 ret = octeon_init_consoles(octeon_dev); 4290 if (ret) { 4291 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); 4292 return 1; 4293 } 4294 /* If console debug enabled, specify empty string to use default 4295 * enablement ELSE specify NULL string for 'disabled'. 4296 */ 4297 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; 4298 ret = octeon_add_console(octeon_dev, 0, dbg_enb); 4299 if (ret) { 4300 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); 4301 return 1; 4302 } else if (octeon_console_debug_enabled(0)) { 4303 /* If console was added AND we're logging console output 4304 * then set our console print function. 4305 */ 4306 octeon_dev->console[0].print = octeon_dbg_console_print; 4307 } 4308 4309 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); 4310 4311 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); 4312 ret = load_firmware(octeon_dev); 4313 if (ret) { 4314 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); 4315 return 1; 4316 } 4317 4318 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED); 4319 } 4320 4321 handshake[octeon_dev->octeon_id].init_ok = 1; 4322 complete(&handshake[octeon_dev->octeon_id].init); 4323 4324 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); 4325 oct_priv->dev = octeon_dev; 4326 4327 return 0; 4328 } 4329 4330 /** 4331 * octeon_dbg_console_print - Debug console print function 4332 * @oct: octeon device 4333 * @console_num: console number 4334 * @prefix: first portion of line to display 4335 * @suffix: second portion of line to display 4336 * 4337 * The OCTEON debug console outputs entire lines (excluding '\n'). 4338 * Normally, the line will be passed in the 'prefix' parameter. 4339 * However, due to buffering, it is possible for a line to be split into two 4340 * parts, in which case they will be passed as the 'prefix' parameter and 4341 * 'suffix' parameter. 4342 */ 4343 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 4344 char *prefix, char *suffix) 4345 { 4346 if (prefix && suffix) 4347 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix, 4348 suffix); 4349 else if (prefix) 4350 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix); 4351 else if (suffix) 4352 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix); 4353 4354 return 0; 4355 } 4356 4357 /** 4358 * liquidio_exit - Exits the module 4359 */ 4360 static void __exit liquidio_exit(void) 4361 { 4362 liquidio_deinit_pci(); 4363 4364 pr_info("LiquidIO network module is now unloaded\n"); 4365 } 4366 4367 module_init(liquidio_init); 4368 module_exit(liquidio_exit); 4369