1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <linux/firmware.h> 22 #include <net/vxlan.h> 23 #include <linux/kthread.h> 24 #include "liquidio_common.h" 25 #include "octeon_droq.h" 26 #include "octeon_iq.h" 27 #include "response_manager.h" 28 #include "octeon_device.h" 29 #include "octeon_nic.h" 30 #include "octeon_main.h" 31 #include "octeon_network.h" 32 #include "cn66xx_regs.h" 33 #include "cn66xx_device.h" 34 #include "cn68xx_device.h" 35 #include "cn23xx_pf_device.h" 36 #include "liquidio_image.h" 37 #include "lio_vf_rep.h" 38 39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); 41 MODULE_LICENSE("GPL"); 42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME 43 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME 45 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME 47 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME 49 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); 50 51 static int ddr_timeout = 10000; 52 module_param(ddr_timeout, int, 0644); 53 MODULE_PARM_DESC(ddr_timeout, 54 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); 55 56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 57 58 static int debug = -1; 59 module_param(debug, int, 0644); 60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 61 62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; 63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); 64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\"."); 65 66 static u32 console_bitmask; 67 module_param(console_bitmask, int, 0644); 68 MODULE_PARM_DESC(console_bitmask, 69 "Bitmask indicating which consoles have debug output redirected to syslog."); 70 71 /** 72 * octeon_console_debug_enabled - determines if a given console has debug enabled. 73 * @console: console to check 74 * Return: 1 = enabled. 0 otherwise 75 */ 76 static int octeon_console_debug_enabled(u32 console) 77 { 78 return (console_bitmask >> (console)) & 0x1; 79 } 80 81 /* Polling interval for determining when NIC application is alive */ 82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 83 84 /* runtime link query interval */ 85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 86 /* update localtime to octeon firmware every 60 seconds. 87 * make firmware to use same time reference, so that it will be easy to 88 * correlate firmware logged events/errors with host events, for debugging. 89 */ 90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 91 92 /* time to wait for possible in-flight requests in milliseconds */ 93 #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) 94 95 struct oct_link_status_resp { 96 u64 rh; 97 struct oct_link_info link_info; 98 u64 status; 99 }; 100 101 struct oct_timestamp_resp { 102 u64 rh; 103 u64 timestamp; 104 u64 status; 105 }; 106 107 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) 108 109 union tx_info { 110 u64 u64; 111 struct { 112 #ifdef __BIG_ENDIAN_BITFIELD 113 u16 gso_size; 114 u16 gso_segs; 115 u32 reserved; 116 #else 117 u32 reserved; 118 u16 gso_segs; 119 u16 gso_size; 120 #endif 121 } s; 122 }; 123 124 /* Octeon device properties to be used by the NIC module. 125 * Each octeon device in the system will be represented 126 * by this structure in the NIC module. 127 */ 128 129 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 130 #define OCTNIC_GSO_MAX_SIZE \ 131 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 132 133 struct handshake { 134 struct completion init; 135 struct completion started; 136 struct pci_dev *pci_dev; 137 int init_ok; 138 int started_ok; 139 }; 140 141 #ifdef CONFIG_PCI_IOV 142 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); 143 #endif 144 145 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 146 char *prefix, char *suffix); 147 148 static int octeon_device_init(struct octeon_device *); 149 static int liquidio_stop(struct net_device *netdev); 150 static void liquidio_remove(struct pci_dev *pdev); 151 static int liquidio_probe(struct pci_dev *pdev, 152 const struct pci_device_id *ent); 153 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 154 int linkstate); 155 156 static struct handshake handshake[MAX_OCTEON_DEVICES]; 157 static struct completion first_stage; 158 159 static void octeon_droq_bh(struct tasklet_struct *t) 160 { 161 int q_no; 162 int reschedule = 0; 163 struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t, 164 droq_tasklet); 165 struct octeon_device *oct = oct_priv->dev; 166 167 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { 168 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) 169 continue; 170 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], 171 MAX_PACKET_BUDGET); 172 lio_enable_irq(oct->droq[q_no], NULL); 173 174 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { 175 /* set time and cnt interrupt thresholds for this DROQ 176 * for NAPI 177 */ 178 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; 179 180 octeon_write_csr64( 181 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), 182 0x5700000040ULL); 183 octeon_write_csr64( 184 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); 185 } 186 } 187 188 if (reschedule) 189 tasklet_schedule(&oct_priv->droq_tasklet); 190 } 191 192 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 193 { 194 struct octeon_device_priv *oct_priv = 195 (struct octeon_device_priv *)oct->priv; 196 int retry = 100, pkt_cnt = 0, pending_pkts = 0; 197 int i; 198 199 do { 200 pending_pkts = 0; 201 202 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 203 if (!(oct->io_qmask.oq & BIT_ULL(i))) 204 continue; 205 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 206 } 207 if (pkt_cnt > 0) { 208 pending_pkts += pkt_cnt; 209 tasklet_schedule(&oct_priv->droq_tasklet); 210 } 211 pkt_cnt = 0; 212 schedule_timeout_uninterruptible(1); 213 214 } while (retry-- && pending_pkts); 215 216 return pkt_cnt; 217 } 218 219 /** 220 * force_io_queues_off - Forces all IO queues off on a given device 221 * @oct: Pointer to Octeon device 222 */ 223 static void force_io_queues_off(struct octeon_device *oct) 224 { 225 if ((oct->chip_id == OCTEON_CN66XX) || 226 (oct->chip_id == OCTEON_CN68XX)) { 227 /* Reset the Enable bits for Input Queues. */ 228 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); 229 230 /* Reset the Enable bits for Output Queues. */ 231 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); 232 } 233 } 234 235 /** 236 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc 237 * @oct: Pointer to Octeon device 238 */ 239 static inline void pcierror_quiesce_device(struct octeon_device *oct) 240 { 241 int i; 242 243 /* Disable the input and output queues now. No more packets will 244 * arrive from Octeon, but we should wait for all packet processing 245 * to finish. 246 */ 247 force_io_queues_off(oct); 248 249 /* To allow for in-flight requests */ 250 schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST); 251 252 if (wait_for_pending_requests(oct)) 253 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 254 255 /* Force all requests waiting to be fetched by OCTEON to complete. */ 256 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 257 struct octeon_instr_queue *iq; 258 259 if (!(oct->io_qmask.iq & BIT_ULL(i))) 260 continue; 261 iq = oct->instr_queue[i]; 262 263 if (atomic_read(&iq->instr_pending)) { 264 spin_lock_bh(&iq->lock); 265 iq->fill_cnt = 0; 266 iq->octeon_read_index = iq->host_write_index; 267 iq->stats.instr_processed += 268 atomic_read(&iq->instr_pending); 269 lio_process_iq_request_list(oct, iq, 0); 270 spin_unlock_bh(&iq->lock); 271 } 272 } 273 274 /* Force all pending ordered list requests to time out. */ 275 lio_process_ordered_list(oct, 1); 276 277 /* We do not need to wait for output queue packets to be processed. */ 278 } 279 280 /** 281 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status 282 * @dev: Pointer to PCI device 283 */ 284 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 285 { 286 int pos = 0x100; 287 u32 status, mask; 288 289 pr_info("%s :\n", __func__); 290 291 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 292 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 293 if (dev->error_state == pci_channel_io_normal) 294 status &= ~mask; /* Clear corresponding nonfatal bits */ 295 else 296 status &= mask; /* Clear corresponding fatal bits */ 297 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 298 } 299 300 /** 301 * stop_pci_io - Stop all PCI IO to a given device 302 * @oct: Pointer to Octeon device 303 */ 304 static void stop_pci_io(struct octeon_device *oct) 305 { 306 /* No more instructions will be forwarded. */ 307 atomic_set(&oct->status, OCT_DEV_IN_RESET); 308 309 pci_disable_device(oct->pci_dev); 310 311 /* Disable interrupts */ 312 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 313 314 pcierror_quiesce_device(oct); 315 316 /* Release the interrupt line */ 317 free_irq(oct->pci_dev->irq, oct); 318 319 if (oct->flags & LIO_FLAG_MSI_ENABLED) 320 pci_disable_msi(oct->pci_dev); 321 322 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 323 lio_get_state_string(&oct->status)); 324 325 /* making it a common function for all OCTEON models */ 326 cleanup_aer_uncorrect_error_status(oct->pci_dev); 327 } 328 329 /** 330 * liquidio_pcie_error_detected - called when PCI error is detected 331 * @pdev: Pointer to PCI device 332 * @state: The current pci connection state 333 * 334 * This function is called after a PCI bus error affecting 335 * this device has been detected. 336 */ 337 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 338 pci_channel_state_t state) 339 { 340 struct octeon_device *oct = pci_get_drvdata(pdev); 341 342 /* Non-correctable Non-fatal errors */ 343 if (state == pci_channel_io_normal) { 344 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 345 cleanup_aer_uncorrect_error_status(oct->pci_dev); 346 return PCI_ERS_RESULT_CAN_RECOVER; 347 } 348 349 /* Non-correctable Fatal errors */ 350 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 351 stop_pci_io(oct); 352 353 /* Always return a DISCONNECT. There is no support for recovery but only 354 * for a clean shutdown. 355 */ 356 return PCI_ERS_RESULT_DISCONNECT; 357 } 358 359 /** 360 * liquidio_pcie_mmio_enabled - mmio handler 361 * @pdev: Pointer to PCI device 362 */ 363 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev) 364 { 365 /* We should never hit this since we never ask for a reset for a Fatal 366 * Error. We always return DISCONNECT in io_error above. 367 * But play safe and return RECOVERED for now. 368 */ 369 return PCI_ERS_RESULT_RECOVERED; 370 } 371 372 /** 373 * liquidio_pcie_slot_reset - called after the pci bus has been reset. 374 * @pdev: Pointer to PCI device 375 * 376 * Restart the card from scratch, as if from a cold-boot. Implementation 377 * resembles the first-half of the octeon_resume routine. 378 */ 379 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev) 380 { 381 /* We should never hit this since we never ask for a reset for a Fatal 382 * Error. We always return DISCONNECT in io_error above. 383 * But play safe and return RECOVERED for now. 384 */ 385 return PCI_ERS_RESULT_RECOVERED; 386 } 387 388 /** 389 * liquidio_pcie_resume - called when traffic can start flowing again. 390 * @pdev: Pointer to PCI device 391 * 392 * This callback is called when the error recovery driver tells us that 393 * its OK to resume normal operation. Implementation resembles the 394 * second-half of the octeon_resume routine. 395 */ 396 static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev) 397 { 398 /* Nothing to be done here. */ 399 } 400 401 #define liquidio_suspend NULL 402 #define liquidio_resume NULL 403 404 /* For PCI-E Advanced Error Recovery (AER) Interface */ 405 static const struct pci_error_handlers liquidio_err_handler = { 406 .error_detected = liquidio_pcie_error_detected, 407 .mmio_enabled = liquidio_pcie_mmio_enabled, 408 .slot_reset = liquidio_pcie_slot_reset, 409 .resume = liquidio_pcie_resume, 410 }; 411 412 static const struct pci_device_id liquidio_pci_tbl[] = { 413 { /* 68xx */ 414 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 415 }, 416 { /* 66xx */ 417 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 418 }, 419 { /* 23xx pf */ 420 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 421 }, 422 { 423 0, 0, 0, 0, 0, 0, 0 424 } 425 }; 426 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); 427 428 static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume); 429 430 static struct pci_driver liquidio_pci_driver = { 431 .name = "LiquidIO", 432 .id_table = liquidio_pci_tbl, 433 .probe = liquidio_probe, 434 .remove = liquidio_remove, 435 .err_handler = &liquidio_err_handler, /* For AER */ 436 .driver.pm = &liquidio_pm_ops, 437 #ifdef CONFIG_PCI_IOV 438 .sriov_configure = liquidio_enable_sriov, 439 #endif 440 }; 441 442 /** 443 * liquidio_init_pci - register PCI driver 444 */ 445 static int liquidio_init_pci(void) 446 { 447 return pci_register_driver(&liquidio_pci_driver); 448 } 449 450 /** 451 * liquidio_deinit_pci - unregister PCI driver 452 */ 453 static void liquidio_deinit_pci(void) 454 { 455 pci_unregister_driver(&liquidio_pci_driver); 456 } 457 458 /** 459 * check_txq_status - Check Tx queue status, and take appropriate action 460 * @lio: per-network private data 461 * Return: 0 if full, number of queues woken up otherwise 462 */ 463 static inline int check_txq_status(struct lio *lio) 464 { 465 int numqs = lio->netdev->real_num_tx_queues; 466 int ret_val = 0; 467 int q, iq; 468 469 /* check each sub-queue state */ 470 for (q = 0; q < numqs; q++) { 471 iq = lio->linfo.txpciq[q % 472 lio->oct_dev->num_iqs].s.q_no; 473 if (octnet_iq_is_full(lio->oct_dev, iq)) 474 continue; 475 if (__netif_subqueue_stopped(lio->netdev, q)) { 476 netif_wake_subqueue(lio->netdev, q); 477 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, 478 tx_restart, 1); 479 ret_val++; 480 } 481 } 482 483 return ret_val; 484 } 485 486 /** 487 * print_link_info - Print link information 488 * @netdev: network device 489 */ 490 static void print_link_info(struct net_device *netdev) 491 { 492 struct lio *lio = GET_LIO(netdev); 493 494 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 495 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 496 struct oct_link_info *linfo = &lio->linfo; 497 498 if (linfo->link.s.link_up) { 499 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 500 linfo->link.s.speed, 501 (linfo->link.s.duplex) ? "Full" : "Half"); 502 } else { 503 netif_info(lio, link, lio->netdev, "Link Down\n"); 504 } 505 } 506 } 507 508 /** 509 * octnet_link_status_change - Routine to notify MTU change 510 * @work: work_struct data structure 511 */ 512 static void octnet_link_status_change(struct work_struct *work) 513 { 514 struct cavium_wk *wk = (struct cavium_wk *)work; 515 struct lio *lio = (struct lio *)wk->ctxptr; 516 517 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 518 * this API is invoked only when new max-MTU of the interface is 519 * less than current MTU. 520 */ 521 rtnl_lock(); 522 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 523 rtnl_unlock(); 524 } 525 526 /** 527 * setup_link_status_change_wq - Sets up the mtu status change work 528 * @netdev: network device 529 */ 530 static inline int setup_link_status_change_wq(struct net_device *netdev) 531 { 532 struct lio *lio = GET_LIO(netdev); 533 struct octeon_device *oct = lio->oct_dev; 534 535 lio->link_status_wq.wq = alloc_workqueue("link-status", 536 WQ_MEM_RECLAIM, 0); 537 if (!lio->link_status_wq.wq) { 538 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 539 return -1; 540 } 541 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 542 octnet_link_status_change); 543 lio->link_status_wq.wk.ctxptr = lio; 544 545 return 0; 546 } 547 548 static inline void cleanup_link_status_change_wq(struct net_device *netdev) 549 { 550 struct lio *lio = GET_LIO(netdev); 551 552 if (lio->link_status_wq.wq) { 553 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 554 destroy_workqueue(lio->link_status_wq.wq); 555 } 556 } 557 558 /** 559 * update_link_status - Update link status 560 * @netdev: network device 561 * @ls: link status structure 562 * 563 * Called on receipt of a link status response from the core application to 564 * update each interface's link status. 565 */ 566 static inline void update_link_status(struct net_device *netdev, 567 union oct_link_status *ls) 568 { 569 struct lio *lio = GET_LIO(netdev); 570 int changed = (lio->linfo.link.u64 != ls->u64); 571 int current_max_mtu = lio->linfo.link.s.mtu; 572 struct octeon_device *oct = lio->oct_dev; 573 574 dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n", 575 __func__, lio->linfo.link.u64, ls->u64); 576 lio->linfo.link.u64 = ls->u64; 577 578 if ((lio->intf_open) && (changed)) { 579 print_link_info(netdev); 580 lio->link_changes++; 581 582 if (lio->linfo.link.s.link_up) { 583 dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__); 584 netif_carrier_on(netdev); 585 wake_txqs(netdev); 586 } else { 587 dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__); 588 netif_carrier_off(netdev); 589 stop_txqs(netdev); 590 } 591 if (lio->linfo.link.s.mtu != current_max_mtu) { 592 netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n", 593 current_max_mtu, lio->linfo.link.s.mtu); 594 netdev->max_mtu = lio->linfo.link.s.mtu; 595 } 596 if (lio->linfo.link.s.mtu < netdev->mtu) { 597 dev_warn(&oct->pci_dev->dev, 598 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 599 netdev->mtu, lio->linfo.link.s.mtu); 600 queue_delayed_work(lio->link_status_wq.wq, 601 &lio->link_status_wq.wk.work, 0); 602 } 603 } 604 } 605 606 /** 607 * lio_sync_octeon_time - send latest localtime to octeon firmware so that 608 * firmware will correct it's time, in case there is a time skew 609 * 610 * @work: work scheduled to send time update to octeon firmware 611 **/ 612 static void lio_sync_octeon_time(struct work_struct *work) 613 { 614 struct cavium_wk *wk = (struct cavium_wk *)work; 615 struct lio *lio = (struct lio *)wk->ctxptr; 616 struct octeon_device *oct = lio->oct_dev; 617 struct octeon_soft_command *sc; 618 struct timespec64 ts; 619 struct lio_time *lt; 620 int ret; 621 622 sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0); 623 if (!sc) { 624 dev_err(&oct->pci_dev->dev, 625 "Failed to sync time to octeon: soft command allocation failed\n"); 626 return; 627 } 628 629 lt = (struct lio_time *)sc->virtdptr; 630 631 /* Get time of the day */ 632 ktime_get_real_ts64(&ts); 633 lt->sec = ts.tv_sec; 634 lt->nsec = ts.tv_nsec; 635 octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); 636 637 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 638 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 639 OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); 640 641 init_completion(&sc->complete); 642 sc->sc_status = OCTEON_REQUEST_PENDING; 643 644 ret = octeon_send_soft_command(oct, sc); 645 if (ret == IQ_SEND_FAILED) { 646 dev_err(&oct->pci_dev->dev, 647 "Failed to sync time to octeon: failed to send soft command\n"); 648 octeon_free_soft_command(oct, sc); 649 } else { 650 WRITE_ONCE(sc->caller_is_done, true); 651 } 652 653 queue_delayed_work(lio->sync_octeon_time_wq.wq, 654 &lio->sync_octeon_time_wq.wk.work, 655 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 656 } 657 658 /** 659 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware 660 * 661 * @netdev: network device which should send time update to firmware 662 **/ 663 static inline int setup_sync_octeon_time_wq(struct net_device *netdev) 664 { 665 struct lio *lio = GET_LIO(netdev); 666 struct octeon_device *oct = lio->oct_dev; 667 668 lio->sync_octeon_time_wq.wq = 669 alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0); 670 if (!lio->sync_octeon_time_wq.wq) { 671 dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n"); 672 return -1; 673 } 674 INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, 675 lio_sync_octeon_time); 676 lio->sync_octeon_time_wq.wk.ctxptr = lio; 677 queue_delayed_work(lio->sync_octeon_time_wq.wq, 678 &lio->sync_octeon_time_wq.wk.work, 679 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); 680 681 return 0; 682 } 683 684 /** 685 * cleanup_sync_octeon_time_wq - destroy wq 686 * 687 * @netdev: network device which should send time update to firmware 688 * 689 * Stop scheduling and destroy the work created to periodically update local 690 * time to octeon firmware. 691 **/ 692 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev) 693 { 694 struct lio *lio = GET_LIO(netdev); 695 struct cavium_wq *time_wq = &lio->sync_octeon_time_wq; 696 697 if (time_wq->wq) { 698 cancel_delayed_work_sync(&time_wq->wk.work); 699 destroy_workqueue(time_wq->wq); 700 } 701 } 702 703 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) 704 { 705 struct octeon_device *other_oct; 706 707 other_oct = lio_get_device(oct->octeon_id + 1); 708 709 if (other_oct && other_oct->pci_dev) { 710 int oct_busnum, other_oct_busnum; 711 712 oct_busnum = oct->pci_dev->bus->number; 713 other_oct_busnum = other_oct->pci_dev->bus->number; 714 715 if (oct_busnum == other_oct_busnum) { 716 int oct_slot, other_oct_slot; 717 718 oct_slot = PCI_SLOT(oct->pci_dev->devfn); 719 other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn); 720 721 if (oct_slot == other_oct_slot) 722 return other_oct; 723 } 724 } 725 726 return NULL; 727 } 728 729 static void disable_all_vf_links(struct octeon_device *oct) 730 { 731 struct net_device *netdev; 732 int max_vfs, vf, i; 733 734 if (!oct) 735 return; 736 737 max_vfs = oct->sriov_info.max_vfs; 738 739 for (i = 0; i < oct->ifcount; i++) { 740 netdev = oct->props[i].netdev; 741 if (!netdev) 742 continue; 743 744 for (vf = 0; vf < max_vfs; vf++) 745 liquidio_set_vf_link_state(netdev, vf, 746 IFLA_VF_LINK_STATE_DISABLE); 747 } 748 } 749 750 static int liquidio_watchdog(void *param) 751 { 752 bool err_msg_was_printed[LIO_MAX_CORES]; 753 u16 mask_of_crashed_or_stuck_cores = 0; 754 bool all_vf_links_are_disabled = false; 755 struct octeon_device *oct = param; 756 struct octeon_device *other_oct; 757 #ifdef CONFIG_MODULE_UNLOAD 758 long refcount, vfs_referencing_pf; 759 u64 vfs_mask1, vfs_mask2; 760 #endif 761 int core; 762 763 memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed)); 764 765 while (!kthread_should_stop()) { 766 /* sleep for a couple of seconds so that we don't hog the CPU */ 767 set_current_state(TASK_INTERRUPTIBLE); 768 schedule_timeout(msecs_to_jiffies(2000)); 769 770 mask_of_crashed_or_stuck_cores = 771 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); 772 773 if (!mask_of_crashed_or_stuck_cores) 774 continue; 775 776 WRITE_ONCE(oct->cores_crashed, true); 777 other_oct = get_other_octeon_device(oct); 778 if (other_oct) 779 WRITE_ONCE(other_oct->cores_crashed, true); 780 781 for (core = 0; core < LIO_MAX_CORES; core++) { 782 bool core_crashed_or_got_stuck; 783 784 core_crashed_or_got_stuck = 785 (mask_of_crashed_or_stuck_cores 786 >> core) & 1; 787 788 if (core_crashed_or_got_stuck && 789 !err_msg_was_printed[core]) { 790 dev_err(&oct->pci_dev->dev, 791 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n", 792 core); 793 err_msg_was_printed[core] = true; 794 } 795 } 796 797 if (all_vf_links_are_disabled) 798 continue; 799 800 disable_all_vf_links(oct); 801 disable_all_vf_links(other_oct); 802 all_vf_links_are_disabled = true; 803 804 #ifdef CONFIG_MODULE_UNLOAD 805 vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask); 806 vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask); 807 808 vfs_referencing_pf = hweight64(vfs_mask1); 809 vfs_referencing_pf += hweight64(vfs_mask2); 810 811 refcount = module_refcount(THIS_MODULE); 812 if (refcount >= vfs_referencing_pf) { 813 while (vfs_referencing_pf) { 814 module_put(THIS_MODULE); 815 vfs_referencing_pf--; 816 } 817 } 818 #endif 819 } 820 821 return 0; 822 } 823 824 /** 825 * liquidio_probe - PCI probe handler 826 * @pdev: PCI device structure 827 * @ent: unused 828 */ 829 static int 830 liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent) 831 { 832 struct octeon_device *oct_dev = NULL; 833 struct handshake *hs; 834 835 oct_dev = octeon_allocate_device(pdev->device, 836 sizeof(struct octeon_device_priv)); 837 if (!oct_dev) { 838 dev_err(&pdev->dev, "Unable to allocate device\n"); 839 return -ENOMEM; 840 } 841 842 if (pdev->device == OCTEON_CN23XX_PF_VID) 843 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 844 845 /* Enable PTP for 6XXX Device */ 846 if (((pdev->device == OCTEON_CN66XX) || 847 (pdev->device == OCTEON_CN68XX))) 848 oct_dev->ptp_enable = true; 849 else 850 oct_dev->ptp_enable = false; 851 852 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 853 (u32)pdev->vendor, (u32)pdev->device); 854 855 /* Assign octeon_device for this device to the private data area. */ 856 pci_set_drvdata(pdev, oct_dev); 857 858 /* set linux specific device pointer */ 859 oct_dev->pci_dev = (void *)pdev; 860 861 oct_dev->subsystem_id = pdev->subsystem_vendor | 862 (pdev->subsystem_device << 16); 863 864 hs = &handshake[oct_dev->octeon_id]; 865 init_completion(&hs->init); 866 init_completion(&hs->started); 867 hs->pci_dev = pdev; 868 869 if (oct_dev->octeon_id == 0) 870 /* first LiquidIO NIC is detected */ 871 complete(&first_stage); 872 873 if (octeon_device_init(oct_dev)) { 874 complete(&hs->init); 875 liquidio_remove(pdev); 876 return -ENOMEM; 877 } 878 879 if (OCTEON_CN23XX_PF(oct_dev)) { 880 u8 bus, device, function; 881 882 if (atomic_read(oct_dev->adapter_refcount) == 1) { 883 /* Each NIC gets one watchdog kernel thread. The first 884 * PF (of each NIC) that gets pci_driver->probe()'d 885 * creates that thread. 886 */ 887 bus = pdev->bus->number; 888 device = PCI_SLOT(pdev->devfn); 889 function = PCI_FUNC(pdev->devfn); 890 oct_dev->watchdog_task = kthread_run(liquidio_watchdog, 891 oct_dev, 892 "liowd/%02hhx:%02hhx.%hhx", 893 bus, device, function); 894 if (IS_ERR(oct_dev->watchdog_task)) { 895 oct_dev->watchdog_task = NULL; 896 dev_err(&oct_dev->pci_dev->dev, 897 "failed to create kernel_thread\n"); 898 liquidio_remove(pdev); 899 return -1; 900 } 901 } 902 } 903 904 oct_dev->rx_pause = 1; 905 oct_dev->tx_pause = 1; 906 907 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 908 909 return 0; 910 } 911 912 static bool fw_type_is_auto(void) 913 { 914 return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO, 915 sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0; 916 } 917 918 /** 919 * octeon_pci_flr - PCI FLR for each Octeon device. 920 * @oct: octeon device 921 */ 922 static void octeon_pci_flr(struct octeon_device *oct) 923 { 924 int rc; 925 926 pci_save_state(oct->pci_dev); 927 928 pci_cfg_access_lock(oct->pci_dev); 929 930 /* Quiesce the device completely */ 931 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 932 PCI_COMMAND_INTX_DISABLE); 933 934 rc = __pci_reset_function_locked(oct->pci_dev); 935 936 if (rc != 0) 937 dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n", 938 rc, oct->pf_num); 939 940 pci_cfg_access_unlock(oct->pci_dev); 941 942 pci_restore_state(oct->pci_dev); 943 } 944 945 /** 946 * octeon_destroy_resources - Destroy resources associated with octeon device 947 * @oct: octeon device 948 */ 949 static void octeon_destroy_resources(struct octeon_device *oct) 950 { 951 int i, refcount; 952 struct msix_entry *msix_entries; 953 struct octeon_device_priv *oct_priv = 954 (struct octeon_device_priv *)oct->priv; 955 956 struct handshake *hs; 957 958 switch (atomic_read(&oct->status)) { 959 case OCT_DEV_RUNNING: 960 case OCT_DEV_CORE_OK: 961 962 /* No more instructions will be forwarded. */ 963 atomic_set(&oct->status, OCT_DEV_IN_RESET); 964 965 oct->app_mode = CVM_DRV_INVALID_APP; 966 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 967 lio_get_state_string(&oct->status)); 968 969 schedule_timeout_uninterruptible(HZ / 10); 970 971 fallthrough; 972 case OCT_DEV_HOST_OK: 973 974 case OCT_DEV_CONSOLE_INIT_DONE: 975 /* Remove any consoles */ 976 octeon_remove_consoles(oct); 977 978 fallthrough; 979 case OCT_DEV_IO_QUEUES_DONE: 980 if (lio_wait_for_instr_fetch(oct)) 981 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 982 983 if (wait_for_pending_requests(oct)) 984 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 985 986 /* Disable the input and output queues now. No more packets will 987 * arrive from Octeon, but we should wait for all packet 988 * processing to finish. 989 */ 990 oct->fn_list.disable_io_queues(oct); 991 992 if (lio_wait_for_oq_pkts(oct)) 993 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 994 995 /* Force all requests waiting to be fetched by OCTEON to 996 * complete. 997 */ 998 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 999 struct octeon_instr_queue *iq; 1000 1001 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1002 continue; 1003 iq = oct->instr_queue[i]; 1004 1005 if (atomic_read(&iq->instr_pending)) { 1006 spin_lock_bh(&iq->lock); 1007 iq->fill_cnt = 0; 1008 iq->octeon_read_index = iq->host_write_index; 1009 iq->stats.instr_processed += 1010 atomic_read(&iq->instr_pending); 1011 lio_process_iq_request_list(oct, iq, 0); 1012 spin_unlock_bh(&iq->lock); 1013 } 1014 } 1015 1016 lio_process_ordered_list(oct, 1); 1017 octeon_free_sc_done_list(oct); 1018 octeon_free_sc_zombie_list(oct); 1019 1020 fallthrough; 1021 case OCT_DEV_INTR_SET_DONE: 1022 /* Disable interrupts */ 1023 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 1024 1025 if (oct->msix_on) { 1026 msix_entries = (struct msix_entry *)oct->msix_entries; 1027 for (i = 0; i < oct->num_msix_irqs - 1; i++) { 1028 if (oct->ioq_vector[i].vector) { 1029 /* clear the affinity_cpumask */ 1030 irq_set_affinity_hint( 1031 msix_entries[i].vector, 1032 NULL); 1033 free_irq(msix_entries[i].vector, 1034 &oct->ioq_vector[i]); 1035 oct->ioq_vector[i].vector = 0; 1036 } 1037 } 1038 /* non-iov vector's argument is oct struct */ 1039 free_irq(msix_entries[i].vector, oct); 1040 1041 pci_disable_msix(oct->pci_dev); 1042 kfree(oct->msix_entries); 1043 oct->msix_entries = NULL; 1044 } else { 1045 /* Release the interrupt line */ 1046 free_irq(oct->pci_dev->irq, oct); 1047 1048 if (oct->flags & LIO_FLAG_MSI_ENABLED) 1049 pci_disable_msi(oct->pci_dev); 1050 } 1051 1052 kfree(oct->irq_name_storage); 1053 oct->irq_name_storage = NULL; 1054 1055 fallthrough; 1056 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 1057 if (OCTEON_CN23XX_PF(oct)) 1058 octeon_free_ioq_vector(oct); 1059 1060 fallthrough; 1061 case OCT_DEV_MBOX_SETUP_DONE: 1062 if (OCTEON_CN23XX_PF(oct)) 1063 oct->fn_list.free_mbox(oct); 1064 1065 fallthrough; 1066 case OCT_DEV_IN_RESET: 1067 case OCT_DEV_DROQ_INIT_DONE: 1068 /* Wait for any pending operations */ 1069 mdelay(100); 1070 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1071 if (!(oct->io_qmask.oq & BIT_ULL(i))) 1072 continue; 1073 octeon_delete_droq(oct, i); 1074 } 1075 1076 /* Force any pending handshakes to complete */ 1077 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 1078 hs = &handshake[i]; 1079 1080 if (hs->pci_dev) { 1081 handshake[oct->octeon_id].init_ok = 0; 1082 complete(&handshake[oct->octeon_id].init); 1083 handshake[oct->octeon_id].started_ok = 0; 1084 complete(&handshake[oct->octeon_id].started); 1085 } 1086 } 1087 1088 fallthrough; 1089 case OCT_DEV_RESP_LIST_INIT_DONE: 1090 octeon_delete_response_list(oct); 1091 1092 fallthrough; 1093 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 1094 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 1095 if (!(oct->io_qmask.iq & BIT_ULL(i))) 1096 continue; 1097 octeon_delete_instr_queue(oct, i); 1098 } 1099 #ifdef CONFIG_PCI_IOV 1100 if (oct->sriov_info.sriov_enabled) 1101 pci_disable_sriov(oct->pci_dev); 1102 #endif 1103 fallthrough; 1104 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 1105 octeon_free_sc_buffer_pool(oct); 1106 1107 fallthrough; 1108 case OCT_DEV_DISPATCH_INIT_DONE: 1109 octeon_delete_dispatch_list(oct); 1110 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1111 1112 fallthrough; 1113 case OCT_DEV_PCI_MAP_DONE: 1114 refcount = octeon_deregister_device(oct); 1115 1116 /* Soft reset the octeon device before exiting. 1117 * However, if fw was loaded from card (i.e. autoboot), 1118 * perform an FLR instead. 1119 * Implementation note: only soft-reset the device 1120 * if it is a CN6XXX OR the LAST CN23XX device. 1121 */ 1122 if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED) 1123 octeon_pci_flr(oct); 1124 else if (OCTEON_CN6XXX(oct) || !refcount) 1125 oct->fn_list.soft_reset(oct); 1126 1127 octeon_unmap_pci_barx(oct, 0); 1128 octeon_unmap_pci_barx(oct, 1); 1129 1130 fallthrough; 1131 case OCT_DEV_PCI_ENABLE_DONE: 1132 pci_clear_master(oct->pci_dev); 1133 /* Disable the device, releasing the PCI INT */ 1134 pci_disable_device(oct->pci_dev); 1135 1136 fallthrough; 1137 case OCT_DEV_BEGIN_STATE: 1138 /* Nothing to be done here either */ 1139 break; 1140 } /* end switch (oct->status) */ 1141 1142 tasklet_kill(&oct_priv->droq_tasklet); 1143 } 1144 1145 /** 1146 * send_rx_ctrl_cmd - Send Rx control command 1147 * @lio: per-network private data 1148 * @start_stop: whether to start or stop 1149 */ 1150 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1151 { 1152 struct octeon_soft_command *sc; 1153 union octnet_cmd *ncmd; 1154 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1155 int retval; 1156 1157 if (oct->props[lio->ifidx].rx_on == start_stop) 1158 return 0; 1159 1160 sc = (struct octeon_soft_command *) 1161 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1162 16, 0); 1163 if (!sc) { 1164 netif_info(lio, rx_err, lio->netdev, 1165 "Failed to allocate octeon_soft_command struct\n"); 1166 return -ENOMEM; 1167 } 1168 1169 ncmd = (union octnet_cmd *)sc->virtdptr; 1170 1171 ncmd->u64 = 0; 1172 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1173 ncmd->s.param1 = start_stop; 1174 1175 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1176 1177 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1178 1179 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1180 OPCODE_NIC_CMD, 0, 0, 0); 1181 1182 init_completion(&sc->complete); 1183 sc->sc_status = OCTEON_REQUEST_PENDING; 1184 1185 retval = octeon_send_soft_command(oct, sc); 1186 if (retval == IQ_SEND_FAILED) { 1187 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1188 octeon_free_soft_command(oct, sc); 1189 } else { 1190 /* Sleep on a wait queue till the cond flag indicates that the 1191 * response arrived or timed-out. 1192 */ 1193 retval = wait_for_sc_completion_timeout(oct, sc, 0); 1194 if (retval) 1195 return retval; 1196 1197 oct->props[lio->ifidx].rx_on = start_stop; 1198 WRITE_ONCE(sc->caller_is_done, true); 1199 } 1200 1201 return retval; 1202 } 1203 1204 /** 1205 * liquidio_destroy_nic_device - Destroy NIC device interface 1206 * @oct: octeon device 1207 * @ifidx: which interface to destroy 1208 * 1209 * Cleanup associated with each interface for an Octeon device when NIC 1210 * module is being unloaded or if initialization fails during load. 1211 */ 1212 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1213 { 1214 struct net_device *netdev = oct->props[ifidx].netdev; 1215 struct octeon_device_priv *oct_priv = 1216 (struct octeon_device_priv *)oct->priv; 1217 struct napi_struct *napi, *n; 1218 struct lio *lio; 1219 1220 if (!netdev) { 1221 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1222 __func__, ifidx); 1223 return; 1224 } 1225 1226 lio = GET_LIO(netdev); 1227 1228 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1229 1230 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1231 liquidio_stop(netdev); 1232 1233 if (oct->props[lio->ifidx].napi_enabled == 1) { 1234 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1235 napi_disable(napi); 1236 1237 oct->props[lio->ifidx].napi_enabled = 0; 1238 1239 if (OCTEON_CN23XX_PF(oct)) 1240 oct->droq[0]->ops.poll_mode = 0; 1241 } 1242 1243 /* Delete NAPI */ 1244 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1245 netif_napi_del(napi); 1246 1247 tasklet_enable(&oct_priv->droq_tasklet); 1248 1249 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1250 unregister_netdev(netdev); 1251 1252 cleanup_sync_octeon_time_wq(netdev); 1253 cleanup_link_status_change_wq(netdev); 1254 1255 cleanup_rx_oom_poll_fn(netdev); 1256 1257 lio_delete_glists(lio); 1258 1259 free_netdev(netdev); 1260 1261 oct->props[ifidx].gmxport = -1; 1262 1263 oct->props[ifidx].netdev = NULL; 1264 } 1265 1266 /** 1267 * liquidio_stop_nic_module - Stop complete NIC functionality 1268 * @oct: octeon device 1269 */ 1270 static int liquidio_stop_nic_module(struct octeon_device *oct) 1271 { 1272 int i, j; 1273 struct lio *lio; 1274 1275 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1276 device_lock(&oct->pci_dev->dev); 1277 if (oct->devlink) { 1278 devlink_unregister(oct->devlink); 1279 devlink_free(oct->devlink); 1280 oct->devlink = NULL; 1281 } 1282 device_unlock(&oct->pci_dev->dev); 1283 1284 if (!oct->ifcount) { 1285 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1286 return 1; 1287 } 1288 1289 spin_lock_bh(&oct->cmd_resp_wqlock); 1290 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1291 spin_unlock_bh(&oct->cmd_resp_wqlock); 1292 1293 lio_vf_rep_destroy(oct); 1294 1295 for (i = 0; i < oct->ifcount; i++) { 1296 lio = GET_LIO(oct->props[i].netdev); 1297 for (j = 0; j < oct->num_oqs; j++) 1298 octeon_unregister_droq_ops(oct, 1299 lio->linfo.rxpciq[j].s.q_no); 1300 } 1301 1302 for (i = 0; i < oct->ifcount; i++) 1303 liquidio_destroy_nic_device(oct, i); 1304 1305 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1306 return 0; 1307 } 1308 1309 /** 1310 * liquidio_remove - Cleans up resources at unload time 1311 * @pdev: PCI device structure 1312 */ 1313 static void liquidio_remove(struct pci_dev *pdev) 1314 { 1315 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1316 1317 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1318 1319 if (oct_dev->watchdog_task) 1320 kthread_stop(oct_dev->watchdog_task); 1321 1322 if (!oct_dev->octeon_id && 1323 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) 1324 lio_vf_rep_modexit(); 1325 1326 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1327 liquidio_stop_nic_module(oct_dev); 1328 1329 /* Reset the octeon device and cleanup all memory allocated for 1330 * the octeon device by driver. 1331 */ 1332 octeon_destroy_resources(oct_dev); 1333 1334 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1335 1336 /* This octeon device has been removed. Update the global 1337 * data structure to reflect this. Free the device structure. 1338 */ 1339 octeon_free_device_mem(oct_dev); 1340 } 1341 1342 /** 1343 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space 1344 * @oct: octeon device 1345 */ 1346 static int octeon_chip_specific_setup(struct octeon_device *oct) 1347 { 1348 u32 dev_id, rev_id; 1349 int ret = 1; 1350 1351 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1352 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1353 oct->rev_id = rev_id & 0xff; 1354 1355 switch (dev_id) { 1356 case OCTEON_CN68XX_PCIID: 1357 oct->chip_id = OCTEON_CN68XX; 1358 ret = lio_setup_cn68xx_octeon_device(oct); 1359 break; 1360 1361 case OCTEON_CN66XX_PCIID: 1362 oct->chip_id = OCTEON_CN66XX; 1363 ret = lio_setup_cn66xx_octeon_device(oct); 1364 break; 1365 1366 case OCTEON_CN23XX_PCIID_PF: 1367 oct->chip_id = OCTEON_CN23XX_PF_VID; 1368 ret = setup_cn23xx_octeon_pf_device(oct); 1369 if (ret) 1370 break; 1371 #ifdef CONFIG_PCI_IOV 1372 if (!ret) 1373 pci_sriov_set_totalvfs(oct->pci_dev, 1374 oct->sriov_info.max_vfs); 1375 #endif 1376 break; 1377 1378 default: 1379 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1380 dev_id); 1381 } 1382 1383 return ret; 1384 } 1385 1386 /** 1387 * octeon_pci_os_setup - PCI initialization for each Octeon device. 1388 * @oct: octeon device 1389 */ 1390 static int octeon_pci_os_setup(struct octeon_device *oct) 1391 { 1392 /* setup PCI stuff first */ 1393 if (pci_enable_device(oct->pci_dev)) { 1394 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1395 return 1; 1396 } 1397 1398 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1399 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1400 pci_disable_device(oct->pci_dev); 1401 return 1; 1402 } 1403 1404 /* Enable PCI DMA Master. */ 1405 pci_set_master(oct->pci_dev); 1406 1407 return 0; 1408 } 1409 1410 /** 1411 * free_netbuf - Unmap and free network buffer 1412 * @buf: buffer 1413 */ 1414 static void free_netbuf(void *buf) 1415 { 1416 struct sk_buff *skb; 1417 struct octnet_buf_free_info *finfo; 1418 struct lio *lio; 1419 1420 finfo = (struct octnet_buf_free_info *)buf; 1421 skb = finfo->skb; 1422 lio = finfo->lio; 1423 1424 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1425 DMA_TO_DEVICE); 1426 1427 tx_buffer_free(skb); 1428 } 1429 1430 /** 1431 * free_netsgbuf - Unmap and free gather buffer 1432 * @buf: buffer 1433 */ 1434 static void free_netsgbuf(void *buf) 1435 { 1436 struct octnet_buf_free_info *finfo; 1437 struct sk_buff *skb; 1438 struct lio *lio; 1439 struct octnic_gather *g; 1440 int i, frags, iq; 1441 1442 finfo = (struct octnet_buf_free_info *)buf; 1443 skb = finfo->skb; 1444 lio = finfo->lio; 1445 g = finfo->g; 1446 frags = skb_shinfo(skb)->nr_frags; 1447 1448 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1449 g->sg[0].ptr[0], (skb->len - skb->data_len), 1450 DMA_TO_DEVICE); 1451 1452 i = 1; 1453 while (frags--) { 1454 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 1455 1456 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 1457 g->sg[(i >> 2)].ptr[(i & 3)], 1458 skb_frag_size(frag), DMA_TO_DEVICE); 1459 i++; 1460 } 1461 1462 iq = skb_iq(lio->oct_dev, skb); 1463 spin_lock(&lio->glist_lock[iq]); 1464 list_add_tail(&g->list, &lio->glist[iq]); 1465 spin_unlock(&lio->glist_lock[iq]); 1466 1467 tx_buffer_free(skb); 1468 } 1469 1470 /** 1471 * free_netsgbuf_with_resp - Unmap and free gather buffer with response 1472 * @buf: buffer 1473 */ 1474 static void free_netsgbuf_with_resp(void *buf) 1475 { 1476 struct octeon_soft_command *sc; 1477 struct octnet_buf_free_info *finfo; 1478 struct sk_buff *skb; 1479 struct lio *lio; 1480 struct octnic_gather *g; 1481 int i, frags, iq; 1482 1483 sc = (struct octeon_soft_command *)buf; 1484 skb = (struct sk_buff *)sc->callback_arg; 1485 finfo = (struct octnet_buf_free_info *)&skb->cb; 1486 1487 lio = finfo->lio; 1488 g = finfo->g; 1489 frags = skb_shinfo(skb)->nr_frags; 1490 1491 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1492 g->sg[0].ptr[0], (skb->len - skb->data_len), 1493 DMA_TO_DEVICE); 1494 1495 i = 1; 1496 while (frags--) { 1497 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 1498 1499 dma_unmap_page(&lio->oct_dev->pci_dev->dev, 1500 g->sg[(i >> 2)].ptr[(i & 3)], 1501 skb_frag_size(frag), DMA_TO_DEVICE); 1502 i++; 1503 } 1504 1505 iq = skb_iq(lio->oct_dev, skb); 1506 1507 spin_lock(&lio->glist_lock[iq]); 1508 list_add_tail(&g->list, &lio->glist[iq]); 1509 spin_unlock(&lio->glist_lock[iq]); 1510 1511 /* Don't free the skb yet */ 1512 } 1513 1514 /** 1515 * liquidio_ptp_adjfine - Adjust ptp frequency 1516 * @ptp: PTP clock info 1517 * @scaled_ppm: how much to adjust by, in scaled parts-per-million 1518 * 1519 * Scaled parts per million is ppm with a 16-bit binary fractional field. 1520 */ 1521 static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 1522 { 1523 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1524 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1525 s32 ppb = scaled_ppm_to_ppb(scaled_ppm); 1526 u64 comp, delta; 1527 unsigned long flags; 1528 bool neg_adj = false; 1529 1530 if (ppb < 0) { 1531 neg_adj = true; 1532 ppb = -ppb; 1533 } 1534 1535 /* The hardware adds the clock compensation value to the 1536 * PTP clock on every coprocessor clock cycle, so we 1537 * compute the delta in terms of coprocessor clocks. 1538 */ 1539 delta = (u64)ppb << 32; 1540 do_div(delta, oct->coproc_clock_rate); 1541 1542 spin_lock_irqsave(&lio->ptp_lock, flags); 1543 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); 1544 if (neg_adj) 1545 comp -= delta; 1546 else 1547 comp += delta; 1548 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1549 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1550 1551 return 0; 1552 } 1553 1554 /** 1555 * liquidio_ptp_adjtime - Adjust ptp time 1556 * @ptp: PTP clock info 1557 * @delta: how much to adjust by, in nanosecs 1558 */ 1559 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 1560 { 1561 unsigned long flags; 1562 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1563 1564 spin_lock_irqsave(&lio->ptp_lock, flags); 1565 lio->ptp_adjust += delta; 1566 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1567 1568 return 0; 1569 } 1570 1571 /** 1572 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment 1573 * @ptp: PTP clock info 1574 * @ts: timespec 1575 */ 1576 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, 1577 struct timespec64 *ts) 1578 { 1579 u64 ns; 1580 unsigned long flags; 1581 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1582 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1583 1584 spin_lock_irqsave(&lio->ptp_lock, flags); 1585 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); 1586 ns += lio->ptp_adjust; 1587 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1588 1589 *ts = ns_to_timespec64(ns); 1590 1591 return 0; 1592 } 1593 1594 /** 1595 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment 1596 * @ptp: PTP clock info 1597 * @ts: timespec 1598 */ 1599 static int liquidio_ptp_settime(struct ptp_clock_info *ptp, 1600 const struct timespec64 *ts) 1601 { 1602 u64 ns; 1603 unsigned long flags; 1604 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1605 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1606 1607 ns = timespec64_to_ns(ts); 1608 1609 spin_lock_irqsave(&lio->ptp_lock, flags); 1610 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1611 lio->ptp_adjust = 0; 1612 spin_unlock_irqrestore(&lio->ptp_lock, flags); 1613 1614 return 0; 1615 } 1616 1617 /** 1618 * liquidio_ptp_enable - Check if PTP is enabled 1619 * @ptp: PTP clock info 1620 * @rq: request 1621 * @on: is it on 1622 */ 1623 static int 1624 liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp, 1625 struct ptp_clock_request __maybe_unused *rq, 1626 int __maybe_unused on) 1627 { 1628 return -EOPNOTSUPP; 1629 } 1630 1631 /** 1632 * oct_ptp_open - Open PTP clock source 1633 * @netdev: network device 1634 */ 1635 static void oct_ptp_open(struct net_device *netdev) 1636 { 1637 struct lio *lio = GET_LIO(netdev); 1638 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1639 1640 spin_lock_init(&lio->ptp_lock); 1641 1642 snprintf(lio->ptp_info.name, 16, "%s", netdev->name); 1643 lio->ptp_info.owner = THIS_MODULE; 1644 lio->ptp_info.max_adj = 250000000; 1645 lio->ptp_info.n_alarm = 0; 1646 lio->ptp_info.n_ext_ts = 0; 1647 lio->ptp_info.n_per_out = 0; 1648 lio->ptp_info.pps = 0; 1649 lio->ptp_info.adjfine = liquidio_ptp_adjfine; 1650 lio->ptp_info.adjtime = liquidio_ptp_adjtime; 1651 lio->ptp_info.gettime64 = liquidio_ptp_gettime; 1652 lio->ptp_info.settime64 = liquidio_ptp_settime; 1653 lio->ptp_info.enable = liquidio_ptp_enable; 1654 1655 lio->ptp_adjust = 0; 1656 1657 lio->ptp_clock = ptp_clock_register(&lio->ptp_info, 1658 &oct->pci_dev->dev); 1659 1660 if (IS_ERR(lio->ptp_clock)) 1661 lio->ptp_clock = NULL; 1662 } 1663 1664 /** 1665 * liquidio_ptp_init - Init PTP clock 1666 * @oct: octeon device 1667 */ 1668 static void liquidio_ptp_init(struct octeon_device *oct) 1669 { 1670 u64 clock_comp, cfg; 1671 1672 clock_comp = (u64)NSEC_PER_SEC << 32; 1673 do_div(clock_comp, oct->coproc_clock_rate); 1674 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); 1675 1676 /* Enable */ 1677 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); 1678 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); 1679 } 1680 1681 /** 1682 * load_firmware - Load firmware to device 1683 * @oct: octeon device 1684 * 1685 * Maps device to firmware filename, requests firmware, and downloads it 1686 */ 1687 static int load_firmware(struct octeon_device *oct) 1688 { 1689 int ret = 0; 1690 const struct firmware *fw; 1691 char fw_name[LIO_MAX_FW_FILENAME_LEN]; 1692 char *tmp_fw_type; 1693 1694 if (fw_type_is_auto()) { 1695 tmp_fw_type = LIO_FW_NAME_TYPE_NIC; 1696 strncpy(fw_type, tmp_fw_type, sizeof(fw_type)); 1697 } else { 1698 tmp_fw_type = fw_type; 1699 } 1700 1701 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, 1702 octeon_get_conf(oct)->card_name, tmp_fw_type, 1703 LIO_FW_NAME_SUFFIX); 1704 1705 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); 1706 if (ret) { 1707 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n", 1708 fw_name); 1709 release_firmware(fw); 1710 return ret; 1711 } 1712 1713 ret = octeon_download_firmware(oct, fw->data, fw->size); 1714 1715 release_firmware(fw); 1716 1717 return ret; 1718 } 1719 1720 /** 1721 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status 1722 * @work: work_struct data structure 1723 */ 1724 static void octnet_poll_check_txq_status(struct work_struct *work) 1725 { 1726 struct cavium_wk *wk = (struct cavium_wk *)work; 1727 struct lio *lio = (struct lio *)wk->ctxptr; 1728 1729 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) 1730 return; 1731 1732 check_txq_status(lio); 1733 queue_delayed_work(lio->txq_status_wq.wq, 1734 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1735 } 1736 1737 /** 1738 * setup_tx_poll_fn - Sets up the txq poll check 1739 * @netdev: network device 1740 */ 1741 static inline int setup_tx_poll_fn(struct net_device *netdev) 1742 { 1743 struct lio *lio = GET_LIO(netdev); 1744 struct octeon_device *oct = lio->oct_dev; 1745 1746 lio->txq_status_wq.wq = alloc_workqueue("txq-status", 1747 WQ_MEM_RECLAIM, 0); 1748 if (!lio->txq_status_wq.wq) { 1749 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); 1750 return -1; 1751 } 1752 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, 1753 octnet_poll_check_txq_status); 1754 lio->txq_status_wq.wk.ctxptr = lio; 1755 queue_delayed_work(lio->txq_status_wq.wq, 1756 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); 1757 return 0; 1758 } 1759 1760 static inline void cleanup_tx_poll_fn(struct net_device *netdev) 1761 { 1762 struct lio *lio = GET_LIO(netdev); 1763 1764 if (lio->txq_status_wq.wq) { 1765 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); 1766 destroy_workqueue(lio->txq_status_wq.wq); 1767 } 1768 } 1769 1770 /** 1771 * liquidio_open - Net device open for LiquidIO 1772 * @netdev: network device 1773 */ 1774 static int liquidio_open(struct net_device *netdev) 1775 { 1776 struct lio *lio = GET_LIO(netdev); 1777 struct octeon_device *oct = lio->oct_dev; 1778 struct octeon_device_priv *oct_priv = 1779 (struct octeon_device_priv *)oct->priv; 1780 struct napi_struct *napi, *n; 1781 int ret = 0; 1782 1783 if (oct->props[lio->ifidx].napi_enabled == 0) { 1784 tasklet_disable(&oct_priv->droq_tasklet); 1785 1786 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1787 napi_enable(napi); 1788 1789 oct->props[lio->ifidx].napi_enabled = 1; 1790 1791 if (OCTEON_CN23XX_PF(oct)) 1792 oct->droq[0]->ops.poll_mode = 1; 1793 } 1794 1795 if (oct->ptp_enable) 1796 oct_ptp_open(netdev); 1797 1798 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1799 1800 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { 1801 ret = setup_tx_poll_fn(netdev); 1802 if (ret) 1803 goto err_poll; 1804 } 1805 1806 netif_tx_start_all_queues(netdev); 1807 1808 /* Ready for link status updates */ 1809 lio->intf_open = 1; 1810 1811 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1812 1813 /* tell Octeon to start forwarding packets to host */ 1814 ret = send_rx_ctrl_cmd(lio, 1); 1815 if (ret) 1816 goto err_rx_ctrl; 1817 1818 /* start periodical statistics fetch */ 1819 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 1820 lio->stats_wk.ctxptr = lio; 1821 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 1822 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 1823 1824 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", 1825 netdev->name); 1826 1827 return 0; 1828 1829 err_rx_ctrl: 1830 if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) 1831 cleanup_tx_poll_fn(netdev); 1832 err_poll: 1833 if (lio->ptp_clock) { 1834 ptp_clock_unregister(lio->ptp_clock); 1835 lio->ptp_clock = NULL; 1836 } 1837 1838 if (oct->props[lio->ifidx].napi_enabled == 1) { 1839 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1840 napi_disable(napi); 1841 1842 oct->props[lio->ifidx].napi_enabled = 0; 1843 1844 if (OCTEON_CN23XX_PF(oct)) 1845 oct->droq[0]->ops.poll_mode = 0; 1846 } 1847 1848 return ret; 1849 } 1850 1851 /** 1852 * liquidio_stop - Net device stop for LiquidIO 1853 * @netdev: network device 1854 */ 1855 static int liquidio_stop(struct net_device *netdev) 1856 { 1857 struct lio *lio = GET_LIO(netdev); 1858 struct octeon_device *oct = lio->oct_dev; 1859 struct octeon_device_priv *oct_priv = 1860 (struct octeon_device_priv *)oct->priv; 1861 struct napi_struct *napi, *n; 1862 int ret = 0; 1863 1864 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1865 1866 /* Stop any link updates */ 1867 lio->intf_open = 0; 1868 1869 stop_txqs(netdev); 1870 1871 /* Inform that netif carrier is down */ 1872 netif_carrier_off(netdev); 1873 netif_tx_disable(netdev); 1874 1875 lio->linfo.link.s.link_up = 0; 1876 lio->link_changes++; 1877 1878 /* Tell Octeon that nic interface is down. */ 1879 ret = send_rx_ctrl_cmd(lio, 0); 1880 if (ret) 1881 return ret; 1882 1883 if (OCTEON_CN23XX_PF(oct)) { 1884 if (!oct->msix_on) 1885 cleanup_tx_poll_fn(netdev); 1886 } else { 1887 cleanup_tx_poll_fn(netdev); 1888 } 1889 1890 cancel_delayed_work_sync(&lio->stats_wk.work); 1891 1892 if (lio->ptp_clock) { 1893 ptp_clock_unregister(lio->ptp_clock); 1894 lio->ptp_clock = NULL; 1895 } 1896 1897 /* Wait for any pending Rx descriptors */ 1898 if (lio_wait_for_clean_oq(oct)) 1899 netif_info(lio, rx_err, lio->netdev, 1900 "Proceeding with stop interface after partial RX desc processing\n"); 1901 1902 if (oct->props[lio->ifidx].napi_enabled == 1) { 1903 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1904 napi_disable(napi); 1905 1906 oct->props[lio->ifidx].napi_enabled = 0; 1907 1908 if (OCTEON_CN23XX_PF(oct)) 1909 oct->droq[0]->ops.poll_mode = 0; 1910 1911 tasklet_enable(&oct_priv->droq_tasklet); 1912 } 1913 1914 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1915 1916 return ret; 1917 } 1918 1919 /** 1920 * get_new_flags - Converts a mask based on net device flags 1921 * @netdev: network device 1922 * 1923 * This routine generates a octnet_ifflags mask from the net device flags 1924 * received from the OS. 1925 */ 1926 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 1927 { 1928 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1929 1930 if (netdev->flags & IFF_PROMISC) 1931 f |= OCTNET_IFFLAG_PROMISC; 1932 1933 if (netdev->flags & IFF_ALLMULTI) 1934 f |= OCTNET_IFFLAG_ALLMULTI; 1935 1936 if (netdev->flags & IFF_MULTICAST) { 1937 f |= OCTNET_IFFLAG_MULTICAST; 1938 1939 /* Accept all multicast addresses if there are more than we 1940 * can handle 1941 */ 1942 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1943 f |= OCTNET_IFFLAG_ALLMULTI; 1944 } 1945 1946 if (netdev->flags & IFF_BROADCAST) 1947 f |= OCTNET_IFFLAG_BROADCAST; 1948 1949 return f; 1950 } 1951 1952 /** 1953 * liquidio_set_mcast_list - Net device set_multicast_list 1954 * @netdev: network device 1955 */ 1956 static void liquidio_set_mcast_list(struct net_device *netdev) 1957 { 1958 struct lio *lio = GET_LIO(netdev); 1959 struct octeon_device *oct = lio->oct_dev; 1960 struct octnic_ctrl_pkt nctrl; 1961 struct netdev_hw_addr *ha; 1962 u64 *mc; 1963 int ret; 1964 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1965 1966 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1967 1968 /* Create a ctrl pkt command to be sent to core app. */ 1969 nctrl.ncmd.u64 = 0; 1970 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1971 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1972 nctrl.ncmd.s.param2 = mc_count; 1973 nctrl.ncmd.s.more = mc_count; 1974 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1975 nctrl.netpndev = (u64)netdev; 1976 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1977 1978 /* copy all the addresses into the udd */ 1979 mc = &nctrl.udd[0]; 1980 netdev_for_each_mc_addr(ha, netdev) { 1981 *mc = 0; 1982 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 1983 /* no need to swap bytes */ 1984 1985 if (++mc > &nctrl.udd[mc_count]) 1986 break; 1987 } 1988 1989 /* Apparently, any activity in this call from the kernel has to 1990 * be atomic. So we won't wait for response. 1991 */ 1992 1993 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1994 if (ret) { 1995 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1996 ret); 1997 } 1998 } 1999 2000 /** 2001 * liquidio_set_mac - Net device set_mac_address 2002 * @netdev: network device 2003 * @p: pointer to sockaddr 2004 */ 2005 static int liquidio_set_mac(struct net_device *netdev, void *p) 2006 { 2007 int ret = 0; 2008 struct lio *lio = GET_LIO(netdev); 2009 struct octeon_device *oct = lio->oct_dev; 2010 struct sockaddr *addr = (struct sockaddr *)p; 2011 struct octnic_ctrl_pkt nctrl; 2012 2013 if (!is_valid_ether_addr(addr->sa_data)) 2014 return -EADDRNOTAVAIL; 2015 2016 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2017 2018 nctrl.ncmd.u64 = 0; 2019 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2020 nctrl.ncmd.s.param1 = 0; 2021 nctrl.ncmd.s.more = 1; 2022 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2023 nctrl.netpndev = (u64)netdev; 2024 2025 nctrl.udd[0] = 0; 2026 /* The MAC Address is presented in network byte order. */ 2027 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2028 2029 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2030 if (ret < 0) { 2031 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2032 return -ENOMEM; 2033 } 2034 2035 if (nctrl.sc_status) { 2036 dev_err(&oct->pci_dev->dev, 2037 "%s: MAC Address change failed. sc return=%x\n", 2038 __func__, nctrl.sc_status); 2039 return -EIO; 2040 } 2041 2042 eth_hw_addr_set(netdev, addr->sa_data); 2043 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2044 2045 return 0; 2046 } 2047 2048 static void 2049 liquidio_get_stats64(struct net_device *netdev, 2050 struct rtnl_link_stats64 *lstats) 2051 { 2052 struct lio *lio = GET_LIO(netdev); 2053 struct octeon_device *oct; 2054 u64 pkts = 0, drop = 0, bytes = 0; 2055 struct oct_droq_stats *oq_stats; 2056 struct oct_iq_stats *iq_stats; 2057 int i, iq_no, oq_no; 2058 2059 oct = lio->oct_dev; 2060 2061 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 2062 return; 2063 2064 for (i = 0; i < oct->num_iqs; i++) { 2065 iq_no = lio->linfo.txpciq[i].s.q_no; 2066 iq_stats = &oct->instr_queue[iq_no]->stats; 2067 pkts += iq_stats->tx_done; 2068 drop += iq_stats->tx_dropped; 2069 bytes += iq_stats->tx_tot_bytes; 2070 } 2071 2072 lstats->tx_packets = pkts; 2073 lstats->tx_bytes = bytes; 2074 lstats->tx_dropped = drop; 2075 2076 pkts = 0; 2077 drop = 0; 2078 bytes = 0; 2079 2080 for (i = 0; i < oct->num_oqs; i++) { 2081 oq_no = lio->linfo.rxpciq[i].s.q_no; 2082 oq_stats = &oct->droq[oq_no]->stats; 2083 pkts += oq_stats->rx_pkts_received; 2084 drop += (oq_stats->rx_dropped + 2085 oq_stats->dropped_nodispatch + 2086 oq_stats->dropped_toomany + 2087 oq_stats->dropped_nomem); 2088 bytes += oq_stats->rx_bytes_received; 2089 } 2090 2091 lstats->rx_bytes = bytes; 2092 lstats->rx_packets = pkts; 2093 lstats->rx_dropped = drop; 2094 2095 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 2096 lstats->collisions = oct->link_stats.fromhost.total_collisions; 2097 2098 /* detailed rx_errors: */ 2099 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 2100 /* recved pkt with crc error */ 2101 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 2102 /* recv'd frame alignment error */ 2103 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 2104 /* recv'r fifo overrun */ 2105 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; 2106 2107 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + 2108 lstats->rx_frame_errors + lstats->rx_fifo_errors; 2109 2110 /* detailed tx_errors */ 2111 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 2112 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 2113 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; 2114 2115 lstats->tx_errors = lstats->tx_aborted_errors + 2116 lstats->tx_carrier_errors + 2117 lstats->tx_fifo_errors; 2118 } 2119 2120 /** 2121 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl 2122 * @netdev: network device 2123 * @ifr: interface request 2124 */ 2125 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2126 { 2127 struct hwtstamp_config conf; 2128 struct lio *lio = GET_LIO(netdev); 2129 2130 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2131 return -EFAULT; 2132 2133 switch (conf.tx_type) { 2134 case HWTSTAMP_TX_ON: 2135 case HWTSTAMP_TX_OFF: 2136 break; 2137 default: 2138 return -ERANGE; 2139 } 2140 2141 switch (conf.rx_filter) { 2142 case HWTSTAMP_FILTER_NONE: 2143 break; 2144 case HWTSTAMP_FILTER_ALL: 2145 case HWTSTAMP_FILTER_SOME: 2146 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2147 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2148 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2149 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2150 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2151 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2152 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2153 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2154 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2155 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2156 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2157 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2158 case HWTSTAMP_FILTER_NTP_ALL: 2159 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2160 break; 2161 default: 2162 return -ERANGE; 2163 } 2164 2165 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2166 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2167 2168 else 2169 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2170 2171 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2172 } 2173 2174 /** 2175 * liquidio_ioctl - ioctl handler 2176 * @netdev: network device 2177 * @ifr: interface request 2178 * @cmd: command 2179 */ 2180 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2181 { 2182 struct lio *lio = GET_LIO(netdev); 2183 2184 switch (cmd) { 2185 case SIOCSHWTSTAMP: 2186 if (lio->oct_dev->ptp_enable) 2187 return hwtstamp_ioctl(netdev, ifr); 2188 fallthrough; 2189 default: 2190 return -EOPNOTSUPP; 2191 } 2192 } 2193 2194 /** 2195 * handle_timestamp - handle a Tx timestamp response 2196 * @oct: octeon device 2197 * @status: response status 2198 * @buf: pointer to skb 2199 */ 2200 static void handle_timestamp(struct octeon_device *oct, 2201 u32 status, 2202 void *buf) 2203 { 2204 struct octnet_buf_free_info *finfo; 2205 struct octeon_soft_command *sc; 2206 struct oct_timestamp_resp *resp; 2207 struct lio *lio; 2208 struct sk_buff *skb = (struct sk_buff *)buf; 2209 2210 finfo = (struct octnet_buf_free_info *)skb->cb; 2211 lio = finfo->lio; 2212 sc = finfo->sc; 2213 oct = lio->oct_dev; 2214 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2215 2216 if (status != OCTEON_REQUEST_DONE) { 2217 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 2218 CVM_CAST64(status)); 2219 resp->timestamp = 0; 2220 } 2221 2222 octeon_swap_8B_data(&resp->timestamp, 1); 2223 2224 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 2225 struct skb_shared_hwtstamps ts; 2226 u64 ns = resp->timestamp; 2227 2228 netif_info(lio, tx_done, lio->netdev, 2229 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2230 skb, (unsigned long long)ns); 2231 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2232 skb_tstamp_tx(skb, &ts); 2233 } 2234 2235 octeon_free_soft_command(oct, sc); 2236 tx_buffer_free(skb); 2237 } 2238 2239 /** 2240 * send_nic_timestamp_pkt - Send a data packet that will be timestamped 2241 * @oct: octeon device 2242 * @ndata: pointer to network data 2243 * @finfo: pointer to private network data 2244 * @xmit_more: more is coming 2245 */ 2246 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 2247 struct octnic_data_pkt *ndata, 2248 struct octnet_buf_free_info *finfo, 2249 int xmit_more) 2250 { 2251 int retval; 2252 struct octeon_soft_command *sc; 2253 struct lio *lio; 2254 int ring_doorbell; 2255 u32 len; 2256 2257 lio = finfo->lio; 2258 2259 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2260 sizeof(struct oct_timestamp_resp)); 2261 finfo->sc = sc; 2262 2263 if (!sc) { 2264 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2265 return IQ_SEND_FAILED; 2266 } 2267 2268 if (ndata->reqtype == REQTYPE_NORESP_NET) 2269 ndata->reqtype = REQTYPE_RESP_NET; 2270 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2271 ndata->reqtype = REQTYPE_RESP_NET_SG; 2272 2273 sc->callback = handle_timestamp; 2274 sc->callback_arg = finfo->skb; 2275 sc->iq_no = ndata->q_no; 2276 2277 if (OCTEON_CN23XX_PF(oct)) 2278 len = (u32)((struct octeon_instr_ih3 *) 2279 (&sc->cmd.cmd3.ih3))->dlengsz; 2280 else 2281 len = (u32)((struct octeon_instr_ih2 *) 2282 (&sc->cmd.cmd2.ih2))->dlengsz; 2283 2284 ring_doorbell = !xmit_more; 2285 2286 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2287 sc, len, ndata->reqtype); 2288 2289 if (retval == IQ_SEND_FAILED) { 2290 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2291 retval); 2292 octeon_free_soft_command(oct, sc); 2293 } else { 2294 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2295 } 2296 2297 return retval; 2298 } 2299 2300 /** 2301 * liquidio_xmit - Transmit networks packets to the Octeon interface 2302 * @skb: skbuff struct to be passed to network layer. 2303 * @netdev: pointer to network device 2304 * 2305 * Return: whether the packet was transmitted to the device okay or not 2306 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2307 */ 2308 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2309 { 2310 struct lio *lio; 2311 struct octnet_buf_free_info *finfo; 2312 union octnic_cmd_setup cmdsetup; 2313 struct octnic_data_pkt ndata; 2314 struct octeon_device *oct; 2315 struct oct_iq_stats *stats; 2316 struct octeon_instr_irh *irh; 2317 union tx_info *tx_info; 2318 int status = 0; 2319 int q_idx = 0, iq_no = 0; 2320 int j, xmit_more = 0; 2321 u64 dptr = 0; 2322 u32 tag = 0; 2323 2324 lio = GET_LIO(netdev); 2325 oct = lio->oct_dev; 2326 2327 q_idx = skb_iq(oct, skb); 2328 tag = q_idx; 2329 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2330 2331 stats = &oct->instr_queue[iq_no]->stats; 2332 2333 /* Check for all conditions in which the current packet cannot be 2334 * transmitted. 2335 */ 2336 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2337 (!lio->linfo.link.s.link_up) || 2338 (skb->len <= 0)) { 2339 netif_info(lio, tx_err, lio->netdev, 2340 "Transmit failed link_status : %d\n", 2341 lio->linfo.link.s.link_up); 2342 goto lio_xmit_failed; 2343 } 2344 2345 /* Use space in skb->cb to store info used to unmap and 2346 * free the buffers. 2347 */ 2348 finfo = (struct octnet_buf_free_info *)skb->cb; 2349 finfo->lio = lio; 2350 finfo->skb = skb; 2351 finfo->sc = NULL; 2352 2353 /* Prepare the attributes for the data to be passed to OSI. */ 2354 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2355 2356 ndata.buf = (void *)finfo; 2357 2358 ndata.q_no = iq_no; 2359 2360 if (octnet_iq_is_full(oct, ndata.q_no)) { 2361 /* defer sending if queue is full */ 2362 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2363 ndata.q_no); 2364 stats->tx_iq_busy++; 2365 return NETDEV_TX_BUSY; 2366 } 2367 2368 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2369 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 2370 */ 2371 2372 ndata.datasize = skb->len; 2373 2374 cmdsetup.u64 = 0; 2375 cmdsetup.s.iq_no = iq_no; 2376 2377 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2378 if (skb->encapsulation) { 2379 cmdsetup.s.tnl_csum = 1; 2380 stats->tx_vxlan++; 2381 } else { 2382 cmdsetup.s.transport_csum = 1; 2383 } 2384 } 2385 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2386 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2387 cmdsetup.s.timestamp = 1; 2388 } 2389 2390 if (skb_shinfo(skb)->nr_frags == 0) { 2391 cmdsetup.s.u.datasize = skb->len; 2392 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2393 2394 /* Offload checksum calculation for TCP/UDP packets */ 2395 dptr = dma_map_single(&oct->pci_dev->dev, 2396 skb->data, 2397 skb->len, 2398 DMA_TO_DEVICE); 2399 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2400 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2401 __func__); 2402 stats->tx_dmamap_fail++; 2403 return NETDEV_TX_BUSY; 2404 } 2405 2406 if (OCTEON_CN23XX_PF(oct)) 2407 ndata.cmd.cmd3.dptr = dptr; 2408 else 2409 ndata.cmd.cmd2.dptr = dptr; 2410 finfo->dptr = dptr; 2411 ndata.reqtype = REQTYPE_NORESP_NET; 2412 2413 } else { 2414 int i, frags; 2415 skb_frag_t *frag; 2416 struct octnic_gather *g; 2417 2418 spin_lock(&lio->glist_lock[q_idx]); 2419 g = (struct octnic_gather *) 2420 lio_list_delete_head(&lio->glist[q_idx]); 2421 spin_unlock(&lio->glist_lock[q_idx]); 2422 2423 if (!g) { 2424 netif_info(lio, tx_err, lio->netdev, 2425 "Transmit scatter gather: glist null!\n"); 2426 goto lio_xmit_failed; 2427 } 2428 2429 cmdsetup.s.gather = 1; 2430 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2431 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2432 2433 memset(g->sg, 0, g->sg_size); 2434 2435 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2436 skb->data, 2437 (skb->len - skb->data_len), 2438 DMA_TO_DEVICE); 2439 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2440 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2441 __func__); 2442 stats->tx_dmamap_fail++; 2443 return NETDEV_TX_BUSY; 2444 } 2445 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2446 2447 frags = skb_shinfo(skb)->nr_frags; 2448 i = 1; 2449 while (frags--) { 2450 frag = &skb_shinfo(skb)->frags[i - 1]; 2451 2452 g->sg[(i >> 2)].ptr[(i & 3)] = 2453 skb_frag_dma_map(&oct->pci_dev->dev, 2454 frag, 0, skb_frag_size(frag), 2455 DMA_TO_DEVICE); 2456 2457 if (dma_mapping_error(&oct->pci_dev->dev, 2458 g->sg[i >> 2].ptr[i & 3])) { 2459 dma_unmap_single(&oct->pci_dev->dev, 2460 g->sg[0].ptr[0], 2461 skb->len - skb->data_len, 2462 DMA_TO_DEVICE); 2463 for (j = 1; j < i; j++) { 2464 frag = &skb_shinfo(skb)->frags[j - 1]; 2465 dma_unmap_page(&oct->pci_dev->dev, 2466 g->sg[j >> 2].ptr[j & 3], 2467 skb_frag_size(frag), 2468 DMA_TO_DEVICE); 2469 } 2470 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2471 __func__); 2472 return NETDEV_TX_BUSY; 2473 } 2474 2475 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 2476 (i & 3)); 2477 i++; 2478 } 2479 2480 dptr = g->sg_dma_ptr; 2481 2482 if (OCTEON_CN23XX_PF(oct)) 2483 ndata.cmd.cmd3.dptr = dptr; 2484 else 2485 ndata.cmd.cmd2.dptr = dptr; 2486 finfo->dptr = dptr; 2487 finfo->g = g; 2488 2489 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2490 } 2491 2492 if (OCTEON_CN23XX_PF(oct)) { 2493 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2494 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2495 } else { 2496 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 2497 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 2498 } 2499 2500 if (skb_shinfo(skb)->gso_size) { 2501 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2502 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2503 stats->tx_gso++; 2504 } 2505 2506 /* HW insert VLAN tag */ 2507 if (skb_vlan_tag_present(skb)) { 2508 irh->priority = skb_vlan_tag_get(skb) >> 13; 2509 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 2510 } 2511 2512 xmit_more = netdev_xmit_more(); 2513 2514 if (unlikely(cmdsetup.s.timestamp)) 2515 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2516 else 2517 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2518 if (status == IQ_SEND_FAILED) 2519 goto lio_xmit_failed; 2520 2521 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2522 2523 if (status == IQ_SEND_STOP) 2524 netif_stop_subqueue(netdev, q_idx); 2525 2526 netif_trans_update(netdev); 2527 2528 if (tx_info->s.gso_segs) 2529 stats->tx_done += tx_info->s.gso_segs; 2530 else 2531 stats->tx_done++; 2532 stats->tx_tot_bytes += ndata.datasize; 2533 2534 return NETDEV_TX_OK; 2535 2536 lio_xmit_failed: 2537 stats->tx_dropped++; 2538 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2539 iq_no, stats->tx_dropped); 2540 if (dptr) 2541 dma_unmap_single(&oct->pci_dev->dev, dptr, 2542 ndata.datasize, DMA_TO_DEVICE); 2543 2544 octeon_ring_doorbell_locked(oct, iq_no); 2545 2546 tx_buffer_free(skb); 2547 return NETDEV_TX_OK; 2548 } 2549 2550 /** 2551 * liquidio_tx_timeout - Network device Tx timeout 2552 * @netdev: pointer to network device 2553 * @txqueue: index of the hung transmit queue 2554 */ 2555 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2556 { 2557 struct lio *lio; 2558 2559 lio = GET_LIO(netdev); 2560 2561 netif_info(lio, tx_err, lio->netdev, 2562 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2563 netdev->stats.tx_dropped); 2564 netif_trans_update(netdev); 2565 wake_txqs(netdev); 2566 } 2567 2568 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 2569 __be16 proto __attribute__((unused)), 2570 u16 vid) 2571 { 2572 struct lio *lio = GET_LIO(netdev); 2573 struct octeon_device *oct = lio->oct_dev; 2574 struct octnic_ctrl_pkt nctrl; 2575 int ret = 0; 2576 2577 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2578 2579 nctrl.ncmd.u64 = 0; 2580 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2581 nctrl.ncmd.s.param1 = vid; 2582 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2583 nctrl.netpndev = (u64)netdev; 2584 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2585 2586 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2587 if (ret) { 2588 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2589 ret); 2590 if (ret > 0) 2591 ret = -EIO; 2592 } 2593 2594 return ret; 2595 } 2596 2597 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2598 __be16 proto __attribute__((unused)), 2599 u16 vid) 2600 { 2601 struct lio *lio = GET_LIO(netdev); 2602 struct octeon_device *oct = lio->oct_dev; 2603 struct octnic_ctrl_pkt nctrl; 2604 int ret = 0; 2605 2606 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2607 2608 nctrl.ncmd.u64 = 0; 2609 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2610 nctrl.ncmd.s.param1 = vid; 2611 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2612 nctrl.netpndev = (u64)netdev; 2613 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2614 2615 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2616 if (ret) { 2617 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 2618 ret); 2619 if (ret > 0) 2620 ret = -EIO; 2621 } 2622 return ret; 2623 } 2624 2625 /** 2626 * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload 2627 * @netdev: pointer to network device 2628 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL 2629 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE 2630 * Returns: SUCCESS or FAILURE 2631 */ 2632 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2633 u8 rx_cmd) 2634 { 2635 struct lio *lio = GET_LIO(netdev); 2636 struct octeon_device *oct = lio->oct_dev; 2637 struct octnic_ctrl_pkt nctrl; 2638 int ret = 0; 2639 2640 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2641 2642 nctrl.ncmd.u64 = 0; 2643 nctrl.ncmd.s.cmd = command; 2644 nctrl.ncmd.s.param1 = rx_cmd; 2645 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2646 nctrl.netpndev = (u64)netdev; 2647 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2648 2649 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2650 if (ret) { 2651 dev_err(&oct->pci_dev->dev, 2652 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 2653 ret); 2654 if (ret > 0) 2655 ret = -EIO; 2656 } 2657 return ret; 2658 } 2659 2660 /** 2661 * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware 2662 * @netdev: pointer to network device 2663 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG 2664 * @vxlan_port: VxLAN port to be added or deleted 2665 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD, 2666 * OCTNET_CMD_VXLAN_PORT_DEL 2667 * Return: SUCCESS or FAILURE 2668 */ 2669 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2670 u16 vxlan_port, u8 vxlan_cmd_bit) 2671 { 2672 struct lio *lio = GET_LIO(netdev); 2673 struct octeon_device *oct = lio->oct_dev; 2674 struct octnic_ctrl_pkt nctrl; 2675 int ret = 0; 2676 2677 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2678 2679 nctrl.ncmd.u64 = 0; 2680 nctrl.ncmd.s.cmd = command; 2681 nctrl.ncmd.s.more = vxlan_cmd_bit; 2682 nctrl.ncmd.s.param1 = vxlan_port; 2683 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2684 nctrl.netpndev = (u64)netdev; 2685 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2686 2687 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2688 if (ret) { 2689 dev_err(&oct->pci_dev->dev, 2690 "VxLAN port add/delete failed in core (ret:0x%x)\n", 2691 ret); 2692 if (ret > 0) 2693 ret = -EIO; 2694 } 2695 return ret; 2696 } 2697 2698 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 2699 unsigned int table, unsigned int entry, 2700 struct udp_tunnel_info *ti) 2701 { 2702 return liquidio_vxlan_port_command(netdev, 2703 OCTNET_CMD_VXLAN_PORT_CONFIG, 2704 htons(ti->port), 2705 OCTNET_CMD_VXLAN_PORT_ADD); 2706 } 2707 2708 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 2709 unsigned int table, 2710 unsigned int entry, 2711 struct udp_tunnel_info *ti) 2712 { 2713 return liquidio_vxlan_port_command(netdev, 2714 OCTNET_CMD_VXLAN_PORT_CONFIG, 2715 htons(ti->port), 2716 OCTNET_CMD_VXLAN_PORT_DEL); 2717 } 2718 2719 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 2720 .set_port = liquidio_udp_tunnel_set_port, 2721 .unset_port = liquidio_udp_tunnel_unset_port, 2722 .tables = { 2723 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 2724 }, 2725 }; 2726 2727 /** 2728 * liquidio_fix_features - Net device fix features 2729 * @netdev: pointer to network device 2730 * @request: features requested 2731 * Return: updated features list 2732 */ 2733 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2734 netdev_features_t request) 2735 { 2736 struct lio *lio = netdev_priv(netdev); 2737 2738 if ((request & NETIF_F_RXCSUM) && 2739 !(lio->dev_capability & NETIF_F_RXCSUM)) 2740 request &= ~NETIF_F_RXCSUM; 2741 2742 if ((request & NETIF_F_HW_CSUM) && 2743 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2744 request &= ~NETIF_F_HW_CSUM; 2745 2746 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2747 request &= ~NETIF_F_TSO; 2748 2749 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2750 request &= ~NETIF_F_TSO6; 2751 2752 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2753 request &= ~NETIF_F_LRO; 2754 2755 /*Disable LRO if RXCSUM is off */ 2756 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2757 (lio->dev_capability & NETIF_F_LRO)) 2758 request &= ~NETIF_F_LRO; 2759 2760 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && 2761 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) 2762 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2763 2764 return request; 2765 } 2766 2767 /** 2768 * liquidio_set_features - Net device set features 2769 * @netdev: pointer to network device 2770 * @features: features to enable/disable 2771 */ 2772 static int liquidio_set_features(struct net_device *netdev, 2773 netdev_features_t features) 2774 { 2775 struct lio *lio = netdev_priv(netdev); 2776 2777 if ((features & NETIF_F_LRO) && 2778 (lio->dev_capability & NETIF_F_LRO) && 2779 !(netdev->features & NETIF_F_LRO)) 2780 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2781 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2782 else if (!(features & NETIF_F_LRO) && 2783 (lio->dev_capability & NETIF_F_LRO) && 2784 (netdev->features & NETIF_F_LRO)) 2785 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2786 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2787 2788 /* Sending command to firmware to enable/disable RX checksum 2789 * offload settings using ethtool 2790 */ 2791 if (!(netdev->features & NETIF_F_RXCSUM) && 2792 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2793 (features & NETIF_F_RXCSUM)) 2794 liquidio_set_rxcsum_command(netdev, 2795 OCTNET_CMD_TNL_RX_CSUM_CTL, 2796 OCTNET_CMD_RXCSUM_ENABLE); 2797 else if ((netdev->features & NETIF_F_RXCSUM) && 2798 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2799 !(features & NETIF_F_RXCSUM)) 2800 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2801 OCTNET_CMD_RXCSUM_DISABLE); 2802 2803 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2804 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2805 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2806 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2807 OCTNET_CMD_VLAN_FILTER_ENABLE); 2808 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2809 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2810 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2811 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2812 OCTNET_CMD_VLAN_FILTER_DISABLE); 2813 2814 return 0; 2815 } 2816 2817 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 2818 u8 *mac, bool is_admin_assigned) 2819 { 2820 struct lio *lio = GET_LIO(netdev); 2821 struct octeon_device *oct = lio->oct_dev; 2822 struct octnic_ctrl_pkt nctrl; 2823 int ret = 0; 2824 2825 if (!is_valid_ether_addr(mac)) 2826 return -EINVAL; 2827 2828 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 2829 return -EINVAL; 2830 2831 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2832 2833 nctrl.ncmd.u64 = 0; 2834 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2835 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 2836 nctrl.ncmd.s.param1 = vfidx + 1; 2837 nctrl.ncmd.s.more = 1; 2838 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2839 nctrl.netpndev = (u64)netdev; 2840 if (is_admin_assigned) { 2841 nctrl.ncmd.s.param2 = true; 2842 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2843 } 2844 2845 nctrl.udd[0] = 0; 2846 /* The MAC Address is presented in network byte order. */ 2847 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 2848 2849 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 2850 2851 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2852 if (ret > 0) 2853 ret = -EIO; 2854 2855 return ret; 2856 } 2857 2858 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 2859 { 2860 struct lio *lio = GET_LIO(netdev); 2861 struct octeon_device *oct = lio->oct_dev; 2862 int retval; 2863 2864 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2865 return -EINVAL; 2866 2867 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 2868 if (!retval) 2869 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 2870 2871 return retval; 2872 } 2873 2874 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, 2875 bool enable) 2876 { 2877 struct lio *lio = GET_LIO(netdev); 2878 struct octeon_device *oct = lio->oct_dev; 2879 struct octnic_ctrl_pkt nctrl; 2880 int retval; 2881 2882 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { 2883 netif_info(lio, drv, lio->netdev, 2884 "firmware does not support spoofchk\n"); 2885 return -EOPNOTSUPP; 2886 } 2887 2888 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 2889 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 2890 return -EINVAL; 2891 } 2892 2893 if (enable) { 2894 if (oct->sriov_info.vf_spoofchk[vfidx]) 2895 return 0; 2896 } else { 2897 /* Clear */ 2898 if (!oct->sriov_info.vf_spoofchk[vfidx]) 2899 return 0; 2900 } 2901 2902 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2903 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; 2904 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; 2905 nctrl.ncmd.s.param1 = 2906 vfidx + 1; /* vfidx is 0 based, 2907 * but vf_num (param1) is 1 based 2908 */ 2909 nctrl.ncmd.s.param2 = enable; 2910 nctrl.ncmd.s.more = 0; 2911 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2912 nctrl.cb_fn = NULL; 2913 2914 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2915 2916 if (retval) { 2917 netif_info(lio, drv, lio->netdev, 2918 "Failed to set VF %d spoofchk %s\n", vfidx, 2919 enable ? "on" : "off"); 2920 return -1; 2921 } 2922 2923 oct->sriov_info.vf_spoofchk[vfidx] = enable; 2924 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, 2925 enable ? "on" : "off"); 2926 2927 return 0; 2928 } 2929 2930 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 2931 u16 vlan, u8 qos, __be16 vlan_proto) 2932 { 2933 struct lio *lio = GET_LIO(netdev); 2934 struct octeon_device *oct = lio->oct_dev; 2935 struct octnic_ctrl_pkt nctrl; 2936 u16 vlantci; 2937 int ret = 0; 2938 2939 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2940 return -EINVAL; 2941 2942 if (vlan_proto != htons(ETH_P_8021Q)) 2943 return -EPROTONOSUPPORT; 2944 2945 if (vlan >= VLAN_N_VID || qos > 7) 2946 return -EINVAL; 2947 2948 if (vlan) 2949 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 2950 else 2951 vlantci = 0; 2952 2953 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 2954 return 0; 2955 2956 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2957 2958 if (vlan) 2959 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2960 else 2961 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2962 2963 nctrl.ncmd.s.param1 = vlantci; 2964 nctrl.ncmd.s.param2 = 2965 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 2966 nctrl.ncmd.s.more = 0; 2967 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2968 nctrl.cb_fn = NULL; 2969 2970 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2971 if (ret) { 2972 if (ret > 0) 2973 ret = -EIO; 2974 return ret; 2975 } 2976 2977 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 2978 2979 return ret; 2980 } 2981 2982 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 2983 struct ifla_vf_info *ivi) 2984 { 2985 struct lio *lio = GET_LIO(netdev); 2986 struct octeon_device *oct = lio->oct_dev; 2987 u8 *macaddr; 2988 2989 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2990 return -EINVAL; 2991 2992 memset(ivi, 0, sizeof(struct ifla_vf_info)); 2993 2994 ivi->vf = vfidx; 2995 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 2996 ether_addr_copy(&ivi->mac[0], macaddr); 2997 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 2998 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 2999 if (oct->sriov_info.trusted_vf.active && 3000 oct->sriov_info.trusted_vf.id == vfidx) 3001 ivi->trusted = true; 3002 else 3003 ivi->trusted = false; 3004 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 3005 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; 3006 ivi->max_tx_rate = lio->linfo.link.s.speed; 3007 ivi->min_tx_rate = 0; 3008 3009 return 0; 3010 } 3011 3012 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) 3013 { 3014 struct octeon_device *oct = lio->oct_dev; 3015 struct octeon_soft_command *sc; 3016 int retval; 3017 3018 sc = octeon_alloc_soft_command(oct, 0, 16, 0); 3019 if (!sc) 3020 return -ENOMEM; 3021 3022 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 3023 3024 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3025 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 3026 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, 3027 trusted); 3028 3029 init_completion(&sc->complete); 3030 sc->sc_status = OCTEON_REQUEST_PENDING; 3031 3032 retval = octeon_send_soft_command(oct, sc); 3033 if (retval == IQ_SEND_FAILED) { 3034 octeon_free_soft_command(oct, sc); 3035 retval = -1; 3036 } else { 3037 /* Wait for response or timeout */ 3038 retval = wait_for_sc_completion_timeout(oct, sc, 0); 3039 if (retval) 3040 return (retval); 3041 3042 WRITE_ONCE(sc->caller_is_done, true); 3043 } 3044 3045 return retval; 3046 } 3047 3048 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, 3049 bool setting) 3050 { 3051 struct lio *lio = GET_LIO(netdev); 3052 struct octeon_device *oct = lio->oct_dev; 3053 3054 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { 3055 /* trusted vf is not supported by firmware older than 1.7.1 */ 3056 return -EOPNOTSUPP; 3057 } 3058 3059 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 3060 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 3061 return -EINVAL; 3062 } 3063 3064 if (setting) { 3065 /* Set */ 3066 3067 if (oct->sriov_info.trusted_vf.active && 3068 oct->sriov_info.trusted_vf.id == vfidx) 3069 return 0; 3070 3071 if (oct->sriov_info.trusted_vf.active) { 3072 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); 3073 return -EPERM; 3074 } 3075 } else { 3076 /* Clear */ 3077 3078 if (!oct->sriov_info.trusted_vf.active) 3079 return 0; 3080 } 3081 3082 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { 3083 if (setting) { 3084 oct->sriov_info.trusted_vf.id = vfidx; 3085 oct->sriov_info.trusted_vf.active = true; 3086 } else { 3087 oct->sriov_info.trusted_vf.active = false; 3088 } 3089 3090 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, 3091 setting ? "" : "not "); 3092 } else { 3093 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); 3094 return -1; 3095 } 3096 3097 return 0; 3098 } 3099 3100 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3101 int linkstate) 3102 { 3103 struct lio *lio = GET_LIO(netdev); 3104 struct octeon_device *oct = lio->oct_dev; 3105 struct octnic_ctrl_pkt nctrl; 3106 int ret = 0; 3107 3108 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3109 return -EINVAL; 3110 3111 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3112 return 0; 3113 3114 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3115 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3116 nctrl.ncmd.s.param1 = 3117 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3118 nctrl.ncmd.s.param2 = linkstate; 3119 nctrl.ncmd.s.more = 0; 3120 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3121 nctrl.cb_fn = NULL; 3122 3123 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 3124 3125 if (!ret) 3126 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 3127 else if (ret > 0) 3128 ret = -EIO; 3129 3130 return ret; 3131 } 3132 3133 static int 3134 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3135 { 3136 struct lio_devlink_priv *priv; 3137 struct octeon_device *oct; 3138 3139 priv = devlink_priv(devlink); 3140 oct = priv->oct; 3141 3142 *mode = oct->eswitch_mode; 3143 3144 return 0; 3145 } 3146 3147 static int 3148 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, 3149 struct netlink_ext_ack *extack) 3150 { 3151 struct lio_devlink_priv *priv; 3152 struct octeon_device *oct; 3153 int ret = 0; 3154 3155 priv = devlink_priv(devlink); 3156 oct = priv->oct; 3157 3158 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) 3159 return -EINVAL; 3160 3161 if (oct->eswitch_mode == mode) 3162 return 0; 3163 3164 switch (mode) { 3165 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3166 oct->eswitch_mode = mode; 3167 ret = lio_vf_rep_create(oct); 3168 break; 3169 3170 case DEVLINK_ESWITCH_MODE_LEGACY: 3171 lio_vf_rep_destroy(oct); 3172 oct->eswitch_mode = mode; 3173 break; 3174 3175 default: 3176 ret = -EINVAL; 3177 } 3178 3179 return ret; 3180 } 3181 3182 static const struct devlink_ops liquidio_devlink_ops = { 3183 .eswitch_mode_get = liquidio_eswitch_mode_get, 3184 .eswitch_mode_set = liquidio_eswitch_mode_set, 3185 }; 3186 3187 static int 3188 liquidio_get_port_parent_id(struct net_device *dev, 3189 struct netdev_phys_item_id *ppid) 3190 { 3191 struct lio *lio = GET_LIO(dev); 3192 struct octeon_device *oct = lio->oct_dev; 3193 3194 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 3195 return -EOPNOTSUPP; 3196 3197 ppid->id_len = ETH_ALEN; 3198 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); 3199 3200 return 0; 3201 } 3202 3203 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, 3204 struct ifla_vf_stats *vf_stats) 3205 { 3206 struct lio *lio = GET_LIO(netdev); 3207 struct octeon_device *oct = lio->oct_dev; 3208 struct oct_vf_stats stats; 3209 int ret; 3210 3211 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3212 return -EINVAL; 3213 3214 memset(&stats, 0, sizeof(struct oct_vf_stats)); 3215 ret = cn23xx_get_vf_stats(oct, vfidx, &stats); 3216 if (!ret) { 3217 vf_stats->rx_packets = stats.rx_packets; 3218 vf_stats->tx_packets = stats.tx_packets; 3219 vf_stats->rx_bytes = stats.rx_bytes; 3220 vf_stats->tx_bytes = stats.tx_bytes; 3221 vf_stats->broadcast = stats.broadcast; 3222 vf_stats->multicast = stats.multicast; 3223 } 3224 3225 return ret; 3226 } 3227 3228 static const struct net_device_ops lionetdevops = { 3229 .ndo_open = liquidio_open, 3230 .ndo_stop = liquidio_stop, 3231 .ndo_start_xmit = liquidio_xmit, 3232 .ndo_get_stats64 = liquidio_get_stats64, 3233 .ndo_set_mac_address = liquidio_set_mac, 3234 .ndo_set_rx_mode = liquidio_set_mcast_list, 3235 .ndo_tx_timeout = liquidio_tx_timeout, 3236 3237 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3238 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3239 .ndo_change_mtu = liquidio_change_mtu, 3240 .ndo_eth_ioctl = liquidio_ioctl, 3241 .ndo_fix_features = liquidio_fix_features, 3242 .ndo_set_features = liquidio_set_features, 3243 .ndo_set_vf_mac = liquidio_set_vf_mac, 3244 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3245 .ndo_get_vf_config = liquidio_get_vf_config, 3246 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, 3247 .ndo_set_vf_trust = liquidio_set_vf_trust, 3248 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3249 .ndo_get_vf_stats = liquidio_get_vf_stats, 3250 .ndo_get_port_parent_id = liquidio_get_port_parent_id, 3251 }; 3252 3253 /** 3254 * liquidio_init - Entry point for the liquidio module 3255 */ 3256 static int __init liquidio_init(void) 3257 { 3258 int i; 3259 struct handshake *hs; 3260 3261 init_completion(&first_stage); 3262 3263 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3264 3265 if (liquidio_init_pci()) 3266 return -EINVAL; 3267 3268 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3269 3270 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3271 hs = &handshake[i]; 3272 if (hs->pci_dev) { 3273 wait_for_completion(&hs->init); 3274 if (!hs->init_ok) { 3275 /* init handshake failed */ 3276 dev_err(&hs->pci_dev->dev, 3277 "Failed to init device\n"); 3278 liquidio_deinit_pci(); 3279 return -EIO; 3280 } 3281 } 3282 } 3283 3284 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3285 hs = &handshake[i]; 3286 if (hs->pci_dev) { 3287 wait_for_completion_timeout(&hs->started, 3288 msecs_to_jiffies(30000)); 3289 if (!hs->started_ok) { 3290 /* starter handshake failed */ 3291 dev_err(&hs->pci_dev->dev, 3292 "Firmware failed to start\n"); 3293 liquidio_deinit_pci(); 3294 return -EIO; 3295 } 3296 } 3297 } 3298 3299 return 0; 3300 } 3301 3302 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3303 { 3304 struct octeon_device *oct = (struct octeon_device *)buf; 3305 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3306 int gmxport = 0; 3307 union oct_link_status *ls; 3308 int i; 3309 3310 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 3311 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3312 recv_pkt->buffer_size[0], 3313 recv_pkt->rh.r_nic_info.gmxport); 3314 goto nic_info_err; 3315 } 3316 3317 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3318 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 3319 OCT_DROQ_INFO_SIZE); 3320 3321 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3322 for (i = 0; i < oct->ifcount; i++) { 3323 if (oct->props[i].gmxport == gmxport) { 3324 update_link_status(oct->props[i].netdev, ls); 3325 break; 3326 } 3327 } 3328 3329 nic_info_err: 3330 for (i = 0; i < recv_pkt->buffer_count; i++) 3331 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3332 octeon_free_recv_info(recv_info); 3333 return 0; 3334 } 3335 3336 /** 3337 * setup_nic_devices - Setup network interfaces 3338 * @octeon_dev: octeon device 3339 * 3340 * Called during init time for each device. It assumes the NIC 3341 * is already up and running. The link information for each 3342 * interface is passed in link_info. 3343 */ 3344 static int setup_nic_devices(struct octeon_device *octeon_dev) 3345 { 3346 struct lio *lio = NULL; 3347 struct net_device *netdev; 3348 u8 mac[6], i, j, *fw_ver, *micro_ver; 3349 unsigned long micro; 3350 u32 cur_ver; 3351 struct octeon_soft_command *sc; 3352 struct liquidio_if_cfg_resp *resp; 3353 struct octdev_props *props; 3354 int retval, num_iqueues, num_oqueues; 3355 int max_num_queues = 0; 3356 union oct_nic_if_cfg if_cfg; 3357 unsigned int base_queue; 3358 unsigned int gmx_port_id; 3359 u32 resp_size, data_size; 3360 u32 ifidx_or_pfnum; 3361 struct lio_version *vdata; 3362 struct devlink *devlink; 3363 struct lio_devlink_priv *lio_devlink; 3364 3365 /* This is to handle link status changes */ 3366 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3367 OPCODE_NIC_INFO, 3368 lio_nic_info, octeon_dev); 3369 3370 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3371 * They are handled directly. 3372 */ 3373 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3374 free_netbuf); 3375 3376 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3377 free_netsgbuf); 3378 3379 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3380 free_netsgbuf_with_resp); 3381 3382 for (i = 0; i < octeon_dev->ifcount; i++) { 3383 resp_size = sizeof(struct liquidio_if_cfg_resp); 3384 data_size = sizeof(struct lio_version); 3385 sc = (struct octeon_soft_command *) 3386 octeon_alloc_soft_command(octeon_dev, data_size, 3387 resp_size, 0); 3388 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3389 vdata = (struct lio_version *)sc->virtdptr; 3390 3391 *((u64 *)vdata) = 0; 3392 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 3393 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 3394 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 3395 3396 if (OCTEON_CN23XX_PF(octeon_dev)) { 3397 num_iqueues = octeon_dev->sriov_info.num_pf_rings; 3398 num_oqueues = octeon_dev->sriov_info.num_pf_rings; 3399 base_queue = octeon_dev->sriov_info.pf_srn; 3400 3401 gmx_port_id = octeon_dev->pf_num; 3402 ifidx_or_pfnum = octeon_dev->pf_num; 3403 } else { 3404 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( 3405 octeon_get_conf(octeon_dev), i); 3406 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( 3407 octeon_get_conf(octeon_dev), i); 3408 base_queue = CFG_GET_BASE_QUE_NIC_IF( 3409 octeon_get_conf(octeon_dev), i); 3410 gmx_port_id = CFG_GET_GMXID_NIC_IF( 3411 octeon_get_conf(octeon_dev), i); 3412 ifidx_or_pfnum = i; 3413 } 3414 3415 dev_dbg(&octeon_dev->pci_dev->dev, 3416 "requesting config for interface %d, iqs %d, oqs %d\n", 3417 ifidx_or_pfnum, num_iqueues, num_oqueues); 3418 3419 if_cfg.u64 = 0; 3420 if_cfg.s.num_iqueues = num_iqueues; 3421 if_cfg.s.num_oqueues = num_oqueues; 3422 if_cfg.s.base_queue = base_queue; 3423 if_cfg.s.gmx_port_id = gmx_port_id; 3424 3425 sc->iq_no = 0; 3426 3427 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3428 OPCODE_NIC_IF_CFG, 0, 3429 if_cfg.u64, 0); 3430 3431 init_completion(&sc->complete); 3432 sc->sc_status = OCTEON_REQUEST_PENDING; 3433 3434 retval = octeon_send_soft_command(octeon_dev, sc); 3435 if (retval == IQ_SEND_FAILED) { 3436 dev_err(&octeon_dev->pci_dev->dev, 3437 "iq/oq config failed status: %x\n", 3438 retval); 3439 /* Soft instr is freed by driver in case of failure. */ 3440 octeon_free_soft_command(octeon_dev, sc); 3441 return(-EIO); 3442 } 3443 3444 /* Sleep on a wait queue till the cond flag indicates that the 3445 * response arrived or timed-out. 3446 */ 3447 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); 3448 if (retval) 3449 return retval; 3450 3451 retval = resp->status; 3452 if (retval) { 3453 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3454 WRITE_ONCE(sc->caller_is_done, true); 3455 goto setup_nic_dev_done; 3456 } 3457 snprintf(octeon_dev->fw_info.liquidio_firmware_version, 3458 32, "%s", 3459 resp->cfg_info.liquidio_firmware_version); 3460 3461 /* Verify f/w version (in case of 'auto' loading from flash) */ 3462 fw_ver = octeon_dev->fw_info.liquidio_firmware_version; 3463 if (memcmp(LIQUIDIO_BASE_VERSION, 3464 fw_ver, 3465 strlen(LIQUIDIO_BASE_VERSION))) { 3466 dev_err(&octeon_dev->pci_dev->dev, 3467 "Unmatched firmware version. Expected %s.x, got %s.\n", 3468 LIQUIDIO_BASE_VERSION, fw_ver); 3469 WRITE_ONCE(sc->caller_is_done, true); 3470 goto setup_nic_dev_done; 3471 } else if (atomic_read(octeon_dev->adapter_fw_state) == 3472 FW_IS_PRELOADED) { 3473 dev_info(&octeon_dev->pci_dev->dev, 3474 "Using auto-loaded firmware version %s.\n", 3475 fw_ver); 3476 } 3477 3478 /* extract micro version field; point past '<maj>.<min>.' */ 3479 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; 3480 if (kstrtoul(micro_ver, 10, µ) != 0) 3481 micro = 0; 3482 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; 3483 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; 3484 octeon_dev->fw_info.ver.rev = micro; 3485 3486 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3487 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3488 3489 num_iqueues = hweight64(resp->cfg_info.iqmask); 3490 num_oqueues = hweight64(resp->cfg_info.oqmask); 3491 3492 if (!(num_iqueues) || !(num_oqueues)) { 3493 dev_err(&octeon_dev->pci_dev->dev, 3494 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3495 resp->cfg_info.iqmask, 3496 resp->cfg_info.oqmask); 3497 WRITE_ONCE(sc->caller_is_done, true); 3498 goto setup_nic_dev_done; 3499 } 3500 3501 if (OCTEON_CN6XXX(octeon_dev)) { 3502 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3503 cn6xxx)); 3504 } else if (OCTEON_CN23XX_PF(octeon_dev)) { 3505 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3506 cn23xx_pf)); 3507 } 3508 3509 dev_dbg(&octeon_dev->pci_dev->dev, 3510 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", 3511 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3512 num_iqueues, num_oqueues, max_num_queues); 3513 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); 3514 3515 if (!netdev) { 3516 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3517 WRITE_ONCE(sc->caller_is_done, true); 3518 goto setup_nic_dev_done; 3519 } 3520 3521 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 3522 3523 /* Associate the routines that will handle different 3524 * netdev tasks. 3525 */ 3526 netdev->netdev_ops = &lionetdevops; 3527 3528 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3529 if (retval) { 3530 dev_err(&octeon_dev->pci_dev->dev, 3531 "setting real number rx failed\n"); 3532 WRITE_ONCE(sc->caller_is_done, true); 3533 goto setup_nic_dev_free; 3534 } 3535 3536 retval = netif_set_real_num_tx_queues(netdev, num_iqueues); 3537 if (retval) { 3538 dev_err(&octeon_dev->pci_dev->dev, 3539 "setting real number tx failed\n"); 3540 WRITE_ONCE(sc->caller_is_done, true); 3541 goto setup_nic_dev_free; 3542 } 3543 3544 lio = GET_LIO(netdev); 3545 3546 memset(lio, 0, sizeof(struct lio)); 3547 3548 lio->ifidx = ifidx_or_pfnum; 3549 3550 props = &octeon_dev->props[i]; 3551 props->gmxport = resp->cfg_info.linfo.gmxport; 3552 props->netdev = netdev; 3553 3554 lio->linfo.num_rxpciq = num_oqueues; 3555 lio->linfo.num_txpciq = num_iqueues; 3556 for (j = 0; j < num_oqueues; j++) { 3557 lio->linfo.rxpciq[j].u64 = 3558 resp->cfg_info.linfo.rxpciq[j].u64; 3559 } 3560 for (j = 0; j < num_iqueues; j++) { 3561 lio->linfo.txpciq[j].u64 = 3562 resp->cfg_info.linfo.txpciq[j].u64; 3563 } 3564 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3565 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3566 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3567 3568 WRITE_ONCE(sc->caller_is_done, true); 3569 3570 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3571 3572 if (OCTEON_CN23XX_PF(octeon_dev) || 3573 OCTEON_CN6XXX(octeon_dev)) { 3574 lio->dev_capability = NETIF_F_HIGHDMA 3575 | NETIF_F_IP_CSUM 3576 | NETIF_F_IPV6_CSUM 3577 | NETIF_F_SG | NETIF_F_RXCSUM 3578 | NETIF_F_GRO 3579 | NETIF_F_TSO | NETIF_F_TSO6 3580 | NETIF_F_LRO; 3581 } 3582 netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3583 3584 /* Copy of transmit encapsulation capabilities: 3585 * TSO, TSO6, Checksums for this device 3586 */ 3587 lio->enc_dev_capability = NETIF_F_IP_CSUM 3588 | NETIF_F_IPV6_CSUM 3589 | NETIF_F_GSO_UDP_TUNNEL 3590 | NETIF_F_HW_CSUM | NETIF_F_SG 3591 | NETIF_F_RXCSUM 3592 | NETIF_F_TSO | NETIF_F_TSO6 3593 | NETIF_F_LRO; 3594 3595 netdev->hw_enc_features = (lio->enc_dev_capability & 3596 ~NETIF_F_LRO); 3597 3598 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; 3599 3600 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; 3601 3602 netdev->vlan_features = lio->dev_capability; 3603 /* Add any unchangeable hw features */ 3604 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 3605 NETIF_F_HW_VLAN_CTAG_RX | 3606 NETIF_F_HW_VLAN_CTAG_TX; 3607 3608 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 3609 3610 netdev->hw_features = lio->dev_capability; 3611 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ 3612 netdev->hw_features = netdev->hw_features & 3613 ~NETIF_F_HW_VLAN_CTAG_RX; 3614 3615 /* MTU range: 68 - 16000 */ 3616 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3617 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3618 3619 /* Point to the properties for octeon device to which this 3620 * interface belongs. 3621 */ 3622 lio->oct_dev = octeon_dev; 3623 lio->octprops = props; 3624 lio->netdev = netdev; 3625 3626 dev_dbg(&octeon_dev->pci_dev->dev, 3627 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3628 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3629 3630 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { 3631 u8 vfmac[ETH_ALEN]; 3632 3633 eth_random_addr(vfmac); 3634 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { 3635 dev_err(&octeon_dev->pci_dev->dev, 3636 "Error setting VF%d MAC address\n", 3637 j); 3638 goto setup_nic_dev_free; 3639 } 3640 } 3641 3642 /* 64-bit swap required on LE machines */ 3643 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3644 for (j = 0; j < 6; j++) 3645 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3646 3647 /* Copy MAC Address to OS network device structure */ 3648 3649 eth_hw_addr_set(netdev, mac); 3650 3651 /* By default all interfaces on a single Octeon uses the same 3652 * tx and rx queues 3653 */ 3654 lio->txq = lio->linfo.txpciq[0].s.q_no; 3655 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 3656 if (liquidio_setup_io_queues(octeon_dev, i, 3657 lio->linfo.num_txpciq, 3658 lio->linfo.num_rxpciq)) { 3659 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3660 goto setup_nic_dev_free; 3661 } 3662 3663 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3664 3665 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3666 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3667 3668 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { 3669 dev_err(&octeon_dev->pci_dev->dev, 3670 "Gather list allocation failed\n"); 3671 goto setup_nic_dev_free; 3672 } 3673 3674 /* Register ethtool support */ 3675 liquidio_set_ethtool_ops(netdev); 3676 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) 3677 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 3678 else 3679 octeon_dev->priv_flags = 0x0; 3680 3681 if (netdev->features & NETIF_F_LRO) 3682 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3683 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3684 3685 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3686 OCTNET_CMD_VLAN_FILTER_ENABLE); 3687 3688 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3689 liquidio_set_feature(netdev, 3690 OCTNET_CMD_VERBOSE_ENABLE, 0); 3691 3692 if (setup_link_status_change_wq(netdev)) 3693 goto setup_nic_dev_free; 3694 3695 if ((octeon_dev->fw_info.app_cap_flags & 3696 LIQUIDIO_TIME_SYNC_CAP) && 3697 setup_sync_octeon_time_wq(netdev)) 3698 goto setup_nic_dev_free; 3699 3700 if (setup_rx_oom_poll_fn(netdev)) 3701 goto setup_nic_dev_free; 3702 3703 /* Register the network device with the OS */ 3704 if (register_netdev(netdev)) { 3705 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3706 goto setup_nic_dev_free; 3707 } 3708 3709 dev_dbg(&octeon_dev->pci_dev->dev, 3710 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3711 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3712 netif_carrier_off(netdev); 3713 lio->link_changes++; 3714 3715 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3716 3717 /* Sending command to firmware to enable Rx checksum offload 3718 * by default at the time of setup of Liquidio driver for 3719 * this device 3720 */ 3721 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3722 OCTNET_CMD_RXCSUM_ENABLE); 3723 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3724 OCTNET_CMD_TXCSUM_ENABLE); 3725 3726 dev_dbg(&octeon_dev->pci_dev->dev, 3727 "NIC ifidx:%d Setup successful\n", i); 3728 3729 if (octeon_dev->subsystem_id == 3730 OCTEON_CN2350_25GB_SUBSYS_ID || 3731 octeon_dev->subsystem_id == 3732 OCTEON_CN2360_25GB_SUBSYS_ID) { 3733 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, 3734 octeon_dev->fw_info.ver.min, 3735 octeon_dev->fw_info.ver.rev); 3736 3737 /* speed control unsupported in f/w older than 1.7.2 */ 3738 if (cur_ver < OCT_FW_VER(1, 7, 2)) { 3739 dev_info(&octeon_dev->pci_dev->dev, 3740 "speed setting not supported by f/w."); 3741 octeon_dev->speed_setting = 25; 3742 octeon_dev->no_speed_setting = 1; 3743 } else { 3744 liquidio_get_speed(lio); 3745 } 3746 3747 if (octeon_dev->speed_setting == 0) { 3748 octeon_dev->speed_setting = 25; 3749 octeon_dev->no_speed_setting = 1; 3750 } 3751 } else { 3752 octeon_dev->no_speed_setting = 1; 3753 octeon_dev->speed_setting = 10; 3754 } 3755 octeon_dev->speed_boot = octeon_dev->speed_setting; 3756 3757 /* don't read FEC setting if unsupported by f/w (see above) */ 3758 if (octeon_dev->speed_boot == 25 && 3759 !octeon_dev->no_speed_setting) { 3760 liquidio_get_fec(lio); 3761 octeon_dev->props[lio->ifidx].fec_boot = 3762 octeon_dev->props[lio->ifidx].fec; 3763 } 3764 } 3765 3766 device_lock(&octeon_dev->pci_dev->dev); 3767 devlink = devlink_alloc(&liquidio_devlink_ops, 3768 sizeof(struct lio_devlink_priv), 3769 &octeon_dev->pci_dev->dev); 3770 if (!devlink) { 3771 device_unlock(&octeon_dev->pci_dev->dev); 3772 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3773 goto setup_nic_dev_free; 3774 } 3775 3776 lio_devlink = devlink_priv(devlink); 3777 lio_devlink->oct = octeon_dev; 3778 3779 octeon_dev->devlink = devlink; 3780 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; 3781 devlink_register(devlink); 3782 device_unlock(&octeon_dev->pci_dev->dev); 3783 3784 return 0; 3785 3786 setup_nic_dev_free: 3787 3788 while (i--) { 3789 dev_err(&octeon_dev->pci_dev->dev, 3790 "NIC ifidx:%d Setup failed\n", i); 3791 liquidio_destroy_nic_device(octeon_dev, i); 3792 } 3793 3794 setup_nic_dev_done: 3795 3796 return -ENODEV; 3797 } 3798 3799 #ifdef CONFIG_PCI_IOV 3800 static int octeon_enable_sriov(struct octeon_device *oct) 3801 { 3802 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; 3803 struct pci_dev *vfdev; 3804 int err; 3805 u32 u; 3806 3807 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { 3808 err = pci_enable_sriov(oct->pci_dev, 3809 oct->sriov_info.num_vfs_alloced); 3810 if (err) { 3811 dev_err(&oct->pci_dev->dev, 3812 "OCTEON: Failed to enable PCI sriov: %d\n", 3813 err); 3814 oct->sriov_info.num_vfs_alloced = 0; 3815 return err; 3816 } 3817 oct->sriov_info.sriov_enabled = 1; 3818 3819 /* init lookup table that maps DPI ring number to VF pci_dev 3820 * struct pointer 3821 */ 3822 u = 0; 3823 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3824 OCTEON_CN23XX_VF_VID, NULL); 3825 while (vfdev) { 3826 if (vfdev->is_virtfn && 3827 (vfdev->physfn == oct->pci_dev)) { 3828 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = 3829 vfdev; 3830 u += oct->sriov_info.rings_per_vf; 3831 } 3832 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3833 OCTEON_CN23XX_VF_VID, vfdev); 3834 } 3835 } 3836 3837 return num_vfs_alloced; 3838 } 3839 3840 static int lio_pci_sriov_disable(struct octeon_device *oct) 3841 { 3842 int u; 3843 3844 if (pci_vfs_assigned(oct->pci_dev)) { 3845 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); 3846 return -EPERM; 3847 } 3848 3849 pci_disable_sriov(oct->pci_dev); 3850 3851 u = 0; 3852 while (u < MAX_POSSIBLE_VFS) { 3853 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; 3854 u += oct->sriov_info.rings_per_vf; 3855 } 3856 3857 oct->sriov_info.num_vfs_alloced = 0; 3858 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", 3859 oct->pf_num); 3860 3861 return 0; 3862 } 3863 3864 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) 3865 { 3866 struct octeon_device *oct = pci_get_drvdata(dev); 3867 int ret = 0; 3868 3869 if ((num_vfs == oct->sriov_info.num_vfs_alloced) && 3870 (oct->sriov_info.sriov_enabled)) { 3871 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", 3872 oct->pf_num, num_vfs); 3873 return 0; 3874 } 3875 3876 if (!num_vfs) { 3877 lio_vf_rep_destroy(oct); 3878 ret = lio_pci_sriov_disable(oct); 3879 } else if (num_vfs > oct->sriov_info.max_vfs) { 3880 dev_err(&oct->pci_dev->dev, 3881 "OCTEON: Max allowed VFs:%d user requested:%d", 3882 oct->sriov_info.max_vfs, num_vfs); 3883 ret = -EPERM; 3884 } else { 3885 oct->sriov_info.num_vfs_alloced = num_vfs; 3886 ret = octeon_enable_sriov(oct); 3887 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", 3888 oct->pf_num, num_vfs); 3889 ret = lio_vf_rep_create(oct); 3890 if (ret) 3891 dev_info(&oct->pci_dev->dev, 3892 "vf representor create failed"); 3893 } 3894 3895 return ret; 3896 } 3897 #endif 3898 3899 /** 3900 * liquidio_init_nic_module - initialize the NIC 3901 * @oct: octeon device 3902 * 3903 * This initialization routine is called once the Octeon device application is 3904 * up and running 3905 */ 3906 static int liquidio_init_nic_module(struct octeon_device *oct) 3907 { 3908 int i, retval = 0; 3909 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3910 3911 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3912 3913 /* only default iq and oq were initialized 3914 * initialize the rest as well 3915 */ 3916 /* run port_config command for each port */ 3917 oct->ifcount = num_nic_ports; 3918 3919 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); 3920 3921 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3922 oct->props[i].gmxport = -1; 3923 3924 retval = setup_nic_devices(oct); 3925 if (retval) { 3926 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3927 goto octnet_init_failure; 3928 } 3929 3930 /* Call vf_rep_modinit if the firmware is switchdev capable 3931 * and do it from the first liquidio function probed. 3932 */ 3933 if (!oct->octeon_id && 3934 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { 3935 retval = lio_vf_rep_modinit(); 3936 if (retval) { 3937 liquidio_stop_nic_module(oct); 3938 goto octnet_init_failure; 3939 } 3940 } 3941 3942 liquidio_ptp_init(oct); 3943 3944 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3945 3946 return retval; 3947 3948 octnet_init_failure: 3949 3950 oct->ifcount = 0; 3951 3952 return retval; 3953 } 3954 3955 /** 3956 * nic_starter - finish init 3957 * @work: work struct work_struct 3958 * 3959 * starter callback that invokes the remaining initialization work after the NIC is up and running. 3960 */ 3961 static void nic_starter(struct work_struct *work) 3962 { 3963 struct octeon_device *oct; 3964 struct cavium_wk *wk = (struct cavium_wk *)work; 3965 3966 oct = (struct octeon_device *)wk->ctxptr; 3967 3968 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3969 return; 3970 3971 /* If the status of the device is CORE_OK, the core 3972 * application has reported its application type. Call 3973 * any registered handlers now and move to the RUNNING 3974 * state. 3975 */ 3976 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { 3977 schedule_delayed_work(&oct->nic_poll_work.work, 3978 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 3979 return; 3980 } 3981 3982 atomic_set(&oct->status, OCT_DEV_RUNNING); 3983 3984 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { 3985 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); 3986 3987 if (liquidio_init_nic_module(oct)) 3988 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); 3989 else 3990 handshake[oct->octeon_id].started_ok = 1; 3991 } else { 3992 dev_err(&oct->pci_dev->dev, 3993 "Unexpected application running on NIC (%d). Check firmware.\n", 3994 oct->app_mode); 3995 } 3996 3997 complete(&handshake[oct->octeon_id].started); 3998 } 3999 4000 static int 4001 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) 4002 { 4003 struct octeon_device *oct = (struct octeon_device *)buf; 4004 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 4005 int i, notice, vf_idx; 4006 bool cores_crashed; 4007 u64 *data, vf_num; 4008 4009 notice = recv_pkt->rh.r.ossp; 4010 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE); 4011 4012 /* the first 64-bit word of data is the vf_num */ 4013 vf_num = data[0]; 4014 octeon_swap_8B_data(&vf_num, 1); 4015 vf_idx = (int)vf_num - 1; 4016 4017 cores_crashed = READ_ONCE(oct->cores_crashed); 4018 4019 if (notice == VF_DRV_LOADED) { 4020 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { 4021 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); 4022 dev_info(&oct->pci_dev->dev, 4023 "driver for VF%d was loaded\n", vf_idx); 4024 if (!cores_crashed) 4025 try_module_get(THIS_MODULE); 4026 } 4027 } else if (notice == VF_DRV_REMOVED) { 4028 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { 4029 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); 4030 dev_info(&oct->pci_dev->dev, 4031 "driver for VF%d was removed\n", vf_idx); 4032 if (!cores_crashed) 4033 module_put(THIS_MODULE); 4034 } 4035 } else if (notice == VF_DRV_MACADDR_CHANGED) { 4036 u8 *b = (u8 *)&data[1]; 4037 4038 oct->sriov_info.vf_macaddr[vf_idx] = data[1]; 4039 dev_info(&oct->pci_dev->dev, 4040 "VF driver changed VF%d's MAC address to %pM\n", 4041 vf_idx, b + 2); 4042 } 4043 4044 for (i = 0; i < recv_pkt->buffer_count; i++) 4045 recv_buffer_free(recv_pkt->buffer_ptr[i]); 4046 octeon_free_recv_info(recv_info); 4047 4048 return 0; 4049 } 4050 4051 /** 4052 * octeon_device_init - Device initialization for each Octeon device that is probed 4053 * @octeon_dev: octeon device 4054 */ 4055 static int octeon_device_init(struct octeon_device *octeon_dev) 4056 { 4057 int j, ret; 4058 char bootcmd[] = "\n"; 4059 char *dbg_enb = NULL; 4060 enum lio_fw_state fw_state; 4061 struct octeon_device_priv *oct_priv = 4062 (struct octeon_device_priv *)octeon_dev->priv; 4063 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); 4064 4065 /* Enable access to the octeon device and make its DMA capability 4066 * known to the OS. 4067 */ 4068 if (octeon_pci_os_setup(octeon_dev)) 4069 return 1; 4070 4071 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE); 4072 4073 /* Identify the Octeon type and map the BAR address space. */ 4074 if (octeon_chip_specific_setup(octeon_dev)) { 4075 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n"); 4076 return 1; 4077 } 4078 4079 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE); 4080 4081 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', 4082 * since that is what is required for the reference to be removed 4083 * during de-initialization (see 'octeon_destroy_resources'). 4084 */ 4085 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number, 4086 PCI_SLOT(octeon_dev->pci_dev->devfn), 4087 PCI_FUNC(octeon_dev->pci_dev->devfn), 4088 true); 4089 4090 octeon_dev->app_mode = CVM_DRV_INVALID_APP; 4091 4092 /* CN23XX supports preloaded firmware if the following is true: 4093 * 4094 * The adapter indicates that firmware is currently running AND 4095 * 'fw_type' is 'auto'. 4096 * 4097 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate). 4098 */ 4099 if (OCTEON_CN23XX_PF(octeon_dev) && 4100 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) { 4101 atomic_cmpxchg(octeon_dev->adapter_fw_state, 4102 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED); 4103 } 4104 4105 /* If loading firmware, only first device of adapter needs to do so. */ 4106 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state, 4107 FW_NEEDS_TO_BE_LOADED, 4108 FW_IS_BEING_LOADED); 4109 4110 /* Here, [local variable] 'fw_state' is set to one of: 4111 * 4112 * FW_IS_PRELOADED: No firmware is to be loaded (see above) 4113 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load 4114 * firmware to the adapter. 4115 * FW_IS_BEING_LOADED: The driver's second instance will not load 4116 * firmware to the adapter. 4117 */ 4118 4119 /* Prior to f/w load, perform a soft reset of the Octeon device; 4120 * if error resetting, return w/error. 4121 */ 4122 if (fw_state == FW_NEEDS_TO_BE_LOADED) 4123 if (octeon_dev->fn_list.soft_reset(octeon_dev)) 4124 return 1; 4125 4126 /* Initialize the dispatch mechanism used to push packets arriving on 4127 * Octeon Output queues. 4128 */ 4129 if (octeon_init_dispatch_list(octeon_dev)) 4130 return 1; 4131 4132 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4133 OPCODE_NIC_CORE_DRV_ACTIVE, 4134 octeon_core_drv_init, 4135 octeon_dev); 4136 4137 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 4138 OPCODE_NIC_VF_DRV_NOTICE, 4139 octeon_recv_vf_drv_notice, octeon_dev); 4140 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); 4141 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; 4142 schedule_delayed_work(&octeon_dev->nic_poll_work.work, 4143 LIQUIDIO_STARTER_POLL_INTERVAL_MS); 4144 4145 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); 4146 4147 if (octeon_set_io_queues_off(octeon_dev)) { 4148 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n"); 4149 return 1; 4150 } 4151 4152 if (OCTEON_CN23XX_PF(octeon_dev)) { 4153 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4154 if (ret) { 4155 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n"); 4156 return ret; 4157 } 4158 } 4159 4160 /* Initialize soft command buffer pool 4161 */ 4162 if (octeon_setup_sc_buffer_pool(octeon_dev)) { 4163 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n"); 4164 return 1; 4165 } 4166 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 4167 4168 /* Setup the data structures that manage this Octeon's Input queues. */ 4169 if (octeon_setup_instr_queues(octeon_dev)) { 4170 dev_err(&octeon_dev->pci_dev->dev, 4171 "instruction queue initialization failed\n"); 4172 return 1; 4173 } 4174 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 4175 4176 /* Initialize lists to manage the requests of different types that 4177 * arrive from user & kernel applications for this octeon device. 4178 */ 4179 if (octeon_setup_response_list(octeon_dev)) { 4180 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n"); 4181 return 1; 4182 } 4183 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); 4184 4185 if (octeon_setup_output_queues(octeon_dev)) { 4186 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); 4187 return 1; 4188 } 4189 4190 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); 4191 4192 if (OCTEON_CN23XX_PF(octeon_dev)) { 4193 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) { 4194 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n"); 4195 return 1; 4196 } 4197 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); 4198 4199 if (octeon_allocate_ioq_vector 4200 (octeon_dev, 4201 octeon_dev->sriov_info.num_pf_rings)) { 4202 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); 4203 return 1; 4204 } 4205 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 4206 4207 } else { 4208 /* The input and output queue registers were setup earlier (the 4209 * queues were not enabled). Any additional registers 4210 * that need to be programmed should be done now. 4211 */ 4212 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 4213 if (ret) { 4214 dev_err(&octeon_dev->pci_dev->dev, 4215 "Failed to configure device registers\n"); 4216 return ret; 4217 } 4218 } 4219 4220 /* Initialize the tasklet that handles output queue packet processing.*/ 4221 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); 4222 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh); 4223 4224 /* Setup the interrupt handler and record the INT SUM register address 4225 */ 4226 if (octeon_setup_interrupt(octeon_dev, 4227 octeon_dev->sriov_info.num_pf_rings)) 4228 return 1; 4229 4230 /* Enable Octeon device interrupts */ 4231 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); 4232 4233 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); 4234 4235 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE 4236 * the output queue is enabled. 4237 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in 4238 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 4239 * Otherwise, it is possible that the DRV_ACTIVE message will be sent 4240 * before any credits have been issued, causing the ring to be reset 4241 * (and the f/w appear to never have started). 4242 */ 4243 for (j = 0; j < octeon_dev->num_oqs; j++) 4244 writel(octeon_dev->droq[j]->max_count, 4245 octeon_dev->droq[j]->pkts_credit_reg); 4246 4247 /* Enable the input and output queues for this Octeon device */ 4248 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); 4249 if (ret) { 4250 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues"); 4251 return ret; 4252 } 4253 4254 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); 4255 4256 if (fw_state == FW_NEEDS_TO_BE_LOADED) { 4257 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); 4258 if (!ddr_timeout) { 4259 dev_info(&octeon_dev->pci_dev->dev, 4260 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); 4261 } 4262 4263 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); 4264 4265 /* Wait for the octeon to initialize DDR after the soft-reset.*/ 4266 while (!ddr_timeout) { 4267 set_current_state(TASK_INTERRUPTIBLE); 4268 if (schedule_timeout(HZ / 10)) { 4269 /* user probably pressed Control-C */ 4270 return 1; 4271 } 4272 } 4273 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); 4274 if (ret) { 4275 dev_err(&octeon_dev->pci_dev->dev, 4276 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", 4277 ret); 4278 return 1; 4279 } 4280 4281 if (octeon_wait_for_bootloader(octeon_dev, 1000)) { 4282 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); 4283 return 1; 4284 } 4285 4286 /* Divert uboot to take commands from host instead. */ 4287 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); 4288 4289 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); 4290 ret = octeon_init_consoles(octeon_dev); 4291 if (ret) { 4292 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); 4293 return 1; 4294 } 4295 /* If console debug enabled, specify empty string to use default 4296 * enablement ELSE specify NULL string for 'disabled'. 4297 */ 4298 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; 4299 ret = octeon_add_console(octeon_dev, 0, dbg_enb); 4300 if (ret) { 4301 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); 4302 return 1; 4303 } else if (octeon_console_debug_enabled(0)) { 4304 /* If console was added AND we're logging console output 4305 * then set our console print function. 4306 */ 4307 octeon_dev->console[0].print = octeon_dbg_console_print; 4308 } 4309 4310 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); 4311 4312 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); 4313 ret = load_firmware(octeon_dev); 4314 if (ret) { 4315 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); 4316 return 1; 4317 } 4318 4319 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED); 4320 } 4321 4322 handshake[octeon_dev->octeon_id].init_ok = 1; 4323 complete(&handshake[octeon_dev->octeon_id].init); 4324 4325 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); 4326 oct_priv->dev = octeon_dev; 4327 4328 return 0; 4329 } 4330 4331 /** 4332 * octeon_dbg_console_print - Debug console print function 4333 * @oct: octeon device 4334 * @console_num: console number 4335 * @prefix: first portion of line to display 4336 * @suffix: second portion of line to display 4337 * 4338 * The OCTEON debug console outputs entire lines (excluding '\n'). 4339 * Normally, the line will be passed in the 'prefix' parameter. 4340 * However, due to buffering, it is possible for a line to be split into two 4341 * parts, in which case they will be passed as the 'prefix' parameter and 4342 * 'suffix' parameter. 4343 */ 4344 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, 4345 char *prefix, char *suffix) 4346 { 4347 if (prefix && suffix) 4348 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix, 4349 suffix); 4350 else if (prefix) 4351 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix); 4352 else if (suffix) 4353 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix); 4354 4355 return 0; 4356 } 4357 4358 /** 4359 * liquidio_exit - Exits the module 4360 */ 4361 static void __exit liquidio_exit(void) 4362 { 4363 liquidio_deinit_pci(); 4364 4365 pr_info("LiquidIO network module is now unloaded\n"); 4366 } 4367 4368 module_init(liquidio_init); 4369 module_exit(liquidio_exit); 4370