/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses the firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * octeon_console_debug_enabled - determines if a given console has debug enabled.
 * @console: console to check
 * Return: 1 = enabled, 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000
/* update localtime to octeon firmware every 60 seconds.
 * make the firmware use the same time reference, so that it will be easy to
 * correlate firmware logged events/errors with host events, for debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)

struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

/* Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
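/* Per-device handshake state used to serialize the multi-stage
 * initialization of all Octeon devices in the system (see the
 * handshake[] array below).
 */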
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

/* Bottom-half (tasklet) handler: drain packets from every enabled DROQ and
 * reschedule itself while packets remain.
 */
static void octeon_droq_bh(struct tasklet_struct *t)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
							   droq_tasklet);
	struct octeon_device *oct = oct_priv->dev;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}
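/**
 * lio_wait_for_oq_pkts - wait for pending output queue packets to drain
 * @oct: Pointer to Octeon device
 *
 * Polls the DROQs for up to 100 ticks, scheduling the DROQ tasklet whenever
 * packets are still pending. Return: number of packets still pending after
 * the final poll (0 when drained).
 */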
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * force_io_queues_off - Forces all IO queues off on a given device
 * @oct: Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * liquidio_pcie_mmio_enabled - mmio handler
 * @pdev: Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
	/* Nothing to be done here. */
}
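/* Power management (suspend/resume) is not implemented; the PM ops below
 * are registered with NULL callbacks.
 */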
#define liquidio_suspend NULL
#define liquidio_resume NULL

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */
	.driver.pm	= &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * liquidio_init_pci - register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * liquidio_deinit_pci - unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 * Return: 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}

/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * octnet_link_status_change - Routine to notify MTU change
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * This API is invoked only when the new max-MTU of the interface is
	 * less than the current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}
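/**
 * cleanup_link_status_change_wq - cancel and destroy the mtu status change work
 * @netdev: network device
 */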
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		WRITE_ONCE(sc->caller_is_done, true);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}
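/**
 * get_other_octeon_device - find the other PF sharing this adapter
 * @oct: octeon device
 *
 * Return: the Octeon device with the next device id that sits on the same
 * bus and slot (i.e. the other PF of the same NIC), or NULL if none.
 */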
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}

static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}
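/**
 * liquidio_watchdog - kernel thread that monitors the NIC firmware cores
 * @param: octeon device to watch
 *
 * Every two seconds, reads the mask of crashed or stuck cores from the
 * CN23XX scratch register. On a crash it logs the affected cores, disables
 * all VF links on both PFs of the NIC and drops the module references held
 * by the VF drivers so that the PF driver can still be unloaded.
 */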
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
			    (mask_of_crashed_or_stuck_cores
			     >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}

/**
 * liquidio_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
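/* Return true if the fw_type module parameter is still at its "auto" default */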
static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		fallthrough;
	case OCT_DEV_HOST_OK:

	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		fallthrough;
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(&iq->instr_pending)) {
				spin_lock_bh(&iq->lock);
				iq->fill_cnt = 0;
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(&iq->instr_pending);
				lio_process_iq_request_list(oct, iq, 0);
				spin_unlock_bh(&iq->lock);
			}
		}

		lio_process_ordered_list(oct, 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

		fallthrough;
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		fallthrough;
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		fallthrough;
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		fallthrough;
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		fallthrough;
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		fallthrough;
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		fallthrough;
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		fallthrough;
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		fallthrough;
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		fallthrough;
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		fallthrough;
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command struct\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
1217 */ 1218 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1219 { 1220 struct net_device *netdev = oct->props[ifidx].netdev; 1221 struct octeon_device_priv *oct_priv = 1222 (struct octeon_device_priv *)oct->priv; 1223 struct napi_struct *napi, *n; 1224 struct lio *lio; 1225 1226 if (!netdev) { 1227 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1228 __func__, ifidx); 1229 return; 1230 } 1231 1232 lio = GET_LIO(netdev); 1233 1234 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1235 1236 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1237 liquidio_stop(netdev); 1238 1239 if (oct->props[lio->ifidx].napi_enabled == 1) { 1240 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1241 napi_disable(napi); 1242 1243 oct->props[lio->ifidx].napi_enabled = 0; 1244 1245 if (OCTEON_CN23XX_PF(oct)) 1246 oct->droq[0]->ops.poll_mode = 0; 1247 } 1248 1249 /* Delete NAPI */ 1250 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1251 netif_napi_del(napi); 1252 1253 tasklet_enable(&oct_priv->droq_tasklet); 1254 1255 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1256 unregister_netdev(netdev); 1257 1258 cleanup_sync_octeon_time_wq(netdev); 1259 cleanup_link_status_change_wq(netdev); 1260 1261 cleanup_rx_oom_poll_fn(netdev); 1262 1263 lio_delete_glists(lio); 1264 1265 free_netdev(netdev); 1266 1267 oct->props[ifidx].gmxport = -1; 1268 1269 oct->props[ifidx].netdev = NULL; 1270 } 1271 1272 /** 1273 * liquidio_stop_nic_module - Stop complete NIC functionality 1274 * @oct: octeon device 1275 */ 1276 static int liquidio_stop_nic_module(struct octeon_device *oct) 1277 { 1278 int i, j; 1279 struct lio *lio; 1280 1281 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1282 device_lock(&oct->pci_dev->dev); 1283 if (oct->devlink) { 1284 devlink_unregister(oct->devlink); 1285 devlink_free(oct->devlink); 1286 oct->devlink = NULL; 1287 } 1288 device_unlock(&oct->pci_dev->dev); 1289 1290 if (!oct->ifcount) { 1291 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1292 return 1; 1293 } 1294 1295 spin_lock_bh(&oct->cmd_resp_wqlock); 1296 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1297 spin_unlock_bh(&oct->cmd_resp_wqlock); 1298 1299 lio_vf_rep_destroy(oct); 1300 1301 for (i = 0; i < oct->ifcount; i++) { 1302 lio = GET_LIO(oct->props[i].netdev); 1303 for (j = 0; j < oct->num_oqs; j++) 1304 octeon_unregister_droq_ops(oct, 1305 lio->linfo.rxpciq[j].s.q_no); 1306 } 1307 1308 for (i = 0; i < oct->ifcount; i++) 1309 liquidio_destroy_nic_device(oct, i); 1310 1311 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1312 return 0; 1313 } 1314 1315 /** 1316 * liquidio_remove - Cleans up resources at unload time 1317 * @pdev: PCI device structure 1318 */ 1319 static void liquidio_remove(struct pci_dev *pdev) 1320 { 1321 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1322 1323 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1324 1325 if (oct_dev->watchdog_task) 1326 kthread_stop(oct_dev->watchdog_task); 1327 1328 if (!oct_dev->octeon_id && 1329 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) 1330 lio_vf_rep_modexit(); 1331 1332 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) 1333 liquidio_stop_nic_module(oct_dev); 1334 1335 /* Reset the octeon device and cleanup all memory allocated for 1336 * the octeon device by driver. 
1337 */ 1338 octeon_destroy_resources(oct_dev); 1339 1340 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1341 1342 /* This octeon device has been removed. Update the global 1343 * data structure to reflect this. Free the device structure. 1344 */ 1345 octeon_free_device_mem(oct_dev); 1346 } 1347 1348 /** 1349 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space 1350 * @oct: octeon device 1351 */ 1352 static int octeon_chip_specific_setup(struct octeon_device *oct) 1353 { 1354 u32 dev_id, rev_id; 1355 int ret = 1; 1356 1357 pci_read_config_dword(oct->pci_dev, 0, &dev_id); 1358 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 1359 oct->rev_id = rev_id & 0xff; 1360 1361 switch (dev_id) { 1362 case OCTEON_CN68XX_PCIID: 1363 oct->chip_id = OCTEON_CN68XX; 1364 ret = lio_setup_cn68xx_octeon_device(oct); 1365 break; 1366 1367 case OCTEON_CN66XX_PCIID: 1368 oct->chip_id = OCTEON_CN66XX; 1369 ret = lio_setup_cn66xx_octeon_device(oct); 1370 break; 1371 1372 case OCTEON_CN23XX_PCIID_PF: 1373 oct->chip_id = OCTEON_CN23XX_PF_VID; 1374 ret = setup_cn23xx_octeon_pf_device(oct); 1375 if (ret) 1376 break; 1377 #ifdef CONFIG_PCI_IOV 1378 if (!ret) 1379 pci_sriov_set_totalvfs(oct->pci_dev, 1380 oct->sriov_info.max_vfs); 1381 #endif 1382 break; 1383 1384 default: 1385 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", 1386 dev_id); 1387 } 1388 1389 return ret; 1390 } 1391 1392 /** 1393 * octeon_pci_os_setup - PCI initialization for each Octeon device. 1394 * @oct: octeon device 1395 */ 1396 static int octeon_pci_os_setup(struct octeon_device *oct) 1397 { 1398 /* setup PCI stuff first */ 1399 if (pci_enable_device(oct->pci_dev)) { 1400 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1401 return 1; 1402 } 1403 1404 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1405 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1406 pci_disable_device(oct->pci_dev); 1407 return 1; 1408 } 1409 1410 /* Enable PCI DMA Master. 
	pci_set_master(oct->pci_dev);

	return 0;
}

/**
 * free_netbuf - Unmap and free network buffer
 * @buf: buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}

/**
 * free_netsgbuf - Unmap and free gather buffer
 * @buf: buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}

/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * liquidio_ptp_adjfreq - Adjust ptp frequency
 * @ptp: PTP clock info
 * @ppb: how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_adjtime - Adjust ptp time
 * @ptp: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_enable - Check if PTP is enabled
 * @ptp: PTP clock info
 * @rq: request
 * @on: is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
		    struct ptp_clock_request __maybe_unused *rq,
		    int __maybe_unused on)
{
	return -EOPNOTSUPP;
}

/**
 * oct_ptp_open - Open PTP clock source
 * @netdev: network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

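	/* ptp_clock_register() returns an error pointer on failure */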
	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * liquidio_ptp_init - Init PTP clock
 * @oct: octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}

/**
 * load_firmware - Load firmware to device
 * @oct: octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/**
 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
 * @work: work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * setup_tx_poll_fn - Sets up the txq poll check
 * @netdev: network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}
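/**
 * cleanup_tx_poll_fn - cancel and destroy the txq status poll work
 * @netdev: network device
 */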
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}

/**
 * liquidio_open - Net device open for LiquidIO
 * @netdev: network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		tasklet_disable(&oct_priv->droq_tasklet);

		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	netif_tx_start_all_queues(netdev);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	ret = send_rx_ctrl_cmd(lio, 1);
	if (ret)
		return ret;

	/* start periodical statistics fetch */
	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return ret;
}

/**
 * liquidio_stop - Net device stop for LiquidIO
 * @netdev: network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	/* Stop any link updates */
	lio->intf_open = 0;

	stop_txqs(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	ret = send_rx_ctrl_cmd(lio, 0);
	if (ret)
		return ret;

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	cancel_delayed_work_sync(&lio->stats_wk.work);

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;

		tasklet_enable(&oct_priv->droq_tasklet);
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return ret;
}

/**
 * get_new_flags - Converts a mask based on net device flags
 * @netdev: network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
1910 */ 1911 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) 1912 { 1913 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1914 1915 if (netdev->flags & IFF_PROMISC) 1916 f |= OCTNET_IFFLAG_PROMISC; 1917 1918 if (netdev->flags & IFF_ALLMULTI) 1919 f |= OCTNET_IFFLAG_ALLMULTI; 1920 1921 if (netdev->flags & IFF_MULTICAST) { 1922 f |= OCTNET_IFFLAG_MULTICAST; 1923 1924 /* Accept all multicast addresses if there are more than we 1925 * can handle 1926 */ 1927 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1928 f |= OCTNET_IFFLAG_ALLMULTI; 1929 } 1930 1931 if (netdev->flags & IFF_BROADCAST) 1932 f |= OCTNET_IFFLAG_BROADCAST; 1933 1934 return f; 1935 } 1936 1937 /** 1938 * liquidio_set_mcast_list - Net device set_multicast_list 1939 * @netdev: network device 1940 */ 1941 static void liquidio_set_mcast_list(struct net_device *netdev) 1942 { 1943 struct lio *lio = GET_LIO(netdev); 1944 struct octeon_device *oct = lio->oct_dev; 1945 struct octnic_ctrl_pkt nctrl; 1946 struct netdev_hw_addr *ha; 1947 u64 *mc; 1948 int ret; 1949 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1950 1951 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1952 1953 /* Create a ctrl pkt command to be sent to core app. */ 1954 nctrl.ncmd.u64 = 0; 1955 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1956 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1957 nctrl.ncmd.s.param2 = mc_count; 1958 nctrl.ncmd.s.more = mc_count; 1959 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1960 nctrl.netpndev = (u64)netdev; 1961 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1962 1963 /* copy all the addresses into the udd */ 1964 mc = &nctrl.udd[0]; 1965 netdev_for_each_mc_addr(ha, netdev) { 1966 *mc = 0; 1967 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); 1968 /* no need to swap bytes */ 1969 1970 if (++mc > &nctrl.udd[mc_count]) 1971 break; 1972 } 1973 1974 /* Apparently, any activity in this call from the kernel has to 1975 * be atomic. So we won't wait for response. 1976 */ 1977 1978 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1979 if (ret) { 1980 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1981 ret); 1982 } 1983 } 1984 1985 /** 1986 * liquidio_set_mac - Net device set_mac_address 1987 * @netdev: network device 1988 * @p: pointer to sockaddr 1989 */ 1990 static int liquidio_set_mac(struct net_device *netdev, void *p) 1991 { 1992 int ret = 0; 1993 struct lio *lio = GET_LIO(netdev); 1994 struct octeon_device *oct = lio->oct_dev; 1995 struct sockaddr *addr = (struct sockaddr *)p; 1996 struct octnic_ctrl_pkt nctrl; 1997 1998 if (!is_valid_ether_addr(addr->sa_data)) 1999 return -EADDRNOTAVAIL; 2000 2001 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2002 2003 nctrl.ncmd.u64 = 0; 2004 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2005 nctrl.ncmd.s.param1 = 0; 2006 nctrl.ncmd.s.more = 1; 2007 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2008 nctrl.netpndev = (u64)netdev; 2009 2010 nctrl.udd[0] = 0; 2011 /* The MAC Address is presented in network byte order. */ 2012 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); 2013 2014 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2015 if (ret < 0) { 2016 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 2017 return -ENOMEM; 2018 } 2019 2020 if (nctrl.sc_status) { 2021 dev_err(&oct->pci_dev->dev, 2022 "%s: MAC Address change failed. 
sc return=%x\n", 2023 __func__, nctrl.sc_status); 2024 return -EIO; 2025 } 2026 2027 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2028 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2029 2030 return 0; 2031 } 2032 2033 static void 2034 liquidio_get_stats64(struct net_device *netdev, 2035 struct rtnl_link_stats64 *lstats) 2036 { 2037 struct lio *lio = GET_LIO(netdev); 2038 struct octeon_device *oct; 2039 u64 pkts = 0, drop = 0, bytes = 0; 2040 struct oct_droq_stats *oq_stats; 2041 struct oct_iq_stats *iq_stats; 2042 int i, iq_no, oq_no; 2043 2044 oct = lio->oct_dev; 2045 2046 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 2047 return; 2048 2049 for (i = 0; i < oct->num_iqs; i++) { 2050 iq_no = lio->linfo.txpciq[i].s.q_no; 2051 iq_stats = &oct->instr_queue[iq_no]->stats; 2052 pkts += iq_stats->tx_done; 2053 drop += iq_stats->tx_dropped; 2054 bytes += iq_stats->tx_tot_bytes; 2055 } 2056 2057 lstats->tx_packets = pkts; 2058 lstats->tx_bytes = bytes; 2059 lstats->tx_dropped = drop; 2060 2061 pkts = 0; 2062 drop = 0; 2063 bytes = 0; 2064 2065 for (i = 0; i < oct->num_oqs; i++) { 2066 oq_no = lio->linfo.rxpciq[i].s.q_no; 2067 oq_stats = &oct->droq[oq_no]->stats; 2068 pkts += oq_stats->rx_pkts_received; 2069 drop += (oq_stats->rx_dropped + 2070 oq_stats->dropped_nodispatch + 2071 oq_stats->dropped_toomany + 2072 oq_stats->dropped_nomem); 2073 bytes += oq_stats->rx_bytes_received; 2074 } 2075 2076 lstats->rx_bytes = bytes; 2077 lstats->rx_packets = pkts; 2078 lstats->rx_dropped = drop; 2079 2080 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 2081 lstats->collisions = oct->link_stats.fromhost.total_collisions; 2082 2083 /* detailed rx_errors: */ 2084 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 2085 /* recved pkt with crc error */ 2086 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 2087 /* recv'd frame alignment error */ 2088 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 2089 /* recv'r fifo overrun */ 2090 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; 2091 2092 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + 2093 lstats->rx_frame_errors + lstats->rx_fifo_errors; 2094 2095 /* detailed tx_errors */ 2096 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 2097 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 2098 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; 2099 2100 lstats->tx_errors = lstats->tx_aborted_errors + 2101 lstats->tx_carrier_errors + 2102 lstats->tx_fifo_errors; 2103 } 2104 2105 /** 2106 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl 2107 * @netdev: network device 2108 * @ifr: interface request 2109 */ 2110 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2111 { 2112 struct hwtstamp_config conf; 2113 struct lio *lio = GET_LIO(netdev); 2114 2115 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2116 return -EFAULT; 2117 2118 if (conf.flags) 2119 return -EINVAL; 2120 2121 switch (conf.tx_type) { 2122 case HWTSTAMP_TX_ON: 2123 case HWTSTAMP_TX_OFF: 2124 break; 2125 default: 2126 return -ERANGE; 2127 } 2128 2129 switch (conf.rx_filter) { 2130 case HWTSTAMP_FILTER_NONE: 2131 break; 2132 case HWTSTAMP_FILTER_ALL: 2133 case HWTSTAMP_FILTER_SOME: 2134 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2135 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2136 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2137 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2138 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2139 case 
HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2140 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2141 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2142 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2143 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2144 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2145 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2146 case HWTSTAMP_FILTER_NTP_ALL: 2147 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2148 break; 2149 default: 2150 return -ERANGE; 2151 } 2152 2153 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2154 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2155 2156 else 2157 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2158 2159 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2160 } 2161 2162 /** 2163 * liquidio_ioctl - ioctl handler 2164 * @netdev: network device 2165 * @ifr: interface request 2166 * @cmd: command 2167 */ 2168 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2169 { 2170 struct lio *lio = GET_LIO(netdev); 2171 2172 switch (cmd) { 2173 case SIOCSHWTSTAMP: 2174 if (lio->oct_dev->ptp_enable) 2175 return hwtstamp_ioctl(netdev, ifr); 2176 fallthrough; 2177 default: 2178 return -EOPNOTSUPP; 2179 } 2180 } 2181 2182 /** 2183 * handle_timestamp - handle a Tx timestamp response 2184 * @oct: octeon device 2185 * @status: response status 2186 * @buf: pointer to skb 2187 */ 2188 static void handle_timestamp(struct octeon_device *oct, 2189 u32 status, 2190 void *buf) 2191 { 2192 struct octnet_buf_free_info *finfo; 2193 struct octeon_soft_command *sc; 2194 struct oct_timestamp_resp *resp; 2195 struct lio *lio; 2196 struct sk_buff *skb = (struct sk_buff *)buf; 2197 2198 finfo = (struct octnet_buf_free_info *)skb->cb; 2199 lio = finfo->lio; 2200 sc = finfo->sc; 2201 oct = lio->oct_dev; 2202 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2203 2204 if (status != OCTEON_REQUEST_DONE) { 2205 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 2206 CVM_CAST64(status)); 2207 resp->timestamp = 0; 2208 } 2209 2210 octeon_swap_8B_data(&resp->timestamp, 1); 2211 2212 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 2213 struct skb_shared_hwtstamps ts; 2214 u64 ns = resp->timestamp; 2215 2216 netif_info(lio, tx_done, lio->netdev, 2217 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2218 skb, (unsigned long long)ns); 2219 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2220 skb_tstamp_tx(skb, &ts); 2221 } 2222 2223 octeon_free_soft_command(oct, sc); 2224 tx_buffer_free(skb); 2225 } 2226 2227 /** 2228 * send_nic_timestamp_pkt - Send a data packet that will be timestamped 2229 * @oct: octeon device 2230 * @ndata: pointer to network data 2231 * @finfo: pointer to private network data 2232 * @xmit_more: more packets will follow, so ringing the doorbell can be deferred 2233 */ 2234 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 2235 struct octnic_data_pkt *ndata, 2236 struct octnet_buf_free_info *finfo, 2237 int xmit_more) 2238 { 2239 int retval; 2240 struct octeon_soft_command *sc; 2241 struct lio *lio; 2242 int ring_doorbell; 2243 u32 len; 2244 2245 lio = finfo->lio; 2246 2247 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2248 sizeof(struct oct_timestamp_resp)); 2249 finfo->sc = sc; 2250 2251 if (!sc) { 2252 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2253 return IQ_SEND_FAILED; 2254 } 2255 2256 if (ndata->reqtype == REQTYPE_NORESP_NET) 2257 ndata->reqtype = REQTYPE_RESP_NET; 2258 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2259 ndata->reqtype = REQTYPE_RESP_NET_SG; 2260 2261 sc->callback = handle_timestamp; 2262 sc->callback_arg = finfo->skb; 2263 sc->iq_no = ndata->q_no; 2264 2265 if (OCTEON_CN23XX_PF(oct)) 2266 len = (u32)((struct octeon_instr_ih3 *) 2267 (&sc->cmd.cmd3.ih3))->dlengsz; 2268 else 2269 len = (u32)((struct octeon_instr_ih2 *) 2270 (&sc->cmd.cmd2.ih2))->dlengsz; 2271 2272 ring_doorbell = !xmit_more; 2273 2274 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2275 sc, len, ndata->reqtype); 2276 2277 if (retval == IQ_SEND_FAILED) { 2278 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2279 retval); 2280 octeon_free_soft_command(oct, sc); 2281 } else { 2282 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2283 } 2284 2285 return retval; 2286 } 2287 2288 /** 2289 * liquidio_xmit - Transmit network packets to the Octeon interface 2290 * @skb: skbuff struct handed down by the network stack 2291 * @netdev: pointer to network device 2292 * 2293 * Return: whether the packet was accepted by the device 2294 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2295 */ 2296 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2297 { 2298 struct lio *lio; 2299 struct octnet_buf_free_info *finfo; 2300 union octnic_cmd_setup cmdsetup; 2301 struct octnic_data_pkt ndata; 2302 struct octeon_device *oct; 2303 struct oct_iq_stats *stats; 2304 struct octeon_instr_irh *irh; 2305 union tx_info *tx_info; 2306 int status = 0; 2307 int q_idx = 0, iq_no = 0; 2308 int j, xmit_more = 0; 2309 u64 dptr = 0; 2310 u32 tag = 0; 2311 2312 lio = GET_LIO(netdev); 2313 oct = lio->oct_dev; 2314 2315 q_idx = skb_iq(oct, skb); 2316 tag = q_idx; 2317 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2318 2319 stats = &oct->instr_queue[iq_no]->stats; 2320 2321 /* Check for all conditions in which the current packet cannot be 2322 * transmitted.
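 * (interface not in the RUNNING state, link down, or a zero-length skb).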
2323 */ 2324 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2325 (!lio->linfo.link.s.link_up) || 2326 (skb->len <= 0)) { 2327 netif_info(lio, tx_err, lio->netdev, 2328 "Transmit failed link_status : %d\n", 2329 lio->linfo.link.s.link_up); 2330 goto lio_xmit_failed; 2331 } 2332 2333 /* Use space in skb->cb to store info used to unmap and 2334 * free the buffers. 2335 */ 2336 finfo = (struct octnet_buf_free_info *)skb->cb; 2337 finfo->lio = lio; 2338 finfo->skb = skb; 2339 finfo->sc = NULL; 2340 2341 /* Prepare the attributes for the data to be passed to OSI. */ 2342 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2343 2344 ndata.buf = (void *)finfo; 2345 2346 ndata.q_no = iq_no; 2347 2348 if (octnet_iq_is_full(oct, ndata.q_no)) { 2349 /* defer sending if queue is full */ 2350 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2351 ndata.q_no); 2352 stats->tx_iq_busy++; 2353 return NETDEV_TX_BUSY; 2354 } 2355 2356 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2357 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 2358 */ 2359 2360 ndata.datasize = skb->len; 2361 2362 cmdsetup.u64 = 0; 2363 cmdsetup.s.iq_no = iq_no; 2364 2365 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2366 if (skb->encapsulation) { 2367 cmdsetup.s.tnl_csum = 1; 2368 stats->tx_vxlan++; 2369 } else { 2370 cmdsetup.s.transport_csum = 1; 2371 } 2372 } 2373 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2374 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2375 cmdsetup.s.timestamp = 1; 2376 } 2377 2378 if (skb_shinfo(skb)->nr_frags == 0) { 2379 cmdsetup.s.u.datasize = skb->len; 2380 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2381 2382 /* Offload checksum calculation for TCP/UDP packets */ 2383 dptr = dma_map_single(&oct->pci_dev->dev, 2384 skb->data, 2385 skb->len, 2386 DMA_TO_DEVICE); 2387 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2388 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2389 __func__); 2390 stats->tx_dmamap_fail++; 2391 return NETDEV_TX_BUSY; 2392 } 2393 2394 if (OCTEON_CN23XX_PF(oct)) 2395 ndata.cmd.cmd3.dptr = dptr; 2396 else 2397 ndata.cmd.cmd2.dptr = dptr; 2398 finfo->dptr = dptr; 2399 ndata.reqtype = REQTYPE_NORESP_NET; 2400 2401 } else { 2402 int i, frags; 2403 skb_frag_t *frag; 2404 struct octnic_gather *g; 2405 2406 spin_lock(&lio->glist_lock[q_idx]); 2407 g = (struct octnic_gather *) 2408 lio_list_delete_head(&lio->glist[q_idx]); 2409 spin_unlock(&lio->glist_lock[q_idx]); 2410 2411 if (!g) { 2412 netif_info(lio, tx_err, lio->netdev, 2413 "Transmit scatter gather: glist null!\n"); 2414 goto lio_xmit_failed; 2415 } 2416 2417 cmdsetup.s.gather = 1; 2418 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2419 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2420 2421 memset(g->sg, 0, g->sg_size); 2422 2423 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2424 skb->data, 2425 (skb->len - skb->data_len), 2426 DMA_TO_DEVICE); 2427 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2428 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2429 __func__); 2430 stats->tx_dmamap_fail++; 2431 return NETDEV_TX_BUSY; 2432 } 2433 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2434 2435 frags = skb_shinfo(skb)->nr_frags; 2436 i = 1; 2437 while (frags--) { 2438 frag = &skb_shinfo(skb)->frags[i - 1]; 2439 2440 g->sg[(i >> 2)].ptr[(i & 3)] = 2441 skb_frag_dma_map(&oct->pci_dev->dev, 2442 frag, 0, skb_frag_size(frag), 2443 DMA_TO_DEVICE); 2444 2445 if 
(dma_mapping_error(&oct->pci_dev->dev, 2446 g->sg[i >> 2].ptr[i & 3])) { 2447 dma_unmap_single(&oct->pci_dev->dev, 2448 g->sg[0].ptr[0], 2449 skb->len - skb->data_len, 2450 DMA_TO_DEVICE); 2451 for (j = 1; j < i; j++) { 2452 frag = &skb_shinfo(skb)->frags[j - 1]; 2453 dma_unmap_page(&oct->pci_dev->dev, 2454 g->sg[j >> 2].ptr[j & 3], 2455 skb_frag_size(frag), 2456 DMA_TO_DEVICE); 2457 } 2458 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2459 __func__); 2460 return NETDEV_TX_BUSY; 2461 } 2462 2463 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 2464 (i & 3)); 2465 i++; 2466 } 2467 2468 dptr = g->sg_dma_ptr; 2469 2470 if (OCTEON_CN23XX_PF(oct)) 2471 ndata.cmd.cmd3.dptr = dptr; 2472 else 2473 ndata.cmd.cmd2.dptr = dptr; 2474 finfo->dptr = dptr; 2475 finfo->g = g; 2476 2477 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2478 } 2479 2480 if (OCTEON_CN23XX_PF(oct)) { 2481 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2482 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2483 } else { 2484 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 2485 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 2486 } 2487 2488 if (skb_shinfo(skb)->gso_size) { 2489 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2490 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2491 stats->tx_gso++; 2492 } 2493 2494 /* HW insert VLAN tag */ 2495 if (skb_vlan_tag_present(skb)) { 2496 irh->priority = skb_vlan_tag_get(skb) >> 13; 2497 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 2498 } 2499 2500 xmit_more = netdev_xmit_more(); 2501 2502 if (unlikely(cmdsetup.s.timestamp)) 2503 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2504 else 2505 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2506 if (status == IQ_SEND_FAILED) 2507 goto lio_xmit_failed; 2508 2509 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2510 2511 if (status == IQ_SEND_STOP) 2512 netif_stop_subqueue(netdev, q_idx); 2513 2514 netif_trans_update(netdev); 2515 2516 if (tx_info->s.gso_segs) 2517 stats->tx_done += tx_info->s.gso_segs; 2518 else 2519 stats->tx_done++; 2520 stats->tx_tot_bytes += ndata.datasize; 2521 2522 return NETDEV_TX_OK; 2523 2524 lio_xmit_failed: 2525 stats->tx_dropped++; 2526 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2527 iq_no, stats->tx_dropped); 2528 if (dptr) 2529 dma_unmap_single(&oct->pci_dev->dev, dptr, 2530 ndata.datasize, DMA_TO_DEVICE); 2531 2532 octeon_ring_doorbell_locked(oct, iq_no); 2533 2534 tx_buffer_free(skb); 2535 return NETDEV_TX_OK; 2536 } 2537 2538 /** 2539 * liquidio_tx_timeout - Network device Tx timeout 2540 * @netdev: pointer to network device 2541 * @txqueue: index of the hung transmit queue 2542 */ 2543 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2544 { 2545 struct lio *lio; 2546 2547 lio = GET_LIO(netdev); 2548 2549 netif_info(lio, tx_err, lio->netdev, 2550 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2551 netdev->stats.tx_dropped); 2552 netif_trans_update(netdev); 2553 wake_txqs(netdev); 2554 } 2555 2556 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 2557 __be16 proto __attribute__((unused)), 2558 u16 vid) 2559 { 2560 struct lio *lio = GET_LIO(netdev); 2561 struct octeon_device *oct = lio->oct_dev; 2562 struct octnic_ctrl_pkt nctrl; 2563 int ret = 0; 2564 2565 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2566 2567 nctrl.ncmd.u64 = 0; 2568 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2569 nctrl.ncmd.s.param1 = vid; 2570 nctrl.iq_no = 
lio->linfo.txpciq[0].s.q_no; 2571 nctrl.netpndev = (u64)netdev; 2572 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2573 2574 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2575 if (ret) { 2576 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2577 ret); 2578 if (ret > 0) 2579 ret = -EIO; 2580 } 2581 2582 return ret; 2583 } 2584 2585 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2586 __be16 proto __attribute__((unused)), 2587 u16 vid) 2588 { 2589 struct lio *lio = GET_LIO(netdev); 2590 struct octeon_device *oct = lio->oct_dev; 2591 struct octnic_ctrl_pkt nctrl; 2592 int ret = 0; 2593 2594 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2595 2596 nctrl.ncmd.u64 = 0; 2597 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2598 nctrl.ncmd.s.param1 = vid; 2599 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2600 nctrl.netpndev = (u64)netdev; 2601 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2602 2603 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2604 if (ret) { 2605 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 2606 ret); 2607 if (ret > 0) 2608 ret = -EIO; 2609 } 2610 return ret; 2611 } 2612 2613 /** 2614 * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload 2615 * @netdev: pointer to network device 2616 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL 2617 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE 2618 * Returns: SUCCESS or FAILURE 2619 */ 2620 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2621 u8 rx_cmd) 2622 { 2623 struct lio *lio = GET_LIO(netdev); 2624 struct octeon_device *oct = lio->oct_dev; 2625 struct octnic_ctrl_pkt nctrl; 2626 int ret = 0; 2627 2628 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2629 2630 nctrl.ncmd.u64 = 0; 2631 nctrl.ncmd.s.cmd = command; 2632 nctrl.ncmd.s.param1 = rx_cmd; 2633 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2634 nctrl.netpndev = (u64)netdev; 2635 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2636 2637 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2638 if (ret) { 2639 dev_err(&oct->pci_dev->dev, 2640 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 2641 ret); 2642 if (ret > 0) 2643 ret = -EIO; 2644 } 2645 return ret; 2646 } 2647 2648 /** 2649 * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware 2650 * @netdev: pointer to network device 2651 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG 2652 * @vxlan_port: VxLAN port to be added or deleted 2653 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD, 2654 * OCTNET_CMD_VXLAN_PORT_DEL 2655 * Return: SUCCESS or FAILURE 2656 */ 2657 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2658 u16 vxlan_port, u8 vxlan_cmd_bit) 2659 { 2660 struct lio *lio = GET_LIO(netdev); 2661 struct octeon_device *oct = lio->oct_dev; 2662 struct octnic_ctrl_pkt nctrl; 2663 int ret = 0; 2664 2665 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2666 2667 nctrl.ncmd.u64 = 0; 2668 nctrl.ncmd.s.cmd = command; 2669 nctrl.ncmd.s.more = vxlan_cmd_bit; 2670 nctrl.ncmd.s.param1 = vxlan_port; 2671 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2672 nctrl.netpndev = (u64)netdev; 2673 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2674 2675 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2676 if (ret) { 2677 dev_err(&oct->pci_dev->dev, 2678 "VxLAN port add/delete failed in core (ret:0x%x)\n", 2679 ret); 2680 if (ret > 0) 2681 ret = -EIO; 2682 } 2683 return ret; 2684 } 2685 2686 static int 
liquidio_udp_tunnel_set_port(struct net_device *netdev, 2687 unsigned int table, unsigned int entry, 2688 struct udp_tunnel_info *ti) 2689 { 2690 return liquidio_vxlan_port_command(netdev, 2691 OCTNET_CMD_VXLAN_PORT_CONFIG, 2692 htons(ti->port), 2693 OCTNET_CMD_VXLAN_PORT_ADD); 2694 } 2695 2696 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 2697 unsigned int table, 2698 unsigned int entry, 2699 struct udp_tunnel_info *ti) 2700 { 2701 return liquidio_vxlan_port_command(netdev, 2702 OCTNET_CMD_VXLAN_PORT_CONFIG, 2703 htons(ti->port), 2704 OCTNET_CMD_VXLAN_PORT_DEL); 2705 } 2706 2707 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 2708 .set_port = liquidio_udp_tunnel_set_port, 2709 .unset_port = liquidio_udp_tunnel_unset_port, 2710 .tables = { 2711 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 2712 }, 2713 }; 2714 2715 /** 2716 * liquidio_fix_features - Net device fix features 2717 * @netdev: pointer to network device 2718 * @request: features requested 2719 * Return: updated features list 2720 */ 2721 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2722 netdev_features_t request) 2723 { 2724 struct lio *lio = netdev_priv(netdev); 2725 2726 if ((request & NETIF_F_RXCSUM) && 2727 !(lio->dev_capability & NETIF_F_RXCSUM)) 2728 request &= ~NETIF_F_RXCSUM; 2729 2730 if ((request & NETIF_F_HW_CSUM) && 2731 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2732 request &= ~NETIF_F_HW_CSUM; 2733 2734 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2735 request &= ~NETIF_F_TSO; 2736 2737 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2738 request &= ~NETIF_F_TSO6; 2739 2740 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2741 request &= ~NETIF_F_LRO; 2742 2743 /*Disable LRO if RXCSUM is off */ 2744 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2745 (lio->dev_capability & NETIF_F_LRO)) 2746 request &= ~NETIF_F_LRO; 2747 2748 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && 2749 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) 2750 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2751 2752 return request; 2753 } 2754 2755 /** 2756 * liquidio_set_features - Net device set features 2757 * @netdev: pointer to network device 2758 * @features: features to enable/disable 2759 */ 2760 static int liquidio_set_features(struct net_device *netdev, 2761 netdev_features_t features) 2762 { 2763 struct lio *lio = netdev_priv(netdev); 2764 2765 if ((features & NETIF_F_LRO) && 2766 (lio->dev_capability & NETIF_F_LRO) && 2767 !(netdev->features & NETIF_F_LRO)) 2768 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2769 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2770 else if (!(features & NETIF_F_LRO) && 2771 (lio->dev_capability & NETIF_F_LRO) && 2772 (netdev->features & NETIF_F_LRO)) 2773 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2774 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2775 2776 /* Sending command to firmware to enable/disable RX checksum 2777 * offload settings using ethtool 2778 */ 2779 if (!(netdev->features & NETIF_F_RXCSUM) && 2780 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2781 (features & NETIF_F_RXCSUM)) 2782 liquidio_set_rxcsum_command(netdev, 2783 OCTNET_CMD_TNL_RX_CSUM_CTL, 2784 OCTNET_CMD_RXCSUM_ENABLE); 2785 else if ((netdev->features & NETIF_F_RXCSUM) && 2786 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2787 !(features & NETIF_F_RXCSUM)) 2788 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2789 OCTNET_CMD_RXCSUM_DISABLE); 2790 
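	/* The VLAN filter toggle below follows the same pattern. All of these
	 * bits are normally flipped from user space with ethtool, e.g.
	 * (illustrative): "ethtool -K eth0 rx off" clears NETIF_F_RXCSUM and
	 * arrives here through the .ndo_set_features hook.
	 */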
2791 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2792 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2793 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2794 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2795 OCTNET_CMD_VLAN_FILTER_ENABLE); 2796 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2797 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2798 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2799 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2800 OCTNET_CMD_VLAN_FILTER_DISABLE); 2801 2802 return 0; 2803 } 2804 2805 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 2806 u8 *mac, bool is_admin_assigned) 2807 { 2808 struct lio *lio = GET_LIO(netdev); 2809 struct octeon_device *oct = lio->oct_dev; 2810 struct octnic_ctrl_pkt nctrl; 2811 int ret = 0; 2812 2813 if (!is_valid_ether_addr(mac)) 2814 return -EINVAL; 2815 2816 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 2817 return -EINVAL; 2818 2819 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2820 2821 nctrl.ncmd.u64 = 0; 2822 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2823 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 2824 nctrl.ncmd.s.param1 = vfidx + 1; 2825 nctrl.ncmd.s.more = 1; 2826 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2827 nctrl.netpndev = (u64)netdev; 2828 if (is_admin_assigned) { 2829 nctrl.ncmd.s.param2 = true; 2830 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2831 } 2832 2833 nctrl.udd[0] = 0; 2834 /* The MAC Address is presented in network byte order. */ 2835 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 2836 2837 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 2838 2839 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2840 if (ret > 0) 2841 ret = -EIO; 2842 2843 return ret; 2844 } 2845 2846 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 2847 { 2848 struct lio *lio = GET_LIO(netdev); 2849 struct octeon_device *oct = lio->oct_dev; 2850 int retval; 2851 2852 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2853 return -EINVAL; 2854 2855 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 2856 if (!retval) 2857 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 2858 2859 return retval; 2860 } 2861 2862 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, 2863 bool enable) 2864 { 2865 struct lio *lio = GET_LIO(netdev); 2866 struct octeon_device *oct = lio->oct_dev; 2867 struct octnic_ctrl_pkt nctrl; 2868 int retval; 2869 2870 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { 2871 netif_info(lio, drv, lio->netdev, 2872 "firmware does not support spoofchk\n"); 2873 return -EOPNOTSUPP; 2874 } 2875 2876 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 2877 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 2878 return -EINVAL; 2879 } 2880 2881 if (enable) { 2882 if (oct->sriov_info.vf_spoofchk[vfidx]) 2883 return 0; 2884 } else { 2885 /* Clear */ 2886 if (!oct->sriov_info.vf_spoofchk[vfidx]) 2887 return 0; 2888 } 2889 2890 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2891 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; 2892 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; 2893 nctrl.ncmd.s.param1 = 2894 vfidx + 1; /* vfidx is 0 based, 2895 * but vf_num (param1) is 1 based 2896 */ 2897 nctrl.ncmd.s.param2 = enable; 2898 nctrl.ncmd.s.more = 0; 2899 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2900 nctrl.cb_fn = NULL; 2901 2902 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2903 2904 if (retval) { 2905 netif_info(lio, drv, 
lio->netdev, 2906 "Failed to set VF %d spoofchk %s\n", vfidx, 2907 enable ? "on" : "off"); 2908 return -1; 2909 } 2910 2911 oct->sriov_info.vf_spoofchk[vfidx] = enable; 2912 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, 2913 enable ? "on" : "off"); 2914 2915 return 0; 2916 } 2917 2918 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 2919 u16 vlan, u8 qos, __be16 vlan_proto) 2920 { 2921 struct lio *lio = GET_LIO(netdev); 2922 struct octeon_device *oct = lio->oct_dev; 2923 struct octnic_ctrl_pkt nctrl; 2924 u16 vlantci; 2925 int ret = 0; 2926 2927 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2928 return -EINVAL; 2929 2930 if (vlan_proto != htons(ETH_P_8021Q)) 2931 return -EPROTONOSUPPORT; 2932 2933 if (vlan >= VLAN_N_VID || qos > 7) 2934 return -EINVAL; 2935 2936 if (vlan) 2937 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 2938 else 2939 vlantci = 0; 2940 2941 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 2942 return 0; 2943 2944 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2945 2946 if (vlan) 2947 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2948 else 2949 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2950 2951 nctrl.ncmd.s.param1 = vlantci; 2952 nctrl.ncmd.s.param2 = 2953 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 2954 nctrl.ncmd.s.more = 0; 2955 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2956 nctrl.cb_fn = NULL; 2957 2958 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2959 if (ret) { 2960 if (ret > 0) 2961 ret = -EIO; 2962 return ret; 2963 } 2964 2965 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 2966 2967 return ret; 2968 } 2969 2970 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 2971 struct ifla_vf_info *ivi) 2972 { 2973 struct lio *lio = GET_LIO(netdev); 2974 struct octeon_device *oct = lio->oct_dev; 2975 u8 *macaddr; 2976 2977 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2978 return -EINVAL; 2979 2980 memset(ivi, 0, sizeof(struct ifla_vf_info)); 2981 2982 ivi->vf = vfidx; 2983 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 2984 ether_addr_copy(&ivi->mac[0], macaddr); 2985 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 2986 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 2987 if (oct->sriov_info.trusted_vf.active && 2988 oct->sriov_info.trusted_vf.id == vfidx) 2989 ivi->trusted = true; 2990 else 2991 ivi->trusted = false; 2992 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 2993 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; 2994 ivi->max_tx_rate = lio->linfo.link.s.speed; 2995 ivi->min_tx_rate = 0; 2996 2997 return 0; 2998 } 2999 3000 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) 3001 { 3002 struct octeon_device *oct = lio->oct_dev; 3003 struct octeon_soft_command *sc; 3004 int retval; 3005 3006 sc = octeon_alloc_soft_command(oct, 0, 16, 0); 3007 if (!sc) 3008 return -ENOMEM; 3009 3010 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 3011 3012 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3013 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 3014 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, 3015 trusted); 3016 3017 init_completion(&sc->complete); 3018 sc->sc_status = OCTEON_REQUEST_PENDING; 3019 3020 retval = octeon_send_soft_command(oct, sc); 3021 if (retval == IQ_SEND_FAILED) { 3022 octeon_free_soft_command(oct, sc); 3023 retval = -1; 3024 } else { 3025 /* Wait for response or timeout */ 3026 retval = wait_for_sc_completion_timeout(oct, sc, 0); 3027 if (retval) 3028 return 
(retval); 3029 3030 WRITE_ONCE(sc->caller_is_done, true); 3031 } 3032 3033 return retval; 3034 } 3035 3036 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, 3037 bool setting) 3038 { 3039 struct lio *lio = GET_LIO(netdev); 3040 struct octeon_device *oct = lio->oct_dev; 3041 3042 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { 3043 /* trusted vf is not supported by firmware older than 1.7.1 */ 3044 return -EOPNOTSUPP; 3045 } 3046 3047 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 3048 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 3049 return -EINVAL; 3050 } 3051 3052 if (setting) { 3053 /* Set */ 3054 3055 if (oct->sriov_info.trusted_vf.active && 3056 oct->sriov_info.trusted_vf.id == vfidx) 3057 return 0; 3058 3059 if (oct->sriov_info.trusted_vf.active) { 3060 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); 3061 return -EPERM; 3062 } 3063 } else { 3064 /* Clear */ 3065 3066 if (!oct->sriov_info.trusted_vf.active) 3067 return 0; 3068 } 3069 3070 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { 3071 if (setting) { 3072 oct->sriov_info.trusted_vf.id = vfidx; 3073 oct->sriov_info.trusted_vf.active = true; 3074 } else { 3075 oct->sriov_info.trusted_vf.active = false; 3076 } 3077 3078 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, 3079 setting ? "" : "not "); 3080 } else { 3081 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); 3082 return -1; 3083 } 3084 3085 return 0; 3086 } 3087 3088 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3089 int linkstate) 3090 { 3091 struct lio *lio = GET_LIO(netdev); 3092 struct octeon_device *oct = lio->oct_dev; 3093 struct octnic_ctrl_pkt nctrl; 3094 int ret = 0; 3095 3096 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3097 return -EINVAL; 3098 3099 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3100 return 0; 3101 3102 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3103 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3104 nctrl.ncmd.s.param1 = 3105 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3106 nctrl.ncmd.s.param2 = linkstate; 3107 nctrl.ncmd.s.more = 0; 3108 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3109 nctrl.cb_fn = NULL; 3110 3111 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 3112 3113 if (!ret) 3114 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 3115 else if (ret > 0) 3116 ret = -EIO; 3117 3118 return ret; 3119 } 3120 3121 static int 3122 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3123 { 3124 struct lio_devlink_priv *priv; 3125 struct octeon_device *oct; 3126 3127 priv = devlink_priv(devlink); 3128 oct = priv->oct; 3129 3130 *mode = oct->eswitch_mode; 3131 3132 return 0; 3133 } 3134 3135 static int 3136 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, 3137 struct netlink_ext_ack *extack) 3138 { 3139 struct lio_devlink_priv *priv; 3140 struct octeon_device *oct; 3141 int ret = 0; 3142 3143 priv = devlink_priv(devlink); 3144 oct = priv->oct; 3145 3146 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) 3147 return -EINVAL; 3148 3149 if (oct->eswitch_mode == mode) 3150 return 0; 3151 3152 switch (mode) { 3153 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3154 oct->eswitch_mode = mode; 3155 ret = lio_vf_rep_create(oct); 3156 break; 3157 3158 case DEVLINK_ESWITCH_MODE_LEGACY: 3159 lio_vf_rep_destroy(oct); 3160 oct->eswitch_mode = mode; 3161 break; 3162 3163 default: 3164 ret = -EINVAL; 3165 } 3166 3167 return ret; 
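	/* A mode change is typically requested from user space with the
	 * standard devlink tool, e.g. (PCI address illustrative):
	 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
	 * and lands here through the devlink_ops registered below.
	 */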
3168 } 3169 3170 static const struct devlink_ops liquidio_devlink_ops = { 3171 .eswitch_mode_get = liquidio_eswitch_mode_get, 3172 .eswitch_mode_set = liquidio_eswitch_mode_set, 3173 }; 3174 3175 static int 3176 liquidio_get_port_parent_id(struct net_device *dev, 3177 struct netdev_phys_item_id *ppid) 3178 { 3179 struct lio *lio = GET_LIO(dev); 3180 struct octeon_device *oct = lio->oct_dev; 3181 3182 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 3183 return -EOPNOTSUPP; 3184 3185 ppid->id_len = ETH_ALEN; 3186 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); 3187 3188 return 0; 3189 } 3190 3191 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, 3192 struct ifla_vf_stats *vf_stats) 3193 { 3194 struct lio *lio = GET_LIO(netdev); 3195 struct octeon_device *oct = lio->oct_dev; 3196 struct oct_vf_stats stats; 3197 int ret; 3198 3199 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3200 return -EINVAL; 3201 3202 memset(&stats, 0, sizeof(struct oct_vf_stats)); 3203 ret = cn23xx_get_vf_stats(oct, vfidx, &stats); 3204 if (!ret) { 3205 vf_stats->rx_packets = stats.rx_packets; 3206 vf_stats->tx_packets = stats.tx_packets; 3207 vf_stats->rx_bytes = stats.rx_bytes; 3208 vf_stats->tx_bytes = stats.tx_bytes; 3209 vf_stats->broadcast = stats.broadcast; 3210 vf_stats->multicast = stats.multicast; 3211 } 3212 3213 return ret; 3214 } 3215 3216 static const struct net_device_ops lionetdevops = { 3217 .ndo_open = liquidio_open, 3218 .ndo_stop = liquidio_stop, 3219 .ndo_start_xmit = liquidio_xmit, 3220 .ndo_get_stats64 = liquidio_get_stats64, 3221 .ndo_set_mac_address = liquidio_set_mac, 3222 .ndo_set_rx_mode = liquidio_set_mcast_list, 3223 .ndo_tx_timeout = liquidio_tx_timeout, 3224 3225 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3226 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3227 .ndo_change_mtu = liquidio_change_mtu, 3228 .ndo_eth_ioctl = liquidio_ioctl, 3229 .ndo_fix_features = liquidio_fix_features, 3230 .ndo_set_features = liquidio_set_features, 3231 .ndo_set_vf_mac = liquidio_set_vf_mac, 3232 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3233 .ndo_get_vf_config = liquidio_get_vf_config, 3234 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, 3235 .ndo_set_vf_trust = liquidio_set_vf_trust, 3236 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3237 .ndo_get_vf_stats = liquidio_get_vf_stats, 3238 .ndo_get_port_parent_id = liquidio_get_port_parent_id, 3239 }; 3240 3241 /** 3242 * liquidio_init - Entry point for the liquidio module 3243 */ 3244 static int __init liquidio_init(void) 3245 { 3246 int i; 3247 struct handshake *hs; 3248 3249 init_completion(&first_stage); 3250 3251 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3252 3253 if (liquidio_init_pci()) 3254 return -EINVAL; 3255 3256 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3257 3258 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3259 hs = &handshake[i]; 3260 if (hs->pci_dev) { 3261 wait_for_completion(&hs->init); 3262 if (!hs->init_ok) { 3263 /* init handshake failed */ 3264 dev_err(&hs->pci_dev->dev, 3265 "Failed to init device\n"); 3266 liquidio_deinit_pci(); 3267 return -EIO; 3268 } 3269 } 3270 } 3271 3272 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3273 hs = &handshake[i]; 3274 if (hs->pci_dev) { 3275 wait_for_completion_timeout(&hs->started, 3276 msecs_to_jiffies(30000)); 3277 if (!hs->started_ok) { 3278 /* starter handshake failed */ 3279 dev_err(&hs->pci_dev->dev, 3280 "Firmware failed to start\n"); 3281 liquidio_deinit_pci(); 3282 return -EIO; 3283 } 3284 } 
3285 } 3286 3287 return 0; 3288 } 3289 3290 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3291 { 3292 struct octeon_device *oct = (struct octeon_device *)buf; 3293 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3294 int gmxport = 0; 3295 union oct_link_status *ls; 3296 int i; 3297 3298 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 3299 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3300 recv_pkt->buffer_size[0], 3301 recv_pkt->rh.r_nic_info.gmxport); 3302 goto nic_info_err; 3303 } 3304 3305 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3306 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 3307 OCT_DROQ_INFO_SIZE); 3308 3309 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3310 for (i = 0; i < oct->ifcount; i++) { 3311 if (oct->props[i].gmxport == gmxport) { 3312 update_link_status(oct->props[i].netdev, ls); 3313 break; 3314 } 3315 } 3316 3317 nic_info_err: 3318 for (i = 0; i < recv_pkt->buffer_count; i++) 3319 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3320 octeon_free_recv_info(recv_info); 3321 return 0; 3322 } 3323 3324 /** 3325 * setup_nic_devices - Setup network interfaces 3326 * @octeon_dev: octeon device 3327 * 3328 * Called during init time for each device. It assumes the NIC 3329 * is already up and running. The link information for each 3330 * interface is passed in link_info. 3331 */ 3332 static int setup_nic_devices(struct octeon_device *octeon_dev) 3333 { 3334 struct lio *lio = NULL; 3335 struct net_device *netdev; 3336 u8 mac[6], i, j, *fw_ver, *micro_ver; 3337 unsigned long micro; 3338 u32 cur_ver; 3339 struct octeon_soft_command *sc; 3340 struct liquidio_if_cfg_resp *resp; 3341 struct octdev_props *props; 3342 int retval, num_iqueues, num_oqueues; 3343 int max_num_queues = 0; 3344 union oct_nic_if_cfg if_cfg; 3345 unsigned int base_queue; 3346 unsigned int gmx_port_id; 3347 u32 resp_size, data_size; 3348 u32 ifidx_or_pfnum; 3349 struct lio_version *vdata; 3350 struct devlink *devlink; 3351 struct lio_devlink_priv *lio_devlink; 3352 3353 /* This is to handle link status changes */ 3354 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3355 OPCODE_NIC_INFO, 3356 lio_nic_info, octeon_dev); 3357 3358 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3359 * They are handled directly. 
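 * Free functions are therefore registered below only for REQTYPE_NORESP_NET,
 * REQTYPE_NORESP_NET_SG and REQTYPE_RESP_NET_SG.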
3360 */ 3361 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3362 free_netbuf); 3363 3364 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3365 free_netsgbuf); 3366 3367 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3368 free_netsgbuf_with_resp); 3369 3370 for (i = 0; i < octeon_dev->ifcount; i++) { 3371 resp_size = sizeof(struct liquidio_if_cfg_resp); 3372 data_size = sizeof(struct lio_version); 3373 sc = (struct octeon_soft_command *) 3374 octeon_alloc_soft_command(octeon_dev, data_size, 3375 resp_size, 0); if (!sc) return -ENOMEM; 3376 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3377 vdata = (struct lio_version *)sc->virtdptr; 3378 3379 *((u64 *)vdata) = 0; 3380 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 3381 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 3382 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 3383 3384 if (OCTEON_CN23XX_PF(octeon_dev)) { 3385 num_iqueues = octeon_dev->sriov_info.num_pf_rings; 3386 num_oqueues = octeon_dev->sriov_info.num_pf_rings; 3387 base_queue = octeon_dev->sriov_info.pf_srn; 3388 3389 gmx_port_id = octeon_dev->pf_num; 3390 ifidx_or_pfnum = octeon_dev->pf_num; 3391 } else { 3392 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( 3393 octeon_get_conf(octeon_dev), i); 3394 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( 3395 octeon_get_conf(octeon_dev), i); 3396 base_queue = CFG_GET_BASE_QUE_NIC_IF( 3397 octeon_get_conf(octeon_dev), i); 3398 gmx_port_id = CFG_GET_GMXID_NIC_IF( 3399 octeon_get_conf(octeon_dev), i); 3400 ifidx_or_pfnum = i; 3401 } 3402 3403 dev_dbg(&octeon_dev->pci_dev->dev, 3404 "requesting config for interface %d, iqs %d, oqs %d\n", 3405 ifidx_or_pfnum, num_iqueues, num_oqueues); 3406 3407 if_cfg.u64 = 0; 3408 if_cfg.s.num_iqueues = num_iqueues; 3409 if_cfg.s.num_oqueues = num_oqueues; 3410 if_cfg.s.base_queue = base_queue; 3411 if_cfg.s.gmx_port_id = gmx_port_id; 3412 3413 sc->iq_no = 0; 3414 3415 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3416 OPCODE_NIC_IF_CFG, 0, 3417 if_cfg.u64, 0); 3418 3419 init_completion(&sc->complete); 3420 sc->sc_status = OCTEON_REQUEST_PENDING; 3421 3422 retval = octeon_send_soft_command(octeon_dev, sc); 3423 if (retval == IQ_SEND_FAILED) { 3424 dev_err(&octeon_dev->pci_dev->dev, 3425 "iq/oq config failed status: %x\n", 3426 retval); 3427 /* Soft instr is freed by driver in case of failure. */ 3428 octeon_free_soft_command(octeon_dev, sc); 3429 return -EIO; 3430 } 3431 3432 /* Sleep on a wait queue till the cond flag indicates that the 3433 * response arrived or timed out. 3434 */ 3435 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); 3436 if (retval) 3437 return retval; 3438 3439 retval = resp->status; 3440 if (retval) { 3441 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3442 WRITE_ONCE(sc->caller_is_done, true); 3443 goto setup_nic_dev_done; 3444 } 3445 snprintf(octeon_dev->fw_info.liquidio_firmware_version, 3446 32, "%s", 3447 resp->cfg_info.liquidio_firmware_version); 3448 3449 /* Verify f/w version (in case of 'auto' loading from flash) */ 3450 fw_ver = octeon_dev->fw_info.liquidio_firmware_version; 3451 if (memcmp(LIQUIDIO_BASE_VERSION, 3452 fw_ver, 3453 strlen(LIQUIDIO_BASE_VERSION))) { 3454 dev_err(&octeon_dev->pci_dev->dev, 3455 "Unmatched firmware version. Expected %s.x, got %s.\n", 3456 LIQUIDIO_BASE_VERSION, fw_ver); 3457 WRITE_ONCE(sc->caller_is_done, true); 3458 goto setup_nic_dev_done; 3459 } else if (atomic_read(octeon_dev->adapter_fw_state) == 3460 FW_IS_PRELOADED) { 3461 dev_info(&octeon_dev->pci_dev->dev, 3462 "Using auto-loaded firmware version %s.\n", 3463 fw_ver); 3464 } 3465 3466 /* extract micro version field; point past '<maj>.<min>.' */ 3467 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; 3468 if (kstrtoul(micro_ver, 10, &micro) != 0) 3469 micro = 0; 3470 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; 3471 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; 3472 octeon_dev->fw_info.ver.rev = micro; 3473 3474 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3475 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3476 3477 num_iqueues = hweight64(resp->cfg_info.iqmask); 3478 num_oqueues = hweight64(resp->cfg_info.oqmask); 3479 3480 if (!(num_iqueues) || !(num_oqueues)) { 3481 dev_err(&octeon_dev->pci_dev->dev, 3482 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3483 resp->cfg_info.iqmask, 3484 resp->cfg_info.oqmask); 3485 WRITE_ONCE(sc->caller_is_done, true); 3486 goto setup_nic_dev_done; 3487 } 3488 3489 if (OCTEON_CN6XXX(octeon_dev)) { 3490 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3491 cn6xxx)); 3492 } else if (OCTEON_CN23XX_PF(octeon_dev)) { 3493 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3494 cn23xx_pf)); 3495 } 3496 3497 dev_dbg(&octeon_dev->pci_dev->dev, 3498 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", 3499 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3500 num_iqueues, num_oqueues, max_num_queues); 3501 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); 3502 3503 if (!netdev) { 3504 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3505 WRITE_ONCE(sc->caller_is_done, true); 3506 goto setup_nic_dev_done; 3507 } 3508 3509 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 3510 3511 /* Associate the routines that will handle different 3512 * netdev tasks.
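 * (open/stop, transmit, statistics, MAC/VLAN updates and the SR-IOV
 * controls collected in lionetdevops above).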
3513 */ 3514 netdev->netdev_ops = &lionetdevops; 3515 3516 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3517 if (retval) { 3518 dev_err(&octeon_dev->pci_dev->dev, 3519 "setting real number rx failed\n"); 3520 WRITE_ONCE(sc->caller_is_done, true); 3521 goto setup_nic_dev_free; 3522 } 3523 3524 retval = netif_set_real_num_tx_queues(netdev, num_iqueues); 3525 if (retval) { 3526 dev_err(&octeon_dev->pci_dev->dev, 3527 "setting real number tx failed\n"); 3528 WRITE_ONCE(sc->caller_is_done, true); 3529 goto setup_nic_dev_free; 3530 } 3531 3532 lio = GET_LIO(netdev); 3533 3534 memset(lio, 0, sizeof(struct lio)); 3535 3536 lio->ifidx = ifidx_or_pfnum; 3537 3538 props = &octeon_dev->props[i]; 3539 props->gmxport = resp->cfg_info.linfo.gmxport; 3540 props->netdev = netdev; 3541 3542 lio->linfo.num_rxpciq = num_oqueues; 3543 lio->linfo.num_txpciq = num_iqueues; 3544 for (j = 0; j < num_oqueues; j++) { 3545 lio->linfo.rxpciq[j].u64 = 3546 resp->cfg_info.linfo.rxpciq[j].u64; 3547 } 3548 for (j = 0; j < num_iqueues; j++) { 3549 lio->linfo.txpciq[j].u64 = 3550 resp->cfg_info.linfo.txpciq[j].u64; 3551 } 3552 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3553 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3554 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3555 3556 WRITE_ONCE(sc->caller_is_done, true); 3557 3558 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3559 3560 if (OCTEON_CN23XX_PF(octeon_dev) || 3561 OCTEON_CN6XXX(octeon_dev)) { 3562 lio->dev_capability = NETIF_F_HIGHDMA 3563 | NETIF_F_IP_CSUM 3564 | NETIF_F_IPV6_CSUM 3565 | NETIF_F_SG | NETIF_F_RXCSUM 3566 | NETIF_F_GRO 3567 | NETIF_F_TSO | NETIF_F_TSO6 3568 | NETIF_F_LRO; 3569 } 3570 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3571 3572 /* Copy of transmit encapsulation capabilities: 3573 * TSO, TSO6, Checksums for this device 3574 */ 3575 lio->enc_dev_capability = NETIF_F_IP_CSUM 3576 | NETIF_F_IPV6_CSUM 3577 | NETIF_F_GSO_UDP_TUNNEL 3578 | NETIF_F_HW_CSUM | NETIF_F_SG 3579 | NETIF_F_RXCSUM 3580 | NETIF_F_TSO | NETIF_F_TSO6 3581 | NETIF_F_LRO; 3582 3583 netdev->hw_enc_features = (lio->enc_dev_capability & 3584 ~NETIF_F_LRO); 3585 3586 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; 3587 3588 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; 3589 3590 netdev->vlan_features = lio->dev_capability; 3591 /* Add any unchangeable hw features */ 3592 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 3593 NETIF_F_HW_VLAN_CTAG_RX | 3594 NETIF_F_HW_VLAN_CTAG_TX; 3595 3596 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 3597 3598 netdev->hw_features = lio->dev_capability; 3599 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ 3600 netdev->hw_features = netdev->hw_features & 3601 ~NETIF_F_HW_VLAN_CTAG_RX; 3602 3603 /* MTU range: 68 - 16000 */ 3604 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3605 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3606 3607 /* Point to the properties for octeon device to which this 3608 * interface belongs. 
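 * (the octdev_props entry selected earlier in this loop).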
3609 */ 3610 lio->oct_dev = octeon_dev; 3611 lio->octprops = props; 3612 lio->netdev = netdev; 3613 3614 dev_dbg(&octeon_dev->pci_dev->dev, 3615 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3616 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3617 3618 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { 3619 u8 vfmac[ETH_ALEN]; 3620 3621 eth_random_addr(vfmac); 3622 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { 3623 dev_err(&octeon_dev->pci_dev->dev, 3624 "Error setting VF%d MAC address\n", 3625 j); 3626 goto setup_nic_dev_free; 3627 } 3628 } 3629 3630 /* 64-bit swap required on LE machines */ 3631 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3632 for (j = 0; j < 6; j++) 3633 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3634 3635 /* Copy MAC Address to OS network device structure */ 3636 3637 eth_hw_addr_set(netdev, mac); 3638 3639 /* By default all interfaces on a single Octeon uses the same 3640 * tx and rx queues 3641 */ 3642 lio->txq = lio->linfo.txpciq[0].s.q_no; 3643 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 3644 if (liquidio_setup_io_queues(octeon_dev, i, 3645 lio->linfo.num_txpciq, 3646 lio->linfo.num_rxpciq)) { 3647 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3648 goto setup_nic_dev_free; 3649 } 3650 3651 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3652 3653 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3654 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3655 3656 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { 3657 dev_err(&octeon_dev->pci_dev->dev, 3658 "Gather list allocation failed\n"); 3659 goto setup_nic_dev_free; 3660 } 3661 3662 /* Register ethtool support */ 3663 liquidio_set_ethtool_ops(netdev); 3664 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) 3665 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 3666 else 3667 octeon_dev->priv_flags = 0x0; 3668 3669 if (netdev->features & NETIF_F_LRO) 3670 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3671 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3672 3673 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3674 OCTNET_CMD_VLAN_FILTER_ENABLE); 3675 3676 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3677 liquidio_set_feature(netdev, 3678 OCTNET_CMD_VERBOSE_ENABLE, 0); 3679 3680 if (setup_link_status_change_wq(netdev)) 3681 goto setup_nic_dev_free; 3682 3683 if ((octeon_dev->fw_info.app_cap_flags & 3684 LIQUIDIO_TIME_SYNC_CAP) && 3685 setup_sync_octeon_time_wq(netdev)) 3686 goto setup_nic_dev_free; 3687 3688 if (setup_rx_oom_poll_fn(netdev)) 3689 goto setup_nic_dev_free; 3690 3691 /* Register the network device with the OS */ 3692 if (register_netdev(netdev)) { 3693 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3694 goto setup_nic_dev_free; 3695 } 3696 3697 dev_dbg(&octeon_dev->pci_dev->dev, 3698 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3699 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3700 netif_carrier_off(netdev); 3701 lio->link_changes++; 3702 3703 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3704 3705 /* Sending command to firmware to enable Rx checksum offload 3706 * by default at the time of setup of Liquidio driver for 3707 * this device 3708 */ 3709 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3710 OCTNET_CMD_RXCSUM_ENABLE); 3711 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3712 OCTNET_CMD_TXCSUM_ENABLE); 3713 3714 dev_dbg(&octeon_dev->pci_dev->dev, 3715 "NIC ifidx:%d Setup successful\n", i); 3716 3717 if (octeon_dev->subsystem_id == 3718 OCTEON_CN2350_25GB_SUBSYS_ID || 3719 
octeon_dev->subsystem_id == 3720 OCTEON_CN2360_25GB_SUBSYS_ID) { 3721 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, 3722 octeon_dev->fw_info.ver.min, 3723 octeon_dev->fw_info.ver.rev); 3724 3725 /* speed control unsupported in f/w older than 1.7.2 */ 3726 if (cur_ver < OCT_FW_VER(1, 7, 2)) { 3727 dev_info(&octeon_dev->pci_dev->dev, 3728 "speed setting not supported by f/w."); 3729 octeon_dev->speed_setting = 25; 3730 octeon_dev->no_speed_setting = 1; 3731 } else { 3732 liquidio_get_speed(lio); 3733 } 3734 3735 if (octeon_dev->speed_setting == 0) { 3736 octeon_dev->speed_setting = 25; 3737 octeon_dev->no_speed_setting = 1; 3738 } 3739 } else { 3740 octeon_dev->no_speed_setting = 1; 3741 octeon_dev->speed_setting = 10; 3742 } 3743 octeon_dev->speed_boot = octeon_dev->speed_setting; 3744 3745 /* don't read FEC setting if unsupported by f/w (see above) */ 3746 if (octeon_dev->speed_boot == 25 && 3747 !octeon_dev->no_speed_setting) { 3748 liquidio_get_fec(lio); 3749 octeon_dev->props[lio->ifidx].fec_boot = 3750 octeon_dev->props[lio->ifidx].fec; 3751 } 3752 } 3753 3754 device_lock(&octeon_dev->pci_dev->dev); 3755 devlink = devlink_alloc(&liquidio_devlink_ops, 3756 sizeof(struct lio_devlink_priv), 3757 &octeon_dev->pci_dev->dev); 3758 if (!devlink) { 3759 device_unlock(&octeon_dev->pci_dev->dev); 3760 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3761 goto setup_nic_dev_free; 3762 } 3763 3764 lio_devlink = devlink_priv(devlink); 3765 lio_devlink->oct = octeon_dev; 3766 3767 octeon_dev->devlink = devlink; 3768 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; 3769 devlink_register(devlink); 3770 device_unlock(&octeon_dev->pci_dev->dev); 3771 3772 return 0; 3773 3774 setup_nic_dev_free: 3775 3776 while (i--) { 3777 dev_err(&octeon_dev->pci_dev->dev, 3778 "NIC ifidx:%d Setup failed\n", i); 3779 liquidio_destroy_nic_device(octeon_dev, i); 3780 } 3781 3782 setup_nic_dev_done: 3783 3784 return -ENODEV; 3785 } 3786 3787 #ifdef CONFIG_PCI_IOV 3788 static int octeon_enable_sriov(struct octeon_device *oct) 3789 { 3790 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; 3791 struct pci_dev *vfdev; 3792 int err; 3793 u32 u; 3794 3795 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { 3796 err = pci_enable_sriov(oct->pci_dev, 3797 oct->sriov_info.num_vfs_alloced); 3798 if (err) { 3799 dev_err(&oct->pci_dev->dev, 3800 "OCTEON: Failed to enable PCI sriov: %d\n", 3801 err); 3802 oct->sriov_info.num_vfs_alloced = 0; 3803 return err; 3804 } 3805 oct->sriov_info.sriov_enabled = 1; 3806 3807 /* init lookup table that maps DPI ring number to VF pci_dev 3808 * struct pointer 3809 */ 3810 u = 0; 3811 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3812 OCTEON_CN23XX_VF_VID, NULL); 3813 while (vfdev) { 3814 if (vfdev->is_virtfn && 3815 (vfdev->physfn == oct->pci_dev)) { 3816 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = 3817 vfdev; 3818 u += oct->sriov_info.rings_per_vf; 3819 } 3820 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3821 OCTEON_CN23XX_VF_VID, vfdev); 3822 } 3823 } 3824 3825 return num_vfs_alloced; 3826 } 3827 3828 static int lio_pci_sriov_disable(struct octeon_device *oct) 3829 { 3830 int u; 3831 3832 if (pci_vfs_assigned(oct->pci_dev)) { 3833 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); 3834 return -EPERM; 3835 } 3836 3837 pci_disable_sriov(oct->pci_dev); 3838 3839 u = 0; 3840 while (u < MAX_POSSIBLE_VFS) { 3841 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; 3842 u += oct->sriov_info.rings_per_vf; 3843 } 3844 3845 oct->sriov_info.num_vfs_alloced = 0; 
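	/* The DPI-ring-to-VF lookup table and the allocation count are now
	 * clear, so a subsequent enable starts from a clean state.
	 */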
3846 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", 3847 oct->pf_num); 3848 3849 return 0; 3850 } 3851 3852 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) 3853 { 3854 struct octeon_device *oct = pci_get_drvdata(dev); 3855 int ret = 0; 3856 3857 if ((num_vfs == oct->sriov_info.num_vfs_alloced) && 3858 (oct->sriov_info.sriov_enabled)) { 3859 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", 3860 oct->pf_num, num_vfs); 3861 return 0; 3862 } 3863 3864 if (!num_vfs) { 3865 lio_vf_rep_destroy(oct); 3866 ret = lio_pci_sriov_disable(oct); 3867 } else if (num_vfs > oct->sriov_info.max_vfs) { 3868 dev_err(&oct->pci_dev->dev, 3869 "OCTEON: Max allowed VFs:%d user requested:%d", 3870 oct->sriov_info.max_vfs, num_vfs); 3871 ret = -EPERM; 3872 } else { 3873 oct->sriov_info.num_vfs_alloced = num_vfs; 3874 ret = octeon_enable_sriov(oct); 3875 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", 3876 oct->pf_num, num_vfs); 3877 ret = lio_vf_rep_create(oct); 3878 if (ret) 3879 dev_info(&oct->pci_dev->dev, 3880 "vf representor create failed"); 3881 } 3882 3883 return ret; 3884 } 3885 #endif 3886 3887 /** 3888 * liquidio_init_nic_module - initialize the NIC 3889 * @oct: octeon device 3890 * 3891 * This initialization routine is called once the Octeon device application is 3892 * up and running 3893 */ 3894 static int liquidio_init_nic_module(struct octeon_device *oct) 3895 { 3896 int i, retval = 0; 3897 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3898 3899 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3900 3901 /* only default iq and oq were initialized 3902 * initialize the rest as well 3903 */ 3904 /* run port_config command for each port */ 3905 oct->ifcount = num_nic_ports; 3906 3907 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); 3908 3909 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3910 oct->props[i].gmxport = -1; 3911 3912 retval = setup_nic_devices(oct); 3913 if (retval) { 3914 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3915 goto octnet_init_failure; 3916 } 3917 3918 /* Call vf_rep_modinit if the firmware is switchdev capable 3919 * and do it from the first liquidio function probed. 3920 */ 3921 if (!oct->octeon_id && 3922 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { 3923 retval = lio_vf_rep_modinit(); 3924 if (retval) { 3925 liquidio_stop_nic_module(oct); 3926 goto octnet_init_failure; 3927 } 3928 } 3929 3930 liquidio_ptp_init(oct); 3931 3932 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3933 3934 return retval; 3935 3936 octnet_init_failure: 3937 3938 oct->ifcount = 0; 3939 3940 return retval; 3941 } 3942 3943 /** 3944 * nic_starter - finish init 3945 * @work: work struct work_struct 3946 * 3947 * starter callback that invokes the remaining initialization work after the NIC is up and running. 3948 */ 3949 static void nic_starter(struct work_struct *work) 3950 { 3951 struct octeon_device *oct; 3952 struct cavium_wk *wk = (struct cavium_wk *)work; 3953 3954 oct = (struct octeon_device *)wk->ctxptr; 3955 3956 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3957 return; 3958 3959 /* If the status of the device is CORE_OK, the core 3960 * application has reported its application type. Call 3961 * any registered handlers now and move to the RUNNING 3962 * state. 
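 * Until then, keep rescheduling this work every
 * LIQUIDIO_STARTER_POLL_INTERVAL_MS.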
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	bool cores_crashed;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

	cores_crashed = READ_ONCE(oct->cores_crashed);

	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			if (!cores_crashed)
				try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			if (!cores_crashed)
				module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}

/**
 * octeon_device_init - Device initialization for each Octeon device probed
 * @octeon_dev: octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	char bootcmd[] = "\n";
	char *dbg_enb = NULL;
	enum lio_fw_state fw_state;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;

	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space.
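	 * The chip-specific setup also installs the per-chip fn_list
	 * callbacks (soft_reset, setup_device_regs, enable_io_queues, ...)
	 * that the remainder of this function relies on.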
	 */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
	 * since that is what is required for the reference to be removed
	 * during de-initialization (see 'octeon_destroy_resources').
	 */
	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
			       PCI_SLOT(octeon_dev->pci_dev->devfn),
			       PCI_FUNC(octeon_dev->pci_dev->devfn),
			       true);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* CN23XX supports preloaded firmware if the following is true:
	 *
	 * The adapter indicates that firmware is currently running AND
	 * 'fw_type' is 'auto'.
	 *
	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
	 */
	if (OCTEON_CN23XX_PF(octeon_dev) &&
	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
		atomic_cmpxchg(octeon_dev->adapter_fw_state,
			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
	}

	/* If loading firmware, only first device of adapter needs to do so. */
	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
				  FW_NEEDS_TO_BE_LOADED,
				  FW_IS_BEING_LOADED);

	/* Here, [local variable] 'fw_state' is set to one of:
	 *
	 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
	 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
	 *                        firmware to the adapter.
	 * FW_IS_BEING_LOADED: The driver's second instance will not load
	 *                     firmware to the adapter.
	 */

	/* Prior to f/w load, perform a soft reset of the Octeon device;
	 * if error resetting, return w/error.
	 */
	if (fw_state == FW_NEEDS_TO_BE_LOADED)
		if (octeon_dev->fn_list.soft_reset(octeon_dev))
			return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);

	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues.
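	 * (Input queues carry commands and egress packets from the host to
	 * the Octeon; the output-queue (DROQ) counterparts are set up a few
	 * steps below.)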
	 */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

		if (octeon_allocate_ioq_vector(octeon_dev,
					       octeon_dev->sriov_info.num_pf_rings)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing. */
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);

	/* Setup the interrupt handler and record the INT SUM register
	 * address.
	 */
	if (octeon_setup_interrupt(octeon_dev,
				   octeon_dev->sriov_info.num_pf_rings))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
	 * the output queue is enabled.
	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
	 * before any credits have been issued, causing the ring to be reset
	 * (and the f/w appear to never have started).
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}
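
		/* ddr_timeout is a 0644 module parameter, so an operator can
		 * release the polling loop below at runtime, e.g.:
		 *
		 *   echo 10000 > /sys/module/liquidio/parameters/ddr_timeout
		 */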
		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset. */
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}

		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}

		/* If console debug is enabled, pass an empty string to request
		 * the default enablement; otherwise pass NULL for 'disabled'.
		 */
		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		} else if (octeon_console_debug_enabled(0)) {
			/* If console was added AND we're logging console output
			 * then set our console print function.
			 */
			octeon_dev->console[0].print = octeon_dbg_console_print;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}

		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
	oct_priv->dev = octeon_dev;

	return 0;
}

/**
 * octeon_dbg_console_print - Debug console print function
 * @oct: octeon device
 * @console_num: console number
 * @prefix: first portion of line to display
 * @suffix: second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' parameter and
 * 'suffix' parameter.
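 *
 * Return: 0 always.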
 */
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix)
{
	if (prefix && suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
			 suffix);
	else if (prefix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
	else if (suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

	return 0;
}

/**
 * liquidio_exit - Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);