1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <net/vxlan.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn23xx_vf_device.h" 31 32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 34 MODULE_LICENSE("GPL"); 35 MODULE_VERSION(LIQUIDIO_VERSION); 36 37 static int debug = -1; 38 module_param(debug, int, 0644); 39 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 40 41 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 42 43 struct oct_timestamp_resp { 44 u64 rh; 45 u64 timestamp; 46 u64 status; 47 }; 48 49 union tx_info { 50 u64 u64; 51 struct { 52 #ifdef __BIG_ENDIAN_BITFIELD 53 u16 gso_size; 54 u16 gso_segs; 55 u32 reserved; 56 #else 57 u32 reserved; 58 u16 gso_segs; 59 u16 gso_size; 60 #endif 61 } s; 62 }; 63 64 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 65 #define OCTNIC_GSO_MAX_SIZE \ 66 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 67 68 static int 69 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 70 static void liquidio_vf_remove(struct pci_dev *pdev); 71 static int octeon_device_init(struct octeon_device *oct); 72 static int liquidio_stop(struct net_device *netdev); 73 74 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 75 { 76 struct octeon_device_priv *oct_priv = 77 (struct octeon_device_priv *)oct->priv; 78 int retry = MAX_IO_PENDING_PKT_COUNT; 79 int pkt_cnt = 0, pending_pkts; 80 int i; 81 82 do { 83 pending_pkts = 0; 84 85 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 86 if (!(oct->io_qmask.oq & BIT_ULL(i))) 87 continue; 88 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 89 } 90 if (pkt_cnt > 0) { 91 pending_pkts += pkt_cnt; 92 tasklet_schedule(&oct_priv->droq_tasklet); 93 } 94 pkt_cnt = 0; 95 schedule_timeout_uninterruptible(1); 96 97 } while (retry-- && pending_pkts); 98 99 return pkt_cnt; 100 } 101 102 /** 103 * \brief Cause device to go quiet so it can be safely removed/reset/etc 104 * @param oct Pointer to Octeon device 105 */ 106 static void pcierror_quiesce_device(struct octeon_device *oct) 107 { 108 int i; 109 110 /* Disable the input and output queues now. No more packets will 111 * arrive from Octeon, but we should wait for all packet processing 112 * to finish. 
113 */ 114 115 /* To allow for in-flight requests */ 116 schedule_timeout_uninterruptible(100); 117 118 if (wait_for_pending_requests(oct)) 119 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 120 121 /* Force all requests waiting to be fetched by OCTEON to complete. */ 122 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 123 struct octeon_instr_queue *iq; 124 125 if (!(oct->io_qmask.iq & BIT_ULL(i))) 126 continue; 127 iq = oct->instr_queue[i]; 128 129 if (atomic_read(&iq->instr_pending)) { 130 spin_lock_bh(&iq->lock); 131 iq->fill_cnt = 0; 132 iq->octeon_read_index = iq->host_write_index; 133 iq->stats.instr_processed += 134 atomic_read(&iq->instr_pending); 135 lio_process_iq_request_list(oct, iq, 0); 136 spin_unlock_bh(&iq->lock); 137 } 138 } 139 140 /* Force all pending ordered list requests to time out. */ 141 lio_process_ordered_list(oct, 1); 142 143 /* We do not need to wait for output queue packets to be processed. */ 144 } 145 146 /** 147 * \brief Cleanup PCI AER uncorrectable error status 148 * @param dev Pointer to PCI device 149 */ 150 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 151 { 152 u32 status, mask; 153 int pos = 0x100; 154 155 pr_info("%s :\n", __func__); 156 157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 158 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 159 if (dev->error_state == pci_channel_io_normal) 160 status &= ~mask; /* Clear corresponding nonfatal bits */ 161 else 162 status &= mask; /* Clear corresponding fatal bits */ 163 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 164 } 165 166 /** 167 * \brief Stop all PCI IO to a given device 168 * @param dev Pointer to Octeon device 169 */ 170 static void stop_pci_io(struct octeon_device *oct) 171 { 172 struct msix_entry *msix_entries; 173 int i; 174 175 /* No more instructions will be forwarded. */ 176 atomic_set(&oct->status, OCT_DEV_IN_RESET); 177 178 for (i = 0; i < oct->ifcount; i++) 179 netif_device_detach(oct->props[i].netdev); 180 181 /* Disable interrupts */ 182 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 183 184 pcierror_quiesce_device(oct); 185 if (oct->msix_on) { 186 msix_entries = (struct msix_entry *)oct->msix_entries; 187 for (i = 0; i < oct->num_msix_irqs; i++) { 188 /* clear the affinity_cpumask */ 189 irq_set_affinity_hint(msix_entries[i].vector, 190 NULL); 191 free_irq(msix_entries[i].vector, 192 &oct->ioq_vector[i]); 193 } 194 pci_disable_msix(oct->pci_dev); 195 kfree(oct->msix_entries); 196 oct->msix_entries = NULL; 197 octeon_free_ioq_vector(oct); 198 } 199 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 200 lio_get_state_string(&oct->status)); 201 202 /* making it a common function for all OCTEON models */ 203 cleanup_aer_uncorrect_error_status(oct->pci_dev); 204 205 pci_disable_device(oct->pci_dev); 206 } 207 208 /** 209 * \brief called when PCI error is detected 210 * @param pdev Pointer to PCI device 211 * @param state The current pci connection state 212 * 213 * This function is called after a PCI bus error affecting 214 * this device has been detected. 
215 */ 216 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 217 pci_channel_state_t state) 218 { 219 struct octeon_device *oct = pci_get_drvdata(pdev); 220 221 /* Non-correctable Non-fatal errors */ 222 if (state == pci_channel_io_normal) { 223 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 224 cleanup_aer_uncorrect_error_status(oct->pci_dev); 225 return PCI_ERS_RESULT_CAN_RECOVER; 226 } 227 228 /* Non-correctable Fatal errors */ 229 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 230 stop_pci_io(oct); 231 232 return PCI_ERS_RESULT_DISCONNECT; 233 } 234 235 /* For PCI-E Advanced Error Recovery (AER) Interface */ 236 static const struct pci_error_handlers liquidio_vf_err_handler = { 237 .error_detected = liquidio_pcie_error_detected, 238 }; 239 240 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 241 { 242 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 244 }, 245 { 246 0, 0, 0, 0, 0, 0, 0 247 } 248 }; 249 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 250 251 static struct pci_driver liquidio_vf_pci_driver = { 252 .name = "LiquidIO_VF", 253 .id_table = liquidio_vf_pci_tbl, 254 .probe = liquidio_vf_probe, 255 .remove = liquidio_vf_remove, 256 .err_handler = &liquidio_vf_err_handler, /* For AER */ 257 }; 258 259 /** 260 * \brief Print link information 261 * @param netdev network device 262 */ 263 static void print_link_info(struct net_device *netdev) 264 { 265 struct lio *lio = GET_LIO(netdev); 266 267 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 268 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 269 struct oct_link_info *linfo = &lio->linfo; 270 271 if (linfo->link.s.link_up) { 272 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 273 linfo->link.s.speed, 274 (linfo->link.s.duplex) ? "Full" : "Half"); 275 } else { 276 netif_info(lio, link, lio->netdev, "Link Down\n"); 277 } 278 } 279 } 280 281 /** 282 * \brief Routine to notify MTU change 283 * @param work work_struct data structure 284 */ 285 static void octnet_link_status_change(struct work_struct *work) 286 { 287 struct cavium_wk *wk = (struct cavium_wk *)work; 288 struct lio *lio = (struct lio *)wk->ctxptr; 289 290 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 291 * this API is invoked only when new max-MTU of the interface is 292 * less than current MTU. 
293 */ 294 rtnl_lock(); 295 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 296 rtnl_unlock(); 297 } 298 299 /** 300 * \brief Sets up the mtu status change work 301 * @param netdev network device 302 */ 303 static int setup_link_status_change_wq(struct net_device *netdev) 304 { 305 struct lio *lio = GET_LIO(netdev); 306 struct octeon_device *oct = lio->oct_dev; 307 308 lio->link_status_wq.wq = alloc_workqueue("link-status", 309 WQ_MEM_RECLAIM, 0); 310 if (!lio->link_status_wq.wq) { 311 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 312 return -1; 313 } 314 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 315 octnet_link_status_change); 316 lio->link_status_wq.wk.ctxptr = lio; 317 318 return 0; 319 } 320 321 static void cleanup_link_status_change_wq(struct net_device *netdev) 322 { 323 struct lio *lio = GET_LIO(netdev); 324 325 if (lio->link_status_wq.wq) { 326 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 327 destroy_workqueue(lio->link_status_wq.wq); 328 } 329 } 330 331 /** 332 * \brief Update link status 333 * @param netdev network device 334 * @param ls link status structure 335 * 336 * Called on receipt of a link status response from the core application to 337 * update each interface's link status. 338 */ 339 static void update_link_status(struct net_device *netdev, 340 union oct_link_status *ls) 341 { 342 struct lio *lio = GET_LIO(netdev); 343 int current_max_mtu = lio->linfo.link.s.mtu; 344 struct octeon_device *oct = lio->oct_dev; 345 346 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 347 lio->linfo.link.u64 = ls->u64; 348 349 print_link_info(netdev); 350 lio->link_changes++; 351 352 if (lio->linfo.link.s.link_up) { 353 netif_carrier_on(netdev); 354 wake_txqs(netdev); 355 } else { 356 netif_carrier_off(netdev); 357 stop_txqs(netdev); 358 } 359 360 if (lio->linfo.link.s.mtu != current_max_mtu) { 361 dev_info(&oct->pci_dev->dev, 362 "Max MTU Changed from %d to %d\n", 363 current_max_mtu, lio->linfo.link.s.mtu); 364 netdev->max_mtu = lio->linfo.link.s.mtu; 365 } 366 367 if (lio->linfo.link.s.mtu < netdev->mtu) { 368 dev_warn(&oct->pci_dev->dev, 369 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 370 netdev->mtu, lio->linfo.link.s.mtu); 371 queue_delayed_work(lio->link_status_wq.wq, 372 &lio->link_status_wq.wk.work, 0); 373 } 374 } 375 } 376 377 /** 378 * \brief PCI probe handler 379 * @param pdev PCI device structure 380 * @param ent unused 381 */ 382 static int 383 liquidio_vf_probe(struct pci_dev *pdev, 384 const struct pci_device_id *ent __attribute__((unused))) 385 { 386 struct octeon_device *oct_dev = NULL; 387 388 oct_dev = octeon_allocate_device(pdev->device, 389 sizeof(struct octeon_device_priv)); 390 391 if (!oct_dev) { 392 dev_err(&pdev->dev, "Unable to allocate device\n"); 393 return -ENOMEM; 394 } 395 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 396 397 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 398 (u32)pdev->vendor, (u32)pdev->device); 399 400 /* Assign octeon_device for this device to the private data area. */ 401 pci_set_drvdata(pdev, oct_dev); 402 403 /* set linux specific device pointer */ 404 oct_dev->pci_dev = pdev; 405 406 oct_dev->subsystem_id = pdev->subsystem_vendor | 407 (pdev->subsystem_device << 16); 408 409 if (octeon_device_init(oct_dev)) { 410 liquidio_vf_remove(pdev); 411 return -ENOMEM; 412 } 413 414 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 415 416 return 0; 417 } 418 419 /** 420 * \brief PCI FLR for each Octeon device. 
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	pcie_flr(oct->pci_dev);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:
		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
485 */ 486 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 487 struct octeon_instr_queue *iq; 488 489 if (!(oct->io_qmask.iq & BIT_ULL(i))) 490 continue; 491 iq = oct->instr_queue[i]; 492 493 if (atomic_read(&iq->instr_pending)) { 494 spin_lock_bh(&iq->lock); 495 iq->fill_cnt = 0; 496 iq->octeon_read_index = iq->host_write_index; 497 iq->stats.instr_processed += 498 atomic_read(&iq->instr_pending); 499 lio_process_iq_request_list(oct, iq, 0); 500 spin_unlock_bh(&iq->lock); 501 } 502 } 503 504 lio_process_ordered_list(oct, 1); 505 octeon_free_sc_done_list(oct); 506 octeon_free_sc_zombie_list(oct); 507 508 /* fall through */ 509 case OCT_DEV_INTR_SET_DONE: 510 /* Disable interrupts */ 511 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 512 513 if (oct->msix_on) { 514 msix_entries = (struct msix_entry *)oct->msix_entries; 515 for (i = 0; i < oct->num_msix_irqs; i++) { 516 if (oct->ioq_vector[i].vector) { 517 irq_set_affinity_hint( 518 msix_entries[i].vector, 519 NULL); 520 free_irq(msix_entries[i].vector, 521 &oct->ioq_vector[i]); 522 oct->ioq_vector[i].vector = 0; 523 } 524 } 525 pci_disable_msix(oct->pci_dev); 526 kfree(oct->msix_entries); 527 oct->msix_entries = NULL; 528 kfree(oct->irq_name_storage); 529 oct->irq_name_storage = NULL; 530 } 531 /* Soft reset the octeon device before exiting */ 532 if (oct->pci_dev->reset_fn) 533 octeon_pci_flr(oct); 534 else 535 cn23xx_vf_ask_pf_to_do_flr(oct); 536 537 /* fallthrough */ 538 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 539 octeon_free_ioq_vector(oct); 540 541 /* fallthrough */ 542 case OCT_DEV_MBOX_SETUP_DONE: 543 oct->fn_list.free_mbox(oct); 544 545 /* fallthrough */ 546 case OCT_DEV_IN_RESET: 547 case OCT_DEV_DROQ_INIT_DONE: 548 mdelay(100); 549 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 550 if (!(oct->io_qmask.oq & BIT_ULL(i))) 551 continue; 552 octeon_delete_droq(oct, i); 553 } 554 555 /* fallthrough */ 556 case OCT_DEV_RESP_LIST_INIT_DONE: 557 octeon_delete_response_list(oct); 558 559 /* fallthrough */ 560 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 561 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 562 if (!(oct->io_qmask.iq & BIT_ULL(i))) 563 continue; 564 octeon_delete_instr_queue(oct, i); 565 } 566 567 /* fallthrough */ 568 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 569 octeon_free_sc_buffer_pool(oct); 570 571 /* fallthrough */ 572 case OCT_DEV_DISPATCH_INIT_DONE: 573 octeon_delete_dispatch_list(oct); 574 cancel_delayed_work_sync(&oct->nic_poll_work.work); 575 576 /* fallthrough */ 577 case OCT_DEV_PCI_MAP_DONE: 578 octeon_unmap_pci_barx(oct, 0); 579 octeon_unmap_pci_barx(oct, 1); 580 581 /* fallthrough */ 582 case OCT_DEV_PCI_ENABLE_DONE: 583 pci_clear_master(oct->pci_dev); 584 /* Disable the device, releasing the PCI INT */ 585 pci_disable_device(oct->pci_dev); 586 587 /* fallthrough */ 588 case OCT_DEV_BEGIN_STATE: 589 /* Nothing to be done here either */ 590 break; 591 } 592 593 tasklet_kill(&oct_priv->droq_tasklet); 594 } 595 596 /** 597 * \brief Send Rx control command 598 * @param lio per-network private data 599 * @param start_stop whether to start or stop 600 */ 601 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 602 { 603 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 604 struct octeon_soft_command *sc; 605 union octnet_cmd *ncmd; 606 int retval; 607 608 if (oct->props[lio->ifidx].rx_on == start_stop) 609 return; 610 611 sc = (struct octeon_soft_command *) 612 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 613 16, 0); 614 615 ncmd = (union octnet_cmd *)sc->virtdptr; 616 617 
ncmd->u64 = 0; 618 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 619 ncmd->s.param1 = start_stop; 620 621 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 622 623 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 624 625 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 626 OPCODE_NIC_CMD, 0, 0, 0); 627 628 init_completion(&sc->complete); 629 sc->sc_status = OCTEON_REQUEST_PENDING; 630 631 retval = octeon_send_soft_command(oct, sc); 632 if (retval == IQ_SEND_FAILED) { 633 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 634 octeon_free_soft_command(oct, sc); 635 } else { 636 /* Sleep on a wait queue till the cond flag indicates that the 637 * response arrived or timed-out. 638 */ 639 retval = wait_for_sc_completion_timeout(oct, sc, 0); 640 if (retval) 641 return; 642 643 oct->props[lio->ifidx].rx_on = start_stop; 644 WRITE_ONCE(sc->caller_is_done, true); 645 } 646 } 647 648 /** 649 * \brief Destroy NIC device interface 650 * @param oct octeon device 651 * @param ifidx which interface to destroy 652 * 653 * Cleanup associated with each interface for an Octeon device when NIC 654 * module is being unloaded or if initialization fails during load. 655 */ 656 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 657 { 658 struct net_device *netdev = oct->props[ifidx].netdev; 659 struct octeon_device_priv *oct_priv = 660 (struct octeon_device_priv *)oct->priv; 661 struct napi_struct *napi, *n; 662 struct lio *lio; 663 664 if (!netdev) { 665 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 666 __func__, ifidx); 667 return; 668 } 669 670 lio = GET_LIO(netdev); 671 672 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 673 674 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 675 liquidio_stop(netdev); 676 677 if (oct->props[lio->ifidx].napi_enabled == 1) { 678 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 679 napi_disable(napi); 680 681 oct->props[lio->ifidx].napi_enabled = 0; 682 683 oct->droq[0]->ops.poll_mode = 0; 684 } 685 686 /* Delete NAPI */ 687 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 688 netif_napi_del(napi); 689 690 tasklet_enable(&oct_priv->droq_tasklet); 691 692 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 693 unregister_netdev(netdev); 694 695 cleanup_rx_oom_poll_fn(netdev); 696 697 cleanup_link_status_change_wq(netdev); 698 699 lio_delete_glists(lio); 700 701 free_netdev(netdev); 702 703 oct->props[ifidx].gmxport = -1; 704 705 oct->props[ifidx].netdev = NULL; 706 } 707 708 /** 709 * \brief Stop complete NIC functionality 710 * @param oct octeon device 711 */ 712 static int liquidio_stop_nic_module(struct octeon_device *oct) 713 { 714 struct lio *lio; 715 int i, j; 716 717 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 718 if (!oct->ifcount) { 719 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 720 return 1; 721 } 722 723 spin_lock_bh(&oct->cmd_resp_wqlock); 724 oct->cmd_resp_state = OCT_DRV_OFFLINE; 725 spin_unlock_bh(&oct->cmd_resp_wqlock); 726 727 for (i = 0; i < oct->ifcount; i++) { 728 lio = GET_LIO(oct->props[i].netdev); 729 for (j = 0; j < oct->num_oqs; j++) 730 octeon_unregister_droq_ops(oct, 731 lio->linfo.rxpciq[j].s.q_no); 732 } 733 734 for (i = 0; i < oct->ifcount; i++) 735 liquidio_destroy_nic_device(oct, i); 736 737 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 738 return 0; 739 } 740 741 /** 742 * \brief Cleans up resources at unload time 743 * @param pdev PCI device structure 744 */ 745 static void 
liquidio_vf_remove(struct pci_dev *pdev) 746 { 747 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 748 749 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 750 751 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 752 liquidio_stop_nic_module(oct_dev); 753 754 /* Reset the octeon device and cleanup all memory allocated for 755 * the octeon device by driver. 756 */ 757 octeon_destroy_resources(oct_dev); 758 759 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 760 761 /* This octeon device has been removed. Update the global 762 * data structure to reflect this. Free the device structure. 763 */ 764 octeon_free_device_mem(oct_dev); 765 } 766 767 /** 768 * \brief PCI initialization for each Octeon device. 769 * @param oct octeon device 770 */ 771 static int octeon_pci_os_setup(struct octeon_device *oct) 772 { 773 #ifdef CONFIG_PCI_IOV 774 /* setup PCI stuff first */ 775 if (!oct->pci_dev->physfn) 776 octeon_pci_flr(oct); 777 #endif 778 779 if (pci_enable_device(oct->pci_dev)) { 780 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 781 return 1; 782 } 783 784 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 785 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 786 pci_disable_device(oct->pci_dev); 787 return 1; 788 } 789 790 /* Enable PCI DMA Master. */ 791 pci_set_master(oct->pci_dev); 792 793 return 0; 794 } 795 796 /** 797 * \brief Unmap and free network buffer 798 * @param buf buffer 799 */ 800 static void free_netbuf(void *buf) 801 { 802 struct octnet_buf_free_info *finfo; 803 struct sk_buff *skb; 804 struct lio *lio; 805 806 finfo = (struct octnet_buf_free_info *)buf; 807 skb = finfo->skb; 808 lio = finfo->lio; 809 810 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 811 DMA_TO_DEVICE); 812 813 tx_buffer_free(skb); 814 } 815 816 /** 817 * \brief Unmap and free gather buffer 818 * @param buf buffer 819 */ 820 static void free_netsgbuf(void *buf) 821 { 822 struct octnet_buf_free_info *finfo; 823 struct octnic_gather *g; 824 struct sk_buff *skb; 825 int i, frags, iq; 826 struct lio *lio; 827 828 finfo = (struct octnet_buf_free_info *)buf; 829 skb = finfo->skb; 830 lio = finfo->lio; 831 g = finfo->g; 832 frags = skb_shinfo(skb)->nr_frags; 833 834 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 835 g->sg[0].ptr[0], (skb->len - skb->data_len), 836 DMA_TO_DEVICE); 837 838 i = 1; 839 while (frags--) { 840 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 841 842 pci_unmap_page((lio->oct_dev)->pci_dev, 843 g->sg[(i >> 2)].ptr[(i & 3)], 844 frag->size, DMA_TO_DEVICE); 845 i++; 846 } 847 848 iq = skb_iq(lio->oct_dev, skb); 849 850 spin_lock(&lio->glist_lock[iq]); 851 list_add_tail(&g->list, &lio->glist[iq]); 852 spin_unlock(&lio->glist_lock[iq]); 853 854 tx_buffer_free(skb); 855 } 856 857 /** 858 * \brief Unmap and free gather buffer with response 859 * @param buf buffer 860 */ 861 static void free_netsgbuf_with_resp(void *buf) 862 { 863 struct octnet_buf_free_info *finfo; 864 struct octeon_soft_command *sc; 865 struct octnic_gather *g; 866 struct sk_buff *skb; 867 int i, frags, iq; 868 struct lio *lio; 869 870 sc = (struct octeon_soft_command *)buf; 871 skb = (struct sk_buff *)sc->callback_arg; 872 finfo = (struct octnet_buf_free_info *)&skb->cb; 873 874 lio = finfo->lio; 875 g = finfo->g; 876 frags = skb_shinfo(skb)->nr_frags; 877 878 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 879 g->sg[0].ptr[0], (skb->len - skb->data_len), 880 DMA_TO_DEVICE); 881 882 i = 1; 883 while (frags--) { 884 struct skb_frag_struct 
*frag = &skb_shinfo(skb)->frags[i - 1]; 885 886 pci_unmap_page((lio->oct_dev)->pci_dev, 887 g->sg[(i >> 2)].ptr[(i & 3)], 888 frag->size, DMA_TO_DEVICE); 889 i++; 890 } 891 892 iq = skb_iq(lio->oct_dev, skb); 893 894 spin_lock(&lio->glist_lock[iq]); 895 list_add_tail(&g->list, &lio->glist[iq]); 896 spin_unlock(&lio->glist_lock[iq]); 897 898 /* Don't free the skb yet */ 899 } 900 901 /** 902 * \brief Net device open for LiquidIO 903 * @param netdev network device 904 */ 905 static int liquidio_open(struct net_device *netdev) 906 { 907 struct lio *lio = GET_LIO(netdev); 908 struct octeon_device *oct = lio->oct_dev; 909 struct octeon_device_priv *oct_priv = 910 (struct octeon_device_priv *)oct->priv; 911 struct napi_struct *napi, *n; 912 913 if (!oct->props[lio->ifidx].napi_enabled) { 914 tasklet_disable(&oct_priv->droq_tasklet); 915 916 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 917 napi_enable(napi); 918 919 oct->props[lio->ifidx].napi_enabled = 1; 920 921 oct->droq[0]->ops.poll_mode = 1; 922 } 923 924 ifstate_set(lio, LIO_IFSTATE_RUNNING); 925 926 /* Ready for link status updates */ 927 lio->intf_open = 1; 928 929 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 930 start_txqs(netdev); 931 932 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 933 lio->stats_wk.ctxptr = lio; 934 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 935 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 936 937 /* tell Octeon to start forwarding packets to host */ 938 send_rx_ctrl_cmd(lio, 1); 939 940 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 941 942 return 0; 943 } 944 945 /** 946 * \brief Net device stop for LiquidIO 947 * @param netdev network device 948 */ 949 static int liquidio_stop(struct net_device *netdev) 950 { 951 struct lio *lio = GET_LIO(netdev); 952 struct octeon_device *oct = lio->oct_dev; 953 struct octeon_device_priv *oct_priv = 954 (struct octeon_device_priv *)oct->priv; 955 struct napi_struct *napi, *n; 956 957 /* tell Octeon to stop forwarding packets to host */ 958 send_rx_ctrl_cmd(lio, 0); 959 960 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 961 /* Inform that netif carrier is down */ 962 lio->intf_open = 0; 963 lio->linfo.link.s.link_up = 0; 964 965 netif_carrier_off(netdev); 966 lio->link_changes++; 967 968 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 969 970 stop_txqs(netdev); 971 972 /* Wait for any pending Rx descriptors */ 973 if (lio_wait_for_clean_oq(oct)) 974 netif_info(lio, rx_err, lio->netdev, 975 "Proceeding with stop interface after partial RX desc processing\n"); 976 977 if (oct->props[lio->ifidx].napi_enabled == 1) { 978 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 979 napi_disable(napi); 980 981 oct->props[lio->ifidx].napi_enabled = 0; 982 983 oct->droq[0]->ops.poll_mode = 0; 984 985 tasklet_enable(&oct_priv->droq_tasklet); 986 } 987 988 cancel_delayed_work_sync(&lio->stats_wk.work); 989 990 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 991 992 return 0; 993 } 994 995 /** 996 * \brief Converts a mask based on net device flags 997 * @param netdev network device 998 * 999 * This routine generates a octnet_ifflags mask from the net device flags 1000 * received from the OS. 
1001 */ 1002 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1003 { 1004 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1005 1006 if (netdev->flags & IFF_PROMISC) 1007 f |= OCTNET_IFFLAG_PROMISC; 1008 1009 if (netdev->flags & IFF_ALLMULTI) 1010 f |= OCTNET_IFFLAG_ALLMULTI; 1011 1012 if (netdev->flags & IFF_MULTICAST) { 1013 f |= OCTNET_IFFLAG_MULTICAST; 1014 1015 /* Accept all multicast addresses if there are more than we 1016 * can handle 1017 */ 1018 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1019 f |= OCTNET_IFFLAG_ALLMULTI; 1020 } 1021 1022 if (netdev->flags & IFF_BROADCAST) 1023 f |= OCTNET_IFFLAG_BROADCAST; 1024 1025 return f; 1026 } 1027 1028 static void liquidio_set_uc_list(struct net_device *netdev) 1029 { 1030 struct lio *lio = GET_LIO(netdev); 1031 struct octeon_device *oct = lio->oct_dev; 1032 struct octnic_ctrl_pkt nctrl; 1033 struct netdev_hw_addr *ha; 1034 u64 *mac; 1035 1036 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1037 return; 1038 1039 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1040 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1041 return; 1042 } 1043 1044 lio->netdev_uc_count = netdev_uc_count(netdev); 1045 1046 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1047 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1048 nctrl.ncmd.s.more = lio->netdev_uc_count; 1049 nctrl.ncmd.s.param1 = oct->vf_num; 1050 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1051 nctrl.netpndev = (u64)netdev; 1052 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1053 1054 /* copy all the addresses into the udd */ 1055 mac = &nctrl.udd[0]; 1056 netdev_for_each_uc_addr(ha, netdev) { 1057 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1058 mac++; 1059 } 1060 1061 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1062 } 1063 1064 /** 1065 * \brief Net device set_multicast_list 1066 * @param netdev network device 1067 */ 1068 static void liquidio_set_mcast_list(struct net_device *netdev) 1069 { 1070 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1071 struct lio *lio = GET_LIO(netdev); 1072 struct octeon_device *oct = lio->oct_dev; 1073 struct octnic_ctrl_pkt nctrl; 1074 struct netdev_hw_addr *ha; 1075 u64 *mc; 1076 int ret; 1077 1078 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1079 1080 /* Create a ctrl pkt command to be sent to core app. */ 1081 nctrl.ncmd.u64 = 0; 1082 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1083 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1084 nctrl.ncmd.s.param2 = mc_count; 1085 nctrl.ncmd.s.more = mc_count; 1086 nctrl.netpndev = (u64)netdev; 1087 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1088 1089 /* copy all the addresses into the udd */ 1090 mc = &nctrl.udd[0]; 1091 netdev_for_each_mc_addr(ha, netdev) { 1092 *mc = 0; 1093 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1094 /* no need to swap bytes */ 1095 if (++mc > &nctrl.udd[mc_count]) 1096 break; 1097 } 1098 1099 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1100 1101 /* Apparently, any activity in this call from the kernel has to 1102 * be atomic. So we won't wait for response. 
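	 * (ndo_set_rx_mode is invoked with the netdev addr_list_lock spinlock
	 * held.)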
1103 */ 1104 1105 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1106 if (ret) { 1107 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1108 ret); 1109 } 1110 1111 liquidio_set_uc_list(netdev); 1112 } 1113 1114 /** 1115 * \brief Net device set_mac_address 1116 * @param netdev network device 1117 */ 1118 static int liquidio_set_mac(struct net_device *netdev, void *p) 1119 { 1120 struct sockaddr *addr = (struct sockaddr *)p; 1121 struct lio *lio = GET_LIO(netdev); 1122 struct octeon_device *oct = lio->oct_dev; 1123 struct octnic_ctrl_pkt nctrl; 1124 int ret = 0; 1125 1126 if (!is_valid_ether_addr(addr->sa_data)) 1127 return -EADDRNOTAVAIL; 1128 1129 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1130 return 0; 1131 1132 if (lio->linfo.macaddr_is_admin_asgnd) 1133 return -EPERM; 1134 1135 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1136 1137 nctrl.ncmd.u64 = 0; 1138 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1139 nctrl.ncmd.s.param1 = 0; 1140 nctrl.ncmd.s.more = 1; 1141 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1142 nctrl.netpndev = (u64)netdev; 1143 1144 nctrl.udd[0] = 0; 1145 /* The MAC Address is presented in network byte order. */ 1146 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1147 1148 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1149 if (ret < 0) { 1150 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1151 return -ENOMEM; 1152 } 1153 1154 if (nctrl.sc_status == 1155 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { 1156 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); 1157 return -EPERM; 1158 } 1159 1160 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1161 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1162 1163 return 0; 1164 } 1165 1166 static void 1167 liquidio_get_stats64(struct net_device *netdev, 1168 struct rtnl_link_stats64 *lstats) 1169 { 1170 struct lio *lio = GET_LIO(netdev); 1171 struct octeon_device *oct; 1172 u64 pkts = 0, drop = 0, bytes = 0; 1173 struct oct_droq_stats *oq_stats; 1174 struct oct_iq_stats *iq_stats; 1175 int i, iq_no, oq_no; 1176 1177 oct = lio->oct_dev; 1178 1179 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1180 return; 1181 1182 for (i = 0; i < oct->num_iqs; i++) { 1183 iq_no = lio->linfo.txpciq[i].s.q_no; 1184 iq_stats = &oct->instr_queue[iq_no]->stats; 1185 pkts += iq_stats->tx_done; 1186 drop += iq_stats->tx_dropped; 1187 bytes += iq_stats->tx_tot_bytes; 1188 } 1189 1190 lstats->tx_packets = pkts; 1191 lstats->tx_bytes = bytes; 1192 lstats->tx_dropped = drop; 1193 1194 pkts = 0; 1195 drop = 0; 1196 bytes = 0; 1197 1198 for (i = 0; i < oct->num_oqs; i++) { 1199 oq_no = lio->linfo.rxpciq[i].s.q_no; 1200 oq_stats = &oct->droq[oq_no]->stats; 1201 pkts += oq_stats->rx_pkts_received; 1202 drop += (oq_stats->rx_dropped + 1203 oq_stats->dropped_nodispatch + 1204 oq_stats->dropped_toomany + 1205 oq_stats->dropped_nomem); 1206 bytes += oq_stats->rx_bytes_received; 1207 } 1208 1209 lstats->rx_bytes = bytes; 1210 lstats->rx_packets = pkts; 1211 lstats->rx_dropped = drop; 1212 1213 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 1214 1215 /* detailed rx_errors: */ 1216 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 1217 /* recved pkt with crc error */ 1218 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 1219 /* recv'd frame alignment error */ 1220 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 1221 1222 lstats->rx_errors = lstats->rx_length_errors + 
lstats->rx_crc_errors + 1223 lstats->rx_frame_errors; 1224 1225 /* detailed tx_errors */ 1226 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 1227 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 1228 1229 lstats->tx_errors = lstats->tx_aborted_errors + 1230 lstats->tx_carrier_errors; 1231 } 1232 1233 /** 1234 * \brief Handler for SIOCSHWTSTAMP ioctl 1235 * @param netdev network device 1236 * @param ifr interface request 1237 * @param cmd command 1238 */ 1239 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 1240 { 1241 struct lio *lio = GET_LIO(netdev); 1242 struct hwtstamp_config conf; 1243 1244 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 1245 return -EFAULT; 1246 1247 if (conf.flags) 1248 return -EINVAL; 1249 1250 switch (conf.tx_type) { 1251 case HWTSTAMP_TX_ON: 1252 case HWTSTAMP_TX_OFF: 1253 break; 1254 default: 1255 return -ERANGE; 1256 } 1257 1258 switch (conf.rx_filter) { 1259 case HWTSTAMP_FILTER_NONE: 1260 break; 1261 case HWTSTAMP_FILTER_ALL: 1262 case HWTSTAMP_FILTER_SOME: 1263 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1264 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1265 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1266 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1267 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1268 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1269 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1270 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1271 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1272 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1273 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1274 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1275 case HWTSTAMP_FILTER_NTP_ALL: 1276 conf.rx_filter = HWTSTAMP_FILTER_ALL; 1277 break; 1278 default: 1279 return -ERANGE; 1280 } 1281 1282 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 1283 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1284 1285 else 1286 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1287 1288 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 1289 } 1290 1291 /** 1292 * \brief ioctl handler 1293 * @param netdev network device 1294 * @param ifr interface request 1295 * @param cmd command 1296 */ 1297 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1298 { 1299 switch (cmd) { 1300 case SIOCSHWTSTAMP: 1301 return hwtstamp_ioctl(netdev, ifr); 1302 default: 1303 return -EOPNOTSUPP; 1304 } 1305 } 1306 1307 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 1308 { 1309 struct sk_buff *skb = (struct sk_buff *)buf; 1310 struct octnet_buf_free_info *finfo; 1311 struct oct_timestamp_resp *resp; 1312 struct octeon_soft_command *sc; 1313 struct lio *lio; 1314 1315 finfo = (struct octnet_buf_free_info *)skb->cb; 1316 lio = finfo->lio; 1317 sc = finfo->sc; 1318 oct = lio->oct_dev; 1319 resp = (struct oct_timestamp_resp *)sc->virtrptr; 1320 1321 if (status != OCTEON_REQUEST_DONE) { 1322 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static int send_nic_timestamp_pkt(struct octeon_device *oct,
				  struct octnic_data_pkt *ndata,
				  struct octnet_buf_free_info *finfo,
				  int xmit_more)
{
	struct octeon_soft_command *sc;
	int ring_doorbell;
	struct lio *lio;
	int retval;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

/** \brief Transmit network packets to the Octeon interface
 * @param skb skbuff struct handed down from the network layer
 * @param netdev pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *          (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_instr_irh *irh;
	struct oct_iq_stats *stats;
	struct octeon_device *oct;
	int q_idx = 0, iq_no = 0;
	union tx_info *tx_info;
	int xmit_more = 0;
	struct lio *lio;
	int status = 0;
	u64 dptr = 0;
	u32 tag = 0;
	int j;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	q_idx = skb_iq(lio->oct_dev, skb);
	tag = q_idx;
	iq_no = lio->linfo.txpciq[q_idx].s.q_no;

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
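	 * (interface not marked RUNNING, link down, or a zero-length skb)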
1432 */ 1433 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 1434 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 1435 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 1436 lio->linfo.link.s.link_up); 1437 goto lio_xmit_failed; 1438 } 1439 1440 /* Use space in skb->cb to store info used to unmap and 1441 * free the buffers. 1442 */ 1443 finfo = (struct octnet_buf_free_info *)skb->cb; 1444 finfo->lio = lio; 1445 finfo->skb = skb; 1446 finfo->sc = NULL; 1447 1448 /* Prepare the attributes for the data to be passed to OSI. */ 1449 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 1450 1451 ndata.buf = finfo; 1452 1453 ndata.q_no = iq_no; 1454 1455 if (octnet_iq_is_full(oct, ndata.q_no)) { 1456 /* defer sending if queue is full */ 1457 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1458 ndata.q_no); 1459 stats->tx_iq_busy++; 1460 return NETDEV_TX_BUSY; 1461 } 1462 1463 ndata.datasize = skb->len; 1464 1465 cmdsetup.u64 = 0; 1466 cmdsetup.s.iq_no = iq_no; 1467 1468 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1469 if (skb->encapsulation) { 1470 cmdsetup.s.tnl_csum = 1; 1471 stats->tx_vxlan++; 1472 } else { 1473 cmdsetup.s.transport_csum = 1; 1474 } 1475 } 1476 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1477 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1478 cmdsetup.s.timestamp = 1; 1479 } 1480 1481 if (!skb_shinfo(skb)->nr_frags) { 1482 cmdsetup.s.u.datasize = skb->len; 1483 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1484 /* Offload checksum calculation for TCP/UDP packets */ 1485 dptr = dma_map_single(&oct->pci_dev->dev, 1486 skb->data, 1487 skb->len, 1488 DMA_TO_DEVICE); 1489 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 1490 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 1491 __func__); 1492 return NETDEV_TX_BUSY; 1493 } 1494 1495 ndata.cmd.cmd3.dptr = dptr; 1496 finfo->dptr = dptr; 1497 ndata.reqtype = REQTYPE_NORESP_NET; 1498 1499 } else { 1500 struct skb_frag_struct *frag; 1501 struct octnic_gather *g; 1502 int i, frags; 1503 1504 spin_lock(&lio->glist_lock[q_idx]); 1505 g = (struct octnic_gather *) 1506 lio_list_delete_head(&lio->glist[q_idx]); 1507 spin_unlock(&lio->glist_lock[q_idx]); 1508 1509 if (!g) { 1510 netif_info(lio, tx_err, lio->netdev, 1511 "Transmit scatter gather: glist null!\n"); 1512 goto lio_xmit_failed; 1513 } 1514 1515 cmdsetup.s.gather = 1; 1516 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 1517 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1518 1519 memset(g->sg, 0, g->sg_size); 1520 1521 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 1522 skb->data, 1523 (skb->len - skb->data_len), 1524 DMA_TO_DEVICE); 1525 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 1526 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 1527 __func__); 1528 return NETDEV_TX_BUSY; 1529 } 1530 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 1531 1532 frags = skb_shinfo(skb)->nr_frags; 1533 i = 1; 1534 while (frags--) { 1535 frag = &skb_shinfo(skb)->frags[i - 1]; 1536 1537 g->sg[(i >> 2)].ptr[(i & 3)] = 1538 dma_map_page(&oct->pci_dev->dev, 1539 frag->page.p, 1540 frag->page_offset, 1541 frag->size, 1542 DMA_TO_DEVICE); 1543 if (dma_mapping_error(&oct->pci_dev->dev, 1544 g->sg[i >> 2].ptr[i & 3])) { 1545 dma_unmap_single(&oct->pci_dev->dev, 1546 g->sg[0].ptr[0], 1547 skb->len - skb->data_len, 1548 DMA_TO_DEVICE); 1549 for (j = 1; j < i; j++) { 1550 frag = &skb_shinfo(skb)->frags[j - 1]; 1551 dma_unmap_page(&oct->pci_dev->dev, 1552 
g->sg[j >> 2].ptr[j & 3], 1553 frag->size, 1554 DMA_TO_DEVICE); 1555 } 1556 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 1557 __func__); 1558 return NETDEV_TX_BUSY; 1559 } 1560 1561 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 1562 i++; 1563 } 1564 1565 dptr = g->sg_dma_ptr; 1566 1567 ndata.cmd.cmd3.dptr = dptr; 1568 finfo->dptr = dptr; 1569 finfo->g = g; 1570 1571 ndata.reqtype = REQTYPE_NORESP_NET_SG; 1572 } 1573 1574 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 1575 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 1576 1577 if (skb_shinfo(skb)->gso_size) { 1578 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 1579 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 1580 } 1581 1582 /* HW insert VLAN tag */ 1583 if (skb_vlan_tag_present(skb)) { 1584 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 1585 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 1586 } 1587 1588 xmit_more = netdev_xmit_more(); 1589 1590 if (unlikely(cmdsetup.s.timestamp)) 1591 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 1592 else 1593 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 1594 if (status == IQ_SEND_FAILED) 1595 goto lio_xmit_failed; 1596 1597 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 1598 1599 if (status == IQ_SEND_STOP) { 1600 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 1601 iq_no); 1602 netif_stop_subqueue(netdev, q_idx); 1603 } 1604 1605 netif_trans_update(netdev); 1606 1607 if (tx_info->s.gso_segs) 1608 stats->tx_done += tx_info->s.gso_segs; 1609 else 1610 stats->tx_done++; 1611 stats->tx_tot_bytes += ndata.datasize; 1612 1613 return NETDEV_TX_OK; 1614 1615 lio_xmit_failed: 1616 stats->tx_dropped++; 1617 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 1618 iq_no, stats->tx_dropped); 1619 if (dptr) 1620 dma_unmap_single(&oct->pci_dev->dev, dptr, 1621 ndata.datasize, DMA_TO_DEVICE); 1622 1623 octeon_ring_doorbell_locked(oct, iq_no); 1624 1625 tx_buffer_free(skb); 1626 return NETDEV_TX_OK; 1627 } 1628 1629 /** \brief Network device Tx timeout 1630 * @param netdev pointer to network device 1631 */ 1632 static void liquidio_tx_timeout(struct net_device *netdev) 1633 { 1634 struct lio *lio; 1635 1636 lio = GET_LIO(netdev); 1637 1638 netif_info(lio, tx_err, lio->netdev, 1639 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 1640 netdev->stats.tx_dropped); 1641 netif_trans_update(netdev); 1642 wake_txqs(netdev); 1643 } 1644 1645 static int 1646 liquidio_vlan_rx_add_vid(struct net_device *netdev, 1647 __be16 proto __attribute__((unused)), u16 vid) 1648 { 1649 struct lio *lio = GET_LIO(netdev); 1650 struct octeon_device *oct = lio->oct_dev; 1651 struct octnic_ctrl_pkt nctrl; 1652 int ret = 0; 1653 1654 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1655 1656 nctrl.ncmd.u64 = 0; 1657 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 1658 nctrl.ncmd.s.param1 = vid; 1659 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1660 nctrl.netpndev = (u64)netdev; 1661 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1662 1663 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1664 if (ret) { 1665 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 1666 ret); 1667 return -EPERM; 1668 } 1669 1670 return 0; 1671 } 1672 1673 static int 1674 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 1675 __be16 proto __attribute__((unused)), u16 vid) 1676 { 1677 struct lio *lio = GET_LIO(netdev); 1678 struct octeon_device *oct = lio->oct_dev; 1679 
struct octnic_ctrl_pkt nctrl; 1680 int ret = 0; 1681 1682 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1683 1684 nctrl.ncmd.u64 = 0; 1685 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 1686 nctrl.ncmd.s.param1 = vid; 1687 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1688 nctrl.netpndev = (u64)netdev; 1689 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1690 1691 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1692 if (ret) { 1693 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 1694 ret); 1695 if (ret > 0) 1696 ret = -EIO; 1697 } 1698 return ret; 1699 } 1700 1701 /** Sending command to enable/disable RX checksum offload 1702 * @param netdev pointer to network device 1703 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 1704 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 1705 * OCTNET_CMD_RXCSUM_DISABLE 1706 * @returns SUCCESS or FAILURE 1707 */ 1708 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 1709 u8 rx_cmd) 1710 { 1711 struct lio *lio = GET_LIO(netdev); 1712 struct octeon_device *oct = lio->oct_dev; 1713 struct octnic_ctrl_pkt nctrl; 1714 int ret = 0; 1715 1716 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1717 1718 nctrl.ncmd.u64 = 0; 1719 nctrl.ncmd.s.cmd = command; 1720 nctrl.ncmd.s.param1 = rx_cmd; 1721 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1722 nctrl.netpndev = (u64)netdev; 1723 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1724 1725 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1726 if (ret) { 1727 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 1728 ret); 1729 if (ret > 0) 1730 ret = -EIO; 1731 } 1732 return ret; 1733 } 1734 1735 /** Sending command to add/delete VxLAN UDP port to firmware 1736 * @param netdev pointer to network device 1737 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 1738 * @param vxlan_port VxLAN port to be added or deleted 1739 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 1740 * OCTNET_CMD_VXLAN_PORT_DEL 1741 * @returns SUCCESS or FAILURE 1742 */ 1743 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 1744 u16 vxlan_port, u8 vxlan_cmd_bit) 1745 { 1746 struct lio *lio = GET_LIO(netdev); 1747 struct octeon_device *oct = lio->oct_dev; 1748 struct octnic_ctrl_pkt nctrl; 1749 int ret = 0; 1750 1751 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1752 1753 nctrl.ncmd.u64 = 0; 1754 nctrl.ncmd.s.cmd = command; 1755 nctrl.ncmd.s.more = vxlan_cmd_bit; 1756 nctrl.ncmd.s.param1 = vxlan_port; 1757 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1758 nctrl.netpndev = (u64)netdev; 1759 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1760 1761 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1762 if (ret) { 1763 dev_err(&oct->pci_dev->dev, 1764 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 1765 ret); 1766 if (ret > 0) 1767 ret = -EIO; 1768 } 1769 return ret; 1770 } 1771 1772 /** \brief Net device fix features 1773 * @param netdev pointer to network device 1774 * @param request features requested 1775 * @returns updated features list 1776 */ 1777 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 1778 netdev_features_t request) 1779 { 1780 struct lio *lio = netdev_priv(netdev); 1781 1782 if ((request & NETIF_F_RXCSUM) && 1783 !(lio->dev_capability & NETIF_F_RXCSUM)) 1784 request &= ~NETIF_F_RXCSUM; 1785 1786 if ((request & NETIF_F_HW_CSUM) && 1787 !(lio->dev_capability & NETIF_F_HW_CSUM)) 1788 request &= ~NETIF_F_HW_CSUM; 1789 1790 if ((request & NETIF_F_TSO) && 
!(lio->dev_capability & NETIF_F_TSO)) 1791 request &= ~NETIF_F_TSO; 1792 1793 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 1794 request &= ~NETIF_F_TSO6; 1795 1796 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 1797 request &= ~NETIF_F_LRO; 1798 1799 /* Disable LRO if RXCSUM is off */ 1800 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 1801 (lio->dev_capability & NETIF_F_LRO)) 1802 request &= ~NETIF_F_LRO; 1803 1804 return request; 1805 } 1806 1807 /** \brief Net device set features 1808 * @param netdev pointer to network device 1809 * @param features features to enable/disable 1810 */ 1811 static int liquidio_set_features(struct net_device *netdev, 1812 netdev_features_t features) 1813 { 1814 struct lio *lio = netdev_priv(netdev); 1815 1816 if (!((netdev->features ^ features) & NETIF_F_LRO)) 1817 return 0; 1818 1819 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 1820 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 1821 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1822 else if (!(features & NETIF_F_LRO) && 1823 (lio->dev_capability & NETIF_F_LRO)) 1824 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 1825 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1826 if (!(netdev->features & NETIF_F_RXCSUM) && 1827 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1828 (features & NETIF_F_RXCSUM)) 1829 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1830 OCTNET_CMD_RXCSUM_ENABLE); 1831 else if ((netdev->features & NETIF_F_RXCSUM) && 1832 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1833 !(features & NETIF_F_RXCSUM)) 1834 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1835 OCTNET_CMD_RXCSUM_DISABLE); 1836 1837 return 0; 1838 } 1839 1840 static void liquidio_add_vxlan_port(struct net_device *netdev, 1841 struct udp_tunnel_info *ti) 1842 { 1843 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 1844 return; 1845 1846 liquidio_vxlan_port_command(netdev, 1847 OCTNET_CMD_VXLAN_PORT_CONFIG, 1848 htons(ti->port), 1849 OCTNET_CMD_VXLAN_PORT_ADD); 1850 } 1851 1852 static void liquidio_del_vxlan_port(struct net_device *netdev, 1853 struct udp_tunnel_info *ti) 1854 { 1855 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 1856 return; 1857 1858 liquidio_vxlan_port_command(netdev, 1859 OCTNET_CMD_VXLAN_PORT_CONFIG, 1860 htons(ti->port), 1861 OCTNET_CMD_VXLAN_PORT_DEL); 1862 } 1863 1864 static const struct net_device_ops lionetdevops = { 1865 .ndo_open = liquidio_open, 1866 .ndo_stop = liquidio_stop, 1867 .ndo_start_xmit = liquidio_xmit, 1868 .ndo_get_stats64 = liquidio_get_stats64, 1869 .ndo_set_mac_address = liquidio_set_mac, 1870 .ndo_set_rx_mode = liquidio_set_mcast_list, 1871 .ndo_tx_timeout = liquidio_tx_timeout, 1872 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 1873 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 1874 .ndo_change_mtu = liquidio_change_mtu, 1875 .ndo_do_ioctl = liquidio_ioctl, 1876 .ndo_fix_features = liquidio_fix_features, 1877 .ndo_set_features = liquidio_set_features, 1878 .ndo_udp_tunnel_add = liquidio_add_vxlan_port, 1879 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 1880 }; 1881 1882 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 1883 { 1884 struct octeon_device *oct = (struct octeon_device *)buf; 1885 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 1886 union oct_link_status *ls; 1887 int gmxport = 0; 1888 int i; 1889 1890 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 1891 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, 
ifidx=%d\n", 1892 recv_pkt->buffer_size[0], 1893 recv_pkt->rh.r_nic_info.gmxport); 1894 goto nic_info_err; 1895 } 1896 1897 gmxport = recv_pkt->rh.r_nic_info.gmxport; 1898 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 1899 OCT_DROQ_INFO_SIZE); 1900 1901 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 1902 1903 for (i = 0; i < oct->ifcount; i++) { 1904 if (oct->props[i].gmxport == gmxport) { 1905 update_link_status(oct->props[i].netdev, ls); 1906 break; 1907 } 1908 } 1909 1910 nic_info_err: 1911 for (i = 0; i < recv_pkt->buffer_count; i++) 1912 recv_buffer_free(recv_pkt->buffer_ptr[i]); 1913 octeon_free_recv_info(recv_info); 1914 return 0; 1915 } 1916 1917 /** 1918 * \brief Setup network interfaces 1919 * @param octeon_dev octeon device 1920 * 1921 * Called during init time for each device. It assumes the NIC 1922 * is already up and running. The link information for each 1923 * interface is passed in link_info. 1924 */ 1925 static int setup_nic_devices(struct octeon_device *octeon_dev) 1926 { 1927 int retval, num_iqueues, num_oqueues; 1928 u32 resp_size, data_size; 1929 struct liquidio_if_cfg_resp *resp; 1930 struct octeon_soft_command *sc; 1931 union oct_nic_if_cfg if_cfg; 1932 struct octdev_props *props; 1933 struct net_device *netdev; 1934 struct lio_version *vdata; 1935 struct lio *lio = NULL; 1936 u8 mac[ETH_ALEN], i, j; 1937 u32 ifidx_or_pfnum; 1938 1939 ifidx_or_pfnum = octeon_dev->pf_num; 1940 1941 /* This is to handle link status changes */ 1942 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 1943 lio_nic_info, octeon_dev); 1944 1945 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 1946 * They are handled directly. 1947 */ 1948 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 1949 free_netbuf); 1950 1951 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 1952 free_netsgbuf); 1953 1954 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 1955 free_netsgbuf_with_resp); 1956 1957 for (i = 0; i < octeon_dev->ifcount; i++) { 1958 resp_size = sizeof(struct liquidio_if_cfg_resp); 1959 data_size = sizeof(struct lio_version); 1960 sc = (struct octeon_soft_command *) 1961 octeon_alloc_soft_command(octeon_dev, data_size, 1962 resp_size, 0); 1963 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1964 vdata = (struct lio_version *)sc->virtdptr; 1965 1966 *((u64 *)vdata) = 0; 1967 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1968 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1969 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1970 1971 if_cfg.u64 = 0; 1972 1973 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 1974 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 1975 if_cfg.s.base_queue = 0; 1976 1977 sc->iq_no = 0; 1978 1979 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 1980 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 1981 0); 1982 1983 init_completion(&sc->complete); 1984 sc->sc_status = OCTEON_REQUEST_PENDING; 1985 1986 retval = octeon_send_soft_command(octeon_dev, sc); 1987 if (retval == IQ_SEND_FAILED) { 1988 dev_err(&octeon_dev->pci_dev->dev, 1989 "iq/oq config failed status: %x\n", retval); 1990 /* Soft instr is freed by driver in case of failure. */ 1991 octeon_free_soft_command(octeon_dev, sc); 1992 return(-EIO); 1993 } 1994 1995 /* Sleep on a wait queue till the cond flag indicates that the 1996 * response arrived or timed-out. 
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	union oct_link_status *ls;
	int gmxport = 0;
	int i;

	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
				       OCT_DROQ_INFO_SIZE);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);

	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	int retval, num_iqueues, num_oqueues;
	u32 resp_size, data_size;
	struct liquidio_if_cfg_resp *resp;
	struct octeon_soft_command *sc;
	union oct_nic_if_cfg if_cfg;
	struct octdev_props *props;
	struct net_device *netdev;
	struct lio_version *vdata;
	struct lio *lio = NULL;
	u8 mac[ETH_ALEN], i, j;
	u32 ifidx_or_pfnum;

	ifidx_or_pfnum = octeon_dev->pf_num;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, 0);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Soft command allocation failed\n");
			goto setup_nic_dev_done;
		}
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if_cfg.u64 = 0;

		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.base_queue = 0;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
					    0);

		init_completion(&sc->complete);
		sc->sc_status = OCTEON_REQUEST_PENDING;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n", retval);
			/* Soft instr is freed by driver in case of failure. */
			octeon_free_soft_command(octeon_dev, sc);
			return -EIO;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed out.
		 */
		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed, retval = %d\n", retval);
			WRITE_ONCE(sc->caller_is_done, true);
			return -EIO;
		}

		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
			 32, "%s",
			 resp->cfg_info.liquidio_firmware_version);

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!num_iqueues || !num_oqueues) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);

		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;

		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
		lio->linfo.macaddr_is_admin_asgnd =
			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
		lio->linfo.macaddr_spoofchk =
			resp->cfg_info.linfo.macaddr_spoofchk;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		lio->dev_capability = NETIF_F_HIGHDMA
				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				      | NETIF_F_SG | NETIF_F_RXCSUM
				      | NETIF_F_TSO | NETIF_F_TSO6
				      | NETIF_F_GRO
				      | NETIF_F_LRO;
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features =
			(lio->enc_dev_capability & ~NETIF_F_LRO);
		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		WRITE_ONCE(sc->caller_is_done, true);

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
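		/* After the swap, the MAC occupies the last six bytes of the
		 * 8-byte hw_addr field; skip the two leading bytes.
		 */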
		for (j = 0; j < ETH_ALEN; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */
		ether_addr_copy(netdev->dev_addr, mac);

		if (liquidio_setup_io_queues(octeon_dev, i,
					     lio->linfo.num_txpciq,
					     lio->linfo.num_rxpciq)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_free;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		/* For VFs, enable Octeon device interrupts here,
		 * as this is contingent upon IO queue setup
		 */
		octeon_dev->fn_list.enable_interrupt(octeon_dev,
						     OCTEON_ALL_INTR);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_free;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_free;

		if (setup_rx_oom_poll_fn(netdev))
			goto setup_nic_dev_free;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_free;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Send a command to firmware to enable Rx checksum offload
		 * by default at the time of setup of the liquidio driver for
		 * this device.
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_dev->no_speed_setting = 1;
	}

	return 0;

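/* Error unwind: tear down the interfaces that were fully set up in earlier
 * iterations of the loop above.
 */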
setup_nic_dev_free:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}

setup_nic_dev_done:

	return -ENODEV;
}

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	int num_nic_ports = 1;
	int i, retval = 0;

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* Only the default IQ and OQ were initialized; initialize the rest
	 * as well and run the port_config command for each port.
	 */
	oct->ifcount = num_nic_ports;
	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param oct octeon device
 */
static int octeon_device_init(struct octeon_device *oct)
{
	u32 rev_id;
	int j;

	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(oct))
		return 1;
	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);

	oct->chip_id = OCTEON_CN23XX_VF_VID;
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	if (cn23xx_setup_octeon_vf_device(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);

	oct->app_mode = CVM_DRV_NIC_APP;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
		return 1;
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(oct)) {
		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(oct)) {
		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);

	if (oct->fn_list.setup_mbox(oct)) {
		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);

	if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);

	/* Setup the interrupt handler and record the INT SUM register address */
	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
		return 1;

	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);

	/* ***************************************************************
	 * The interrupts need to be enabled for the PF<-->VF handshake.
	 * They are [re]-enabled after the PF<-->VF handshake so that the
	 * correct OQ tick value is used (i.e. the value retrieved from
	 * the PF as part of the handshake).
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	if (cn23xx_octeon_pfvf_handshake(oct))
		return 1;

	/* Here we [re]-enable the interrupts so that the correct OQ tick value
	 * is used (i.e. the value that was retrieved during the handshake)
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
	/* *************************************************************** */

	/* Enable the input and output queues for this Octeon device */
	if (oct->fn_list.enable_io_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
		return 1;
	}

	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);

	atomic_set(&oct->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < oct->num_oqs; j++)
		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (liquidio_init_nic_module(oct))
		return 1;

	return 0;
}

static int __init liquidio_vf_init(void)
{
	octeon_init_device_list(0);
	return pci_register_driver(&liquidio_vf_pci_driver);
}

static void __exit liquidio_vf_exit(void)
{
	pci_unregister_driver(&liquidio_vf_pci_driver);

	pr_info("LiquidIO_VF network module is now unloaded\n");
}

module_init(liquidio_vf_init);
module_exit(liquidio_vf_exit);