1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <net/vxlan.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn23xx_vf_device.h" 31 32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 34 MODULE_LICENSE("GPL"); 35 MODULE_VERSION(LIQUIDIO_VERSION); 36 37 static int debug = -1; 38 module_param(debug, int, 0644); 39 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 40 41 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 42 43 struct oct_timestamp_resp { 44 u64 rh; 45 u64 timestamp; 46 u64 status; 47 }; 48 49 union tx_info { 50 u64 u64; 51 struct { 52 #ifdef __BIG_ENDIAN_BITFIELD 53 u16 gso_size; 54 u16 gso_segs; 55 u32 reserved; 56 #else 57 u32 reserved; 58 u16 gso_segs; 59 u16 gso_size; 60 #endif 61 } s; 62 }; 63 64 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 65 #define OCTNIC_GSO_MAX_SIZE \ 66 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 67 68 static int 69 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 70 static void liquidio_vf_remove(struct pci_dev *pdev); 71 static int octeon_device_init(struct octeon_device *oct); 72 static int liquidio_stop(struct net_device *netdev); 73 74 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 75 { 76 struct octeon_device_priv *oct_priv = 77 (struct octeon_device_priv *)oct->priv; 78 int retry = MAX_IO_PENDING_PKT_COUNT; 79 int pkt_cnt = 0, pending_pkts; 80 int i; 81 82 do { 83 pending_pkts = 0; 84 85 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 86 if (!(oct->io_qmask.oq & BIT_ULL(i))) 87 continue; 88 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 89 } 90 if (pkt_cnt > 0) { 91 pending_pkts += pkt_cnt; 92 tasklet_schedule(&oct_priv->droq_tasklet); 93 } 94 pkt_cnt = 0; 95 schedule_timeout_uninterruptible(1); 96 97 } while (retry-- && pending_pkts); 98 99 return pkt_cnt; 100 } 101 102 /** 103 * \brief Cause device to go quiet so it can be safely removed/reset/etc 104 * @param oct Pointer to Octeon device 105 */ 106 static void pcierror_quiesce_device(struct octeon_device *oct) 107 { 108 int i; 109 110 /* Disable the input and output queues now. No more packets will 111 * arrive from Octeon, but we should wait for all packet processing 112 * to finish. 
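 * Any requests still pending in the instruction queues are then flushed
 * and outstanding ordered-list requests are forced to time out.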
113 */ 114 115 /* To allow for in-flight requests */ 116 schedule_timeout_uninterruptible(100); 117 118 if (wait_for_pending_requests(oct)) 119 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 120 121 /* Force all requests waiting to be fetched by OCTEON to complete. */ 122 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 123 struct octeon_instr_queue *iq; 124 125 if (!(oct->io_qmask.iq & BIT_ULL(i))) 126 continue; 127 iq = oct->instr_queue[i]; 128 129 if (atomic_read(&iq->instr_pending)) { 130 spin_lock_bh(&iq->lock); 131 iq->fill_cnt = 0; 132 iq->octeon_read_index = iq->host_write_index; 133 iq->stats.instr_processed += 134 atomic_read(&iq->instr_pending); 135 lio_process_iq_request_list(oct, iq, 0); 136 spin_unlock_bh(&iq->lock); 137 } 138 } 139 140 /* Force all pending ordered list requests to time out. */ 141 lio_process_ordered_list(oct, 1); 142 143 /* We do not need to wait for output queue packets to be processed. */ 144 } 145 146 /** 147 * \brief Cleanup PCI AER uncorrectable error status 148 * @param dev Pointer to PCI device 149 */ 150 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 151 { 152 u32 status, mask; 153 int pos = 0x100; 154 155 pr_info("%s :\n", __func__); 156 157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 158 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 159 if (dev->error_state == pci_channel_io_normal) 160 status &= ~mask; /* Clear corresponding nonfatal bits */ 161 else 162 status &= mask; /* Clear corresponding fatal bits */ 163 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 164 } 165 166 /** 167 * \brief Stop all PCI IO to a given device 168 * @param dev Pointer to Octeon device 169 */ 170 static void stop_pci_io(struct octeon_device *oct) 171 { 172 struct msix_entry *msix_entries; 173 int i; 174 175 /* No more instructions will be forwarded. */ 176 atomic_set(&oct->status, OCT_DEV_IN_RESET); 177 178 for (i = 0; i < oct->ifcount; i++) 179 netif_device_detach(oct->props[i].netdev); 180 181 /* Disable interrupts */ 182 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 183 184 pcierror_quiesce_device(oct); 185 if (oct->msix_on) { 186 msix_entries = (struct msix_entry *)oct->msix_entries; 187 for (i = 0; i < oct->num_msix_irqs; i++) { 188 /* clear the affinity_cpumask */ 189 irq_set_affinity_hint(msix_entries[i].vector, 190 NULL); 191 free_irq(msix_entries[i].vector, 192 &oct->ioq_vector[i]); 193 } 194 pci_disable_msix(oct->pci_dev); 195 kfree(oct->msix_entries); 196 oct->msix_entries = NULL; 197 octeon_free_ioq_vector(oct); 198 } 199 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 200 lio_get_state_string(&oct->status)); 201 202 /* making it a common function for all OCTEON models */ 203 cleanup_aer_uncorrect_error_status(oct->pci_dev); 204 205 pci_disable_device(oct->pci_dev); 206 } 207 208 /** 209 * \brief called when PCI error is detected 210 * @param pdev Pointer to PCI device 211 * @param state The current pci connection state 212 * 213 * This function is called after a PCI bus error affecting 214 * this device has been detected. 
215 */ 216 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 217 pci_channel_state_t state) 218 { 219 struct octeon_device *oct = pci_get_drvdata(pdev); 220 221 /* Non-correctable Non-fatal errors */ 222 if (state == pci_channel_io_normal) { 223 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 224 cleanup_aer_uncorrect_error_status(oct->pci_dev); 225 return PCI_ERS_RESULT_CAN_RECOVER; 226 } 227 228 /* Non-correctable Fatal errors */ 229 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 230 stop_pci_io(oct); 231 232 return PCI_ERS_RESULT_DISCONNECT; 233 } 234 235 /* For PCI-E Advanced Error Recovery (AER) Interface */ 236 static const struct pci_error_handlers liquidio_vf_err_handler = { 237 .error_detected = liquidio_pcie_error_detected, 238 }; 239 240 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 241 { 242 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 244 }, 245 { 246 0, 0, 0, 0, 0, 0, 0 247 } 248 }; 249 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 250 251 static struct pci_driver liquidio_vf_pci_driver = { 252 .name = "LiquidIO_VF", 253 .id_table = liquidio_vf_pci_tbl, 254 .probe = liquidio_vf_probe, 255 .remove = liquidio_vf_remove, 256 .err_handler = &liquidio_vf_err_handler, /* For AER */ 257 }; 258 259 /** 260 * \brief Print link information 261 * @param netdev network device 262 */ 263 static void print_link_info(struct net_device *netdev) 264 { 265 struct lio *lio = GET_LIO(netdev); 266 267 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 268 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 269 struct oct_link_info *linfo = &lio->linfo; 270 271 if (linfo->link.s.link_up) { 272 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 273 linfo->link.s.speed, 274 (linfo->link.s.duplex) ? "Full" : "Half"); 275 } else { 276 netif_info(lio, link, lio->netdev, "Link Down\n"); 277 } 278 } 279 } 280 281 /** 282 * \brief Routine to notify MTU change 283 * @param work work_struct data structure 284 */ 285 static void octnet_link_status_change(struct work_struct *work) 286 { 287 struct cavium_wk *wk = (struct cavium_wk *)work; 288 struct lio *lio = (struct lio *)wk->ctxptr; 289 290 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 291 * this API is invoked only when new max-MTU of the interface is 292 * less than current MTU. 
293 */ 294 rtnl_lock(); 295 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 296 rtnl_unlock(); 297 } 298 299 /** 300 * \brief Sets up the mtu status change work 301 * @param netdev network device 302 */ 303 static int setup_link_status_change_wq(struct net_device *netdev) 304 { 305 struct lio *lio = GET_LIO(netdev); 306 struct octeon_device *oct = lio->oct_dev; 307 308 lio->link_status_wq.wq = alloc_workqueue("link-status", 309 WQ_MEM_RECLAIM, 0); 310 if (!lio->link_status_wq.wq) { 311 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 312 return -1; 313 } 314 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 315 octnet_link_status_change); 316 lio->link_status_wq.wk.ctxptr = lio; 317 318 return 0; 319 } 320 321 static void cleanup_link_status_change_wq(struct net_device *netdev) 322 { 323 struct lio *lio = GET_LIO(netdev); 324 325 if (lio->link_status_wq.wq) { 326 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 327 destroy_workqueue(lio->link_status_wq.wq); 328 } 329 } 330 331 /** 332 * \brief Update link status 333 * @param netdev network device 334 * @param ls link status structure 335 * 336 * Called on receipt of a link status response from the core application to 337 * update each interface's link status. 338 */ 339 static void update_link_status(struct net_device *netdev, 340 union oct_link_status *ls) 341 { 342 struct lio *lio = GET_LIO(netdev); 343 int current_max_mtu = lio->linfo.link.s.mtu; 344 struct octeon_device *oct = lio->oct_dev; 345 346 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 347 lio->linfo.link.u64 = ls->u64; 348 349 print_link_info(netdev); 350 lio->link_changes++; 351 352 if (lio->linfo.link.s.link_up) { 353 netif_carrier_on(netdev); 354 wake_txqs(netdev); 355 } else { 356 netif_carrier_off(netdev); 357 stop_txqs(netdev); 358 } 359 360 if (lio->linfo.link.s.mtu != current_max_mtu) { 361 dev_info(&oct->pci_dev->dev, 362 "Max MTU Changed from %d to %d\n", 363 current_max_mtu, lio->linfo.link.s.mtu); 364 netdev->max_mtu = lio->linfo.link.s.mtu; 365 } 366 367 if (lio->linfo.link.s.mtu < netdev->mtu) { 368 dev_warn(&oct->pci_dev->dev, 369 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 370 netdev->mtu, lio->linfo.link.s.mtu); 371 queue_delayed_work(lio->link_status_wq.wq, 372 &lio->link_status_wq.wk.work, 0); 373 } 374 } 375 } 376 377 /** 378 * \brief PCI probe handler 379 * @param pdev PCI device structure 380 * @param ent unused 381 */ 382 static int 383 liquidio_vf_probe(struct pci_dev *pdev, 384 const struct pci_device_id *ent __attribute__((unused))) 385 { 386 struct octeon_device *oct_dev = NULL; 387 388 oct_dev = octeon_allocate_device(pdev->device, 389 sizeof(struct octeon_device_priv)); 390 391 if (!oct_dev) { 392 dev_err(&pdev->dev, "Unable to allocate device\n"); 393 return -ENOMEM; 394 } 395 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 396 397 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 398 (u32)pdev->vendor, (u32)pdev->device); 399 400 /* Assign octeon_device for this device to the private data area. */ 401 pci_set_drvdata(pdev, oct_dev); 402 403 /* set linux specific device pointer */ 404 oct_dev->pci_dev = pdev; 405 406 oct_dev->subsystem_id = pdev->subsystem_vendor | 407 (pdev->subsystem_device << 16); 408 409 if (octeon_device_init(oct_dev)) { 410 liquidio_vf_remove(pdev); 411 return -ENOMEM; 412 } 413 414 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 415 416 return 0; 417 } 418 419 /** 420 * \brief PCI FLR for each Octeon device. 
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	pcie_flr(oct->pci_dev);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:
		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
485 */ 486 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 487 struct octeon_instr_queue *iq; 488 489 if (!(oct->io_qmask.iq & BIT_ULL(i))) 490 continue; 491 iq = oct->instr_queue[i]; 492 493 if (atomic_read(&iq->instr_pending)) { 494 spin_lock_bh(&iq->lock); 495 iq->fill_cnt = 0; 496 iq->octeon_read_index = iq->host_write_index; 497 iq->stats.instr_processed += 498 atomic_read(&iq->instr_pending); 499 lio_process_iq_request_list(oct, iq, 0); 500 spin_unlock_bh(&iq->lock); 501 } 502 } 503 504 lio_process_ordered_list(oct, 1); 505 octeon_free_sc_done_list(oct); 506 octeon_free_sc_zombie_list(oct); 507 508 /* fall through */ 509 case OCT_DEV_INTR_SET_DONE: 510 /* Disable interrupts */ 511 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 512 513 if (oct->msix_on) { 514 msix_entries = (struct msix_entry *)oct->msix_entries; 515 for (i = 0; i < oct->num_msix_irqs; i++) { 516 if (oct->ioq_vector[i].vector) { 517 irq_set_affinity_hint( 518 msix_entries[i].vector, 519 NULL); 520 free_irq(msix_entries[i].vector, 521 &oct->ioq_vector[i]); 522 oct->ioq_vector[i].vector = 0; 523 } 524 } 525 pci_disable_msix(oct->pci_dev); 526 kfree(oct->msix_entries); 527 oct->msix_entries = NULL; 528 kfree(oct->irq_name_storage); 529 oct->irq_name_storage = NULL; 530 } 531 /* Soft reset the octeon device before exiting */ 532 if (oct->pci_dev->reset_fn) 533 octeon_pci_flr(oct); 534 else 535 cn23xx_vf_ask_pf_to_do_flr(oct); 536 537 /* fallthrough */ 538 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 539 octeon_free_ioq_vector(oct); 540 541 /* fallthrough */ 542 case OCT_DEV_MBOX_SETUP_DONE: 543 oct->fn_list.free_mbox(oct); 544 545 /* fallthrough */ 546 case OCT_DEV_IN_RESET: 547 case OCT_DEV_DROQ_INIT_DONE: 548 mdelay(100); 549 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 550 if (!(oct->io_qmask.oq & BIT_ULL(i))) 551 continue; 552 octeon_delete_droq(oct, i); 553 } 554 555 /* fallthrough */ 556 case OCT_DEV_RESP_LIST_INIT_DONE: 557 octeon_delete_response_list(oct); 558 559 /* fallthrough */ 560 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 561 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 562 if (!(oct->io_qmask.iq & BIT_ULL(i))) 563 continue; 564 octeon_delete_instr_queue(oct, i); 565 } 566 567 /* fallthrough */ 568 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 569 octeon_free_sc_buffer_pool(oct); 570 571 /* fallthrough */ 572 case OCT_DEV_DISPATCH_INIT_DONE: 573 octeon_delete_dispatch_list(oct); 574 cancel_delayed_work_sync(&oct->nic_poll_work.work); 575 576 /* fallthrough */ 577 case OCT_DEV_PCI_MAP_DONE: 578 octeon_unmap_pci_barx(oct, 0); 579 octeon_unmap_pci_barx(oct, 1); 580 581 /* fallthrough */ 582 case OCT_DEV_PCI_ENABLE_DONE: 583 pci_clear_master(oct->pci_dev); 584 /* Disable the device, releasing the PCI INT */ 585 pci_disable_device(oct->pci_dev); 586 587 /* fallthrough */ 588 case OCT_DEV_BEGIN_STATE: 589 /* Nothing to be done here either */ 590 break; 591 } 592 593 tasklet_kill(&oct_priv->droq_tasklet); 594 } 595 596 /** 597 * \brief Send Rx control command 598 * @param lio per-network private data 599 * @param start_stop whether to start or stop 600 */ 601 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 602 { 603 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 604 struct octeon_soft_command *sc; 605 union octnet_cmd *ncmd; 606 int retval; 607 608 if (oct->props[lio->ifidx].rx_on == start_stop) 609 return; 610 611 sc = (struct octeon_soft_command *) 612 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 613 16, 0); 614 615 ncmd = (union octnet_cmd *)sc->virtdptr; 616 617 
ncmd->u64 = 0; 618 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 619 ncmd->s.param1 = start_stop; 620 621 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 622 623 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 624 625 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 626 OPCODE_NIC_CMD, 0, 0, 0); 627 628 init_completion(&sc->complete); 629 sc->sc_status = OCTEON_REQUEST_PENDING; 630 631 retval = octeon_send_soft_command(oct, sc); 632 if (retval == IQ_SEND_FAILED) { 633 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 634 octeon_free_soft_command(oct, sc); 635 } else { 636 /* Sleep on a wait queue till the cond flag indicates that the 637 * response arrived or timed-out. 638 */ 639 retval = wait_for_sc_completion_timeout(oct, sc, 0); 640 if (retval) 641 return; 642 643 oct->props[lio->ifidx].rx_on = start_stop; 644 WRITE_ONCE(sc->caller_is_done, true); 645 } 646 } 647 648 /** 649 * \brief Destroy NIC device interface 650 * @param oct octeon device 651 * @param ifidx which interface to destroy 652 * 653 * Cleanup associated with each interface for an Octeon device when NIC 654 * module is being unloaded or if initialization fails during load. 655 */ 656 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 657 { 658 struct net_device *netdev = oct->props[ifidx].netdev; 659 struct octeon_device_priv *oct_priv = 660 (struct octeon_device_priv *)oct->priv; 661 struct napi_struct *napi, *n; 662 struct lio *lio; 663 664 if (!netdev) { 665 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 666 __func__, ifidx); 667 return; 668 } 669 670 lio = GET_LIO(netdev); 671 672 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 673 674 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 675 liquidio_stop(netdev); 676 677 if (oct->props[lio->ifidx].napi_enabled == 1) { 678 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 679 napi_disable(napi); 680 681 oct->props[lio->ifidx].napi_enabled = 0; 682 683 oct->droq[0]->ops.poll_mode = 0; 684 } 685 686 /* Delete NAPI */ 687 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 688 netif_napi_del(napi); 689 690 tasklet_enable(&oct_priv->droq_tasklet); 691 692 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 693 unregister_netdev(netdev); 694 695 cleanup_rx_oom_poll_fn(netdev); 696 697 cleanup_link_status_change_wq(netdev); 698 699 lio_delete_glists(lio); 700 701 free_netdev(netdev); 702 703 oct->props[ifidx].gmxport = -1; 704 705 oct->props[ifidx].netdev = NULL; 706 } 707 708 /** 709 * \brief Stop complete NIC functionality 710 * @param oct octeon device 711 */ 712 static int liquidio_stop_nic_module(struct octeon_device *oct) 713 { 714 struct lio *lio; 715 int i, j; 716 717 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 718 if (!oct->ifcount) { 719 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 720 return 1; 721 } 722 723 spin_lock_bh(&oct->cmd_resp_wqlock); 724 oct->cmd_resp_state = OCT_DRV_OFFLINE; 725 spin_unlock_bh(&oct->cmd_resp_wqlock); 726 727 for (i = 0; i < oct->ifcount; i++) { 728 lio = GET_LIO(oct->props[i].netdev); 729 for (j = 0; j < oct->num_oqs; j++) 730 octeon_unregister_droq_ops(oct, 731 lio->linfo.rxpciq[j].s.q_no); 732 } 733 734 for (i = 0; i < oct->ifcount; i++) 735 liquidio_destroy_nic_device(oct, i); 736 737 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 738 return 0; 739 } 740 741 /** 742 * \brief Cleans up resources at unload time 743 * @param pdev PCI device structure 744 */ 745 static void 
liquidio_vf_remove(struct pci_dev *pdev) 746 { 747 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 748 749 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 750 751 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 752 liquidio_stop_nic_module(oct_dev); 753 754 /* Reset the octeon device and cleanup all memory allocated for 755 * the octeon device by driver. 756 */ 757 octeon_destroy_resources(oct_dev); 758 759 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 760 761 /* This octeon device has been removed. Update the global 762 * data structure to reflect this. Free the device structure. 763 */ 764 octeon_free_device_mem(oct_dev); 765 } 766 767 /** 768 * \brief PCI initialization for each Octeon device. 769 * @param oct octeon device 770 */ 771 static int octeon_pci_os_setup(struct octeon_device *oct) 772 { 773 #ifdef CONFIG_PCI_IOV 774 /* setup PCI stuff first */ 775 if (!oct->pci_dev->physfn) 776 octeon_pci_flr(oct); 777 #endif 778 779 if (pci_enable_device(oct->pci_dev)) { 780 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 781 return 1; 782 } 783 784 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 785 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 786 pci_disable_device(oct->pci_dev); 787 return 1; 788 } 789 790 /* Enable PCI DMA Master. */ 791 pci_set_master(oct->pci_dev); 792 793 return 0; 794 } 795 796 /** 797 * \brief Unmap and free network buffer 798 * @param buf buffer 799 */ 800 static void free_netbuf(void *buf) 801 { 802 struct octnet_buf_free_info *finfo; 803 struct sk_buff *skb; 804 struct lio *lio; 805 806 finfo = (struct octnet_buf_free_info *)buf; 807 skb = finfo->skb; 808 lio = finfo->lio; 809 810 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 811 DMA_TO_DEVICE); 812 813 tx_buffer_free(skb); 814 } 815 816 /** 817 * \brief Unmap and free gather buffer 818 * @param buf buffer 819 */ 820 static void free_netsgbuf(void *buf) 821 { 822 struct octnet_buf_free_info *finfo; 823 struct octnic_gather *g; 824 struct sk_buff *skb; 825 int i, frags, iq; 826 struct lio *lio; 827 828 finfo = (struct octnet_buf_free_info *)buf; 829 skb = finfo->skb; 830 lio = finfo->lio; 831 g = finfo->g; 832 frags = skb_shinfo(skb)->nr_frags; 833 834 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 835 g->sg[0].ptr[0], (skb->len - skb->data_len), 836 DMA_TO_DEVICE); 837 838 i = 1; 839 while (frags--) { 840 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 841 842 pci_unmap_page((lio->oct_dev)->pci_dev, 843 g->sg[(i >> 2)].ptr[(i & 3)], 844 skb_frag_size(frag), DMA_TO_DEVICE); 845 i++; 846 } 847 848 iq = skb_iq(lio->oct_dev, skb); 849 850 spin_lock(&lio->glist_lock[iq]); 851 list_add_tail(&g->list, &lio->glist[iq]); 852 spin_unlock(&lio->glist_lock[iq]); 853 854 tx_buffer_free(skb); 855 } 856 857 /** 858 * \brief Unmap and free gather buffer with response 859 * @param buf buffer 860 */ 861 static void free_netsgbuf_with_resp(void *buf) 862 { 863 struct octnet_buf_free_info *finfo; 864 struct octeon_soft_command *sc; 865 struct octnic_gather *g; 866 struct sk_buff *skb; 867 int i, frags, iq; 868 struct lio *lio; 869 870 sc = (struct octeon_soft_command *)buf; 871 skb = (struct sk_buff *)sc->callback_arg; 872 finfo = (struct octnet_buf_free_info *)&skb->cb; 873 874 lio = finfo->lio; 875 g = finfo->g; 876 frags = skb_shinfo(skb)->nr_frags; 877 878 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 879 g->sg[0].ptr[0], (skb->len - skb->data_len), 880 DMA_TO_DEVICE); 881 882 i = 1; 883 while (frags--) { 884 skb_frag_t *frag = 
&skb_shinfo(skb)->frags[i - 1]; 885 886 pci_unmap_page((lio->oct_dev)->pci_dev, 887 g->sg[(i >> 2)].ptr[(i & 3)], 888 skb_frag_size(frag), DMA_TO_DEVICE); 889 i++; 890 } 891 892 iq = skb_iq(lio->oct_dev, skb); 893 894 spin_lock(&lio->glist_lock[iq]); 895 list_add_tail(&g->list, &lio->glist[iq]); 896 spin_unlock(&lio->glist_lock[iq]); 897 898 /* Don't free the skb yet */ 899 } 900 901 /** 902 * \brief Net device open for LiquidIO 903 * @param netdev network device 904 */ 905 static int liquidio_open(struct net_device *netdev) 906 { 907 struct lio *lio = GET_LIO(netdev); 908 struct octeon_device *oct = lio->oct_dev; 909 struct octeon_device_priv *oct_priv = 910 (struct octeon_device_priv *)oct->priv; 911 struct napi_struct *napi, *n; 912 913 if (!oct->props[lio->ifidx].napi_enabled) { 914 tasklet_disable(&oct_priv->droq_tasklet); 915 916 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 917 napi_enable(napi); 918 919 oct->props[lio->ifidx].napi_enabled = 1; 920 921 oct->droq[0]->ops.poll_mode = 1; 922 } 923 924 ifstate_set(lio, LIO_IFSTATE_RUNNING); 925 926 /* Ready for link status updates */ 927 lio->intf_open = 1; 928 929 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 930 start_txqs(netdev); 931 932 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 933 lio->stats_wk.ctxptr = lio; 934 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 935 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 936 937 /* tell Octeon to start forwarding packets to host */ 938 send_rx_ctrl_cmd(lio, 1); 939 940 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 941 942 return 0; 943 } 944 945 /** 946 * \brief Net device stop for LiquidIO 947 * @param netdev network device 948 */ 949 static int liquidio_stop(struct net_device *netdev) 950 { 951 struct lio *lio = GET_LIO(netdev); 952 struct octeon_device *oct = lio->oct_dev; 953 struct octeon_device_priv *oct_priv = 954 (struct octeon_device_priv *)oct->priv; 955 struct napi_struct *napi, *n; 956 957 /* tell Octeon to stop forwarding packets to host */ 958 send_rx_ctrl_cmd(lio, 0); 959 960 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 961 /* Inform that netif carrier is down */ 962 lio->intf_open = 0; 963 lio->linfo.link.s.link_up = 0; 964 965 netif_carrier_off(netdev); 966 lio->link_changes++; 967 968 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 969 970 stop_txqs(netdev); 971 972 /* Wait for any pending Rx descriptors */ 973 if (lio_wait_for_clean_oq(oct)) 974 netif_info(lio, rx_err, lio->netdev, 975 "Proceeding with stop interface after partial RX desc processing\n"); 976 977 if (oct->props[lio->ifidx].napi_enabled == 1) { 978 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 979 napi_disable(napi); 980 981 oct->props[lio->ifidx].napi_enabled = 0; 982 983 oct->droq[0]->ops.poll_mode = 0; 984 985 tasklet_enable(&oct_priv->droq_tasklet); 986 } 987 988 cancel_delayed_work_sync(&lio->stats_wk.work); 989 990 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 991 992 return 0; 993 } 994 995 /** 996 * \brief Converts a mask based on net device flags 997 * @param netdev network device 998 * 999 * This routine generates a octnet_ifflags mask from the net device flags 1000 * received from the OS. 
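 * IFF_PROMISC, IFF_ALLMULTI, IFF_MULTICAST and IFF_BROADCAST map to the
 * corresponding OCTNET_IFFLAG_* bits; ALLMULTI is also set when the
 * multicast list exceeds MAX_OCTEON_MULTICAST_ADDR.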
1001 */ 1002 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1003 { 1004 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1005 1006 if (netdev->flags & IFF_PROMISC) 1007 f |= OCTNET_IFFLAG_PROMISC; 1008 1009 if (netdev->flags & IFF_ALLMULTI) 1010 f |= OCTNET_IFFLAG_ALLMULTI; 1011 1012 if (netdev->flags & IFF_MULTICAST) { 1013 f |= OCTNET_IFFLAG_MULTICAST; 1014 1015 /* Accept all multicast addresses if there are more than we 1016 * can handle 1017 */ 1018 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1019 f |= OCTNET_IFFLAG_ALLMULTI; 1020 } 1021 1022 if (netdev->flags & IFF_BROADCAST) 1023 f |= OCTNET_IFFLAG_BROADCAST; 1024 1025 return f; 1026 } 1027 1028 static void liquidio_set_uc_list(struct net_device *netdev) 1029 { 1030 struct lio *lio = GET_LIO(netdev); 1031 struct octeon_device *oct = lio->oct_dev; 1032 struct octnic_ctrl_pkt nctrl; 1033 struct netdev_hw_addr *ha; 1034 u64 *mac; 1035 1036 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1037 return; 1038 1039 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1040 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1041 return; 1042 } 1043 1044 lio->netdev_uc_count = netdev_uc_count(netdev); 1045 1046 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1047 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1048 nctrl.ncmd.s.more = lio->netdev_uc_count; 1049 nctrl.ncmd.s.param1 = oct->vf_num; 1050 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1051 nctrl.netpndev = (u64)netdev; 1052 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1053 1054 /* copy all the addresses into the udd */ 1055 mac = &nctrl.udd[0]; 1056 netdev_for_each_uc_addr(ha, netdev) { 1057 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1058 mac++; 1059 } 1060 1061 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1062 } 1063 1064 /** 1065 * \brief Net device set_multicast_list 1066 * @param netdev network device 1067 */ 1068 static void liquidio_set_mcast_list(struct net_device *netdev) 1069 { 1070 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1071 struct lio *lio = GET_LIO(netdev); 1072 struct octeon_device *oct = lio->oct_dev; 1073 struct octnic_ctrl_pkt nctrl; 1074 struct netdev_hw_addr *ha; 1075 u64 *mc; 1076 int ret; 1077 1078 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1079 1080 /* Create a ctrl pkt command to be sent to core app. */ 1081 nctrl.ncmd.u64 = 0; 1082 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1083 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1084 nctrl.ncmd.s.param2 = mc_count; 1085 nctrl.ncmd.s.more = mc_count; 1086 nctrl.netpndev = (u64)netdev; 1087 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1088 1089 /* copy all the addresses into the udd */ 1090 mc = &nctrl.udd[0]; 1091 netdev_for_each_mc_addr(ha, netdev) { 1092 *mc = 0; 1093 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1094 /* no need to swap bytes */ 1095 if (++mc > &nctrl.udd[mc_count]) 1096 break; 1097 } 1098 1099 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1100 1101 /* Apparently, any activity in this call from the kernel has to 1102 * be atomic. So we won't wait for response. 
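 * The control packet is simply queued to the firmware; any error is only
 * logged.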
1103 */ 1104 1105 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1106 if (ret) { 1107 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1108 ret); 1109 } 1110 1111 liquidio_set_uc_list(netdev); 1112 } 1113 1114 /** 1115 * \brief Net device set_mac_address 1116 * @param netdev network device 1117 */ 1118 static int liquidio_set_mac(struct net_device *netdev, void *p) 1119 { 1120 struct sockaddr *addr = (struct sockaddr *)p; 1121 struct lio *lio = GET_LIO(netdev); 1122 struct octeon_device *oct = lio->oct_dev; 1123 struct octnic_ctrl_pkt nctrl; 1124 int ret = 0; 1125 1126 if (!is_valid_ether_addr(addr->sa_data)) 1127 return -EADDRNOTAVAIL; 1128 1129 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1130 return 0; 1131 1132 if (lio->linfo.macaddr_is_admin_asgnd) 1133 return -EPERM; 1134 1135 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1136 1137 nctrl.ncmd.u64 = 0; 1138 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1139 nctrl.ncmd.s.param1 = 0; 1140 nctrl.ncmd.s.more = 1; 1141 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1142 nctrl.netpndev = (u64)netdev; 1143 1144 nctrl.udd[0] = 0; 1145 /* The MAC Address is presented in network byte order. */ 1146 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1147 1148 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1149 if (ret < 0) { 1150 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1151 return -ENOMEM; 1152 } 1153 1154 if (nctrl.sc_status == 1155 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { 1156 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); 1157 return -EPERM; 1158 } 1159 1160 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1161 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1162 1163 return 0; 1164 } 1165 1166 static void 1167 liquidio_get_stats64(struct net_device *netdev, 1168 struct rtnl_link_stats64 *lstats) 1169 { 1170 struct lio *lio = GET_LIO(netdev); 1171 struct octeon_device *oct; 1172 u64 pkts = 0, drop = 0, bytes = 0; 1173 struct oct_droq_stats *oq_stats; 1174 struct oct_iq_stats *iq_stats; 1175 int i, iq_no, oq_no; 1176 1177 oct = lio->oct_dev; 1178 1179 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1180 return; 1181 1182 for (i = 0; i < oct->num_iqs; i++) { 1183 iq_no = lio->linfo.txpciq[i].s.q_no; 1184 iq_stats = &oct->instr_queue[iq_no]->stats; 1185 pkts += iq_stats->tx_done; 1186 drop += iq_stats->tx_dropped; 1187 bytes += iq_stats->tx_tot_bytes; 1188 } 1189 1190 lstats->tx_packets = pkts; 1191 lstats->tx_bytes = bytes; 1192 lstats->tx_dropped = drop; 1193 1194 pkts = 0; 1195 drop = 0; 1196 bytes = 0; 1197 1198 for (i = 0; i < oct->num_oqs; i++) { 1199 oq_no = lio->linfo.rxpciq[i].s.q_no; 1200 oq_stats = &oct->droq[oq_no]->stats; 1201 pkts += oq_stats->rx_pkts_received; 1202 drop += (oq_stats->rx_dropped + 1203 oq_stats->dropped_nodispatch + 1204 oq_stats->dropped_toomany + 1205 oq_stats->dropped_nomem); 1206 bytes += oq_stats->rx_bytes_received; 1207 } 1208 1209 lstats->rx_bytes = bytes; 1210 lstats->rx_packets = pkts; 1211 lstats->rx_dropped = drop; 1212 1213 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 1214 1215 /* detailed rx_errors: */ 1216 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 1217 /* recved pkt with crc error */ 1218 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 1219 /* recv'd frame alignment error */ 1220 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 1221 1222 lstats->rx_errors = lstats->rx_length_errors + 
lstats->rx_crc_errors + 1223 lstats->rx_frame_errors; 1224 1225 /* detailed tx_errors */ 1226 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 1227 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 1228 1229 lstats->tx_errors = lstats->tx_aborted_errors + 1230 lstats->tx_carrier_errors; 1231 } 1232 1233 /** 1234 * \brief Handler for SIOCSHWTSTAMP ioctl 1235 * @param netdev network device 1236 * @param ifr interface request 1237 * @param cmd command 1238 */ 1239 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 1240 { 1241 struct lio *lio = GET_LIO(netdev); 1242 struct hwtstamp_config conf; 1243 1244 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 1245 return -EFAULT; 1246 1247 if (conf.flags) 1248 return -EINVAL; 1249 1250 switch (conf.tx_type) { 1251 case HWTSTAMP_TX_ON: 1252 case HWTSTAMP_TX_OFF: 1253 break; 1254 default: 1255 return -ERANGE; 1256 } 1257 1258 switch (conf.rx_filter) { 1259 case HWTSTAMP_FILTER_NONE: 1260 break; 1261 case HWTSTAMP_FILTER_ALL: 1262 case HWTSTAMP_FILTER_SOME: 1263 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1264 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1265 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1266 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1267 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1268 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1269 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1270 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1271 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1272 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1273 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1274 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1275 case HWTSTAMP_FILTER_NTP_ALL: 1276 conf.rx_filter = HWTSTAMP_FILTER_ALL; 1277 break; 1278 default: 1279 return -ERANGE; 1280 } 1281 1282 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 1283 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1284 1285 else 1286 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1287 1288 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 1289 } 1290 1291 /** 1292 * \brief ioctl handler 1293 * @param netdev network device 1294 * @param ifr interface request 1295 * @param cmd command 1296 */ 1297 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1298 { 1299 switch (cmd) { 1300 case SIOCSHWTSTAMP: 1301 return hwtstamp_ioctl(netdev, ifr); 1302 default: 1303 return -EOPNOTSUPP; 1304 } 1305 } 1306 1307 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 1308 { 1309 struct sk_buff *skb = (struct sk_buff *)buf; 1310 struct octnet_buf_free_info *finfo; 1311 struct oct_timestamp_resp *resp; 1312 struct octeon_soft_command *sc; 1313 struct lio *lio; 1314 1315 finfo = (struct octnet_buf_free_info *)skb->cb; 1316 lio = finfo->lio; 1317 sc = finfo->sc; 1318 oct = lio->oct_dev; 1319 resp = (struct oct_timestamp_resp *)sc->virtrptr; 1320 1321 if (status != OCTEON_REQUEST_DONE) { 1322 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 1323 CVM_CAST64(status)); 1324 resp->timestamp = 0; 1325 } 1326 1327 octeon_swap_8B_data(&resp->timestamp, 1); 1328 1329 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 1330 struct skb_shared_hwtstamps ts; 1331 u64 ns = resp->timestamp; 1332 1333 netif_info(lio, tx_done, lio->netdev, 1334 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 1335 skb, (unsigned long long)ns); 1336 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 1337 skb_tstamp_tx(skb, &ts); 1338 } 1339 1340 octeon_free_soft_command(oct, sc); 1341 tx_buffer_free(skb); 1342 } 1343 1344 /* \brief Send a data packet that will be timestamped 1345 * @param oct octeon device 1346 * @param ndata pointer to network data 1347 * @param finfo pointer to private network data 1348 */ 1349 static int send_nic_timestamp_pkt(struct octeon_device *oct, 1350 struct octnic_data_pkt *ndata, 1351 struct octnet_buf_free_info *finfo, 1352 int xmit_more) 1353 { 1354 struct octeon_soft_command *sc; 1355 int ring_doorbell; 1356 struct lio *lio; 1357 int retval; 1358 u32 len; 1359 1360 lio = finfo->lio; 1361 1362 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 1363 sizeof(struct oct_timestamp_resp)); 1364 finfo->sc = sc; 1365 1366 if (!sc) { 1367 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 1368 return IQ_SEND_FAILED; 1369 } 1370 1371 if (ndata->reqtype == REQTYPE_NORESP_NET) 1372 ndata->reqtype = REQTYPE_RESP_NET; 1373 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 1374 ndata->reqtype = REQTYPE_RESP_NET_SG; 1375 1376 sc->callback = handle_timestamp; 1377 sc->callback_arg = finfo->skb; 1378 sc->iq_no = ndata->q_no; 1379 1380 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 1381 1382 ring_doorbell = !xmit_more; 1383 1384 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 1385 sc, len, ndata->reqtype); 1386 1387 if (retval == IQ_SEND_FAILED) { 1388 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 1389 retval); 1390 octeon_free_soft_command(oct, sc); 1391 } else { 1392 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 1393 } 1394 1395 return retval; 1396 } 1397 1398 /** \brief Transmit networks packets to the Octeon interface 1399 * @param skbuff skbuff struct to be passed to network layer. 1400 * @param netdev pointer to network device 1401 * @returns whether the packet was transmitted to the device okay or not 1402 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 1403 */ 1404 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 1405 { 1406 struct octnet_buf_free_info *finfo; 1407 union octnic_cmd_setup cmdsetup; 1408 struct octnic_data_pkt ndata; 1409 struct octeon_instr_irh *irh; 1410 struct oct_iq_stats *stats; 1411 struct octeon_device *oct; 1412 int q_idx = 0, iq_no = 0; 1413 union tx_info *tx_info; 1414 int xmit_more = 0; 1415 struct lio *lio; 1416 int status = 0; 1417 u64 dptr = 0; 1418 u32 tag = 0; 1419 int j; 1420 1421 lio = GET_LIO(netdev); 1422 oct = lio->oct_dev; 1423 1424 q_idx = skb_iq(lio->oct_dev, skb); 1425 tag = q_idx; 1426 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 1427 1428 stats = &oct->instr_queue[iq_no]->stats; 1429 1430 /* Check for all conditions in which the current packet cannot be 1431 * transmitted. 
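 * (interface not running, link down, or a zero-length skb)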
1432 */ 1433 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 1434 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 1435 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 1436 lio->linfo.link.s.link_up); 1437 goto lio_xmit_failed; 1438 } 1439 1440 /* Use space in skb->cb to store info used to unmap and 1441 * free the buffers. 1442 */ 1443 finfo = (struct octnet_buf_free_info *)skb->cb; 1444 finfo->lio = lio; 1445 finfo->skb = skb; 1446 finfo->sc = NULL; 1447 1448 /* Prepare the attributes for the data to be passed to OSI. */ 1449 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 1450 1451 ndata.buf = finfo; 1452 1453 ndata.q_no = iq_no; 1454 1455 if (octnet_iq_is_full(oct, ndata.q_no)) { 1456 /* defer sending if queue is full */ 1457 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1458 ndata.q_no); 1459 stats->tx_iq_busy++; 1460 return NETDEV_TX_BUSY; 1461 } 1462 1463 ndata.datasize = skb->len; 1464 1465 cmdsetup.u64 = 0; 1466 cmdsetup.s.iq_no = iq_no; 1467 1468 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1469 if (skb->encapsulation) { 1470 cmdsetup.s.tnl_csum = 1; 1471 stats->tx_vxlan++; 1472 } else { 1473 cmdsetup.s.transport_csum = 1; 1474 } 1475 } 1476 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1477 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1478 cmdsetup.s.timestamp = 1; 1479 } 1480 1481 if (!skb_shinfo(skb)->nr_frags) { 1482 cmdsetup.s.u.datasize = skb->len; 1483 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1484 /* Offload checksum calculation for TCP/UDP packets */ 1485 dptr = dma_map_single(&oct->pci_dev->dev, 1486 skb->data, 1487 skb->len, 1488 DMA_TO_DEVICE); 1489 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 1490 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 1491 __func__); 1492 return NETDEV_TX_BUSY; 1493 } 1494 1495 ndata.cmd.cmd3.dptr = dptr; 1496 finfo->dptr = dptr; 1497 ndata.reqtype = REQTYPE_NORESP_NET; 1498 1499 } else { 1500 skb_frag_t *frag; 1501 struct octnic_gather *g; 1502 int i, frags; 1503 1504 spin_lock(&lio->glist_lock[q_idx]); 1505 g = (struct octnic_gather *) 1506 lio_list_delete_head(&lio->glist[q_idx]); 1507 spin_unlock(&lio->glist_lock[q_idx]); 1508 1509 if (!g) { 1510 netif_info(lio, tx_err, lio->netdev, 1511 "Transmit scatter gather: glist null!\n"); 1512 goto lio_xmit_failed; 1513 } 1514 1515 cmdsetup.s.gather = 1; 1516 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 1517 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1518 1519 memset(g->sg, 0, g->sg_size); 1520 1521 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 1522 skb->data, 1523 (skb->len - skb->data_len), 1524 DMA_TO_DEVICE); 1525 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 1526 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 1527 __func__); 1528 return NETDEV_TX_BUSY; 1529 } 1530 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 1531 1532 frags = skb_shinfo(skb)->nr_frags; 1533 i = 1; 1534 while (frags--) { 1535 frag = &skb_shinfo(skb)->frags[i - 1]; 1536 1537 g->sg[(i >> 2)].ptr[(i & 3)] = 1538 skb_frag_dma_map(&oct->pci_dev->dev, 1539 frag, 0, skb_frag_size(frag), 1540 DMA_TO_DEVICE); 1541 if (dma_mapping_error(&oct->pci_dev->dev, 1542 g->sg[i >> 2].ptr[i & 3])) { 1543 dma_unmap_single(&oct->pci_dev->dev, 1544 g->sg[0].ptr[0], 1545 skb->len - skb->data_len, 1546 DMA_TO_DEVICE); 1547 for (j = 1; j < i; j++) { 1548 frag = &skb_shinfo(skb)->frags[j - 1]; 1549 dma_unmap_page(&oct->pci_dev->dev, 1550 g->sg[j >> 2].ptr[j & 3], 1551 
skb_frag_size(frag), 1552 DMA_TO_DEVICE); 1553 } 1554 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 1555 __func__); 1556 return NETDEV_TX_BUSY; 1557 } 1558 1559 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 1560 (i & 3)); 1561 i++; 1562 } 1563 1564 dptr = g->sg_dma_ptr; 1565 1566 ndata.cmd.cmd3.dptr = dptr; 1567 finfo->dptr = dptr; 1568 finfo->g = g; 1569 1570 ndata.reqtype = REQTYPE_NORESP_NET_SG; 1571 } 1572 1573 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 1574 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 1575 1576 if (skb_shinfo(skb)->gso_size) { 1577 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 1578 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 1579 } 1580 1581 /* HW insert VLAN tag */ 1582 if (skb_vlan_tag_present(skb)) { 1583 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 1584 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 1585 } 1586 1587 xmit_more = netdev_xmit_more(); 1588 1589 if (unlikely(cmdsetup.s.timestamp)) 1590 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 1591 else 1592 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 1593 if (status == IQ_SEND_FAILED) 1594 goto lio_xmit_failed; 1595 1596 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 1597 1598 if (status == IQ_SEND_STOP) { 1599 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 1600 iq_no); 1601 netif_stop_subqueue(netdev, q_idx); 1602 } 1603 1604 netif_trans_update(netdev); 1605 1606 if (tx_info->s.gso_segs) 1607 stats->tx_done += tx_info->s.gso_segs; 1608 else 1609 stats->tx_done++; 1610 stats->tx_tot_bytes += ndata.datasize; 1611 1612 return NETDEV_TX_OK; 1613 1614 lio_xmit_failed: 1615 stats->tx_dropped++; 1616 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 1617 iq_no, stats->tx_dropped); 1618 if (dptr) 1619 dma_unmap_single(&oct->pci_dev->dev, dptr, 1620 ndata.datasize, DMA_TO_DEVICE); 1621 1622 octeon_ring_doorbell_locked(oct, iq_no); 1623 1624 tx_buffer_free(skb); 1625 return NETDEV_TX_OK; 1626 } 1627 1628 /** \brief Network device Tx timeout 1629 * @param netdev pointer to network device 1630 */ 1631 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1632 { 1633 struct lio *lio; 1634 1635 lio = GET_LIO(netdev); 1636 1637 netif_info(lio, tx_err, lio->netdev, 1638 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 1639 netdev->stats.tx_dropped); 1640 netif_trans_update(netdev); 1641 wake_txqs(netdev); 1642 } 1643 1644 static int 1645 liquidio_vlan_rx_add_vid(struct net_device *netdev, 1646 __be16 proto __attribute__((unused)), u16 vid) 1647 { 1648 struct lio *lio = GET_LIO(netdev); 1649 struct octeon_device *oct = lio->oct_dev; 1650 struct octnic_ctrl_pkt nctrl; 1651 int ret = 0; 1652 1653 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1654 1655 nctrl.ncmd.u64 = 0; 1656 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 1657 nctrl.ncmd.s.param1 = vid; 1658 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1659 nctrl.netpndev = (u64)netdev; 1660 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1661 1662 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1663 if (ret) { 1664 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 1665 ret); 1666 return -EPERM; 1667 } 1668 1669 return 0; 1670 } 1671 1672 static int 1673 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 1674 __be16 proto __attribute__((unused)), u16 vid) 1675 { 1676 struct lio *lio = GET_LIO(netdev); 1677 struct octeon_device *oct = 
lio->oct_dev; 1678 struct octnic_ctrl_pkt nctrl; 1679 int ret = 0; 1680 1681 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1682 1683 nctrl.ncmd.u64 = 0; 1684 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 1685 nctrl.ncmd.s.param1 = vid; 1686 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1687 nctrl.netpndev = (u64)netdev; 1688 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1689 1690 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1691 if (ret) { 1692 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 1693 ret); 1694 if (ret > 0) 1695 ret = -EIO; 1696 } 1697 return ret; 1698 } 1699 1700 /** Sending command to enable/disable RX checksum offload 1701 * @param netdev pointer to network device 1702 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 1703 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 1704 * OCTNET_CMD_RXCSUM_DISABLE 1705 * @returns SUCCESS or FAILURE 1706 */ 1707 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 1708 u8 rx_cmd) 1709 { 1710 struct lio *lio = GET_LIO(netdev); 1711 struct octeon_device *oct = lio->oct_dev; 1712 struct octnic_ctrl_pkt nctrl; 1713 int ret = 0; 1714 1715 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1716 1717 nctrl.ncmd.u64 = 0; 1718 nctrl.ncmd.s.cmd = command; 1719 nctrl.ncmd.s.param1 = rx_cmd; 1720 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1721 nctrl.netpndev = (u64)netdev; 1722 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1723 1724 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1725 if (ret) { 1726 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 1727 ret); 1728 if (ret > 0) 1729 ret = -EIO; 1730 } 1731 return ret; 1732 } 1733 1734 /** Sending command to add/delete VxLAN UDP port to firmware 1735 * @param netdev pointer to network device 1736 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 1737 * @param vxlan_port VxLAN port to be added or deleted 1738 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 1739 * OCTNET_CMD_VXLAN_PORT_DEL 1740 * @returns SUCCESS or FAILURE 1741 */ 1742 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 1743 u16 vxlan_port, u8 vxlan_cmd_bit) 1744 { 1745 struct lio *lio = GET_LIO(netdev); 1746 struct octeon_device *oct = lio->oct_dev; 1747 struct octnic_ctrl_pkt nctrl; 1748 int ret = 0; 1749 1750 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1751 1752 nctrl.ncmd.u64 = 0; 1753 nctrl.ncmd.s.cmd = command; 1754 nctrl.ncmd.s.more = vxlan_cmd_bit; 1755 nctrl.ncmd.s.param1 = vxlan_port; 1756 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1757 nctrl.netpndev = (u64)netdev; 1758 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1759 1760 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1761 if (ret) { 1762 dev_err(&oct->pci_dev->dev, 1763 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 1764 ret); 1765 if (ret > 0) 1766 ret = -EIO; 1767 } 1768 return ret; 1769 } 1770 1771 /** \brief Net device fix features 1772 * @param netdev pointer to network device 1773 * @param request features requested 1774 * @returns updated features list 1775 */ 1776 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 1777 netdev_features_t request) 1778 { 1779 struct lio *lio = netdev_priv(netdev); 1780 1781 if ((request & NETIF_F_RXCSUM) && 1782 !(lio->dev_capability & NETIF_F_RXCSUM)) 1783 request &= ~NETIF_F_RXCSUM; 1784 1785 if ((request & NETIF_F_HW_CSUM) && 1786 !(lio->dev_capability & NETIF_F_HW_CSUM)) 1787 request &= ~NETIF_F_HW_CSUM; 1788 1789 if ((request & NETIF_F_TSO) && 
!(lio->dev_capability & NETIF_F_TSO)) 1790 request &= ~NETIF_F_TSO; 1791 1792 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 1793 request &= ~NETIF_F_TSO6; 1794 1795 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 1796 request &= ~NETIF_F_LRO; 1797 1798 /* Disable LRO if RXCSUM is off */ 1799 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 1800 (lio->dev_capability & NETIF_F_LRO)) 1801 request &= ~NETIF_F_LRO; 1802 1803 return request; 1804 } 1805 1806 /** \brief Net device set features 1807 * @param netdev pointer to network device 1808 * @param features features to enable/disable 1809 */ 1810 static int liquidio_set_features(struct net_device *netdev, 1811 netdev_features_t features) 1812 { 1813 struct lio *lio = netdev_priv(netdev); 1814 1815 if (!((netdev->features ^ features) & NETIF_F_LRO)) 1816 return 0; 1817 1818 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 1819 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 1820 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1821 else if (!(features & NETIF_F_LRO) && 1822 (lio->dev_capability & NETIF_F_LRO)) 1823 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 1824 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1825 if (!(netdev->features & NETIF_F_RXCSUM) && 1826 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1827 (features & NETIF_F_RXCSUM)) 1828 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1829 OCTNET_CMD_RXCSUM_ENABLE); 1830 else if ((netdev->features & NETIF_F_RXCSUM) && 1831 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1832 !(features & NETIF_F_RXCSUM)) 1833 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1834 OCTNET_CMD_RXCSUM_DISABLE); 1835 1836 return 0; 1837 } 1838 1839 static void liquidio_add_vxlan_port(struct net_device *netdev, 1840 struct udp_tunnel_info *ti) 1841 { 1842 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 1843 return; 1844 1845 liquidio_vxlan_port_command(netdev, 1846 OCTNET_CMD_VXLAN_PORT_CONFIG, 1847 htons(ti->port), 1848 OCTNET_CMD_VXLAN_PORT_ADD); 1849 } 1850 1851 static void liquidio_del_vxlan_port(struct net_device *netdev, 1852 struct udp_tunnel_info *ti) 1853 { 1854 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 1855 return; 1856 1857 liquidio_vxlan_port_command(netdev, 1858 OCTNET_CMD_VXLAN_PORT_CONFIG, 1859 htons(ti->port), 1860 OCTNET_CMD_VXLAN_PORT_DEL); 1861 } 1862 1863 static const struct net_device_ops lionetdevops = { 1864 .ndo_open = liquidio_open, 1865 .ndo_stop = liquidio_stop, 1866 .ndo_start_xmit = liquidio_xmit, 1867 .ndo_get_stats64 = liquidio_get_stats64, 1868 .ndo_set_mac_address = liquidio_set_mac, 1869 .ndo_set_rx_mode = liquidio_set_mcast_list, 1870 .ndo_tx_timeout = liquidio_tx_timeout, 1871 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 1872 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 1873 .ndo_change_mtu = liquidio_change_mtu, 1874 .ndo_do_ioctl = liquidio_ioctl, 1875 .ndo_fix_features = liquidio_fix_features, 1876 .ndo_set_features = liquidio_set_features, 1877 .ndo_udp_tunnel_add = liquidio_add_vxlan_port, 1878 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 1879 }; 1880 1881 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 1882 { 1883 struct octeon_device *oct = (struct octeon_device *)buf; 1884 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 1885 union oct_link_status *ls; 1886 int gmxport = 0; 1887 int i; 1888 1889 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 1890 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, 
ifidx=%d\n", 1891 recv_pkt->buffer_size[0], 1892 recv_pkt->rh.r_nic_info.gmxport); 1893 goto nic_info_err; 1894 } 1895 1896 gmxport = recv_pkt->rh.r_nic_info.gmxport; 1897 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 1898 OCT_DROQ_INFO_SIZE); 1899 1900 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 1901 1902 for (i = 0; i < oct->ifcount; i++) { 1903 if (oct->props[i].gmxport == gmxport) { 1904 update_link_status(oct->props[i].netdev, ls); 1905 break; 1906 } 1907 } 1908 1909 nic_info_err: 1910 for (i = 0; i < recv_pkt->buffer_count; i++) 1911 recv_buffer_free(recv_pkt->buffer_ptr[i]); 1912 octeon_free_recv_info(recv_info); 1913 return 0; 1914 } 1915 1916 /** 1917 * \brief Setup network interfaces 1918 * @param octeon_dev octeon device 1919 * 1920 * Called during init time for each device. It assumes the NIC 1921 * is already up and running. The link information for each 1922 * interface is passed in link_info. 1923 */ 1924 static int setup_nic_devices(struct octeon_device *octeon_dev) 1925 { 1926 int retval, num_iqueues, num_oqueues; 1927 u32 resp_size, data_size; 1928 struct liquidio_if_cfg_resp *resp; 1929 struct octeon_soft_command *sc; 1930 union oct_nic_if_cfg if_cfg; 1931 struct octdev_props *props; 1932 struct net_device *netdev; 1933 struct lio_version *vdata; 1934 struct lio *lio = NULL; 1935 u8 mac[ETH_ALEN], i, j; 1936 u32 ifidx_or_pfnum; 1937 1938 ifidx_or_pfnum = octeon_dev->pf_num; 1939 1940 /* This is to handle link status changes */ 1941 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 1942 lio_nic_info, octeon_dev); 1943 1944 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 1945 * They are handled directly. 1946 */ 1947 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 1948 free_netbuf); 1949 1950 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 1951 free_netsgbuf); 1952 1953 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 1954 free_netsgbuf_with_resp); 1955 1956 for (i = 0; i < octeon_dev->ifcount; i++) { 1957 resp_size = sizeof(struct liquidio_if_cfg_resp); 1958 data_size = sizeof(struct lio_version); 1959 sc = (struct octeon_soft_command *) 1960 octeon_alloc_soft_command(octeon_dev, data_size, 1961 resp_size, 0); 1962 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1963 vdata = (struct lio_version *)sc->virtdptr; 1964 1965 *((u64 *)vdata) = 0; 1966 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1967 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1968 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1969 1970 if_cfg.u64 = 0; 1971 1972 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 1973 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 1974 if_cfg.s.base_queue = 0; 1975 1976 sc->iq_no = 0; 1977 1978 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 1979 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 1980 0); 1981 1982 init_completion(&sc->complete); 1983 sc->sc_status = OCTEON_REQUEST_PENDING; 1984 1985 retval = octeon_send_soft_command(octeon_dev, sc); 1986 if (retval == IQ_SEND_FAILED) { 1987 dev_err(&octeon_dev->pci_dev->dev, 1988 "iq/oq config failed status: %x\n", retval); 1989 /* Soft instr is freed by driver in case of failure. */ 1990 octeon_free_soft_command(octeon_dev, sc); 1991 return(-EIO); 1992 } 1993 1994 /* Sleep on a wait queue till the cond flag indicates that the 1995 * response arrived or timed-out. 
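 * A nonzero return from wait_for_sc_completion_timeout() aborts the
 * interface setup.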
	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, 0);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if_cfg.u64 = 0;

		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.base_queue = 0;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
					    0);

		init_completion(&sc->complete);
		sc->sc_status = OCTEON_REQUEST_PENDING;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n", retval);
			/* Soft instr is freed by driver in case of failure. */
			octeon_free_soft_command(octeon_dev, sc);
			return -EIO;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed, retval = %d\n", retval);
			WRITE_ONCE(sc->caller_is_done, true);
			return -EIO;
		}

		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
			 32, "%s",
			 resp->cfg_info.liquidio_firmware_version);

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!num_iqueues || !num_oqueues) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);

		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			WRITE_ONCE(sc->caller_is_done, true);
			goto setup_nic_dev_done;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;

		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
		lio->linfo.macaddr_is_admin_asgnd =
			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
		lio->linfo.macaddr_spoofchk =
			resp->cfg_info.linfo.macaddr_spoofchk;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		lio->dev_capability = NETIF_F_HIGHDMA
				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				      | NETIF_F_SG | NETIF_F_RXCSUM
				      | NETIF_F_TSO | NETIF_F_TSO6
				      | NETIF_F_GRO
				      | NETIF_F_LRO;
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features =
			(lio->enc_dev_capability & ~NETIF_F_LRO);
		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

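		/* netdev->features is what is enabled right now (the full
		 * capability set minus LRO, which stays opt-in), while
		 * hw_features below is the set the user may toggle at run
		 * time, e.g. "ethtool -K <iface> lro on".  VLAN CTAG RX
		 * stripping is dropped from hw_features, so it stays enabled
		 * and cannot be switched off from userspace.
		 */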
		netdev->hw_features = lio->dev_capability;
		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		WRITE_ONCE(sc->caller_is_done, true);

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < ETH_ALEN; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */
		ether_addr_copy(netdev->dev_addr, mac);

		if (liquidio_setup_io_queues(octeon_dev, i,
					     lio->linfo.num_txpciq,
					     lio->linfo.num_rxpciq)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_free;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		/* For VFs, enable Octeon device interrupts here,
		 * as this is contingent upon IO queue setup
		 */
		octeon_dev->fn_list.enable_interrupt(octeon_dev,
						     OCTEON_ALL_INTR);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_free;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_free;

		if (setup_rx_oom_poll_fn(netdev))
			goto setup_nic_dev_free;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_free;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Send a command to the firmware to enable Rx checksum offload
		 * by default when the driver sets up this device.
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_dev->no_speed_setting = 1;
	}

	return 0;

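/* Error handling: setup_nic_dev_free unwinds the interfaces that earlier
 * loop iterations finished setting up, then falls through to
 * setup_nic_dev_done, which reports the failure as -ENODEV.
 */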
setup_nic_dev_free:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}

setup_nic_dev_done:

	return -ENODEV;
}

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	int num_nic_ports = 1;
	int i, retval = 0;

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* Only the default iq and oq were initialized earlier; initialize
	 * the rest as well and run the port_config command for each port.
	 */
	oct->ifcount = num_nic_ports;
	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param oct octeon device
 */
static int octeon_device_init(struct octeon_device *oct)
{
	u32 rev_id;
	int j;

	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);

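	/* Each init step below advances oct->status to the matching
	 * OCT_DEV_*_DONE value once it succeeds, so the cleanup path can
	 * tell how far initialization got and undo only the steps that
	 * actually completed.
	 */
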
	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(oct))
		return 1;
	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);

	oct->chip_id = OCTEON_CN23XX_VF_VID;
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	if (cn23xx_setup_octeon_vf_device(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);

	oct->app_mode = CVM_DRV_NIC_APP;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
		return 1;
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(oct)) {
		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(oct)) {
		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);

	if (oct->fn_list.setup_mbox(oct)) {
		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);

	if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);

	/* Setup the interrupt handler and record the INT SUM register address */
	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
		return 1;

	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);

	/* ***************************************************************
	 * The interrupts need to be enabled for the PF<-->VF handshake.
	 * They are [re]-enabled after the PF<-->VF handshake so that the
	 * correct OQ tick value is used (i.e. the value retrieved from
	 * the PF as part of the handshake).
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	if (cn23xx_octeon_pfvf_handshake(oct))
		return 1;

	/* Here we [re]-enable the interrupts so that the correct OQ tick value
	 * is used (i.e. the value that was retrieved during the handshake)
	 */

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
	/* *************************************************************** */

	/* Enable the input and output queues for this Octeon device */
	if (oct->fn_list.enable_io_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
		return 1;
	}

	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);

	atomic_set(&oct->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < oct->num_oqs; j++)
		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (liquidio_init_nic_module(oct))
		return 1;

	return 0;
}

static int __init liquidio_vf_init(void)
{
	octeon_init_device_list(0);
	return pci_register_driver(&liquidio_vf_pci_driver);
}

static void __exit liquidio_vf_exit(void)
{
	pci_unregister_driver(&liquidio_vf_pci_driver);

	pr_info("LiquidIO_VF network module is now unloaded\n");
}

module_init(liquidio_vf_init);
module_exit(liquidio_vf_exit);