1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <net/vxlan.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn23xx_vf_device.h" 31 32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 34 MODULE_LICENSE("GPL"); 35 36 static int debug = -1; 37 module_param(debug, int, 0644); 38 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 39 40 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 41 42 struct oct_timestamp_resp { 43 u64 rh; 44 u64 timestamp; 45 u64 status; 46 }; 47 48 union tx_info { 49 u64 u64; 50 struct { 51 #ifdef __BIG_ENDIAN_BITFIELD 52 u16 gso_size; 53 u16 gso_segs; 54 u32 reserved; 55 #else 56 u32 reserved; 57 u16 gso_segs; 58 u16 gso_size; 59 #endif 60 } s; 61 }; 62 63 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 64 #define OCTNIC_GSO_MAX_SIZE \ 65 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 66 67 static int 68 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 69 static void liquidio_vf_remove(struct pci_dev *pdev); 70 static int octeon_device_init(struct octeon_device *oct); 71 static int liquidio_stop(struct net_device *netdev); 72 73 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 74 { 75 struct octeon_device_priv *oct_priv = 76 (struct octeon_device_priv *)oct->priv; 77 int retry = MAX_IO_PENDING_PKT_COUNT; 78 int pkt_cnt = 0, pending_pkts; 79 int i; 80 81 do { 82 pending_pkts = 0; 83 84 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 85 if (!(oct->io_qmask.oq & BIT_ULL(i))) 86 continue; 87 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 88 } 89 if (pkt_cnt > 0) { 90 pending_pkts += pkt_cnt; 91 tasklet_schedule(&oct_priv->droq_tasklet); 92 } 93 pkt_cnt = 0; 94 schedule_timeout_uninterruptible(1); 95 96 } while (retry-- && pending_pkts); 97 98 return pkt_cnt; 99 } 100 101 /** 102 * \brief Cause device to go quiet so it can be safely removed/reset/etc 103 * @param oct Pointer to Octeon device 104 */ 105 static void pcierror_quiesce_device(struct octeon_device *oct) 106 { 107 int i; 108 109 /* Disable the input and output queues now. No more packets will 110 * arrive from Octeon, but we should wait for all packet processing 111 * to finish. 112 */ 113 114 /* To allow for in-flight requests */ 115 schedule_timeout_uninterruptible(100); 116 117 if (wait_for_pending_requests(oct)) 118 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 119 120 /* Force all requests waiting to be fetched by OCTEON to complete. */ 121 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 122 struct octeon_instr_queue *iq; 123 124 if (!(oct->io_qmask.iq & BIT_ULL(i))) 125 continue; 126 iq = oct->instr_queue[i]; 127 128 if (atomic_read(&iq->instr_pending)) { 129 spin_lock_bh(&iq->lock); 130 iq->fill_cnt = 0; 131 iq->octeon_read_index = iq->host_write_index; 132 iq->stats.instr_processed += 133 atomic_read(&iq->instr_pending); 134 lio_process_iq_request_list(oct, iq, 0); 135 spin_unlock_bh(&iq->lock); 136 } 137 } 138 139 /* Force all pending ordered list requests to time out. */ 140 lio_process_ordered_list(oct, 1); 141 142 /* We do not need to wait for output queue packets to be processed. */ 143 } 144 145 /** 146 * \brief Cleanup PCI AER uncorrectable error status 147 * @param dev Pointer to PCI device 148 */ 149 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 150 { 151 u32 status, mask; 152 int pos = 0x100; 153 154 pr_info("%s :\n", __func__); 155 156 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 158 if (dev->error_state == pci_channel_io_normal) 159 status &= ~mask; /* Clear corresponding nonfatal bits */ 160 else 161 status &= mask; /* Clear corresponding fatal bits */ 162 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 163 } 164 165 /** 166 * \brief Stop all PCI IO to a given device 167 * @param dev Pointer to Octeon device 168 */ 169 static void stop_pci_io(struct octeon_device *oct) 170 { 171 struct msix_entry *msix_entries; 172 int i; 173 174 /* No more instructions will be forwarded. */ 175 atomic_set(&oct->status, OCT_DEV_IN_RESET); 176 177 for (i = 0; i < oct->ifcount; i++) 178 netif_device_detach(oct->props[i].netdev); 179 180 /* Disable interrupts */ 181 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 182 183 pcierror_quiesce_device(oct); 184 if (oct->msix_on) { 185 msix_entries = (struct msix_entry *)oct->msix_entries; 186 for (i = 0; i < oct->num_msix_irqs; i++) { 187 /* clear the affinity_cpumask */ 188 irq_set_affinity_hint(msix_entries[i].vector, 189 NULL); 190 free_irq(msix_entries[i].vector, 191 &oct->ioq_vector[i]); 192 } 193 pci_disable_msix(oct->pci_dev); 194 kfree(oct->msix_entries); 195 oct->msix_entries = NULL; 196 octeon_free_ioq_vector(oct); 197 } 198 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 199 lio_get_state_string(&oct->status)); 200 201 /* making it a common function for all OCTEON models */ 202 cleanup_aer_uncorrect_error_status(oct->pci_dev); 203 204 pci_disable_device(oct->pci_dev); 205 } 206 207 /** 208 * \brief called when PCI error is detected 209 * @param pdev Pointer to PCI device 210 * @param state The current pci connection state 211 * 212 * This function is called after a PCI bus error affecting 213 * this device has been detected. 214 */ 215 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 216 pci_channel_state_t state) 217 { 218 struct octeon_device *oct = pci_get_drvdata(pdev); 219 220 /* Non-correctable Non-fatal errors */ 221 if (state == pci_channel_io_normal) { 222 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 223 cleanup_aer_uncorrect_error_status(oct->pci_dev); 224 return PCI_ERS_RESULT_CAN_RECOVER; 225 } 226 227 /* Non-correctable Fatal errors */ 228 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 229 stop_pci_io(oct); 230 231 return PCI_ERS_RESULT_DISCONNECT; 232 } 233 234 /* For PCI-E Advanced Error Recovery (AER) Interface */ 235 static const struct pci_error_handlers liquidio_vf_err_handler = { 236 .error_detected = liquidio_pcie_error_detected, 237 }; 238 239 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 240 { 241 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 243 }, 244 { 245 0, 0, 0, 0, 0, 0, 0 246 } 247 }; 248 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 249 250 static struct pci_driver liquidio_vf_pci_driver = { 251 .name = "LiquidIO_VF", 252 .id_table = liquidio_vf_pci_tbl, 253 .probe = liquidio_vf_probe, 254 .remove = liquidio_vf_remove, 255 .err_handler = &liquidio_vf_err_handler, /* For AER */ 256 }; 257 258 /** 259 * \brief Print link information 260 * @param netdev network device 261 */ 262 static void print_link_info(struct net_device *netdev) 263 { 264 struct lio *lio = GET_LIO(netdev); 265 266 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && 267 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { 268 struct oct_link_info *linfo = &lio->linfo; 269 270 if (linfo->link.s.link_up) { 271 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 272 linfo->link.s.speed, 273 (linfo->link.s.duplex) ? "Full" : "Half"); 274 } else { 275 netif_info(lio, link, lio->netdev, "Link Down\n"); 276 } 277 } 278 } 279 280 /** 281 * \brief Routine to notify MTU change 282 * @param work work_struct data structure 283 */ 284 static void octnet_link_status_change(struct work_struct *work) 285 { 286 struct cavium_wk *wk = (struct cavium_wk *)work; 287 struct lio *lio = (struct lio *)wk->ctxptr; 288 289 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. 290 * this API is invoked only when new max-MTU of the interface is 291 * less than current MTU. 292 */ 293 rtnl_lock(); 294 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); 295 rtnl_unlock(); 296 } 297 298 /** 299 * \brief Sets up the mtu status change work 300 * @param netdev network device 301 */ 302 static int setup_link_status_change_wq(struct net_device *netdev) 303 { 304 struct lio *lio = GET_LIO(netdev); 305 struct octeon_device *oct = lio->oct_dev; 306 307 lio->link_status_wq.wq = alloc_workqueue("link-status", 308 WQ_MEM_RECLAIM, 0); 309 if (!lio->link_status_wq.wq) { 310 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 311 return -1; 312 } 313 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 314 octnet_link_status_change); 315 lio->link_status_wq.wk.ctxptr = lio; 316 317 return 0; 318 } 319 320 static void cleanup_link_status_change_wq(struct net_device *netdev) 321 { 322 struct lio *lio = GET_LIO(netdev); 323 324 if (lio->link_status_wq.wq) { 325 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 326 destroy_workqueue(lio->link_status_wq.wq); 327 } 328 } 329 330 /** 331 * \brief Update link status 332 * @param netdev network device 333 * @param ls link status structure 334 * 335 * Called on receipt of a link status response from the core application to 336 * update each interface's link status. 337 */ 338 static void update_link_status(struct net_device *netdev, 339 union oct_link_status *ls) 340 { 341 struct lio *lio = GET_LIO(netdev); 342 int current_max_mtu = lio->linfo.link.s.mtu; 343 struct octeon_device *oct = lio->oct_dev; 344 345 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 346 lio->linfo.link.u64 = ls->u64; 347 348 print_link_info(netdev); 349 lio->link_changes++; 350 351 if (lio->linfo.link.s.link_up) { 352 netif_carrier_on(netdev); 353 wake_txqs(netdev); 354 } else { 355 netif_carrier_off(netdev); 356 stop_txqs(netdev); 357 } 358 359 if (lio->linfo.link.s.mtu != current_max_mtu) { 360 dev_info(&oct->pci_dev->dev, 361 "Max MTU Changed from %d to %d\n", 362 current_max_mtu, lio->linfo.link.s.mtu); 363 netdev->max_mtu = lio->linfo.link.s.mtu; 364 } 365 366 if (lio->linfo.link.s.mtu < netdev->mtu) { 367 dev_warn(&oct->pci_dev->dev, 368 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", 369 netdev->mtu, lio->linfo.link.s.mtu); 370 queue_delayed_work(lio->link_status_wq.wq, 371 &lio->link_status_wq.wk.work, 0); 372 } 373 } 374 } 375 376 /** 377 * \brief PCI probe handler 378 * @param pdev PCI device structure 379 * @param ent unused 380 */ 381 static int 382 liquidio_vf_probe(struct pci_dev *pdev, 383 const struct pci_device_id *ent __attribute__((unused))) 384 { 385 struct octeon_device *oct_dev = NULL; 386 387 oct_dev = octeon_allocate_device(pdev->device, 388 sizeof(struct octeon_device_priv)); 389 390 if (!oct_dev) { 391 dev_err(&pdev->dev, "Unable to allocate device\n"); 392 return -ENOMEM; 393 } 394 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 395 396 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 397 (u32)pdev->vendor, (u32)pdev->device); 398 399 /* Assign octeon_device for this device to the private data area. */ 400 pci_set_drvdata(pdev, oct_dev); 401 402 /* set linux specific device pointer */ 403 oct_dev->pci_dev = pdev; 404 405 oct_dev->subsystem_id = pdev->subsystem_vendor | 406 (pdev->subsystem_device << 16); 407 408 if (octeon_device_init(oct_dev)) { 409 liquidio_vf_remove(pdev); 410 return -ENOMEM; 411 } 412 413 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 414 415 return 0; 416 } 417 418 /** 419 * \brief PCI FLR for each Octeon device. 420 * @param oct octeon device 421 */ 422 static void octeon_pci_flr(struct octeon_device *oct) 423 { 424 pci_save_state(oct->pci_dev); 425 426 pci_cfg_access_lock(oct->pci_dev); 427 428 /* Quiesce the device completely */ 429 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 430 PCI_COMMAND_INTX_DISABLE); 431 432 pcie_flr(oct->pci_dev); 433 434 pci_cfg_access_unlock(oct->pci_dev); 435 436 pci_restore_state(oct->pci_dev); 437 } 438 439 /** 440 *\brief Destroy resources associated with octeon device 441 * @param pdev PCI device structure 442 * @param ent unused 443 */ 444 static void octeon_destroy_resources(struct octeon_device *oct) 445 { 446 struct octeon_device_priv *oct_priv = 447 (struct octeon_device_priv *)oct->priv; 448 struct msix_entry *msix_entries; 449 int i; 450 451 switch (atomic_read(&oct->status)) { 452 case OCT_DEV_RUNNING: 453 case OCT_DEV_CORE_OK: 454 /* No more instructions will be forwarded. */ 455 atomic_set(&oct->status, OCT_DEV_IN_RESET); 456 457 oct->app_mode = CVM_DRV_INVALID_APP; 458 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 459 lio_get_state_string(&oct->status)); 460 461 schedule_timeout_uninterruptible(HZ / 10); 462 463 /* fallthrough */ 464 case OCT_DEV_HOST_OK: 465 /* fallthrough */ 466 case OCT_DEV_IO_QUEUES_DONE: 467 if (lio_wait_for_instr_fetch(oct)) 468 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 469 470 if (wait_for_pending_requests(oct)) 471 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 472 473 /* Disable the input and output queues now. No more packets will 474 * arrive from Octeon, but we should wait for all packet 475 * processing to finish. 476 */ 477 oct->fn_list.disable_io_queues(oct); 478 479 if (lio_wait_for_oq_pkts(oct)) 480 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 481 482 /* Force all requests waiting to be fetched by OCTEON to 483 * complete. 484 */ 485 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 486 struct octeon_instr_queue *iq; 487 488 if (!(oct->io_qmask.iq & BIT_ULL(i))) 489 continue; 490 iq = oct->instr_queue[i]; 491 492 if (atomic_read(&iq->instr_pending)) { 493 spin_lock_bh(&iq->lock); 494 iq->fill_cnt = 0; 495 iq->octeon_read_index = iq->host_write_index; 496 iq->stats.instr_processed += 497 atomic_read(&iq->instr_pending); 498 lio_process_iq_request_list(oct, iq, 0); 499 spin_unlock_bh(&iq->lock); 500 } 501 } 502 503 lio_process_ordered_list(oct, 1); 504 octeon_free_sc_done_list(oct); 505 octeon_free_sc_zombie_list(oct); 506 507 /* fall through */ 508 case OCT_DEV_INTR_SET_DONE: 509 /* Disable interrupts */ 510 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 511 512 if (oct->msix_on) { 513 msix_entries = (struct msix_entry *)oct->msix_entries; 514 for (i = 0; i < oct->num_msix_irqs; i++) { 515 if (oct->ioq_vector[i].vector) { 516 irq_set_affinity_hint( 517 msix_entries[i].vector, 518 NULL); 519 free_irq(msix_entries[i].vector, 520 &oct->ioq_vector[i]); 521 oct->ioq_vector[i].vector = 0; 522 } 523 } 524 pci_disable_msix(oct->pci_dev); 525 kfree(oct->msix_entries); 526 oct->msix_entries = NULL; 527 kfree(oct->irq_name_storage); 528 oct->irq_name_storage = NULL; 529 } 530 /* Soft reset the octeon device before exiting */ 531 if (oct->pci_dev->reset_fn) 532 octeon_pci_flr(oct); 533 else 534 cn23xx_vf_ask_pf_to_do_flr(oct); 535 536 /* fallthrough */ 537 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 538 octeon_free_ioq_vector(oct); 539 540 /* fallthrough */ 541 case OCT_DEV_MBOX_SETUP_DONE: 542 oct->fn_list.free_mbox(oct); 543 544 /* fallthrough */ 545 case OCT_DEV_IN_RESET: 546 case OCT_DEV_DROQ_INIT_DONE: 547 mdelay(100); 548 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 549 if (!(oct->io_qmask.oq & BIT_ULL(i))) 550 continue; 551 octeon_delete_droq(oct, i); 552 } 553 554 /* fallthrough */ 555 case OCT_DEV_RESP_LIST_INIT_DONE: 556 octeon_delete_response_list(oct); 557 558 /* fallthrough */ 559 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 560 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 561 if (!(oct->io_qmask.iq & BIT_ULL(i))) 562 continue; 563 octeon_delete_instr_queue(oct, i); 564 } 565 566 /* fallthrough */ 567 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 568 octeon_free_sc_buffer_pool(oct); 569 570 /* fallthrough */ 571 case OCT_DEV_DISPATCH_INIT_DONE: 572 octeon_delete_dispatch_list(oct); 573 cancel_delayed_work_sync(&oct->nic_poll_work.work); 574 575 /* fallthrough */ 576 case OCT_DEV_PCI_MAP_DONE: 577 octeon_unmap_pci_barx(oct, 0); 578 octeon_unmap_pci_barx(oct, 1); 579 580 /* fallthrough */ 581 case OCT_DEV_PCI_ENABLE_DONE: 582 pci_clear_master(oct->pci_dev); 583 /* Disable the device, releasing the PCI INT */ 584 pci_disable_device(oct->pci_dev); 585 586 /* fallthrough */ 587 case OCT_DEV_BEGIN_STATE: 588 /* Nothing to be done here either */ 589 break; 590 } 591 592 tasklet_kill(&oct_priv->droq_tasklet); 593 } 594 595 /** 596 * \brief Send Rx control command 597 * @param lio per-network private data 598 * @param start_stop whether to start or stop 599 */ 600 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 601 { 602 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 603 struct octeon_soft_command *sc; 604 union octnet_cmd *ncmd; 605 int retval; 606 607 if (oct->props[lio->ifidx].rx_on == start_stop) 608 return; 609 610 sc = (struct octeon_soft_command *) 611 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 612 16, 0); 613 614 ncmd = (union octnet_cmd *)sc->virtdptr; 615 616 ncmd->u64 = 0; 617 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 618 ncmd->s.param1 = start_stop; 619 620 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 621 622 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 623 624 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 625 OPCODE_NIC_CMD, 0, 0, 0); 626 627 init_completion(&sc->complete); 628 sc->sc_status = OCTEON_REQUEST_PENDING; 629 630 retval = octeon_send_soft_command(oct, sc); 631 if (retval == IQ_SEND_FAILED) { 632 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 633 octeon_free_soft_command(oct, sc); 634 } else { 635 /* Sleep on a wait queue till the cond flag indicates that the 636 * response arrived or timed-out. 637 */ 638 retval = wait_for_sc_completion_timeout(oct, sc, 0); 639 if (retval) 640 return; 641 642 oct->props[lio->ifidx].rx_on = start_stop; 643 WRITE_ONCE(sc->caller_is_done, true); 644 } 645 } 646 647 /** 648 * \brief Destroy NIC device interface 649 * @param oct octeon device 650 * @param ifidx which interface to destroy 651 * 652 * Cleanup associated with each interface for an Octeon device when NIC 653 * module is being unloaded or if initialization fails during load. 654 */ 655 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 656 { 657 struct net_device *netdev = oct->props[ifidx].netdev; 658 struct octeon_device_priv *oct_priv = 659 (struct octeon_device_priv *)oct->priv; 660 struct napi_struct *napi, *n; 661 struct lio *lio; 662 663 if (!netdev) { 664 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 665 __func__, ifidx); 666 return; 667 } 668 669 lio = GET_LIO(netdev); 670 671 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 672 673 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 674 liquidio_stop(netdev); 675 676 if (oct->props[lio->ifidx].napi_enabled == 1) { 677 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 678 napi_disable(napi); 679 680 oct->props[lio->ifidx].napi_enabled = 0; 681 682 oct->droq[0]->ops.poll_mode = 0; 683 } 684 685 /* Delete NAPI */ 686 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 687 netif_napi_del(napi); 688 689 tasklet_enable(&oct_priv->droq_tasklet); 690 691 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 692 unregister_netdev(netdev); 693 694 cleanup_rx_oom_poll_fn(netdev); 695 696 cleanup_link_status_change_wq(netdev); 697 698 lio_delete_glists(lio); 699 700 free_netdev(netdev); 701 702 oct->props[ifidx].gmxport = -1; 703 704 oct->props[ifidx].netdev = NULL; 705 } 706 707 /** 708 * \brief Stop complete NIC functionality 709 * @param oct octeon device 710 */ 711 static int liquidio_stop_nic_module(struct octeon_device *oct) 712 { 713 struct lio *lio; 714 int i, j; 715 716 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 717 if (!oct->ifcount) { 718 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 719 return 1; 720 } 721 722 spin_lock_bh(&oct->cmd_resp_wqlock); 723 oct->cmd_resp_state = OCT_DRV_OFFLINE; 724 spin_unlock_bh(&oct->cmd_resp_wqlock); 725 726 for (i = 0; i < oct->ifcount; i++) { 727 lio = GET_LIO(oct->props[i].netdev); 728 for (j = 0; j < oct->num_oqs; j++) 729 octeon_unregister_droq_ops(oct, 730 lio->linfo.rxpciq[j].s.q_no); 731 } 732 733 for (i = 0; i < oct->ifcount; i++) 734 liquidio_destroy_nic_device(oct, i); 735 736 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 737 return 0; 738 } 739 740 /** 741 * \brief Cleans up resources at unload time 742 * @param pdev PCI device structure 743 */ 744 static void liquidio_vf_remove(struct pci_dev *pdev) 745 { 746 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 747 748 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 749 750 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 751 liquidio_stop_nic_module(oct_dev); 752 753 /* Reset the octeon device and cleanup all memory allocated for 754 * the octeon device by driver. 755 */ 756 octeon_destroy_resources(oct_dev); 757 758 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 759 760 /* This octeon device has been removed. Update the global 761 * data structure to reflect this. Free the device structure. 762 */ 763 octeon_free_device_mem(oct_dev); 764 } 765 766 /** 767 * \brief PCI initialization for each Octeon device. 768 * @param oct octeon device 769 */ 770 static int octeon_pci_os_setup(struct octeon_device *oct) 771 { 772 #ifdef CONFIG_PCI_IOV 773 /* setup PCI stuff first */ 774 if (!oct->pci_dev->physfn) 775 octeon_pci_flr(oct); 776 #endif 777 778 if (pci_enable_device(oct->pci_dev)) { 779 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 780 return 1; 781 } 782 783 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 784 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 785 pci_disable_device(oct->pci_dev); 786 return 1; 787 } 788 789 /* Enable PCI DMA Master. */ 790 pci_set_master(oct->pci_dev); 791 792 return 0; 793 } 794 795 /** 796 * \brief Unmap and free network buffer 797 * @param buf buffer 798 */ 799 static void free_netbuf(void *buf) 800 { 801 struct octnet_buf_free_info *finfo; 802 struct sk_buff *skb; 803 struct lio *lio; 804 805 finfo = (struct octnet_buf_free_info *)buf; 806 skb = finfo->skb; 807 lio = finfo->lio; 808 809 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 810 DMA_TO_DEVICE); 811 812 tx_buffer_free(skb); 813 } 814 815 /** 816 * \brief Unmap and free gather buffer 817 * @param buf buffer 818 */ 819 static void free_netsgbuf(void *buf) 820 { 821 struct octnet_buf_free_info *finfo; 822 struct octnic_gather *g; 823 struct sk_buff *skb; 824 int i, frags, iq; 825 struct lio *lio; 826 827 finfo = (struct octnet_buf_free_info *)buf; 828 skb = finfo->skb; 829 lio = finfo->lio; 830 g = finfo->g; 831 frags = skb_shinfo(skb)->nr_frags; 832 833 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 834 g->sg[0].ptr[0], (skb->len - skb->data_len), 835 DMA_TO_DEVICE); 836 837 i = 1; 838 while (frags--) { 839 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 840 841 pci_unmap_page((lio->oct_dev)->pci_dev, 842 g->sg[(i >> 2)].ptr[(i & 3)], 843 skb_frag_size(frag), DMA_TO_DEVICE); 844 i++; 845 } 846 847 iq = skb_iq(lio->oct_dev, skb); 848 849 spin_lock(&lio->glist_lock[iq]); 850 list_add_tail(&g->list, &lio->glist[iq]); 851 spin_unlock(&lio->glist_lock[iq]); 852 853 tx_buffer_free(skb); 854 } 855 856 /** 857 * \brief Unmap and free gather buffer with response 858 * @param buf buffer 859 */ 860 static void free_netsgbuf_with_resp(void *buf) 861 { 862 struct octnet_buf_free_info *finfo; 863 struct octeon_soft_command *sc; 864 struct octnic_gather *g; 865 struct sk_buff *skb; 866 int i, frags, iq; 867 struct lio *lio; 868 869 sc = (struct octeon_soft_command *)buf; 870 skb = (struct sk_buff *)sc->callback_arg; 871 finfo = (struct octnet_buf_free_info *)&skb->cb; 872 873 lio = finfo->lio; 874 g = finfo->g; 875 frags = skb_shinfo(skb)->nr_frags; 876 877 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 878 g->sg[0].ptr[0], (skb->len - skb->data_len), 879 DMA_TO_DEVICE); 880 881 i = 1; 882 while (frags--) { 883 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 884 885 pci_unmap_page((lio->oct_dev)->pci_dev, 886 g->sg[(i >> 2)].ptr[(i & 3)], 887 skb_frag_size(frag), DMA_TO_DEVICE); 888 i++; 889 } 890 891 iq = skb_iq(lio->oct_dev, skb); 892 893 spin_lock(&lio->glist_lock[iq]); 894 list_add_tail(&g->list, &lio->glist[iq]); 895 spin_unlock(&lio->glist_lock[iq]); 896 897 /* Don't free the skb yet */ 898 } 899 900 /** 901 * \brief Net device open for LiquidIO 902 * @param netdev network device 903 */ 904 static int liquidio_open(struct net_device *netdev) 905 { 906 struct lio *lio = GET_LIO(netdev); 907 struct octeon_device *oct = lio->oct_dev; 908 struct octeon_device_priv *oct_priv = 909 (struct octeon_device_priv *)oct->priv; 910 struct napi_struct *napi, *n; 911 912 if (!oct->props[lio->ifidx].napi_enabled) { 913 tasklet_disable(&oct_priv->droq_tasklet); 914 915 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 916 napi_enable(napi); 917 918 oct->props[lio->ifidx].napi_enabled = 1; 919 920 oct->droq[0]->ops.poll_mode = 1; 921 } 922 923 ifstate_set(lio, LIO_IFSTATE_RUNNING); 924 925 /* Ready for link status updates */ 926 lio->intf_open = 1; 927 928 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 929 start_txqs(netdev); 930 931 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); 932 lio->stats_wk.ctxptr = lio; 933 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies 934 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); 935 936 /* tell Octeon to start forwarding packets to host */ 937 send_rx_ctrl_cmd(lio, 1); 938 939 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 940 941 return 0; 942 } 943 944 /** 945 * \brief Net device stop for LiquidIO 946 * @param netdev network device 947 */ 948 static int liquidio_stop(struct net_device *netdev) 949 { 950 struct lio *lio = GET_LIO(netdev); 951 struct octeon_device *oct = lio->oct_dev; 952 struct octeon_device_priv *oct_priv = 953 (struct octeon_device_priv *)oct->priv; 954 struct napi_struct *napi, *n; 955 956 /* tell Octeon to stop forwarding packets to host */ 957 send_rx_ctrl_cmd(lio, 0); 958 959 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 960 /* Inform that netif carrier is down */ 961 lio->intf_open = 0; 962 lio->linfo.link.s.link_up = 0; 963 964 netif_carrier_off(netdev); 965 lio->link_changes++; 966 967 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 968 969 stop_txqs(netdev); 970 971 /* Wait for any pending Rx descriptors */ 972 if (lio_wait_for_clean_oq(oct)) 973 netif_info(lio, rx_err, lio->netdev, 974 "Proceeding with stop interface after partial RX desc processing\n"); 975 976 if (oct->props[lio->ifidx].napi_enabled == 1) { 977 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 978 napi_disable(napi); 979 980 oct->props[lio->ifidx].napi_enabled = 0; 981 982 oct->droq[0]->ops.poll_mode = 0; 983 984 tasklet_enable(&oct_priv->droq_tasklet); 985 } 986 987 cancel_delayed_work_sync(&lio->stats_wk.work); 988 989 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 990 991 return 0; 992 } 993 994 /** 995 * \brief Converts a mask based on net device flags 996 * @param netdev network device 997 * 998 * This routine generates a octnet_ifflags mask from the net device flags 999 * received from the OS. 1000 */ 1001 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1002 { 1003 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1004 1005 if (netdev->flags & IFF_PROMISC) 1006 f |= OCTNET_IFFLAG_PROMISC; 1007 1008 if (netdev->flags & IFF_ALLMULTI) 1009 f |= OCTNET_IFFLAG_ALLMULTI; 1010 1011 if (netdev->flags & IFF_MULTICAST) { 1012 f |= OCTNET_IFFLAG_MULTICAST; 1013 1014 /* Accept all multicast addresses if there are more than we 1015 * can handle 1016 */ 1017 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1018 f |= OCTNET_IFFLAG_ALLMULTI; 1019 } 1020 1021 if (netdev->flags & IFF_BROADCAST) 1022 f |= OCTNET_IFFLAG_BROADCAST; 1023 1024 return f; 1025 } 1026 1027 static void liquidio_set_uc_list(struct net_device *netdev) 1028 { 1029 struct lio *lio = GET_LIO(netdev); 1030 struct octeon_device *oct = lio->oct_dev; 1031 struct octnic_ctrl_pkt nctrl; 1032 struct netdev_hw_addr *ha; 1033 u64 *mac; 1034 1035 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1036 return; 1037 1038 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1039 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1040 return; 1041 } 1042 1043 lio->netdev_uc_count = netdev_uc_count(netdev); 1044 1045 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1046 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1047 nctrl.ncmd.s.more = lio->netdev_uc_count; 1048 nctrl.ncmd.s.param1 = oct->vf_num; 1049 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1050 nctrl.netpndev = (u64)netdev; 1051 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1052 1053 /* copy all the addresses into the udd */ 1054 mac = &nctrl.udd[0]; 1055 netdev_for_each_uc_addr(ha, netdev) { 1056 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1057 mac++; 1058 } 1059 1060 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1061 } 1062 1063 /** 1064 * \brief Net device set_multicast_list 1065 * @param netdev network device 1066 */ 1067 static void liquidio_set_mcast_list(struct net_device *netdev) 1068 { 1069 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1070 struct lio *lio = GET_LIO(netdev); 1071 struct octeon_device *oct = lio->oct_dev; 1072 struct octnic_ctrl_pkt nctrl; 1073 struct netdev_hw_addr *ha; 1074 u64 *mc; 1075 int ret; 1076 1077 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1078 1079 /* Create a ctrl pkt command to be sent to core app. */ 1080 nctrl.ncmd.u64 = 0; 1081 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1082 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1083 nctrl.ncmd.s.param2 = mc_count; 1084 nctrl.ncmd.s.more = mc_count; 1085 nctrl.netpndev = (u64)netdev; 1086 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1087 1088 /* copy all the addresses into the udd */ 1089 mc = &nctrl.udd[0]; 1090 netdev_for_each_mc_addr(ha, netdev) { 1091 *mc = 0; 1092 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1093 /* no need to swap bytes */ 1094 if (++mc > &nctrl.udd[mc_count]) 1095 break; 1096 } 1097 1098 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1099 1100 /* Apparently, any activity in this call from the kernel has to 1101 * be atomic. So we won't wait for response. 1102 */ 1103 1104 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1105 if (ret) { 1106 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1107 ret); 1108 } 1109 1110 liquidio_set_uc_list(netdev); 1111 } 1112 1113 /** 1114 * \brief Net device set_mac_address 1115 * @param netdev network device 1116 */ 1117 static int liquidio_set_mac(struct net_device *netdev, void *p) 1118 { 1119 struct sockaddr *addr = (struct sockaddr *)p; 1120 struct lio *lio = GET_LIO(netdev); 1121 struct octeon_device *oct = lio->oct_dev; 1122 struct octnic_ctrl_pkt nctrl; 1123 int ret = 0; 1124 1125 if (!is_valid_ether_addr(addr->sa_data)) 1126 return -EADDRNOTAVAIL; 1127 1128 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1129 return 0; 1130 1131 if (lio->linfo.macaddr_is_admin_asgnd) 1132 return -EPERM; 1133 1134 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1135 1136 nctrl.ncmd.u64 = 0; 1137 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1138 nctrl.ncmd.s.param1 = 0; 1139 nctrl.ncmd.s.more = 1; 1140 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1141 nctrl.netpndev = (u64)netdev; 1142 1143 nctrl.udd[0] = 0; 1144 /* The MAC Address is presented in network byte order. */ 1145 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1146 1147 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1148 if (ret < 0) { 1149 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1150 return -ENOMEM; 1151 } 1152 1153 if (nctrl.sc_status == 1154 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { 1155 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); 1156 return -EPERM; 1157 } 1158 1159 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1160 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1161 1162 return 0; 1163 } 1164 1165 static void 1166 liquidio_get_stats64(struct net_device *netdev, 1167 struct rtnl_link_stats64 *lstats) 1168 { 1169 struct lio *lio = GET_LIO(netdev); 1170 struct octeon_device *oct; 1171 u64 pkts = 0, drop = 0, bytes = 0; 1172 struct oct_droq_stats *oq_stats; 1173 struct oct_iq_stats *iq_stats; 1174 int i, iq_no, oq_no; 1175 1176 oct = lio->oct_dev; 1177 1178 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1179 return; 1180 1181 for (i = 0; i < oct->num_iqs; i++) { 1182 iq_no = lio->linfo.txpciq[i].s.q_no; 1183 iq_stats = &oct->instr_queue[iq_no]->stats; 1184 pkts += iq_stats->tx_done; 1185 drop += iq_stats->tx_dropped; 1186 bytes += iq_stats->tx_tot_bytes; 1187 } 1188 1189 lstats->tx_packets = pkts; 1190 lstats->tx_bytes = bytes; 1191 lstats->tx_dropped = drop; 1192 1193 pkts = 0; 1194 drop = 0; 1195 bytes = 0; 1196 1197 for (i = 0; i < oct->num_oqs; i++) { 1198 oq_no = lio->linfo.rxpciq[i].s.q_no; 1199 oq_stats = &oct->droq[oq_no]->stats; 1200 pkts += oq_stats->rx_pkts_received; 1201 drop += (oq_stats->rx_dropped + 1202 oq_stats->dropped_nodispatch + 1203 oq_stats->dropped_toomany + 1204 oq_stats->dropped_nomem); 1205 bytes += oq_stats->rx_bytes_received; 1206 } 1207 1208 lstats->rx_bytes = bytes; 1209 lstats->rx_packets = pkts; 1210 lstats->rx_dropped = drop; 1211 1212 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 1213 1214 /* detailed rx_errors: */ 1215 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 1216 /* recved pkt with crc error */ 1217 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 1218 /* recv'd frame alignment error */ 1219 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 1220 1221 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + 1222 lstats->rx_frame_errors; 1223 1224 /* detailed tx_errors */ 1225 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 1226 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 1227 1228 lstats->tx_errors = lstats->tx_aborted_errors + 1229 lstats->tx_carrier_errors; 1230 } 1231 1232 /** 1233 * \brief Handler for SIOCSHWTSTAMP ioctl 1234 * @param netdev network device 1235 * @param ifr interface request 1236 * @param cmd command 1237 */ 1238 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 1239 { 1240 struct lio *lio = GET_LIO(netdev); 1241 struct hwtstamp_config conf; 1242 1243 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 1244 return -EFAULT; 1245 1246 if (conf.flags) 1247 return -EINVAL; 1248 1249 switch (conf.tx_type) { 1250 case HWTSTAMP_TX_ON: 1251 case HWTSTAMP_TX_OFF: 1252 break; 1253 default: 1254 return -ERANGE; 1255 } 1256 1257 switch (conf.rx_filter) { 1258 case HWTSTAMP_FILTER_NONE: 1259 break; 1260 case HWTSTAMP_FILTER_ALL: 1261 case HWTSTAMP_FILTER_SOME: 1262 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1263 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1264 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1265 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1266 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1267 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1268 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1269 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1270 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1271 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1272 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1273 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1274 case HWTSTAMP_FILTER_NTP_ALL: 1275 conf.rx_filter = HWTSTAMP_FILTER_ALL; 1276 break; 1277 default: 1278 return -ERANGE; 1279 } 1280 1281 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 1282 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1283 1284 else 1285 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 1286 1287 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 1288 } 1289 1290 /** 1291 * \brief ioctl handler 1292 * @param netdev network device 1293 * @param ifr interface request 1294 * @param cmd command 1295 */ 1296 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1297 { 1298 switch (cmd) { 1299 case SIOCSHWTSTAMP: 1300 return hwtstamp_ioctl(netdev, ifr); 1301 default: 1302 return -EOPNOTSUPP; 1303 } 1304 } 1305 1306 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 1307 { 1308 struct sk_buff *skb = (struct sk_buff *)buf; 1309 struct octnet_buf_free_info *finfo; 1310 struct oct_timestamp_resp *resp; 1311 struct octeon_soft_command *sc; 1312 struct lio *lio; 1313 1314 finfo = (struct octnet_buf_free_info *)skb->cb; 1315 lio = finfo->lio; 1316 sc = finfo->sc; 1317 oct = lio->oct_dev; 1318 resp = (struct oct_timestamp_resp *)sc->virtrptr; 1319 1320 if (status != OCTEON_REQUEST_DONE) { 1321 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 1322 CVM_CAST64(status)); 1323 resp->timestamp = 0; 1324 } 1325 1326 octeon_swap_8B_data(&resp->timestamp, 1); 1327 1328 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 1329 struct skb_shared_hwtstamps ts; 1330 u64 ns = resp->timestamp; 1331 1332 netif_info(lio, tx_done, lio->netdev, 1333 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 1334 skb, (unsigned long long)ns); 1335 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 1336 skb_tstamp_tx(skb, &ts); 1337 } 1338 1339 octeon_free_soft_command(oct, sc); 1340 tx_buffer_free(skb); 1341 } 1342 1343 /* \brief Send a data packet that will be timestamped 1344 * @param oct octeon device 1345 * @param ndata pointer to network data 1346 * @param finfo pointer to private network data 1347 */ 1348 static int send_nic_timestamp_pkt(struct octeon_device *oct, 1349 struct octnic_data_pkt *ndata, 1350 struct octnet_buf_free_info *finfo, 1351 int xmit_more) 1352 { 1353 struct octeon_soft_command *sc; 1354 int ring_doorbell; 1355 struct lio *lio; 1356 int retval; 1357 u32 len; 1358 1359 lio = finfo->lio; 1360 1361 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 1362 sizeof(struct oct_timestamp_resp)); 1363 finfo->sc = sc; 1364 1365 if (!sc) { 1366 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 1367 return IQ_SEND_FAILED; 1368 } 1369 1370 if (ndata->reqtype == REQTYPE_NORESP_NET) 1371 ndata->reqtype = REQTYPE_RESP_NET; 1372 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 1373 ndata->reqtype = REQTYPE_RESP_NET_SG; 1374 1375 sc->callback = handle_timestamp; 1376 sc->callback_arg = finfo->skb; 1377 sc->iq_no = ndata->q_no; 1378 1379 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 1380 1381 ring_doorbell = !xmit_more; 1382 1383 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 1384 sc, len, ndata->reqtype); 1385 1386 if (retval == IQ_SEND_FAILED) { 1387 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 1388 retval); 1389 octeon_free_soft_command(oct, sc); 1390 } else { 1391 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 1392 } 1393 1394 return retval; 1395 } 1396 1397 /** \brief Transmit networks packets to the Octeon interface 1398 * @param skbuff skbuff struct to be passed to network layer. 1399 * @param netdev pointer to network device 1400 * @returns whether the packet was transmitted to the device okay or not 1401 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 1402 */ 1403 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 1404 { 1405 struct octnet_buf_free_info *finfo; 1406 union octnic_cmd_setup cmdsetup; 1407 struct octnic_data_pkt ndata; 1408 struct octeon_instr_irh *irh; 1409 struct oct_iq_stats *stats; 1410 struct octeon_device *oct; 1411 int q_idx = 0, iq_no = 0; 1412 union tx_info *tx_info; 1413 int xmit_more = 0; 1414 struct lio *lio; 1415 int status = 0; 1416 u64 dptr = 0; 1417 u32 tag = 0; 1418 int j; 1419 1420 lio = GET_LIO(netdev); 1421 oct = lio->oct_dev; 1422 1423 q_idx = skb_iq(lio->oct_dev, skb); 1424 tag = q_idx; 1425 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 1426 1427 stats = &oct->instr_queue[iq_no]->stats; 1428 1429 /* Check for all conditions in which the current packet cannot be 1430 * transmitted. 1431 */ 1432 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 1433 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 1434 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 1435 lio->linfo.link.s.link_up); 1436 goto lio_xmit_failed; 1437 } 1438 1439 /* Use space in skb->cb to store info used to unmap and 1440 * free the buffers. 1441 */ 1442 finfo = (struct octnet_buf_free_info *)skb->cb; 1443 finfo->lio = lio; 1444 finfo->skb = skb; 1445 finfo->sc = NULL; 1446 1447 /* Prepare the attributes for the data to be passed to OSI. */ 1448 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 1449 1450 ndata.buf = finfo; 1451 1452 ndata.q_no = iq_no; 1453 1454 if (octnet_iq_is_full(oct, ndata.q_no)) { 1455 /* defer sending if queue is full */ 1456 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 1457 ndata.q_no); 1458 stats->tx_iq_busy++; 1459 return NETDEV_TX_BUSY; 1460 } 1461 1462 ndata.datasize = skb->len; 1463 1464 cmdsetup.u64 = 0; 1465 cmdsetup.s.iq_no = iq_no; 1466 1467 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1468 if (skb->encapsulation) { 1469 cmdsetup.s.tnl_csum = 1; 1470 stats->tx_vxlan++; 1471 } else { 1472 cmdsetup.s.transport_csum = 1; 1473 } 1474 } 1475 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1476 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1477 cmdsetup.s.timestamp = 1; 1478 } 1479 1480 if (!skb_shinfo(skb)->nr_frags) { 1481 cmdsetup.s.u.datasize = skb->len; 1482 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1483 /* Offload checksum calculation for TCP/UDP packets */ 1484 dptr = dma_map_single(&oct->pci_dev->dev, 1485 skb->data, 1486 skb->len, 1487 DMA_TO_DEVICE); 1488 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 1489 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 1490 __func__); 1491 return NETDEV_TX_BUSY; 1492 } 1493 1494 ndata.cmd.cmd3.dptr = dptr; 1495 finfo->dptr = dptr; 1496 ndata.reqtype = REQTYPE_NORESP_NET; 1497 1498 } else { 1499 skb_frag_t *frag; 1500 struct octnic_gather *g; 1501 int i, frags; 1502 1503 spin_lock(&lio->glist_lock[q_idx]); 1504 g = (struct octnic_gather *) 1505 lio_list_delete_head(&lio->glist[q_idx]); 1506 spin_unlock(&lio->glist_lock[q_idx]); 1507 1508 if (!g) { 1509 netif_info(lio, tx_err, lio->netdev, 1510 "Transmit scatter gather: glist null!\n"); 1511 goto lio_xmit_failed; 1512 } 1513 1514 cmdsetup.s.gather = 1; 1515 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 1516 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 1517 1518 memset(g->sg, 0, g->sg_size); 1519 1520 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 1521 skb->data, 1522 (skb->len - skb->data_len), 1523 DMA_TO_DEVICE); 1524 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 1525 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 1526 __func__); 1527 return NETDEV_TX_BUSY; 1528 } 1529 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 1530 1531 frags = skb_shinfo(skb)->nr_frags; 1532 i = 1; 1533 while (frags--) { 1534 frag = &skb_shinfo(skb)->frags[i - 1]; 1535 1536 g->sg[(i >> 2)].ptr[(i & 3)] = 1537 skb_frag_dma_map(&oct->pci_dev->dev, 1538 frag, 0, skb_frag_size(frag), 1539 DMA_TO_DEVICE); 1540 if (dma_mapping_error(&oct->pci_dev->dev, 1541 g->sg[i >> 2].ptr[i & 3])) { 1542 dma_unmap_single(&oct->pci_dev->dev, 1543 g->sg[0].ptr[0], 1544 skb->len - skb->data_len, 1545 DMA_TO_DEVICE); 1546 for (j = 1; j < i; j++) { 1547 frag = &skb_shinfo(skb)->frags[j - 1]; 1548 dma_unmap_page(&oct->pci_dev->dev, 1549 g->sg[j >> 2].ptr[j & 3], 1550 skb_frag_size(frag), 1551 DMA_TO_DEVICE); 1552 } 1553 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 1554 __func__); 1555 return NETDEV_TX_BUSY; 1556 } 1557 1558 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 1559 (i & 3)); 1560 i++; 1561 } 1562 1563 dptr = g->sg_dma_ptr; 1564 1565 ndata.cmd.cmd3.dptr = dptr; 1566 finfo->dptr = dptr; 1567 finfo->g = g; 1568 1569 ndata.reqtype = REQTYPE_NORESP_NET_SG; 1570 } 1571 1572 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 1573 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 1574 1575 if (skb_shinfo(skb)->gso_size) { 1576 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 1577 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 1578 } 1579 1580 /* HW insert VLAN tag */ 1581 if (skb_vlan_tag_present(skb)) { 1582 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 1583 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 1584 } 1585 1586 xmit_more = netdev_xmit_more(); 1587 1588 if (unlikely(cmdsetup.s.timestamp)) 1589 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 1590 else 1591 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 1592 if (status == IQ_SEND_FAILED) 1593 goto lio_xmit_failed; 1594 1595 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 1596 1597 if (status == IQ_SEND_STOP) { 1598 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 1599 iq_no); 1600 netif_stop_subqueue(netdev, q_idx); 1601 } 1602 1603 netif_trans_update(netdev); 1604 1605 if (tx_info->s.gso_segs) 1606 stats->tx_done += tx_info->s.gso_segs; 1607 else 1608 stats->tx_done++; 1609 stats->tx_tot_bytes += ndata.datasize; 1610 1611 return NETDEV_TX_OK; 1612 1613 lio_xmit_failed: 1614 stats->tx_dropped++; 1615 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 1616 iq_no, stats->tx_dropped); 1617 if (dptr) 1618 dma_unmap_single(&oct->pci_dev->dev, dptr, 1619 ndata.datasize, DMA_TO_DEVICE); 1620 1621 octeon_ring_doorbell_locked(oct, iq_no); 1622 1623 tx_buffer_free(skb); 1624 return NETDEV_TX_OK; 1625 } 1626 1627 /** \brief Network device Tx timeout 1628 * @param netdev pointer to network device 1629 */ 1630 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1631 { 1632 struct lio *lio; 1633 1634 lio = GET_LIO(netdev); 1635 1636 netif_info(lio, tx_err, lio->netdev, 1637 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 1638 netdev->stats.tx_dropped); 1639 netif_trans_update(netdev); 1640 wake_txqs(netdev); 1641 } 1642 1643 static int 1644 liquidio_vlan_rx_add_vid(struct net_device *netdev, 1645 __be16 proto __attribute__((unused)), u16 vid) 1646 { 1647 struct lio *lio = GET_LIO(netdev); 1648 struct octeon_device *oct = lio->oct_dev; 1649 struct octnic_ctrl_pkt nctrl; 1650 int ret = 0; 1651 1652 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1653 1654 nctrl.ncmd.u64 = 0; 1655 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 1656 nctrl.ncmd.s.param1 = vid; 1657 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1658 nctrl.netpndev = (u64)netdev; 1659 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1660 1661 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1662 if (ret) { 1663 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 1664 ret); 1665 return -EPERM; 1666 } 1667 1668 return 0; 1669 } 1670 1671 static int 1672 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 1673 __be16 proto __attribute__((unused)), u16 vid) 1674 { 1675 struct lio *lio = GET_LIO(netdev); 1676 struct octeon_device *oct = lio->oct_dev; 1677 struct octnic_ctrl_pkt nctrl; 1678 int ret = 0; 1679 1680 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1681 1682 nctrl.ncmd.u64 = 0; 1683 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 1684 nctrl.ncmd.s.param1 = vid; 1685 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1686 nctrl.netpndev = (u64)netdev; 1687 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1688 1689 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1690 if (ret) { 1691 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 1692 ret); 1693 if (ret > 0) 1694 ret = -EIO; 1695 } 1696 return ret; 1697 } 1698 1699 /** Sending command to enable/disable RX checksum offload 1700 * @param netdev pointer to network device 1701 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 1702 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 1703 * OCTNET_CMD_RXCSUM_DISABLE 1704 * @returns SUCCESS or FAILURE 1705 */ 1706 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 1707 u8 rx_cmd) 1708 { 1709 struct lio *lio = GET_LIO(netdev); 1710 struct octeon_device *oct = lio->oct_dev; 1711 struct octnic_ctrl_pkt nctrl; 1712 int ret = 0; 1713 1714 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1715 1716 nctrl.ncmd.u64 = 0; 1717 nctrl.ncmd.s.cmd = command; 1718 nctrl.ncmd.s.param1 = rx_cmd; 1719 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1720 nctrl.netpndev = (u64)netdev; 1721 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1722 1723 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1724 if (ret) { 1725 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 1726 ret); 1727 if (ret > 0) 1728 ret = -EIO; 1729 } 1730 return ret; 1731 } 1732 1733 /** Sending command to add/delete VxLAN UDP port to firmware 1734 * @param netdev pointer to network device 1735 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 1736 * @param vxlan_port VxLAN port to be added or deleted 1737 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 1738 * OCTNET_CMD_VXLAN_PORT_DEL 1739 * @returns SUCCESS or FAILURE 1740 */ 1741 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 1742 u16 vxlan_port, u8 vxlan_cmd_bit) 1743 { 1744 struct lio *lio = GET_LIO(netdev); 1745 struct octeon_device *oct = lio->oct_dev; 1746 struct octnic_ctrl_pkt nctrl; 1747 int ret = 0; 1748 1749 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1750 1751 nctrl.ncmd.u64 = 0; 1752 nctrl.ncmd.s.cmd = command; 1753 nctrl.ncmd.s.more = vxlan_cmd_bit; 1754 nctrl.ncmd.s.param1 = vxlan_port; 1755 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1756 nctrl.netpndev = (u64)netdev; 1757 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1758 1759 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1760 if (ret) { 1761 dev_err(&oct->pci_dev->dev, 1762 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 1763 ret); 1764 if (ret > 0) 1765 ret = -EIO; 1766 } 1767 return ret; 1768 } 1769 1770 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 1771 unsigned int table, unsigned int entry, 1772 struct udp_tunnel_info *ti) 1773 { 1774 return liquidio_vxlan_port_command(netdev, 1775 OCTNET_CMD_VXLAN_PORT_CONFIG, 1776 htons(ti->port), 1777 OCTNET_CMD_VXLAN_PORT_ADD); 1778 } 1779 1780 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 1781 unsigned int table, 1782 unsigned int entry, 1783 struct udp_tunnel_info *ti) 1784 { 1785 return liquidio_vxlan_port_command(netdev, 1786 OCTNET_CMD_VXLAN_PORT_CONFIG, 1787 htons(ti->port), 1788 OCTNET_CMD_VXLAN_PORT_DEL); 1789 } 1790 1791 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 1792 .set_port = liquidio_udp_tunnel_set_port, 1793 .unset_port = liquidio_udp_tunnel_unset_port, 1794 .tables = { 1795 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 1796 }, 1797 }; 1798 1799 /** \brief Net device fix features 1800 * @param netdev pointer to network device 1801 * @param request features requested 1802 * @returns updated features list 1803 */ 1804 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 1805 netdev_features_t request) 1806 { 1807 struct lio *lio = netdev_priv(netdev); 1808 1809 if ((request & NETIF_F_RXCSUM) && 1810 !(lio->dev_capability & NETIF_F_RXCSUM)) 1811 request &= ~NETIF_F_RXCSUM; 1812 1813 if ((request & NETIF_F_HW_CSUM) && 1814 !(lio->dev_capability & NETIF_F_HW_CSUM)) 1815 request &= ~NETIF_F_HW_CSUM; 1816 1817 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 1818 request &= ~NETIF_F_TSO; 1819 1820 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 1821 request &= ~NETIF_F_TSO6; 1822 1823 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 1824 request &= ~NETIF_F_LRO; 1825 1826 /* Disable LRO if RXCSUM is off */ 1827 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 1828 (lio->dev_capability & NETIF_F_LRO)) 1829 request &= ~NETIF_F_LRO; 1830 1831 return request; 1832 } 1833 1834 /** \brief Net device set features 1835 * @param netdev pointer to network device 1836 * @param features features to enable/disable 1837 */ 1838 static int liquidio_set_features(struct net_device *netdev, 1839 netdev_features_t features) 1840 { 1841 struct lio *lio = netdev_priv(netdev); 1842 1843 if (!((netdev->features ^ features) & NETIF_F_LRO)) 1844 return 0; 1845 1846 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 1847 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 1848 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1849 else if (!(features & NETIF_F_LRO) && 1850 (lio->dev_capability & NETIF_F_LRO)) 1851 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 1852 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 1853 if (!(netdev->features & NETIF_F_RXCSUM) && 1854 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1855 (features & NETIF_F_RXCSUM)) 1856 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1857 OCTNET_CMD_RXCSUM_ENABLE); 1858 else if ((netdev->features & NETIF_F_RXCSUM) && 1859 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 1860 !(features & NETIF_F_RXCSUM)) 1861 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 1862 OCTNET_CMD_RXCSUM_DISABLE); 1863 1864 return 0; 1865 } 1866 1867 static const struct net_device_ops lionetdevops = { 1868 .ndo_open = liquidio_open, 1869 .ndo_stop = liquidio_stop, 1870 .ndo_start_xmit = liquidio_xmit, 1871 .ndo_get_stats64 = liquidio_get_stats64, 1872 .ndo_set_mac_address = liquidio_set_mac, 1873 .ndo_set_rx_mode = liquidio_set_mcast_list, 1874 .ndo_tx_timeout = liquidio_tx_timeout, 1875 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 1876 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 1877 .ndo_change_mtu = liquidio_change_mtu, 1878 .ndo_do_ioctl = liquidio_ioctl, 1879 .ndo_fix_features = liquidio_fix_features, 1880 .ndo_set_features = liquidio_set_features, 1881 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, 1882 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, 1883 }; 1884 1885 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 1886 { 1887 struct octeon_device *oct = (struct octeon_device *)buf; 1888 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 1889 union oct_link_status *ls; 1890 int gmxport = 0; 1891 int i; 1892 1893 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 1894 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 1895 recv_pkt->buffer_size[0], 1896 recv_pkt->rh.r_nic_info.gmxport); 1897 goto nic_info_err; 1898 } 1899 1900 gmxport = recv_pkt->rh.r_nic_info.gmxport; 1901 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 1902 OCT_DROQ_INFO_SIZE); 1903 1904 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 1905 1906 for (i = 0; i < oct->ifcount; i++) { 1907 if (oct->props[i].gmxport == gmxport) { 1908 update_link_status(oct->props[i].netdev, ls); 1909 break; 1910 } 1911 } 1912 1913 nic_info_err: 1914 for (i = 0; i < recv_pkt->buffer_count; i++) 1915 recv_buffer_free(recv_pkt->buffer_ptr[i]); 1916 octeon_free_recv_info(recv_info); 1917 return 0; 1918 } 1919 1920 /** 1921 * \brief Setup network interfaces 1922 * @param octeon_dev octeon device 1923 * 1924 * Called during init time for each device. It assumes the NIC 1925 * is already up and running. The link information for each 1926 * interface is passed in link_info. 1927 */ 1928 static int setup_nic_devices(struct octeon_device *octeon_dev) 1929 { 1930 int retval, num_iqueues, num_oqueues; 1931 u32 resp_size, data_size; 1932 struct liquidio_if_cfg_resp *resp; 1933 struct octeon_soft_command *sc; 1934 union oct_nic_if_cfg if_cfg; 1935 struct octdev_props *props; 1936 struct net_device *netdev; 1937 struct lio_version *vdata; 1938 struct lio *lio = NULL; 1939 u8 mac[ETH_ALEN], i, j; 1940 u32 ifidx_or_pfnum; 1941 1942 ifidx_or_pfnum = octeon_dev->pf_num; 1943 1944 /* This is to handle link status changes */ 1945 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 1946 lio_nic_info, octeon_dev); 1947 1948 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 1949 * They are handled directly. 1950 */ 1951 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 1952 free_netbuf); 1953 1954 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 1955 free_netsgbuf); 1956 1957 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 1958 free_netsgbuf_with_resp); 1959 1960 for (i = 0; i < octeon_dev->ifcount; i++) { 1961 resp_size = sizeof(struct liquidio_if_cfg_resp); 1962 data_size = sizeof(struct lio_version); 1963 sc = (struct octeon_soft_command *) 1964 octeon_alloc_soft_command(octeon_dev, data_size, 1965 resp_size, 0); 1966 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1967 vdata = (struct lio_version *)sc->virtdptr; 1968 1969 *((u64 *)vdata) = 0; 1970 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 1971 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 1972 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 1973 1974 if_cfg.u64 = 0; 1975 1976 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 1977 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 1978 if_cfg.s.base_queue = 0; 1979 1980 sc->iq_no = 0; 1981 1982 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 1983 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 1984 0); 1985 1986 init_completion(&sc->complete); 1987 sc->sc_status = OCTEON_REQUEST_PENDING; 1988 1989 retval = octeon_send_soft_command(octeon_dev, sc); 1990 if (retval == IQ_SEND_FAILED) { 1991 dev_err(&octeon_dev->pci_dev->dev, 1992 "iq/oq config failed status: %x\n", retval); 1993 /* Soft instr is freed by driver in case of failure. */ 1994 octeon_free_soft_command(octeon_dev, sc); 1995 return(-EIO); 1996 } 1997 1998 /* Sleep on a wait queue till the cond flag indicates that the 1999 * response arrived or timed-out. 2000 */ 2001 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); 2002 if (retval) 2003 return retval; 2004 2005 retval = resp->status; 2006 if (retval) { 2007 dev_err(&octeon_dev->pci_dev->dev, 2008 "iq/oq config failed, retval = %d\n", retval); 2009 WRITE_ONCE(sc->caller_is_done, true); 2010 return -EIO; 2011 } 2012 2013 snprintf(octeon_dev->fw_info.liquidio_firmware_version, 2014 32, "%s", 2015 resp->cfg_info.liquidio_firmware_version); 2016 2017 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 2018 (sizeof(struct liquidio_if_cfg_info)) >> 3); 2019 2020 num_iqueues = hweight64(resp->cfg_info.iqmask); 2021 num_oqueues = hweight64(resp->cfg_info.oqmask); 2022 2023 if (!(num_iqueues) || !(num_oqueues)) { 2024 dev_err(&octeon_dev->pci_dev->dev, 2025 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 2026 resp->cfg_info.iqmask, resp->cfg_info.oqmask); 2027 WRITE_ONCE(sc->caller_is_done, true); 2028 goto setup_nic_dev_done; 2029 } 2030 dev_dbg(&octeon_dev->pci_dev->dev, 2031 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 2032 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 2033 num_iqueues, num_oqueues); 2034 2035 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 2036 2037 if (!netdev) { 2038 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 2039 WRITE_ONCE(sc->caller_is_done, true); 2040 goto setup_nic_dev_done; 2041 } 2042 2043 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 2044 2045 /* Associate the routines that will handle different 2046 * netdev tasks. 2047 */ 2048 netdev->netdev_ops = &lionetdevops; 2049 2050 lio = GET_LIO(netdev); 2051 2052 memset(lio, 0, sizeof(struct lio)); 2053 2054 lio->ifidx = ifidx_or_pfnum; 2055 2056 props = &octeon_dev->props[i]; 2057 props->gmxport = resp->cfg_info.linfo.gmxport; 2058 props->netdev = netdev; 2059 2060 lio->linfo.num_rxpciq = num_oqueues; 2061 lio->linfo.num_txpciq = num_iqueues; 2062 2063 for (j = 0; j < num_oqueues; j++) { 2064 lio->linfo.rxpciq[j].u64 = 2065 resp->cfg_info.linfo.rxpciq[j].u64; 2066 } 2067 for (j = 0; j < num_iqueues; j++) { 2068 lio->linfo.txpciq[j].u64 = 2069 resp->cfg_info.linfo.txpciq[j].u64; 2070 } 2071 2072 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 2073 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 2074 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 2075 lio->linfo.macaddr_is_admin_asgnd = 2076 resp->cfg_info.linfo.macaddr_is_admin_asgnd; 2077 lio->linfo.macaddr_spoofchk = 2078 resp->cfg_info.linfo.macaddr_spoofchk; 2079 2080 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 2081 2082 lio->dev_capability = NETIF_F_HIGHDMA 2083 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2084 | NETIF_F_SG | NETIF_F_RXCSUM 2085 | NETIF_F_TSO | NETIF_F_TSO6 2086 | NETIF_F_GRO 2087 | NETIF_F_LRO; 2088 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 2089 2090 /* Copy of transmit encapsulation capabilities: 2091 * TSO, TSO6, Checksums for this device 2092 */ 2093 lio->enc_dev_capability = NETIF_F_IP_CSUM 2094 | NETIF_F_IPV6_CSUM 2095 | NETIF_F_GSO_UDP_TUNNEL 2096 | NETIF_F_HW_CSUM | NETIF_F_SG 2097 | NETIF_F_RXCSUM 2098 | NETIF_F_TSO | NETIF_F_TSO6 2099 | NETIF_F_LRO; 2100 2101 netdev->hw_enc_features = 2102 (lio->enc_dev_capability & ~NETIF_F_LRO); 2103 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; 2104 2105 netdev->vlan_features = lio->dev_capability; 2106 /* Add any unchangeable hw features */ 2107 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 2108 NETIF_F_HW_VLAN_CTAG_RX | 2109 NETIF_F_HW_VLAN_CTAG_TX; 2110 2111 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 2112 2113 netdev->hw_features = lio->dev_capability; 2114 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; 2115 2116 /* MTU range: 68 - 16000 */ 2117 netdev->min_mtu = LIO_MIN_MTU_SIZE; 2118 netdev->max_mtu = LIO_MAX_MTU_SIZE; 2119 2120 WRITE_ONCE(sc->caller_is_done, true); 2121 2122 /* Point to the properties for octeon device to which this 2123 * interface belongs. 2124 */ 2125 lio->oct_dev = octeon_dev; 2126 lio->octprops = props; 2127 lio->netdev = netdev; 2128 2129 dev_dbg(&octeon_dev->pci_dev->dev, 2130 "if%d gmx: %d hw_addr: 0x%llx\n", i, 2131 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 2132 2133 /* 64-bit swap required on LE machines */ 2134 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 2135 for (j = 0; j < ETH_ALEN; j++) 2136 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 2137 2138 /* Copy MAC Address to OS network device structure */ 2139 ether_addr_copy(netdev->dev_addr, mac); 2140 2141 if (liquidio_setup_io_queues(octeon_dev, i, 2142 lio->linfo.num_txpciq, 2143 lio->linfo.num_rxpciq)) { 2144 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 2145 goto setup_nic_dev_free; 2146 } 2147 2148 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 2149 2150 /* For VFs, enable Octeon device interrupts here, 2151 * as this is contingent upon IO queue setup 2152 */ 2153 octeon_dev->fn_list.enable_interrupt(octeon_dev, 2154 OCTEON_ALL_INTR); 2155 2156 /* By default all interfaces on a single Octeon uses the same 2157 * tx and rx queues 2158 */ 2159 lio->txq = lio->linfo.txpciq[0].s.q_no; 2160 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 2161 2162 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 2163 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 2164 2165 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { 2166 dev_err(&octeon_dev->pci_dev->dev, 2167 "Gather list allocation failed\n"); 2168 goto setup_nic_dev_free; 2169 } 2170 2171 /* Register ethtool support */ 2172 liquidio_set_ethtool_ops(netdev); 2173 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) 2174 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 2175 else 2176 octeon_dev->priv_flags = 0x0; 2177 2178 if (netdev->features & NETIF_F_LRO) 2179 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2180 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2181 2182 if (setup_link_status_change_wq(netdev)) 2183 goto setup_nic_dev_free; 2184 2185 if (setup_rx_oom_poll_fn(netdev)) 2186 goto setup_nic_dev_free; 2187 2188 /* Register the network device with the OS */ 2189 if (register_netdev(netdev)) { 2190 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 2191 goto setup_nic_dev_free; 2192 } 2193 2194 dev_dbg(&octeon_dev->pci_dev->dev, 2195 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 2196 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 2197 netif_carrier_off(netdev); 2198 lio->link_changes++; 2199 2200 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 2201 2202 /* Sending command to firmware to enable Rx checksum offload 2203 * by default at the time of setup of Liquidio driver for 2204 * this device 2205 */ 2206 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2207 OCTNET_CMD_RXCSUM_ENABLE); 2208 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 2209 OCTNET_CMD_TXCSUM_ENABLE); 2210 2211 dev_dbg(&octeon_dev->pci_dev->dev, 2212 "NIC ifidx:%d Setup successful\n", i); 2213 2214 octeon_dev->no_speed_setting = 1; 2215 } 2216 2217 return 0; 2218 2219 setup_nic_dev_free: 2220 2221 while (i--) { 2222 dev_err(&octeon_dev->pci_dev->dev, 2223 "NIC ifidx:%d Setup failed\n", i); 2224 liquidio_destroy_nic_device(octeon_dev, i); 2225 } 2226 2227 setup_nic_dev_done: 2228 2229 return -ENODEV; 2230 } 2231 2232 /** 2233 * \brief initialize the NIC 2234 * @param oct octeon device 2235 * 2236 * This initialization routine is called once the Octeon device application is 2237 * up and running 2238 */ 2239 static int liquidio_init_nic_module(struct octeon_device *oct) 2240 { 2241 int num_nic_ports = 1; 2242 int i, retval = 0; 2243 2244 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 2245 2246 /* only default iq and oq were initialized 2247 * initialize the rest as well run port_config command for each port 2248 */ 2249 oct->ifcount = num_nic_ports; 2250 memset(oct->props, 0, 2251 sizeof(struct octdev_props) * num_nic_ports); 2252 2253 for (i = 0; i < MAX_OCTEON_LINKS; i++) 2254 oct->props[i].gmxport = -1; 2255 2256 retval = setup_nic_devices(oct); 2257 if (retval) { 2258 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 2259 goto octnet_init_failure; 2260 } 2261 2262 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 2263 2264 return retval; 2265 2266 octnet_init_failure: 2267 2268 oct->ifcount = 0; 2269 2270 return retval; 2271 } 2272 2273 /** 2274 * \brief Device initialization for each Octeon device that is probed 2275 * @param octeon_dev octeon device 2276 */ 2277 static int octeon_device_init(struct octeon_device *oct) 2278 { 2279 u32 rev_id; 2280 int j; 2281 2282 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE); 2283 2284 /* Enable access to the octeon device and make its DMA capability 2285 * known to the OS. 2286 */ 2287 if (octeon_pci_os_setup(oct)) 2288 return 1; 2289 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE); 2290 2291 oct->chip_id = OCTEON_CN23XX_VF_VID; 2292 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 2293 oct->rev_id = rev_id & 0xff; 2294 2295 if (cn23xx_setup_octeon_vf_device(oct)) 2296 return 1; 2297 2298 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE); 2299 2300 oct->app_mode = CVM_DRV_NIC_APP; 2301 2302 /* Initialize the dispatch mechanism used to push packets arriving on 2303 * Octeon Output queues. 2304 */ 2305 if (octeon_init_dispatch_list(oct)) 2306 return 1; 2307 2308 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE); 2309 2310 if (octeon_set_io_queues_off(oct)) { 2311 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); 2312 return 1; 2313 } 2314 2315 if (oct->fn_list.setup_device_regs(oct)) { 2316 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n"); 2317 return 1; 2318 } 2319 2320 /* Initialize soft command buffer pool */ 2321 if (octeon_setup_sc_buffer_pool(oct)) { 2322 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); 2323 return 1; 2324 } 2325 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 2326 2327 /* Setup the data structures that manage this Octeon's Input queues. */ 2328 if (octeon_setup_instr_queues(oct)) { 2329 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); 2330 return 1; 2331 } 2332 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 2333 2334 /* Initialize lists to manage the requests of different types that 2335 * arrive from user & kernel applications for this octeon device. 2336 */ 2337 if (octeon_setup_response_list(oct)) { 2338 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); 2339 return 1; 2340 } 2341 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); 2342 2343 if (octeon_setup_output_queues(oct)) { 2344 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); 2345 return 1; 2346 } 2347 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); 2348 2349 if (oct->fn_list.setup_mbox(oct)) { 2350 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 2351 return 1; 2352 } 2353 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); 2354 2355 if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) { 2356 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); 2357 return 1; 2358 } 2359 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 2360 2361 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n", 2362 oct->sriov_info.rings_per_vf); 2363 2364 /* Setup the interrupt handler and record the INT SUM register address*/ 2365 if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf)) 2366 return 1; 2367 2368 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); 2369 2370 /* *************************************************************** 2371 * The interrupts need to be enabled for the PF<-->VF handshake. 2372 * They are [re]-enabled after the PF<-->VF handshake so that the 2373 * correct OQ tick value is used (i.e. the value retrieved from 2374 * the PF as part of the handshake). 2375 */ 2376 2377 /* Enable Octeon device interrupts */ 2378 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 2379 2380 if (cn23xx_octeon_pfvf_handshake(oct)) 2381 return 1; 2382 2383 /* Here we [re]-enable the interrupts so that the correct OQ tick value 2384 * is used (i.e. the value that was retrieved during the handshake) 2385 */ 2386 2387 /* Enable Octeon device interrupts */ 2388 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 2389 /* *************************************************************** */ 2390 2391 /* Enable the input and output queues for this Octeon device */ 2392 if (oct->fn_list.enable_io_queues(oct)) { 2393 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); 2394 return 1; 2395 } 2396 2397 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); 2398 2399 atomic_set(&oct->status, OCT_DEV_HOST_OK); 2400 2401 /* Send Credit for Octeon Output queues. Credits are always sent after 2402 * the output queue is enabled. 2403 */ 2404 for (j = 0; j < oct->num_oqs; j++) 2405 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); 2406 2407 /* Packets can start arriving on the output queues from this point. */ 2408 2409 atomic_set(&oct->status, OCT_DEV_CORE_OK); 2410 2411 atomic_set(&oct->status, OCT_DEV_RUNNING); 2412 2413 if (liquidio_init_nic_module(oct)) 2414 return 1; 2415 2416 return 0; 2417 } 2418 2419 static int __init liquidio_vf_init(void) 2420 { 2421 octeon_init_device_list(0); 2422 return pci_register_driver(&liquidio_vf_pci_driver); 2423 } 2424 2425 static void __exit liquidio_vf_exit(void) 2426 { 2427 pci_unregister_driver(&liquidio_vf_pci_driver); 2428 2429 pr_info("LiquidIO_VF network module is now unloaded\n"); 2430 } 2431 2432 module_init(liquidio_vf_init); 2433 module_exit(liquidio_vf_exit); 2434