1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/module.h> 19 #include <linux/interrupt.h> 20 #include <linux/pci.h> 21 #include <net/vxlan.h> 22 #include "liquidio_common.h" 23 #include "octeon_droq.h" 24 #include "octeon_iq.h" 25 #include "response_manager.h" 26 #include "octeon_device.h" 27 #include "octeon_nic.h" 28 #include "octeon_main.h" 29 #include "octeon_network.h" 30 #include "cn23xx_vf_device.h" 31 32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 34 MODULE_LICENSE("GPL"); 35 MODULE_VERSION(LIQUIDIO_VERSION); 36 37 static int debug = -1; 38 module_param(debug, int, 0644); 39 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 40 41 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 42 43 struct liquidio_if_cfg_context { 44 int octeon_id; 45 46 wait_queue_head_t wc; 47 48 int cond; 49 }; 50 51 struct liquidio_if_cfg_resp { 52 u64 rh; 53 struct liquidio_if_cfg_info cfg_info; 54 u64 status; 55 }; 56 57 struct liquidio_rx_ctl_context { 58 int octeon_id; 59 60 wait_queue_head_t wc; 61 62 int cond; 63 }; 64 65 struct oct_timestamp_resp { 66 u64 rh; 67 u64 timestamp; 68 u64 status; 69 }; 70 71 union tx_info { 72 u64 u64; 73 struct { 74 #ifdef __BIG_ENDIAN_BITFIELD 75 u16 gso_size; 76 u16 gso_segs; 77 u32 reserved; 78 #else 79 u32 reserved; 80 u16 gso_segs; 81 u16 gso_size; 82 #endif 83 } s; 84 }; 85 86 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS) 87 88 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 89 #define OCTNIC_GSO_MAX_SIZE \ 90 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 91 92 struct octnic_gather { 93 /* List manipulation. Next and prev pointers. */ 94 struct list_head list; 95 96 /* Size of the gather component at sg in bytes. */ 97 int sg_size; 98 99 /* Number of bytes that sg was adjusted to make it 8B-aligned. */ 100 int adjust; 101 102 /* Gather component that can accommodate max sized fragment list 103 * received from the IP layer. 104 */ 105 struct octeon_sg_entry *sg; 106 107 dma_addr_t sg_dma_ptr; 108 }; 109 110 struct octeon_device_priv { 111 /* Tasklet structures for this device. */ 112 struct tasklet_struct droq_tasklet; 113 unsigned long napi_mask; 114 }; 115 116 static int 117 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 118 static void liquidio_vf_remove(struct pci_dev *pdev); 119 static int octeon_device_init(struct octeon_device *oct); 120 static int liquidio_stop(struct net_device *netdev); 121 122 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 123 { 124 struct octeon_device_priv *oct_priv = 125 (struct octeon_device_priv *)oct->priv; 126 int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT; 127 int pkt_cnt = 0, pending_pkts; 128 int i; 129 130 do { 131 pending_pkts = 0; 132 133 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 134 if (!(oct->io_qmask.oq & BIT_ULL(i))) 135 continue; 136 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 137 } 138 if (pkt_cnt > 0) { 139 pending_pkts += pkt_cnt; 140 tasklet_schedule(&oct_priv->droq_tasklet); 141 } 142 pkt_cnt = 0; 143 schedule_timeout_uninterruptible(1); 144 145 } while (retry-- && pending_pkts); 146 147 return pkt_cnt; 148 } 149 150 /** 151 * \brief wait for all pending requests to complete 152 * @param oct Pointer to Octeon device 153 * 154 * Called during shutdown sequence 155 */ 156 static int wait_for_pending_requests(struct octeon_device *oct) 157 { 158 int i, pcount = 0; 159 160 for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) { 161 pcount = atomic_read( 162 &oct->response_list[OCTEON_ORDERED_SC_LIST] 163 .pending_req_count); 164 if (pcount) 165 schedule_timeout_uninterruptible(HZ / 10); 166 else 167 break; 168 } 169 170 if (pcount) 171 return 1; 172 173 return 0; 174 } 175 176 /** 177 * \brief Cause device to go quiet so it can be safely removed/reset/etc 178 * @param oct Pointer to Octeon device 179 */ 180 static void pcierror_quiesce_device(struct octeon_device *oct) 181 { 182 int i; 183 184 /* Disable the input and output queues now. No more packets will 185 * arrive from Octeon, but we should wait for all packet processing 186 * to finish. 187 */ 188 189 /* To allow for in-flight requests */ 190 schedule_timeout_uninterruptible(100); 191 192 if (wait_for_pending_requests(oct)) 193 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 194 195 /* Force all requests waiting to be fetched by OCTEON to complete. */ 196 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 197 struct octeon_instr_queue *iq; 198 199 if (!(oct->io_qmask.iq & BIT_ULL(i))) 200 continue; 201 iq = oct->instr_queue[i]; 202 203 if (atomic_read(&iq->instr_pending)) { 204 spin_lock_bh(&iq->lock); 205 iq->fill_cnt = 0; 206 iq->octeon_read_index = iq->host_write_index; 207 iq->stats.instr_processed += 208 atomic_read(&iq->instr_pending); 209 lio_process_iq_request_list(oct, iq, 0); 210 spin_unlock_bh(&iq->lock); 211 } 212 } 213 214 /* Force all pending ordered list requests to time out. */ 215 lio_process_ordered_list(oct, 1); 216 217 /* We do not need to wait for output queue packets to be processed. */ 218 } 219 220 /** 221 * \brief Cleanup PCI AER uncorrectable error status 222 * @param dev Pointer to PCI device 223 */ 224 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 225 { 226 u32 status, mask; 227 int pos = 0x100; 228 229 pr_info("%s :\n", __func__); 230 231 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 232 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 233 if (dev->error_state == pci_channel_io_normal) 234 status &= ~mask; /* Clear corresponding nonfatal bits */ 235 else 236 status &= mask; /* Clear corresponding fatal bits */ 237 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 238 } 239 240 /** 241 * \brief Stop all PCI IO to a given device 242 * @param dev Pointer to Octeon device 243 */ 244 static void stop_pci_io(struct octeon_device *oct) 245 { 246 struct msix_entry *msix_entries; 247 int i; 248 249 /* No more instructions will be forwarded. */ 250 atomic_set(&oct->status, OCT_DEV_IN_RESET); 251 252 for (i = 0; i < oct->ifcount; i++) 253 netif_device_detach(oct->props[i].netdev); 254 255 /* Disable interrupts */ 256 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 257 258 pcierror_quiesce_device(oct); 259 if (oct->msix_on) { 260 msix_entries = (struct msix_entry *)oct->msix_entries; 261 for (i = 0; i < oct->num_msix_irqs; i++) { 262 /* clear the affinity_cpumask */ 263 irq_set_affinity_hint(msix_entries[i].vector, 264 NULL); 265 free_irq(msix_entries[i].vector, 266 &oct->ioq_vector[i]); 267 } 268 pci_disable_msix(oct->pci_dev); 269 kfree(oct->msix_entries); 270 oct->msix_entries = NULL; 271 octeon_free_ioq_vector(oct); 272 } 273 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 274 lio_get_state_string(&oct->status)); 275 276 /* making it a common function for all OCTEON models */ 277 cleanup_aer_uncorrect_error_status(oct->pci_dev); 278 279 pci_disable_device(oct->pci_dev); 280 } 281 282 /** 283 * \brief called when PCI error is detected 284 * @param pdev Pointer to PCI device 285 * @param state The current pci connection state 286 * 287 * This function is called after a PCI bus error affecting 288 * this device has been detected. 289 */ 290 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 291 pci_channel_state_t state) 292 { 293 struct octeon_device *oct = pci_get_drvdata(pdev); 294 295 /* Non-correctable Non-fatal errors */ 296 if (state == pci_channel_io_normal) { 297 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 298 cleanup_aer_uncorrect_error_status(oct->pci_dev); 299 return PCI_ERS_RESULT_CAN_RECOVER; 300 } 301 302 /* Non-correctable Fatal errors */ 303 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 304 stop_pci_io(oct); 305 306 return PCI_ERS_RESULT_DISCONNECT; 307 } 308 309 /* For PCI-E Advanced Error Recovery (AER) Interface */ 310 static const struct pci_error_handlers liquidio_vf_err_handler = { 311 .error_detected = liquidio_pcie_error_detected, 312 }; 313 314 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 315 { 316 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 317 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 318 }, 319 { 320 0, 0, 0, 0, 0, 0, 0 321 } 322 }; 323 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 324 325 static struct pci_driver liquidio_vf_pci_driver = { 326 .name = "LiquidIO_VF", 327 .id_table = liquidio_vf_pci_tbl, 328 .probe = liquidio_vf_probe, 329 .remove = liquidio_vf_remove, 330 .err_handler = &liquidio_vf_err_handler, /* For AER */ 331 }; 332 333 /** 334 * \brief Stop Tx queues 335 * @param netdev network device 336 */ 337 static void txqs_stop(struct net_device *netdev) 338 { 339 if (netif_is_multiqueue(netdev)) { 340 int i; 341 342 for (i = 0; i < netdev->num_tx_queues; i++) 343 netif_stop_subqueue(netdev, i); 344 } else { 345 netif_stop_queue(netdev); 346 } 347 } 348 349 /** 350 * \brief Start Tx queues 351 * @param netdev network device 352 */ 353 static void txqs_start(struct net_device *netdev) 354 { 355 if (netif_is_multiqueue(netdev)) { 356 int i; 357 358 for (i = 0; i < netdev->num_tx_queues; i++) 359 netif_start_subqueue(netdev, i); 360 } else { 361 netif_start_queue(netdev); 362 } 363 } 364 365 /** 366 * \brief Wake Tx queues 367 * @param netdev network device 368 */ 369 static void txqs_wake(struct net_device *netdev) 370 { 371 struct lio *lio = GET_LIO(netdev); 372 373 if (netif_is_multiqueue(netdev)) { 374 int i; 375 376 for (i = 0; i < netdev->num_tx_queues; i++) { 377 int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)] 378 .s.q_no; 379 if (__netif_subqueue_stopped(netdev, i)) { 380 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, 381 tx_restart, 1); 382 netif_wake_subqueue(netdev, i); 383 } 384 } 385 } else { 386 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 387 tx_restart, 1); 388 netif_wake_queue(netdev); 389 } 390 } 391 392 /** 393 * \brief Start Tx queue 394 * @param netdev network device 395 */ 396 static void start_txq(struct net_device *netdev) 397 { 398 struct lio *lio = GET_LIO(netdev); 399 400 if (lio->linfo.link.s.link_up) { 401 txqs_start(netdev); 402 return; 403 } 404 } 405 406 /** 407 * \brief Wake a queue 408 * @param netdev network device 409 * @param q which queue to wake 410 */ 411 static void wake_q(struct net_device *netdev, int q) 412 { 413 if (netif_is_multiqueue(netdev)) 414 netif_wake_subqueue(netdev, q); 415 else 416 netif_wake_queue(netdev); 417 } 418 419 /** 420 * \brief Stop a queue 421 * @param netdev network device 422 * @param q which queue to stop 423 */ 424 static void stop_q(struct net_device *netdev, int q) 425 { 426 if (netif_is_multiqueue(netdev)) 427 netif_stop_subqueue(netdev, q); 428 else 429 netif_stop_queue(netdev); 430 } 431 432 /** 433 * Remove the node at the head of the list. The list would be empty at 434 * the end of this call if there are no more nodes in the list. 435 */ 436 static struct list_head *list_delete_head(struct list_head *root) 437 { 438 struct list_head *node; 439 440 if ((root->prev == root) && (root->next == root)) 441 node = NULL; 442 else 443 node = root->next; 444 445 if (node) 446 list_del(node); 447 448 return node; 449 } 450 451 /** 452 * \brief Delete gather lists 453 * @param lio per-network private data 454 */ 455 static void delete_glists(struct lio *lio) 456 { 457 struct octnic_gather *g; 458 int i; 459 460 kfree(lio->glist_lock); 461 lio->glist_lock = NULL; 462 463 if (!lio->glist) 464 return; 465 466 for (i = 0; i < lio->linfo.num_txpciq; i++) { 467 do { 468 g = (struct octnic_gather *) 469 list_delete_head(&lio->glist[i]); 470 if (g) 471 kfree(g); 472 } while (g); 473 474 if (lio->glists_virt_base && lio->glists_virt_base[i] && 475 lio->glists_dma_base && lio->glists_dma_base[i]) { 476 lio_dma_free(lio->oct_dev, 477 lio->glist_entry_size * lio->tx_qsize, 478 lio->glists_virt_base[i], 479 lio->glists_dma_base[i]); 480 } 481 } 482 483 kfree(lio->glists_virt_base); 484 lio->glists_virt_base = NULL; 485 486 kfree(lio->glists_dma_base); 487 lio->glists_dma_base = NULL; 488 489 kfree(lio->glist); 490 lio->glist = NULL; 491 } 492 493 /** 494 * \brief Setup gather lists 495 * @param lio per-network private data 496 */ 497 static int setup_glists(struct lio *lio, int num_iqs) 498 { 499 struct octnic_gather *g; 500 int i, j; 501 502 lio->glist_lock = 503 kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); 504 if (!lio->glist_lock) 505 return -ENOMEM; 506 507 lio->glist = 508 kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); 509 if (!lio->glist) { 510 kfree(lio->glist_lock); 511 lio->glist_lock = NULL; 512 return -ENOMEM; 513 } 514 515 lio->glist_entry_size = 516 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); 517 518 /* allocate memory to store virtual and dma base address of 519 * per glist consistent memory 520 */ 521 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), 522 GFP_KERNEL); 523 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), 524 GFP_KERNEL); 525 526 if (!lio->glists_virt_base || !lio->glists_dma_base) { 527 delete_glists(lio); 528 return -ENOMEM; 529 } 530 531 for (i = 0; i < num_iqs; i++) { 532 spin_lock_init(&lio->glist_lock[i]); 533 534 INIT_LIST_HEAD(&lio->glist[i]); 535 536 lio->glists_virt_base[i] = 537 lio_dma_alloc(lio->oct_dev, 538 lio->glist_entry_size * lio->tx_qsize, 539 &lio->glists_dma_base[i]); 540 541 if (!lio->glists_virt_base[i]) { 542 delete_glists(lio); 543 return -ENOMEM; 544 } 545 546 for (j = 0; j < lio->tx_qsize; j++) { 547 g = kzalloc(sizeof(*g), GFP_KERNEL); 548 if (!g) 549 break; 550 551 g->sg = lio->glists_virt_base[i] + 552 (j * lio->glist_entry_size); 553 554 g->sg_dma_ptr = lio->glists_dma_base[i] + 555 (j * lio->glist_entry_size); 556 557 list_add_tail(&g->list, &lio->glist[i]); 558 } 559 560 if (j != lio->tx_qsize) { 561 delete_glists(lio); 562 return -ENOMEM; 563 } 564 } 565 566 return 0; 567 } 568 569 /** 570 * \brief Print link information 571 * @param netdev network device 572 */ 573 static void print_link_info(struct net_device *netdev) 574 { 575 struct lio *lio = GET_LIO(netdev); 576 577 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { 578 struct oct_link_info *linfo = &lio->linfo; 579 580 if (linfo->link.s.link_up) { 581 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 582 linfo->link.s.speed, 583 (linfo->link.s.duplex) ? "Full" : "Half"); 584 } else { 585 netif_info(lio, link, lio->netdev, "Link Down\n"); 586 } 587 } 588 } 589 590 /** 591 * \brief Routine to notify MTU change 592 * @param work work_struct data structure 593 */ 594 static void octnet_link_status_change(struct work_struct *work) 595 { 596 struct cavium_wk *wk = (struct cavium_wk *)work; 597 struct lio *lio = (struct lio *)wk->ctxptr; 598 599 rtnl_lock(); 600 call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); 601 rtnl_unlock(); 602 } 603 604 /** 605 * \brief Sets up the mtu status change work 606 * @param netdev network device 607 */ 608 static int setup_link_status_change_wq(struct net_device *netdev) 609 { 610 struct lio *lio = GET_LIO(netdev); 611 struct octeon_device *oct = lio->oct_dev; 612 613 lio->link_status_wq.wq = alloc_workqueue("link-status", 614 WQ_MEM_RECLAIM, 0); 615 if (!lio->link_status_wq.wq) { 616 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 617 return -1; 618 } 619 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 620 octnet_link_status_change); 621 lio->link_status_wq.wk.ctxptr = lio; 622 623 return 0; 624 } 625 626 static void cleanup_link_status_change_wq(struct net_device *netdev) 627 { 628 struct lio *lio = GET_LIO(netdev); 629 630 if (lio->link_status_wq.wq) { 631 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 632 destroy_workqueue(lio->link_status_wq.wq); 633 } 634 } 635 636 /** 637 * \brief Update link status 638 * @param netdev network device 639 * @param ls link status structure 640 * 641 * Called on receipt of a link status response from the core application to 642 * update each interface's link status. 643 */ 644 static void update_link_status(struct net_device *netdev, 645 union oct_link_status *ls) 646 { 647 struct lio *lio = GET_LIO(netdev); 648 struct octeon_device *oct = lio->oct_dev; 649 650 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 651 lio->linfo.link.u64 = ls->u64; 652 653 print_link_info(netdev); 654 lio->link_changes++; 655 656 if (lio->linfo.link.s.link_up) { 657 netif_carrier_on(netdev); 658 txqs_wake(netdev); 659 } else { 660 netif_carrier_off(netdev); 661 txqs_stop(netdev); 662 } 663 664 if (lio->linfo.link.s.mtu < netdev->mtu) { 665 dev_warn(&oct->pci_dev->dev, 666 "PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n", 667 netdev->mtu, lio->linfo.link.s.mtu); 668 lio->mtu = lio->linfo.link.s.mtu; 669 netdev->mtu = lio->linfo.link.s.mtu; 670 queue_delayed_work(lio->link_status_wq.wq, 671 &lio->link_status_wq.wk.work, 0); 672 } 673 } 674 } 675 676 static void update_txq_status(struct octeon_device *oct, int iq_num) 677 { 678 struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; 679 struct net_device *netdev; 680 struct lio *lio; 681 682 netdev = oct->props[iq->ifidx].netdev; 683 lio = GET_LIO(netdev); 684 if (netif_is_multiqueue(netdev)) { 685 if (__netif_subqueue_stopped(netdev, iq->q_index) && 686 lio->linfo.link.s.link_up && 687 (!octnet_iq_is_full(oct, iq_num))) { 688 netif_wake_subqueue(netdev, iq->q_index); 689 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, 690 tx_restart, 1); 691 } 692 } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up && 693 (!octnet_iq_is_full(oct, lio->txq))) { 694 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, 695 lio->txq, tx_restart, 1); 696 netif_wake_queue(netdev); 697 } 698 } 699 700 static 701 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) 702 { 703 struct octeon_device *oct = droq->oct_dev; 704 struct octeon_device_priv *oct_priv = 705 (struct octeon_device_priv *)oct->priv; 706 707 if (droq->ops.poll_mode) { 708 droq->ops.napi_fn(droq); 709 } else { 710 if (ret & MSIX_PO_INT) { 711 dev_err(&oct->pci_dev->dev, 712 "should not come here should not get rx when poll mode = 0 for vf\n"); 713 tasklet_schedule(&oct_priv->droq_tasklet); 714 return 1; 715 } 716 /* this will be flushed periodically by check iq db */ 717 if (ret & MSIX_PI_INT) 718 return 0; 719 } 720 return 0; 721 } 722 723 static irqreturn_t 724 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) 725 { 726 struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; 727 struct octeon_device *oct = ioq_vector->oct_dev; 728 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; 729 u64 ret; 730 731 ret = oct->fn_list.msix_interrupt_handler(ioq_vector); 732 733 if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) 734 liquidio_schedule_msix_droq_pkt_handler(droq, ret); 735 736 return IRQ_HANDLED; 737 } 738 739 /** 740 * \brief Setup interrupt for octeon device 741 * @param oct octeon device 742 * 743 * Enable interrupt in Octeon device as given in the PCI interrupt mask. 744 */ 745 static int octeon_setup_interrupt(struct octeon_device *oct) 746 { 747 struct msix_entry *msix_entries; 748 char *queue_irq_names = NULL; 749 int num_alloc_ioq_vectors; 750 int num_ioq_vectors; 751 int irqret; 752 int i; 753 754 if (oct->msix_on) { 755 oct->num_msix_irqs = oct->sriov_info.rings_per_vf; 756 757 /* allocate storage for the names assigned to each irq */ 758 oct->irq_name_storage = 759 kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ, 760 GFP_KERNEL); 761 if (!oct->irq_name_storage) { 762 dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); 763 return -ENOMEM; 764 } 765 766 queue_irq_names = oct->irq_name_storage; 767 768 oct->msix_entries = kcalloc( 769 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); 770 if (!oct->msix_entries) { 771 dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); 772 kfree(oct->irq_name_storage); 773 oct->irq_name_storage = NULL; 774 return -ENOMEM; 775 } 776 777 msix_entries = (struct msix_entry *)oct->msix_entries; 778 779 for (i = 0; i < oct->num_msix_irqs; i++) 780 msix_entries[i].entry = i; 781 num_alloc_ioq_vectors = pci_enable_msix_range( 782 oct->pci_dev, msix_entries, 783 oct->num_msix_irqs, 784 oct->num_msix_irqs); 785 if (num_alloc_ioq_vectors < 0) { 786 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); 787 kfree(oct->msix_entries); 788 oct->msix_entries = NULL; 789 kfree(oct->irq_name_storage); 790 oct->irq_name_storage = NULL; 791 return num_alloc_ioq_vectors; 792 } 793 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); 794 795 num_ioq_vectors = oct->num_msix_irqs; 796 797 for (i = 0; i < num_ioq_vectors; i++) { 798 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, 799 "LiquidIO%u-vf%u-rxtx-%u", 800 oct->octeon_id, oct->vf_num, i); 801 802 irqret = request_irq(msix_entries[i].vector, 803 liquidio_msix_intr_handler, 0, 804 &queue_irq_names[IRQ_NAME_OFF(i)], 805 &oct->ioq_vector[i]); 806 if (irqret) { 807 dev_err(&oct->pci_dev->dev, 808 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 809 irqret); 810 811 while (i) { 812 i--; 813 irq_set_affinity_hint( 814 msix_entries[i].vector, NULL); 815 free_irq(msix_entries[i].vector, 816 &oct->ioq_vector[i]); 817 } 818 pci_disable_msix(oct->pci_dev); 819 kfree(oct->msix_entries); 820 oct->msix_entries = NULL; 821 kfree(oct->irq_name_storage); 822 oct->irq_name_storage = NULL; 823 return irqret; 824 } 825 oct->ioq_vector[i].vector = msix_entries[i].vector; 826 /* assign the cpu mask for this msix interrupt vector */ 827 irq_set_affinity_hint( 828 msix_entries[i].vector, 829 (&oct->ioq_vector[i].affinity_mask)); 830 } 831 dev_dbg(&oct->pci_dev->dev, 832 "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id); 833 } 834 return 0; 835 } 836 837 /** 838 * \brief PCI probe handler 839 * @param pdev PCI device structure 840 * @param ent unused 841 */ 842 static int 843 liquidio_vf_probe(struct pci_dev *pdev, 844 const struct pci_device_id *ent __attribute__((unused))) 845 { 846 struct octeon_device *oct_dev = NULL; 847 848 oct_dev = octeon_allocate_device(pdev->device, 849 sizeof(struct octeon_device_priv)); 850 851 if (!oct_dev) { 852 dev_err(&pdev->dev, "Unable to allocate device\n"); 853 return -ENOMEM; 854 } 855 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 856 857 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 858 (u32)pdev->vendor, (u32)pdev->device); 859 860 /* Assign octeon_device for this device to the private data area. */ 861 pci_set_drvdata(pdev, oct_dev); 862 863 /* set linux specific device pointer */ 864 oct_dev->pci_dev = pdev; 865 866 if (octeon_device_init(oct_dev)) { 867 liquidio_vf_remove(pdev); 868 return -ENOMEM; 869 } 870 871 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 872 873 return 0; 874 } 875 876 /** 877 * \brief PCI FLR for each Octeon device. 878 * @param oct octeon device 879 */ 880 static void octeon_pci_flr(struct octeon_device *oct) 881 { 882 pci_save_state(oct->pci_dev); 883 884 pci_cfg_access_lock(oct->pci_dev); 885 886 /* Quiesce the device completely */ 887 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 888 PCI_COMMAND_INTX_DISABLE); 889 890 pcie_flr(oct->pci_dev); 891 892 pci_cfg_access_unlock(oct->pci_dev); 893 894 pci_restore_state(oct->pci_dev); 895 } 896 897 /** 898 *\brief Destroy resources associated with octeon device 899 * @param pdev PCI device structure 900 * @param ent unused 901 */ 902 static void octeon_destroy_resources(struct octeon_device *oct) 903 { 904 struct msix_entry *msix_entries; 905 int i; 906 907 switch (atomic_read(&oct->status)) { 908 case OCT_DEV_RUNNING: 909 case OCT_DEV_CORE_OK: 910 /* No more instructions will be forwarded. */ 911 atomic_set(&oct->status, OCT_DEV_IN_RESET); 912 913 oct->app_mode = CVM_DRV_INVALID_APP; 914 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 915 lio_get_state_string(&oct->status)); 916 917 schedule_timeout_uninterruptible(HZ / 10); 918 919 /* fallthrough */ 920 case OCT_DEV_HOST_OK: 921 /* fallthrough */ 922 case OCT_DEV_IO_QUEUES_DONE: 923 if (wait_for_pending_requests(oct)) 924 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 925 926 if (lio_wait_for_instr_fetch(oct)) 927 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 928 929 /* Disable the input and output queues now. No more packets will 930 * arrive from Octeon, but we should wait for all packet 931 * processing to finish. 932 */ 933 oct->fn_list.disable_io_queues(oct); 934 935 if (lio_wait_for_oq_pkts(oct)) 936 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 937 938 case OCT_DEV_INTR_SET_DONE: 939 /* Disable interrupts */ 940 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 941 942 if (oct->msix_on) { 943 msix_entries = (struct msix_entry *)oct->msix_entries; 944 for (i = 0; i < oct->num_msix_irqs; i++) { 945 irq_set_affinity_hint(msix_entries[i].vector, 946 NULL); 947 free_irq(msix_entries[i].vector, 948 &oct->ioq_vector[i]); 949 } 950 pci_disable_msix(oct->pci_dev); 951 kfree(oct->msix_entries); 952 oct->msix_entries = NULL; 953 kfree(oct->irq_name_storage); 954 oct->irq_name_storage = NULL; 955 } 956 /* Soft reset the octeon device before exiting */ 957 if (oct->pci_dev->reset_fn) 958 octeon_pci_flr(oct); 959 else 960 cn23xx_vf_ask_pf_to_do_flr(oct); 961 962 /* fallthrough */ 963 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 964 octeon_free_ioq_vector(oct); 965 966 /* fallthrough */ 967 case OCT_DEV_MBOX_SETUP_DONE: 968 oct->fn_list.free_mbox(oct); 969 970 /* fallthrough */ 971 case OCT_DEV_IN_RESET: 972 case OCT_DEV_DROQ_INIT_DONE: 973 mdelay(100); 974 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 975 if (!(oct->io_qmask.oq & BIT_ULL(i))) 976 continue; 977 octeon_delete_droq(oct, i); 978 } 979 980 /* fallthrough */ 981 case OCT_DEV_RESP_LIST_INIT_DONE: 982 octeon_delete_response_list(oct); 983 984 /* fallthrough */ 985 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 986 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 987 if (!(oct->io_qmask.iq & BIT_ULL(i))) 988 continue; 989 octeon_delete_instr_queue(oct, i); 990 } 991 992 /* fallthrough */ 993 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 994 octeon_free_sc_buffer_pool(oct); 995 996 /* fallthrough */ 997 case OCT_DEV_DISPATCH_INIT_DONE: 998 octeon_delete_dispatch_list(oct); 999 cancel_delayed_work_sync(&oct->nic_poll_work.work); 1000 1001 /* fallthrough */ 1002 case OCT_DEV_PCI_MAP_DONE: 1003 octeon_unmap_pci_barx(oct, 0); 1004 octeon_unmap_pci_barx(oct, 1); 1005 1006 /* fallthrough */ 1007 case OCT_DEV_PCI_ENABLE_DONE: 1008 pci_clear_master(oct->pci_dev); 1009 /* Disable the device, releasing the PCI INT */ 1010 pci_disable_device(oct->pci_dev); 1011 1012 /* fallthrough */ 1013 case OCT_DEV_BEGIN_STATE: 1014 /* Nothing to be done here either */ 1015 break; 1016 } 1017 } 1018 1019 /** 1020 * \brief Callback for rx ctrl 1021 * @param status status of request 1022 * @param buf pointer to resp structure 1023 */ 1024 static void rx_ctl_callback(struct octeon_device *oct, 1025 u32 status, void *buf) 1026 { 1027 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1028 struct liquidio_rx_ctl_context *ctx; 1029 1030 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1031 1032 oct = lio_get_device(ctx->octeon_id); 1033 if (status) 1034 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", 1035 CVM_CAST64(status)); 1036 WRITE_ONCE(ctx->cond, 1); 1037 1038 /* This barrier is required to be sure that the response has been 1039 * written fully before waking up the handler 1040 */ 1041 wmb(); 1042 1043 wake_up_interruptible(&ctx->wc); 1044 } 1045 1046 /** 1047 * \brief Send Rx control command 1048 * @param lio per-network private data 1049 * @param start_stop whether to start or stop 1050 */ 1051 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1052 { 1053 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1054 int ctx_size = sizeof(struct liquidio_rx_ctl_context); 1055 struct liquidio_rx_ctl_context *ctx; 1056 struct octeon_soft_command *sc; 1057 union octnet_cmd *ncmd; 1058 int retval; 1059 1060 if (oct->props[lio->ifidx].rx_on == start_stop) 1061 return; 1062 1063 sc = (struct octeon_soft_command *) 1064 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1065 16, ctx_size); 1066 1067 ncmd = (union octnet_cmd *)sc->virtdptr; 1068 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1069 1070 WRITE_ONCE(ctx->cond, 0); 1071 ctx->octeon_id = lio_get_device_id(oct); 1072 init_waitqueue_head(&ctx->wc); 1073 1074 ncmd->u64 = 0; 1075 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1076 ncmd->s.param1 = start_stop; 1077 1078 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1079 1080 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1081 1082 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1083 OPCODE_NIC_CMD, 0, 0, 0); 1084 1085 sc->callback = rx_ctl_callback; 1086 sc->callback_arg = sc; 1087 sc->wait_time = 5000; 1088 1089 retval = octeon_send_soft_command(oct, sc); 1090 if (retval == IQ_SEND_FAILED) { 1091 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1092 } else { 1093 /* Sleep on a wait queue till the cond flag indicates that the 1094 * response arrived or timed-out. 1095 */ 1096 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) 1097 return; 1098 oct->props[lio->ifidx].rx_on = start_stop; 1099 } 1100 1101 octeon_free_soft_command(oct, sc); 1102 } 1103 1104 /** 1105 * \brief Destroy NIC device interface 1106 * @param oct octeon device 1107 * @param ifidx which interface to destroy 1108 * 1109 * Cleanup associated with each interface for an Octeon device when NIC 1110 * module is being unloaded or if initialization fails during load. 1111 */ 1112 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1113 { 1114 struct net_device *netdev = oct->props[ifidx].netdev; 1115 struct napi_struct *napi, *n; 1116 struct lio *lio; 1117 1118 if (!netdev) { 1119 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1120 __func__, ifidx); 1121 return; 1122 } 1123 1124 lio = GET_LIO(netdev); 1125 1126 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1127 1128 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1129 liquidio_stop(netdev); 1130 1131 if (oct->props[lio->ifidx].napi_enabled == 1) { 1132 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1133 napi_disable(napi); 1134 1135 oct->props[lio->ifidx].napi_enabled = 0; 1136 1137 oct->droq[0]->ops.poll_mode = 0; 1138 } 1139 1140 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1141 unregister_netdev(netdev); 1142 1143 cleanup_rx_oom_poll_fn(netdev); 1144 1145 cleanup_link_status_change_wq(netdev); 1146 1147 delete_glists(lio); 1148 1149 free_netdev(netdev); 1150 1151 oct->props[ifidx].gmxport = -1; 1152 1153 oct->props[ifidx].netdev = NULL; 1154 } 1155 1156 /** 1157 * \brief Stop complete NIC functionality 1158 * @param oct octeon device 1159 */ 1160 static int liquidio_stop_nic_module(struct octeon_device *oct) 1161 { 1162 struct lio *lio; 1163 int i, j; 1164 1165 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1166 if (!oct->ifcount) { 1167 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1168 return 1; 1169 } 1170 1171 spin_lock_bh(&oct->cmd_resp_wqlock); 1172 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1173 spin_unlock_bh(&oct->cmd_resp_wqlock); 1174 1175 for (i = 0; i < oct->ifcount; i++) { 1176 lio = GET_LIO(oct->props[i].netdev); 1177 for (j = 0; j < lio->linfo.num_rxpciq; j++) 1178 octeon_unregister_droq_ops(oct, 1179 lio->linfo.rxpciq[j].s.q_no); 1180 } 1181 1182 for (i = 0; i < oct->ifcount; i++) 1183 liquidio_destroy_nic_device(oct, i); 1184 1185 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1186 return 0; 1187 } 1188 1189 /** 1190 * \brief Cleans up resources at unload time 1191 * @param pdev PCI device structure 1192 */ 1193 static void liquidio_vf_remove(struct pci_dev *pdev) 1194 { 1195 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1196 1197 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1198 1199 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 1200 liquidio_stop_nic_module(oct_dev); 1201 1202 /* Reset the octeon device and cleanup all memory allocated for 1203 * the octeon device by driver. 1204 */ 1205 octeon_destroy_resources(oct_dev); 1206 1207 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1208 1209 /* This octeon device has been removed. Update the global 1210 * data structure to reflect this. Free the device structure. 1211 */ 1212 octeon_free_device_mem(oct_dev); 1213 } 1214 1215 /** 1216 * \brief PCI initialization for each Octeon device. 1217 * @param oct octeon device 1218 */ 1219 static int octeon_pci_os_setup(struct octeon_device *oct) 1220 { 1221 #ifdef CONFIG_PCI_IOV 1222 /* setup PCI stuff first */ 1223 if (!oct->pci_dev->physfn) 1224 octeon_pci_flr(oct); 1225 #endif 1226 1227 if (pci_enable_device(oct->pci_dev)) { 1228 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1229 return 1; 1230 } 1231 1232 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1233 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1234 pci_disable_device(oct->pci_dev); 1235 return 1; 1236 } 1237 1238 /* Enable PCI DMA Master. */ 1239 pci_set_master(oct->pci_dev); 1240 1241 return 0; 1242 } 1243 1244 static int skb_iq(struct lio *lio, struct sk_buff *skb) 1245 { 1246 int q = 0; 1247 1248 if (netif_is_multiqueue(lio->netdev)) 1249 q = skb->queue_mapping % lio->linfo.num_txpciq; 1250 1251 return q; 1252 } 1253 1254 /** 1255 * \brief Check Tx queue state for a given network buffer 1256 * @param lio per-network private data 1257 * @param skb network buffer 1258 */ 1259 static int check_txq_state(struct lio *lio, struct sk_buff *skb) 1260 { 1261 int q = 0, iq = 0; 1262 1263 if (netif_is_multiqueue(lio->netdev)) { 1264 q = skb->queue_mapping; 1265 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; 1266 } else { 1267 iq = lio->txq; 1268 q = iq; 1269 } 1270 1271 if (octnet_iq_is_full(lio->oct_dev, iq)) 1272 return 0; 1273 1274 if (__netif_subqueue_stopped(lio->netdev, q)) { 1275 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); 1276 wake_q(lio->netdev, q); 1277 } 1278 1279 return 1; 1280 } 1281 1282 /** 1283 * \brief Unmap and free network buffer 1284 * @param buf buffer 1285 */ 1286 static void free_netbuf(void *buf) 1287 { 1288 struct octnet_buf_free_info *finfo; 1289 struct sk_buff *skb; 1290 struct lio *lio; 1291 1292 finfo = (struct octnet_buf_free_info *)buf; 1293 skb = finfo->skb; 1294 lio = finfo->lio; 1295 1296 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1297 DMA_TO_DEVICE); 1298 1299 check_txq_state(lio, skb); 1300 1301 tx_buffer_free(skb); 1302 } 1303 1304 /** 1305 * \brief Unmap and free gather buffer 1306 * @param buf buffer 1307 */ 1308 static void free_netsgbuf(void *buf) 1309 { 1310 struct octnet_buf_free_info *finfo; 1311 struct octnic_gather *g; 1312 struct sk_buff *skb; 1313 int i, frags, iq; 1314 struct lio *lio; 1315 1316 finfo = (struct octnet_buf_free_info *)buf; 1317 skb = finfo->skb; 1318 lio = finfo->lio; 1319 g = finfo->g; 1320 frags = skb_shinfo(skb)->nr_frags; 1321 1322 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1323 g->sg[0].ptr[0], (skb->len - skb->data_len), 1324 DMA_TO_DEVICE); 1325 1326 i = 1; 1327 while (frags--) { 1328 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1329 1330 pci_unmap_page((lio->oct_dev)->pci_dev, 1331 g->sg[(i >> 2)].ptr[(i & 3)], 1332 frag->size, DMA_TO_DEVICE); 1333 i++; 1334 } 1335 1336 iq = skb_iq(lio, skb); 1337 1338 spin_lock(&lio->glist_lock[iq]); 1339 list_add_tail(&g->list, &lio->glist[iq]); 1340 spin_unlock(&lio->glist_lock[iq]); 1341 1342 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1343 1344 tx_buffer_free(skb); 1345 } 1346 1347 /** 1348 * \brief Unmap and free gather buffer with response 1349 * @param buf buffer 1350 */ 1351 static void free_netsgbuf_with_resp(void *buf) 1352 { 1353 struct octnet_buf_free_info *finfo; 1354 struct octeon_soft_command *sc; 1355 struct octnic_gather *g; 1356 struct sk_buff *skb; 1357 int i, frags, iq; 1358 struct lio *lio; 1359 1360 sc = (struct octeon_soft_command *)buf; 1361 skb = (struct sk_buff *)sc->callback_arg; 1362 finfo = (struct octnet_buf_free_info *)&skb->cb; 1363 1364 lio = finfo->lio; 1365 g = finfo->g; 1366 frags = skb_shinfo(skb)->nr_frags; 1367 1368 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1369 g->sg[0].ptr[0], (skb->len - skb->data_len), 1370 DMA_TO_DEVICE); 1371 1372 i = 1; 1373 while (frags--) { 1374 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1375 1376 pci_unmap_page((lio->oct_dev)->pci_dev, 1377 g->sg[(i >> 2)].ptr[(i & 3)], 1378 frag->size, DMA_TO_DEVICE); 1379 i++; 1380 } 1381 1382 iq = skb_iq(lio, skb); 1383 1384 spin_lock(&lio->glist_lock[iq]); 1385 list_add_tail(&g->list, &lio->glist[iq]); 1386 spin_unlock(&lio->glist_lock[iq]); 1387 1388 /* Don't free the skb yet */ 1389 1390 check_txq_state(lio, skb); 1391 } 1392 1393 /** 1394 * \brief Setup output queue 1395 * @param oct octeon device 1396 * @param q_no which queue 1397 * @param num_descs how many descriptors 1398 * @param desc_size size of each descriptor 1399 * @param app_ctx application context 1400 */ 1401 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, 1402 int desc_size, void *app_ctx) 1403 { 1404 int ret_val; 1405 1406 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1407 /* droq creation and local register settings. */ 1408 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1409 if (ret_val < 0) 1410 return ret_val; 1411 1412 if (ret_val == 1) { 1413 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); 1414 return 0; 1415 } 1416 1417 /* Enable the droq queues */ 1418 octeon_set_droq_pkt_op(oct, q_no, 1); 1419 1420 /* Send Credit for Octeon Output queues. Credits are always 1421 * sent after the output queue is enabled. 1422 */ 1423 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); 1424 1425 return ret_val; 1426 } 1427 1428 /** 1429 * \brief Callback for getting interface configuration 1430 * @param status status of request 1431 * @param buf pointer to resp structure 1432 */ 1433 static void if_cfg_callback(struct octeon_device *oct, 1434 u32 status __attribute__((unused)), void *buf) 1435 { 1436 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1437 struct liquidio_if_cfg_context *ctx; 1438 struct liquidio_if_cfg_resp *resp; 1439 1440 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1441 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 1442 1443 oct = lio_get_device(ctx->octeon_id); 1444 if (resp->status) 1445 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 1446 CVM_CAST64(resp->status)); 1447 WRITE_ONCE(ctx->cond, 1); 1448 1449 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 1450 resp->cfg_info.liquidio_firmware_version); 1451 1452 /* This barrier is required to be sure that the response has been 1453 * written fully before waking up the handler 1454 */ 1455 wmb(); 1456 1457 wake_up_interruptible(&ctx->wc); 1458 } 1459 1460 /** Routine to push packets arriving on Octeon interface upto network layer. 1461 * @param oct_id - octeon device id. 1462 * @param skbuff - skbuff struct to be passed to network layer. 1463 * @param len - size of total data received. 1464 * @param rh - Control header associated with the packet 1465 * @param param - additional control data with the packet 1466 * @param arg - farg registered in droq_ops 1467 */ 1468 static void 1469 liquidio_push_packet(u32 octeon_id __attribute__((unused)), 1470 void *skbuff, 1471 u32 len, 1472 union octeon_rh *rh, 1473 void *param, 1474 void *arg) 1475 { 1476 struct napi_struct *napi = param; 1477 struct octeon_droq *droq = 1478 container_of(param, struct octeon_droq, napi); 1479 struct net_device *netdev = (struct net_device *)arg; 1480 struct sk_buff *skb = (struct sk_buff *)skbuff; 1481 u16 vtag = 0; 1482 u32 r_dh_off; 1483 1484 if (netdev) { 1485 struct lio *lio = GET_LIO(netdev); 1486 int packet_was_received; 1487 1488 /* Do not proceed if the interface is not in RUNNING state. */ 1489 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { 1490 recv_buffer_free(skb); 1491 droq->stats.rx_dropped++; 1492 return; 1493 } 1494 1495 skb->dev = netdev; 1496 1497 skb_record_rx_queue(skb, droq->q_no); 1498 if (likely(len > MIN_SKB_SIZE)) { 1499 struct octeon_skb_page_info *pg_info; 1500 unsigned char *va; 1501 1502 pg_info = ((struct octeon_skb_page_info *)(skb->cb)); 1503 if (pg_info->page) { 1504 /* For Paged allocation use the frags */ 1505 va = page_address(pg_info->page) + 1506 pg_info->page_offset; 1507 memcpy(skb->data, va, MIN_SKB_SIZE); 1508 skb_put(skb, MIN_SKB_SIZE); 1509 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1510 pg_info->page, 1511 pg_info->page_offset + 1512 MIN_SKB_SIZE, 1513 len - MIN_SKB_SIZE, 1514 LIO_RXBUFFER_SZ); 1515 } 1516 } else { 1517 struct octeon_skb_page_info *pg_info = 1518 ((struct octeon_skb_page_info *)(skb->cb)); 1519 skb_copy_to_linear_data(skb, 1520 page_address(pg_info->page) + 1521 pg_info->page_offset, len); 1522 skb_put(skb, len); 1523 put_page(pg_info->page); 1524 } 1525 1526 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; 1527 1528 if (rh->r_dh.has_hwtstamp) 1529 r_dh_off -= BYTES_PER_DHLEN_UNIT; 1530 1531 if (rh->r_dh.has_hash) { 1532 __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); 1533 u32 hash = be32_to_cpu(*hash_be); 1534 1535 skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); 1536 r_dh_off -= BYTES_PER_DHLEN_UNIT; 1537 } 1538 1539 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); 1540 skb->protocol = eth_type_trans(skb, skb->dev); 1541 1542 if ((netdev->features & NETIF_F_RXCSUM) && 1543 (((rh->r_dh.encap_on) && 1544 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || 1545 (!(rh->r_dh.encap_on) && 1546 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) 1547 /* checksum has already been verified */ 1548 skb->ip_summed = CHECKSUM_UNNECESSARY; 1549 else 1550 skb->ip_summed = CHECKSUM_NONE; 1551 1552 /* Setting Encapsulation field on basis of status received 1553 * from the firmware 1554 */ 1555 if (rh->r_dh.encap_on) { 1556 skb->encapsulation = 1; 1557 skb->csum_level = 1; 1558 droq->stats.rx_vxlan++; 1559 } 1560 1561 /* inbound VLAN tag */ 1562 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1563 rh->r_dh.vlan) { 1564 u16 priority = rh->r_dh.priority; 1565 u16 vid = rh->r_dh.vlan; 1566 1567 vtag = (priority << VLAN_PRIO_SHIFT) | vid; 1568 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); 1569 } 1570 1571 packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); 1572 1573 if (packet_was_received) { 1574 droq->stats.rx_bytes_received += len; 1575 droq->stats.rx_pkts_received++; 1576 } else { 1577 droq->stats.rx_dropped++; 1578 netif_info(lio, rx_err, lio->netdev, 1579 "droq:%d error rx_dropped:%llu\n", 1580 droq->q_no, droq->stats.rx_dropped); 1581 } 1582 1583 } else { 1584 recv_buffer_free(skb); 1585 } 1586 } 1587 1588 /** 1589 * \brief callback when receive interrupt occurs and we are in NAPI mode 1590 * @param arg pointer to octeon output queue 1591 */ 1592 static void liquidio_vf_napi_drv_callback(void *arg) 1593 { 1594 struct octeon_droq *droq = arg; 1595 1596 napi_schedule_irqoff(&droq->napi); 1597 } 1598 1599 /** 1600 * \brief Entry point for NAPI polling 1601 * @param napi NAPI structure 1602 * @param budget maximum number of items to process 1603 */ 1604 static int liquidio_napi_poll(struct napi_struct *napi, int budget) 1605 { 1606 struct octeon_instr_queue *iq; 1607 struct octeon_device *oct; 1608 struct octeon_droq *droq; 1609 int tx_done = 0, iq_no; 1610 int work_done; 1611 1612 droq = container_of(napi, struct octeon_droq, napi); 1613 oct = droq->oct_dev; 1614 iq_no = droq->q_no; 1615 1616 /* Handle Droq descriptors */ 1617 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, 1618 POLL_EVENT_PROCESS_PKTS, 1619 budget); 1620 1621 /* Flush the instruction queue */ 1622 iq = oct->instr_queue[iq_no]; 1623 if (iq) { 1624 if (atomic_read(&iq->instr_pending)) 1625 /* Process iq buffers with in the budget limits */ 1626 tx_done = octeon_flush_iq(oct, iq, budget); 1627 else 1628 tx_done = 1; 1629 1630 /* Update iq read-index rather than waiting for next interrupt. 1631 * Return back if tx_done is false. 1632 */ 1633 update_txq_status(oct, iq_no); 1634 } else { 1635 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", 1636 __func__, iq_no); 1637 } 1638 1639 /* force enable interrupt if reg cnts are high to avoid wraparound */ 1640 if ((work_done < budget && tx_done) || 1641 (iq && iq->pkt_in_done >= MAX_REG_CNT) || 1642 (droq->pkt_count >= MAX_REG_CNT)) { 1643 tx_done = 1; 1644 napi_complete_done(napi, work_done); 1645 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, 1646 POLL_EVENT_ENABLE_INTR, 0); 1647 return 0; 1648 } 1649 1650 return (!tx_done) ? (budget) : (work_done); 1651 } 1652 1653 /** 1654 * \brief Setup input and output queues 1655 * @param octeon_dev octeon device 1656 * @param ifidx Interface index 1657 * 1658 * Note: Queues are with respect to the octeon device. Thus 1659 * an input queue is for egress packets, and output queues 1660 * are for ingress packets. 1661 */ 1662 static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) 1663 { 1664 struct octeon_droq_ops droq_ops; 1665 struct net_device *netdev; 1666 static int cpu_id_modulus; 1667 struct octeon_droq *droq; 1668 struct napi_struct *napi; 1669 static int cpu_id; 1670 int num_tx_descs; 1671 struct lio *lio; 1672 int retval = 0; 1673 int q, q_no; 1674 1675 netdev = octeon_dev->props[ifidx].netdev; 1676 1677 lio = GET_LIO(netdev); 1678 1679 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); 1680 1681 droq_ops.fptr = liquidio_push_packet; 1682 droq_ops.farg = netdev; 1683 1684 droq_ops.poll_mode = 1; 1685 droq_ops.napi_fn = liquidio_vf_napi_drv_callback; 1686 cpu_id = 0; 1687 cpu_id_modulus = num_present_cpus(); 1688 1689 /* set up DROQs. */ 1690 for (q = 0; q < lio->linfo.num_rxpciq; q++) { 1691 q_no = lio->linfo.rxpciq[q].s.q_no; 1692 1693 retval = octeon_setup_droq( 1694 octeon_dev, q_no, 1695 CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), 1696 lio->ifidx), 1697 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), 1698 lio->ifidx), 1699 NULL); 1700 if (retval) { 1701 dev_err(&octeon_dev->pci_dev->dev, 1702 "%s : Runtime DROQ(RxQ) creation failed.\n", 1703 __func__); 1704 return 1; 1705 } 1706 1707 droq = octeon_dev->droq[q_no]; 1708 napi = &droq->napi; 1709 netif_napi_add(netdev, napi, liquidio_napi_poll, 64); 1710 1711 /* designate a CPU for this droq */ 1712 droq->cpu_id = cpu_id; 1713 cpu_id++; 1714 if (cpu_id >= cpu_id_modulus) 1715 cpu_id = 0; 1716 1717 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); 1718 } 1719 1720 /* 23XX VF can send/recv control messages (via the first VF-owned 1721 * droq) from the firmware even if the ethX interface is down, 1722 * so that's why poll_mode must be off for the first droq. 1723 */ 1724 octeon_dev->droq[0]->ops.poll_mode = 0; 1725 1726 /* set up IQs. */ 1727 for (q = 0; q < lio->linfo.num_txpciq; q++) { 1728 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( 1729 octeon_get_conf(octeon_dev), lio->ifidx); 1730 retval = octeon_setup_iq(octeon_dev, ifidx, q, 1731 lio->linfo.txpciq[q], num_tx_descs, 1732 netdev_get_tx_queue(netdev, q)); 1733 if (retval) { 1734 dev_err(&octeon_dev->pci_dev->dev, 1735 " %s : Runtime IQ(TxQ) creation failed.\n", 1736 __func__); 1737 return 1; 1738 } 1739 } 1740 1741 return 0; 1742 } 1743 1744 /** 1745 * \brief Net device open for LiquidIO 1746 * @param netdev network device 1747 */ 1748 static int liquidio_open(struct net_device *netdev) 1749 { 1750 struct lio *lio = GET_LIO(netdev); 1751 struct octeon_device *oct = lio->oct_dev; 1752 struct napi_struct *napi, *n; 1753 1754 if (!oct->props[lio->ifidx].napi_enabled) { 1755 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1756 napi_enable(napi); 1757 1758 oct->props[lio->ifidx].napi_enabled = 1; 1759 1760 oct->droq[0]->ops.poll_mode = 1; 1761 } 1762 1763 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1764 1765 /* Ready for link status updates */ 1766 lio->intf_open = 1; 1767 1768 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1769 start_txq(netdev); 1770 1771 /* tell Octeon to start forwarding packets to host */ 1772 send_rx_ctrl_cmd(lio, 1); 1773 1774 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 1775 1776 return 0; 1777 } 1778 1779 /** 1780 * \brief Net device stop for LiquidIO 1781 * @param netdev network device 1782 */ 1783 static int liquidio_stop(struct net_device *netdev) 1784 { 1785 struct lio *lio = GET_LIO(netdev); 1786 struct octeon_device *oct = lio->oct_dev; 1787 1788 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 1789 /* Inform that netif carrier is down */ 1790 lio->intf_open = 0; 1791 lio->linfo.link.s.link_up = 0; 1792 1793 netif_carrier_off(netdev); 1794 lio->link_changes++; 1795 1796 /* tell Octeon to stop forwarding packets to host */ 1797 send_rx_ctrl_cmd(lio, 0); 1798 1799 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1800 1801 txqs_stop(netdev); 1802 1803 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1804 1805 return 0; 1806 } 1807 1808 /** 1809 * \brief Converts a mask based on net device flags 1810 * @param netdev network device 1811 * 1812 * This routine generates a octnet_ifflags mask from the net device flags 1813 * received from the OS. 1814 */ 1815 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1816 { 1817 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1818 1819 if (netdev->flags & IFF_PROMISC) 1820 f |= OCTNET_IFFLAG_PROMISC; 1821 1822 if (netdev->flags & IFF_ALLMULTI) 1823 f |= OCTNET_IFFLAG_ALLMULTI; 1824 1825 if (netdev->flags & IFF_MULTICAST) { 1826 f |= OCTNET_IFFLAG_MULTICAST; 1827 1828 /* Accept all multicast addresses if there are more than we 1829 * can handle 1830 */ 1831 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1832 f |= OCTNET_IFFLAG_ALLMULTI; 1833 } 1834 1835 if (netdev->flags & IFF_BROADCAST) 1836 f |= OCTNET_IFFLAG_BROADCAST; 1837 1838 return f; 1839 } 1840 1841 static void liquidio_set_uc_list(struct net_device *netdev) 1842 { 1843 struct lio *lio = GET_LIO(netdev); 1844 struct octeon_device *oct = lio->oct_dev; 1845 struct octnic_ctrl_pkt nctrl; 1846 struct netdev_hw_addr *ha; 1847 u64 *mac; 1848 1849 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1850 return; 1851 1852 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1853 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1854 return; 1855 } 1856 1857 lio->netdev_uc_count = netdev_uc_count(netdev); 1858 1859 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1860 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1861 nctrl.ncmd.s.more = lio->netdev_uc_count; 1862 nctrl.ncmd.s.param1 = oct->vf_num; 1863 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1864 nctrl.netpndev = (u64)netdev; 1865 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1866 1867 /* copy all the addresses into the udd */ 1868 mac = &nctrl.udd[0]; 1869 netdev_for_each_uc_addr(ha, netdev) { 1870 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1871 mac++; 1872 } 1873 1874 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1875 } 1876 1877 /** 1878 * \brief Net device set_multicast_list 1879 * @param netdev network device 1880 */ 1881 static void liquidio_set_mcast_list(struct net_device *netdev) 1882 { 1883 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1884 struct lio *lio = GET_LIO(netdev); 1885 struct octeon_device *oct = lio->oct_dev; 1886 struct octnic_ctrl_pkt nctrl; 1887 struct netdev_hw_addr *ha; 1888 u64 *mc; 1889 int ret; 1890 1891 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1892 1893 /* Create a ctrl pkt command to be sent to core app. */ 1894 nctrl.ncmd.u64 = 0; 1895 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1896 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1897 nctrl.ncmd.s.param2 = mc_count; 1898 nctrl.ncmd.s.more = mc_count; 1899 nctrl.netpndev = (u64)netdev; 1900 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1901 1902 /* copy all the addresses into the udd */ 1903 mc = &nctrl.udd[0]; 1904 netdev_for_each_mc_addr(ha, netdev) { 1905 *mc = 0; 1906 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1907 /* no need to swap bytes */ 1908 if (++mc > &nctrl.udd[mc_count]) 1909 break; 1910 } 1911 1912 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1913 1914 /* Apparently, any activity in this call from the kernel has to 1915 * be atomic. So we won't wait for response. 1916 */ 1917 nctrl.wait_time = 0; 1918 1919 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1920 if (ret < 0) { 1921 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1922 ret); 1923 } 1924 1925 liquidio_set_uc_list(netdev); 1926 } 1927 1928 /** 1929 * \brief Net device set_mac_address 1930 * @param netdev network device 1931 */ 1932 static int liquidio_set_mac(struct net_device *netdev, void *p) 1933 { 1934 struct sockaddr *addr = (struct sockaddr *)p; 1935 struct lio *lio = GET_LIO(netdev); 1936 struct octeon_device *oct = lio->oct_dev; 1937 struct octnic_ctrl_pkt nctrl; 1938 int ret = 0; 1939 1940 if (!is_valid_ether_addr(addr->sa_data)) 1941 return -EADDRNOTAVAIL; 1942 1943 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1944 return 0; 1945 1946 if (lio->linfo.macaddr_is_admin_asgnd) 1947 return -EPERM; 1948 1949 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1950 1951 nctrl.ncmd.u64 = 0; 1952 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1953 nctrl.ncmd.s.param1 = 0; 1954 nctrl.ncmd.s.more = 1; 1955 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1956 nctrl.netpndev = (u64)netdev; 1957 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1958 nctrl.wait_time = 100; 1959 1960 nctrl.udd[0] = 0; 1961 /* The MAC Address is presented in network byte order. */ 1962 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1963 1964 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1965 if (ret < 0) { 1966 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1967 return -ENOMEM; 1968 } 1969 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1970 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1971 1972 return 0; 1973 } 1974 1975 /** 1976 * \brief Net device get_stats 1977 * @param netdev network device 1978 */ 1979 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 1980 { 1981 struct lio *lio = GET_LIO(netdev); 1982 struct net_device_stats *stats = &netdev->stats; 1983 u64 pkts = 0, drop = 0, bytes = 0; 1984 struct oct_droq_stats *oq_stats; 1985 struct oct_iq_stats *iq_stats; 1986 struct octeon_device *oct; 1987 int i, iq_no, oq_no; 1988 1989 oct = lio->oct_dev; 1990 1991 for (i = 0; i < lio->linfo.num_txpciq; i++) { 1992 iq_no = lio->linfo.txpciq[i].s.q_no; 1993 iq_stats = &oct->instr_queue[iq_no]->stats; 1994 pkts += iq_stats->tx_done; 1995 drop += iq_stats->tx_dropped; 1996 bytes += iq_stats->tx_tot_bytes; 1997 } 1998 1999 stats->tx_packets = pkts; 2000 stats->tx_bytes = bytes; 2001 stats->tx_dropped = drop; 2002 2003 pkts = 0; 2004 drop = 0; 2005 bytes = 0; 2006 2007 for (i = 0; i < lio->linfo.num_rxpciq; i++) { 2008 oq_no = lio->linfo.rxpciq[i].s.q_no; 2009 oq_stats = &oct->droq[oq_no]->stats; 2010 pkts += oq_stats->rx_pkts_received; 2011 drop += (oq_stats->rx_dropped + 2012 oq_stats->dropped_nodispatch + 2013 oq_stats->dropped_toomany + 2014 oq_stats->dropped_nomem); 2015 bytes += oq_stats->rx_bytes_received; 2016 } 2017 2018 stats->rx_bytes = bytes; 2019 stats->rx_packets = pkts; 2020 stats->rx_dropped = drop; 2021 2022 return stats; 2023 } 2024 2025 /** 2026 * \brief Net device change_mtu 2027 * @param netdev network device 2028 */ 2029 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 2030 { 2031 struct lio *lio = GET_LIO(netdev); 2032 struct octeon_device *oct = lio->oct_dev; 2033 2034 lio->mtu = new_mtu; 2035 2036 netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", 2037 netdev->mtu, new_mtu); 2038 dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", 2039 netdev->name, netdev->mtu, new_mtu); 2040 2041 netdev->mtu = new_mtu; 2042 2043 return 0; 2044 } 2045 2046 /** 2047 * \brief Handler for SIOCSHWTSTAMP ioctl 2048 * @param netdev network device 2049 * @param ifr interface request 2050 * @param cmd command 2051 */ 2052 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2053 { 2054 struct lio *lio = GET_LIO(netdev); 2055 struct hwtstamp_config conf; 2056 2057 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2058 return -EFAULT; 2059 2060 if (conf.flags) 2061 return -EINVAL; 2062 2063 switch (conf.tx_type) { 2064 case HWTSTAMP_TX_ON: 2065 case HWTSTAMP_TX_OFF: 2066 break; 2067 default: 2068 return -ERANGE; 2069 } 2070 2071 switch (conf.rx_filter) { 2072 case HWTSTAMP_FILTER_NONE: 2073 break; 2074 case HWTSTAMP_FILTER_ALL: 2075 case HWTSTAMP_FILTER_SOME: 2076 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2077 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2078 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2079 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2080 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2081 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2082 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2083 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2084 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2085 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2086 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2087 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2088 case HWTSTAMP_FILTER_NTP_ALL: 2089 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2090 break; 2091 default: 2092 return -ERANGE; 2093 } 2094 2095 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2096 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2097 2098 else 2099 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2100 2101 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2102 } 2103 2104 /** 2105 * \brief ioctl handler 2106 * @param netdev network device 2107 * @param ifr interface request 2108 * @param cmd command 2109 */ 2110 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2111 { 2112 switch (cmd) { 2113 case SIOCSHWTSTAMP: 2114 return hwtstamp_ioctl(netdev, ifr); 2115 default: 2116 return -EOPNOTSUPP; 2117 } 2118 } 2119 2120 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 2121 { 2122 struct sk_buff *skb = (struct sk_buff *)buf; 2123 struct octnet_buf_free_info *finfo; 2124 struct oct_timestamp_resp *resp; 2125 struct octeon_soft_command *sc; 2126 struct lio *lio; 2127 2128 finfo = (struct octnet_buf_free_info *)skb->cb; 2129 lio = finfo->lio; 2130 sc = finfo->sc; 2131 oct = lio->oct_dev; 2132 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2133 2134 if (status != OCTEON_REQUEST_DONE) { 2135 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 2136 CVM_CAST64(status)); 2137 resp->timestamp = 0; 2138 } 2139 2140 octeon_swap_8B_data(&resp->timestamp, 1); 2141 2142 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2143 struct skb_shared_hwtstamps ts; 2144 u64 ns = resp->timestamp; 2145 2146 netif_info(lio, tx_done, lio->netdev, 2147 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2148 skb, (unsigned long long)ns); 2149 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2150 skb_tstamp_tx(skb, &ts); 2151 } 2152 2153 octeon_free_soft_command(oct, sc); 2154 tx_buffer_free(skb); 2155 } 2156 2157 /* \brief Send a data packet that will be timestamped 2158 * @param oct octeon device 2159 * @param ndata pointer to network data 2160 * @param finfo pointer to private network data 2161 */ 2162 static int send_nic_timestamp_pkt(struct octeon_device *oct, 2163 struct octnic_data_pkt *ndata, 2164 struct octnet_buf_free_info *finfo) 2165 { 2166 struct octeon_soft_command *sc; 2167 int ring_doorbell; 2168 struct lio *lio; 2169 int retval; 2170 u32 len; 2171 2172 lio = finfo->lio; 2173 2174 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2175 sizeof(struct oct_timestamp_resp)); 2176 finfo->sc = sc; 2177 2178 if (!sc) { 2179 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2180 return IQ_SEND_FAILED; 2181 } 2182 2183 if (ndata->reqtype == REQTYPE_NORESP_NET) 2184 ndata->reqtype = REQTYPE_RESP_NET; 2185 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2186 ndata->reqtype = REQTYPE_RESP_NET_SG; 2187 2188 sc->callback = handle_timestamp; 2189 sc->callback_arg = finfo->skb; 2190 sc->iq_no = ndata->q_no; 2191 2192 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 2193 2194 ring_doorbell = 1; 2195 2196 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2197 sc, len, ndata->reqtype); 2198 2199 if (retval == IQ_SEND_FAILED) { 2200 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2201 retval); 2202 octeon_free_soft_command(oct, sc); 2203 } else { 2204 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2205 } 2206 2207 return retval; 2208 } 2209 2210 /** \brief Transmit networks packets to the Octeon interface 2211 * @param skbuff skbuff struct to be passed to network layer. 2212 * @param netdev pointer to network device 2213 * @returns whether the packet was transmitted to the device okay or not 2214 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2215 */ 2216 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2217 { 2218 struct octnet_buf_free_info *finfo; 2219 union octnic_cmd_setup cmdsetup; 2220 struct octnic_data_pkt ndata; 2221 struct octeon_instr_irh *irh; 2222 struct oct_iq_stats *stats; 2223 struct octeon_device *oct; 2224 int q_idx = 0, iq_no = 0; 2225 union tx_info *tx_info; 2226 struct lio *lio; 2227 int status = 0; 2228 u64 dptr = 0; 2229 u32 tag = 0; 2230 int j; 2231 2232 lio = GET_LIO(netdev); 2233 oct = lio->oct_dev; 2234 2235 if (netif_is_multiqueue(netdev)) { 2236 q_idx = skb->queue_mapping; 2237 q_idx = (q_idx % (lio->linfo.num_txpciq)); 2238 tag = q_idx; 2239 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2240 } else { 2241 iq_no = lio->txq; 2242 } 2243 2244 stats = &oct->instr_queue[iq_no]->stats; 2245 2246 /* Check for all conditions in which the current packet cannot be 2247 * transmitted. 2248 */ 2249 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2250 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 2251 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 2252 lio->linfo.link.s.link_up); 2253 goto lio_xmit_failed; 2254 } 2255 2256 /* Use space in skb->cb to store info used to unmap and 2257 * free the buffers. 2258 */ 2259 finfo = (struct octnet_buf_free_info *)skb->cb; 2260 finfo->lio = lio; 2261 finfo->skb = skb; 2262 finfo->sc = NULL; 2263 2264 /* Prepare the attributes for the data to be passed to OSI. */ 2265 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2266 2267 ndata.buf = finfo; 2268 2269 ndata.q_no = iq_no; 2270 2271 if (netif_is_multiqueue(netdev)) { 2272 if (octnet_iq_is_full(oct, ndata.q_no)) { 2273 /* defer sending if queue is full */ 2274 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2275 ndata.q_no); 2276 stats->tx_iq_busy++; 2277 return NETDEV_TX_BUSY; 2278 } 2279 } else { 2280 if (octnet_iq_is_full(oct, lio->txq)) { 2281 /* defer sending if queue is full */ 2282 stats->tx_iq_busy++; 2283 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2284 ndata.q_no); 2285 return NETDEV_TX_BUSY; 2286 } 2287 } 2288 2289 ndata.datasize = skb->len; 2290 2291 cmdsetup.u64 = 0; 2292 cmdsetup.s.iq_no = iq_no; 2293 2294 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2295 if (skb->encapsulation) { 2296 cmdsetup.s.tnl_csum = 1; 2297 stats->tx_vxlan++; 2298 } else { 2299 cmdsetup.s.transport_csum = 1; 2300 } 2301 } 2302 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2303 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2304 cmdsetup.s.timestamp = 1; 2305 } 2306 2307 if (!skb_shinfo(skb)->nr_frags) { 2308 cmdsetup.s.u.datasize = skb->len; 2309 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2310 /* Offload checksum calculation for TCP/UDP packets */ 2311 dptr = dma_map_single(&oct->pci_dev->dev, 2312 skb->data, 2313 skb->len, 2314 DMA_TO_DEVICE); 2315 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2316 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2317 __func__); 2318 return NETDEV_TX_BUSY; 2319 } 2320 2321 ndata.cmd.cmd3.dptr = dptr; 2322 finfo->dptr = dptr; 2323 ndata.reqtype = REQTYPE_NORESP_NET; 2324 2325 } else { 2326 struct skb_frag_struct *frag; 2327 struct octnic_gather *g; 2328 int i, frags; 2329 2330 spin_lock(&lio->glist_lock[q_idx]); 2331 g = (struct octnic_gather *)list_delete_head( 2332 &lio->glist[q_idx]); 2333 spin_unlock(&lio->glist_lock[q_idx]); 2334 2335 if (!g) { 2336 netif_info(lio, tx_err, lio->netdev, 2337 "Transmit scatter gather: glist null!\n"); 2338 goto lio_xmit_failed; 2339 } 2340 2341 cmdsetup.s.gather = 1; 2342 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2343 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2344 2345 memset(g->sg, 0, g->sg_size); 2346 2347 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2348 skb->data, 2349 (skb->len - skb->data_len), 2350 DMA_TO_DEVICE); 2351 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2352 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2353 __func__); 2354 return NETDEV_TX_BUSY; 2355 } 2356 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2357 2358 frags = skb_shinfo(skb)->nr_frags; 2359 i = 1; 2360 while (frags--) { 2361 frag = &skb_shinfo(skb)->frags[i - 1]; 2362 2363 g->sg[(i >> 2)].ptr[(i & 3)] = 2364 dma_map_page(&oct->pci_dev->dev, 2365 frag->page.p, 2366 frag->page_offset, 2367 frag->size, 2368 DMA_TO_DEVICE); 2369 if (dma_mapping_error(&oct->pci_dev->dev, 2370 g->sg[i >> 2].ptr[i & 3])) { 2371 dma_unmap_single(&oct->pci_dev->dev, 2372 g->sg[0].ptr[0], 2373 skb->len - skb->data_len, 2374 DMA_TO_DEVICE); 2375 for (j = 1; j < i; j++) { 2376 frag = &skb_shinfo(skb)->frags[j - 1]; 2377 dma_unmap_page(&oct->pci_dev->dev, 2378 g->sg[j >> 2].ptr[j & 3], 2379 frag->size, 2380 DMA_TO_DEVICE); 2381 } 2382 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2383 __func__); 2384 return NETDEV_TX_BUSY; 2385 } 2386 2387 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 2388 i++; 2389 } 2390 2391 dptr = g->sg_dma_ptr; 2392 2393 ndata.cmd.cmd3.dptr = dptr; 2394 finfo->dptr = dptr; 2395 finfo->g = g; 2396 2397 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2398 } 2399 2400 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2401 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2402 2403 if (skb_shinfo(skb)->gso_size) { 2404 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2405 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2406 } 2407 2408 /* HW insert VLAN tag */ 2409 if (skb_vlan_tag_present(skb)) { 2410 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 2411 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 2412 } 2413 2414 if (unlikely(cmdsetup.s.timestamp)) 2415 status = send_nic_timestamp_pkt(oct, &ndata, finfo); 2416 else 2417 status = octnet_send_nic_data_pkt(oct, &ndata); 2418 if (status == IQ_SEND_FAILED) 2419 goto lio_xmit_failed; 2420 2421 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2422 2423 if (status == IQ_SEND_STOP) { 2424 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 2425 iq_no); 2426 stop_q(lio->netdev, q_idx); 2427 } 2428 2429 netif_trans_update(netdev); 2430 2431 if (tx_info->s.gso_segs) 2432 stats->tx_done += tx_info->s.gso_segs; 2433 else 2434 stats->tx_done++; 2435 stats->tx_tot_bytes += ndata.datasize; 2436 2437 return NETDEV_TX_OK; 2438 2439 lio_xmit_failed: 2440 stats->tx_dropped++; 2441 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2442 iq_no, stats->tx_dropped); 2443 if (dptr) 2444 dma_unmap_single(&oct->pci_dev->dev, dptr, 2445 ndata.datasize, DMA_TO_DEVICE); 2446 tx_buffer_free(skb); 2447 return NETDEV_TX_OK; 2448 } 2449 2450 /** \brief Network device Tx timeout 2451 * @param netdev pointer to network device 2452 */ 2453 static void liquidio_tx_timeout(struct net_device *netdev) 2454 { 2455 struct lio *lio; 2456 2457 lio = GET_LIO(netdev); 2458 2459 netif_info(lio, tx_err, lio->netdev, 2460 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2461 netdev->stats.tx_dropped); 2462 netif_trans_update(netdev); 2463 txqs_wake(netdev); 2464 } 2465 2466 static int 2467 liquidio_vlan_rx_add_vid(struct net_device *netdev, 2468 __be16 proto __attribute__((unused)), u16 vid) 2469 { 2470 struct lio *lio = GET_LIO(netdev); 2471 struct octeon_device *oct = lio->oct_dev; 2472 struct octnic_ctrl_pkt nctrl; 2473 struct completion compl; 2474 u16 response_code; 2475 int ret = 0; 2476 2477 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2478 2479 nctrl.ncmd.u64 = 0; 2480 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2481 nctrl.ncmd.s.param1 = vid; 2482 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2483 nctrl.wait_time = 100; 2484 nctrl.netpndev = (u64)netdev; 2485 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2486 init_completion(&compl); 2487 nctrl.completion = &compl; 2488 nctrl.response_code = &response_code; 2489 2490 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2491 if (ret < 0) { 2492 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2493 ret); 2494 return -EIO; 2495 } 2496 2497 if (!wait_for_completion_timeout(&compl, 2498 msecs_to_jiffies(nctrl.wait_time))) 2499 return -EPERM; 2500 2501 if (READ_ONCE(response_code)) 2502 return -EPERM; 2503 2504 return 0; 2505 } 2506 2507 static int 2508 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2509 __be16 proto __attribute__((unused)), u16 vid) 2510 { 2511 struct lio *lio = GET_LIO(netdev); 2512 struct octeon_device *oct = lio->oct_dev; 2513 struct octnic_ctrl_pkt nctrl; 2514 int ret = 0; 2515 2516 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2517 2518 nctrl.ncmd.u64 = 0; 2519 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2520 nctrl.ncmd.s.param1 = vid; 2521 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2522 nctrl.wait_time = 100; 2523 nctrl.netpndev = (u64)netdev; 2524 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2525 2526 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2527 if (ret < 0) { 2528 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2529 ret); 2530 } 2531 return ret; 2532 } 2533 2534 /** Sending command to enable/disable RX checksum offload 2535 * @param netdev pointer to network device 2536 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 2537 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 2538 * OCTNET_CMD_RXCSUM_DISABLE 2539 * @returns SUCCESS or FAILURE 2540 */ 2541 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2542 u8 rx_cmd) 2543 { 2544 struct lio *lio = GET_LIO(netdev); 2545 struct octeon_device *oct = lio->oct_dev; 2546 struct octnic_ctrl_pkt nctrl; 2547 int ret = 0; 2548 2549 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2550 2551 nctrl.ncmd.u64 = 0; 2552 nctrl.ncmd.s.cmd = command; 2553 nctrl.ncmd.s.param1 = rx_cmd; 2554 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2555 nctrl.wait_time = 100; 2556 nctrl.netpndev = (u64)netdev; 2557 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2558 2559 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2560 if (ret < 0) { 2561 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 2562 ret); 2563 } 2564 return ret; 2565 } 2566 2567 /** Sending command to add/delete VxLAN UDP port to firmware 2568 * @param netdev pointer to network device 2569 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 2570 * @param vxlan_port VxLAN port to be added or deleted 2571 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 2572 * OCTNET_CMD_VXLAN_PORT_DEL 2573 * @returns SUCCESS or FAILURE 2574 */ 2575 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2576 u16 vxlan_port, u8 vxlan_cmd_bit) 2577 { 2578 struct lio *lio = GET_LIO(netdev); 2579 struct octeon_device *oct = lio->oct_dev; 2580 struct octnic_ctrl_pkt nctrl; 2581 int ret = 0; 2582 2583 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2584 2585 nctrl.ncmd.u64 = 0; 2586 nctrl.ncmd.s.cmd = command; 2587 nctrl.ncmd.s.more = vxlan_cmd_bit; 2588 nctrl.ncmd.s.param1 = vxlan_port; 2589 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2590 nctrl.wait_time = 100; 2591 nctrl.netpndev = (u64)netdev; 2592 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2593 2594 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2595 if (ret < 0) { 2596 dev_err(&oct->pci_dev->dev, 2597 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 2598 ret); 2599 } 2600 return ret; 2601 } 2602 2603 /** \brief Net device fix features 2604 * @param netdev pointer to network device 2605 * @param request features requested 2606 * @returns updated features list 2607 */ 2608 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2609 netdev_features_t request) 2610 { 2611 struct lio *lio = netdev_priv(netdev); 2612 2613 if ((request & NETIF_F_RXCSUM) && 2614 !(lio->dev_capability & NETIF_F_RXCSUM)) 2615 request &= ~NETIF_F_RXCSUM; 2616 2617 if ((request & NETIF_F_HW_CSUM) && 2618 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2619 request &= ~NETIF_F_HW_CSUM; 2620 2621 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2622 request &= ~NETIF_F_TSO; 2623 2624 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2625 request &= ~NETIF_F_TSO6; 2626 2627 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2628 request &= ~NETIF_F_LRO; 2629 2630 /* Disable LRO if RXCSUM is off */ 2631 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2632 (lio->dev_capability & NETIF_F_LRO)) 2633 request &= ~NETIF_F_LRO; 2634 2635 return request; 2636 } 2637 2638 /** \brief Net device set features 2639 * @param netdev pointer to network device 2640 * @param features features to enable/disable 2641 */ 2642 static int liquidio_set_features(struct net_device *netdev, 2643 netdev_features_t features) 2644 { 2645 struct lio *lio = netdev_priv(netdev); 2646 2647 if (!((netdev->features ^ features) & NETIF_F_LRO)) 2648 return 0; 2649 2650 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 2651 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2652 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2653 else if (!(features & NETIF_F_LRO) && 2654 (lio->dev_capability & NETIF_F_LRO)) 2655 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2656 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2657 if (!(netdev->features & NETIF_F_RXCSUM) && 2658 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2659 (features & NETIF_F_RXCSUM)) 2660 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2661 OCTNET_CMD_RXCSUM_ENABLE); 2662 else if ((netdev->features & NETIF_F_RXCSUM) && 2663 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2664 !(features & NETIF_F_RXCSUM)) 2665 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2666 OCTNET_CMD_RXCSUM_DISABLE); 2667 2668 return 0; 2669 } 2670 2671 static void liquidio_add_vxlan_port(struct net_device *netdev, 2672 struct udp_tunnel_info *ti) 2673 { 2674 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2675 return; 2676 2677 liquidio_vxlan_port_command(netdev, 2678 OCTNET_CMD_VXLAN_PORT_CONFIG, 2679 htons(ti->port), 2680 OCTNET_CMD_VXLAN_PORT_ADD); 2681 } 2682 2683 static void liquidio_del_vxlan_port(struct net_device *netdev, 2684 struct udp_tunnel_info *ti) 2685 { 2686 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2687 return; 2688 2689 liquidio_vxlan_port_command(netdev, 2690 OCTNET_CMD_VXLAN_PORT_CONFIG, 2691 htons(ti->port), 2692 OCTNET_CMD_VXLAN_PORT_DEL); 2693 } 2694 2695 static const struct net_device_ops lionetdevops = { 2696 .ndo_open = liquidio_open, 2697 .ndo_stop = liquidio_stop, 2698 .ndo_start_xmit = liquidio_xmit, 2699 .ndo_get_stats = liquidio_get_stats, 2700 .ndo_set_mac_address = liquidio_set_mac, 2701 .ndo_set_rx_mode = liquidio_set_mcast_list, 2702 .ndo_tx_timeout = liquidio_tx_timeout, 2703 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 2704 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 2705 .ndo_change_mtu = liquidio_change_mtu, 2706 .ndo_do_ioctl = liquidio_ioctl, 2707 .ndo_fix_features = liquidio_fix_features, 2708 .ndo_set_features = liquidio_set_features, 2709 .ndo_udp_tunnel_add = liquidio_add_vxlan_port, 2710 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 2711 }; 2712 2713 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 2714 { 2715 struct octeon_device *oct = (struct octeon_device *)buf; 2716 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 2717 union oct_link_status *ls; 2718 int gmxport = 0; 2719 int i; 2720 2721 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 2722 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 2723 recv_pkt->buffer_size[0], 2724 recv_pkt->rh.r_nic_info.gmxport); 2725 goto nic_info_err; 2726 } 2727 2728 gmxport = recv_pkt->rh.r_nic_info.gmxport; 2729 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 2730 OCT_DROQ_INFO_SIZE); 2731 2732 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 2733 2734 for (i = 0; i < oct->ifcount; i++) { 2735 if (oct->props[i].gmxport == gmxport) { 2736 update_link_status(oct->props[i].netdev, ls); 2737 break; 2738 } 2739 } 2740 2741 nic_info_err: 2742 for (i = 0; i < recv_pkt->buffer_count; i++) 2743 recv_buffer_free(recv_pkt->buffer_ptr[i]); 2744 octeon_free_recv_info(recv_info); 2745 return 0; 2746 } 2747 2748 /** 2749 * \brief Setup network interfaces 2750 * @param octeon_dev octeon device 2751 * 2752 * Called during init time for each device. It assumes the NIC 2753 * is already up and running. The link information for each 2754 * interface is passed in link_info. 2755 */ 2756 static int setup_nic_devices(struct octeon_device *octeon_dev) 2757 { 2758 int retval, num_iqueues, num_oqueues; 2759 struct liquidio_if_cfg_context *ctx; 2760 u32 resp_size, ctx_size, data_size; 2761 struct liquidio_if_cfg_resp *resp; 2762 struct octeon_soft_command *sc; 2763 union oct_nic_if_cfg if_cfg; 2764 struct octdev_props *props; 2765 struct net_device *netdev; 2766 struct lio_version *vdata; 2767 struct lio *lio = NULL; 2768 u8 mac[ETH_ALEN], i, j; 2769 u32 ifidx_or_pfnum; 2770 2771 ifidx_or_pfnum = octeon_dev->pf_num; 2772 2773 /* This is to handle link status changes */ 2774 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 2775 lio_nic_info, octeon_dev); 2776 2777 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 2778 * They are handled directly. 2779 */ 2780 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 2781 free_netbuf); 2782 2783 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 2784 free_netsgbuf); 2785 2786 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 2787 free_netsgbuf_with_resp); 2788 2789 for (i = 0; i < octeon_dev->ifcount; i++) { 2790 resp_size = sizeof(struct liquidio_if_cfg_resp); 2791 ctx_size = sizeof(struct liquidio_if_cfg_context); 2792 data_size = sizeof(struct lio_version); 2793 sc = (struct octeon_soft_command *) 2794 octeon_alloc_soft_command(octeon_dev, data_size, 2795 resp_size, ctx_size); 2796 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 2797 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 2798 vdata = (struct lio_version *)sc->virtdptr; 2799 2800 *((u64 *)vdata) = 0; 2801 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 2802 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 2803 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 2804 2805 WRITE_ONCE(ctx->cond, 0); 2806 ctx->octeon_id = lio_get_device_id(octeon_dev); 2807 init_waitqueue_head(&ctx->wc); 2808 2809 if_cfg.u64 = 0; 2810 2811 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 2812 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 2813 if_cfg.s.base_queue = 0; 2814 2815 sc->iq_no = 0; 2816 2817 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 2818 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 2819 0); 2820 2821 sc->callback = if_cfg_callback; 2822 sc->callback_arg = sc; 2823 sc->wait_time = 5000; 2824 2825 retval = octeon_send_soft_command(octeon_dev, sc); 2826 if (retval == IQ_SEND_FAILED) { 2827 dev_err(&octeon_dev->pci_dev->dev, 2828 "iq/oq config failed status: %x\n", retval); 2829 /* Soft instr is freed by driver in case of failure. */ 2830 goto setup_nic_dev_fail; 2831 } 2832 2833 /* Sleep on a wait queue till the cond flag indicates that the 2834 * response arrived or timed-out. 2835 */ 2836 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 2837 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); 2838 goto setup_nic_wait_intr; 2839 } 2840 2841 retval = resp->status; 2842 if (retval) { 2843 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 2844 goto setup_nic_dev_fail; 2845 } 2846 2847 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 2848 (sizeof(struct liquidio_if_cfg_info)) >> 3); 2849 2850 num_iqueues = hweight64(resp->cfg_info.iqmask); 2851 num_oqueues = hweight64(resp->cfg_info.oqmask); 2852 2853 if (!(num_iqueues) || !(num_oqueues)) { 2854 dev_err(&octeon_dev->pci_dev->dev, 2855 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 2856 resp->cfg_info.iqmask, resp->cfg_info.oqmask); 2857 goto setup_nic_dev_fail; 2858 } 2859 dev_dbg(&octeon_dev->pci_dev->dev, 2860 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 2861 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 2862 num_iqueues, num_oqueues); 2863 2864 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 2865 2866 if (!netdev) { 2867 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 2868 goto setup_nic_dev_fail; 2869 } 2870 2871 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 2872 2873 /* Associate the routines that will handle different 2874 * netdev tasks. 2875 */ 2876 netdev->netdev_ops = &lionetdevops; 2877 2878 lio = GET_LIO(netdev); 2879 2880 memset(lio, 0, sizeof(struct lio)); 2881 2882 lio->ifidx = ifidx_or_pfnum; 2883 2884 props = &octeon_dev->props[i]; 2885 props->gmxport = resp->cfg_info.linfo.gmxport; 2886 props->netdev = netdev; 2887 2888 lio->linfo.num_rxpciq = num_oqueues; 2889 lio->linfo.num_txpciq = num_iqueues; 2890 2891 for (j = 0; j < num_oqueues; j++) { 2892 lio->linfo.rxpciq[j].u64 = 2893 resp->cfg_info.linfo.rxpciq[j].u64; 2894 } 2895 for (j = 0; j < num_iqueues; j++) { 2896 lio->linfo.txpciq[j].u64 = 2897 resp->cfg_info.linfo.txpciq[j].u64; 2898 } 2899 2900 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 2901 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 2902 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 2903 lio->linfo.macaddr_is_admin_asgnd = 2904 resp->cfg_info.linfo.macaddr_is_admin_asgnd; 2905 2906 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 2907 2908 lio->dev_capability = NETIF_F_HIGHDMA 2909 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2910 | NETIF_F_SG | NETIF_F_RXCSUM 2911 | NETIF_F_TSO | NETIF_F_TSO6 2912 | NETIF_F_GRO 2913 | NETIF_F_LRO; 2914 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 2915 2916 /* Copy of transmit encapsulation capabilities: 2917 * TSO, TSO6, Checksums for this device 2918 */ 2919 lio->enc_dev_capability = NETIF_F_IP_CSUM 2920 | NETIF_F_IPV6_CSUM 2921 | NETIF_F_GSO_UDP_TUNNEL 2922 | NETIF_F_HW_CSUM | NETIF_F_SG 2923 | NETIF_F_RXCSUM 2924 | NETIF_F_TSO | NETIF_F_TSO6 2925 | NETIF_F_LRO; 2926 2927 netdev->hw_enc_features = 2928 (lio->enc_dev_capability & ~NETIF_F_LRO); 2929 netdev->vlan_features = lio->dev_capability; 2930 /* Add any unchangeable hw features */ 2931 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 2932 NETIF_F_HW_VLAN_CTAG_RX | 2933 NETIF_F_HW_VLAN_CTAG_TX; 2934 2935 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 2936 2937 netdev->hw_features = lio->dev_capability; 2938 2939 /* MTU range: 68 - 16000 */ 2940 netdev->min_mtu = LIO_MIN_MTU_SIZE; 2941 netdev->max_mtu = LIO_MAX_MTU_SIZE; 2942 2943 /* Point to the properties for octeon device to which this 2944 * interface belongs. 2945 */ 2946 lio->oct_dev = octeon_dev; 2947 lio->octprops = props; 2948 lio->netdev = netdev; 2949 2950 dev_dbg(&octeon_dev->pci_dev->dev, 2951 "if%d gmx: %d hw_addr: 0x%llx\n", i, 2952 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 2953 2954 /* 64-bit swap required on LE machines */ 2955 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 2956 for (j = 0; j < ETH_ALEN; j++) 2957 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 2958 2959 /* Copy MAC Address to OS network device structure */ 2960 ether_addr_copy(netdev->dev_addr, mac); 2961 2962 if (setup_io_queues(octeon_dev, i)) { 2963 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 2964 goto setup_nic_dev_fail; 2965 } 2966 2967 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 2968 2969 /* For VFs, enable Octeon device interrupts here, 2970 * as this is contingent upon IO queue setup 2971 */ 2972 octeon_dev->fn_list.enable_interrupt(octeon_dev, 2973 OCTEON_ALL_INTR); 2974 2975 /* By default all interfaces on a single Octeon uses the same 2976 * tx and rx queues 2977 */ 2978 lio->txq = lio->linfo.txpciq[0].s.q_no; 2979 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 2980 2981 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 2982 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 2983 2984 if (setup_glists(lio, num_iqueues)) { 2985 dev_err(&octeon_dev->pci_dev->dev, 2986 "Gather list allocation failed\n"); 2987 goto setup_nic_dev_fail; 2988 } 2989 2990 /* Register ethtool support */ 2991 liquidio_set_ethtool_ops(netdev); 2992 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) 2993 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 2994 else 2995 octeon_dev->priv_flags = 0x0; 2996 2997 if (netdev->features & NETIF_F_LRO) 2998 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2999 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3000 3001 if (setup_link_status_change_wq(netdev)) 3002 goto setup_nic_dev_fail; 3003 3004 if (setup_rx_oom_poll_fn(netdev)) 3005 goto setup_nic_dev_fail; 3006 3007 /* Register the network device with the OS */ 3008 if (register_netdev(netdev)) { 3009 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3010 goto setup_nic_dev_fail; 3011 } 3012 3013 dev_dbg(&octeon_dev->pci_dev->dev, 3014 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3015 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3016 netif_carrier_off(netdev); 3017 lio->link_changes++; 3018 3019 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3020 3021 /* Sending command to firmware to enable Rx checksum offload 3022 * by default at the time of setup of Liquidio driver for 3023 * this device 3024 */ 3025 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3026 OCTNET_CMD_RXCSUM_ENABLE); 3027 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3028 OCTNET_CMD_TXCSUM_ENABLE); 3029 3030 dev_dbg(&octeon_dev->pci_dev->dev, 3031 "NIC ifidx:%d Setup successful\n", i); 3032 3033 octeon_free_soft_command(octeon_dev, sc); 3034 } 3035 3036 return 0; 3037 3038 setup_nic_dev_fail: 3039 3040 octeon_free_soft_command(octeon_dev, sc); 3041 3042 setup_nic_wait_intr: 3043 3044 while (i--) { 3045 dev_err(&octeon_dev->pci_dev->dev, 3046 "NIC ifidx:%d Setup failed\n", i); 3047 liquidio_destroy_nic_device(octeon_dev, i); 3048 } 3049 return -ENODEV; 3050 } 3051 3052 /** 3053 * \brief initialize the NIC 3054 * @param oct octeon device 3055 * 3056 * This initialization routine is called once the Octeon device application is 3057 * up and running 3058 */ 3059 static int liquidio_init_nic_module(struct octeon_device *oct) 3060 { 3061 int num_nic_ports = 1; 3062 int i, retval = 0; 3063 3064 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3065 3066 /* only default iq and oq were initialized 3067 * initialize the rest as well run port_config command for each port 3068 */ 3069 oct->ifcount = num_nic_ports; 3070 memset(oct->props, 0, 3071 sizeof(struct octdev_props) * num_nic_ports); 3072 3073 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3074 oct->props[i].gmxport = -1; 3075 3076 retval = setup_nic_devices(oct); 3077 if (retval) { 3078 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3079 goto octnet_init_failure; 3080 } 3081 3082 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3083 3084 return retval; 3085 3086 octnet_init_failure: 3087 3088 oct->ifcount = 0; 3089 3090 return retval; 3091 } 3092 3093 /** 3094 * \brief Device initialization for each Octeon device that is probed 3095 * @param octeon_dev octeon device 3096 */ 3097 static int octeon_device_init(struct octeon_device *oct) 3098 { 3099 u32 rev_id; 3100 int j; 3101 3102 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE); 3103 3104 /* Enable access to the octeon device and make its DMA capability 3105 * known to the OS. 3106 */ 3107 if (octeon_pci_os_setup(oct)) 3108 return 1; 3109 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE); 3110 3111 oct->chip_id = OCTEON_CN23XX_VF_VID; 3112 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 3113 oct->rev_id = rev_id & 0xff; 3114 3115 if (cn23xx_setup_octeon_vf_device(oct)) 3116 return 1; 3117 3118 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE); 3119 3120 oct->app_mode = CVM_DRV_NIC_APP; 3121 3122 /* Initialize the dispatch mechanism used to push packets arriving on 3123 * Octeon Output queues. 3124 */ 3125 if (octeon_init_dispatch_list(oct)) 3126 return 1; 3127 3128 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE); 3129 3130 if (octeon_set_io_queues_off(oct)) { 3131 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); 3132 return 1; 3133 } 3134 3135 if (oct->fn_list.setup_device_regs(oct)) { 3136 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n"); 3137 return 1; 3138 } 3139 3140 /* Initialize soft command buffer pool */ 3141 if (octeon_setup_sc_buffer_pool(oct)) { 3142 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); 3143 return 1; 3144 } 3145 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 3146 3147 /* Setup the data structures that manage this Octeon's Input queues. */ 3148 if (octeon_setup_instr_queues(oct)) { 3149 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); 3150 return 1; 3151 } 3152 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 3153 3154 /* Initialize lists to manage the requests of different types that 3155 * arrive from user & kernel applications for this octeon device. 3156 */ 3157 if (octeon_setup_response_list(oct)) { 3158 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); 3159 return 1; 3160 } 3161 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); 3162 3163 if (octeon_setup_output_queues(oct)) { 3164 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); 3165 return 1; 3166 } 3167 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); 3168 3169 if (oct->fn_list.setup_mbox(oct)) { 3170 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 3171 return 1; 3172 } 3173 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); 3174 3175 if (octeon_allocate_ioq_vector(oct)) { 3176 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); 3177 return 1; 3178 } 3179 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 3180 3181 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n", 3182 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); 3183 3184 /* Setup the interrupt handler and record the INT SUM register address*/ 3185 if (octeon_setup_interrupt(oct)) 3186 return 1; 3187 3188 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); 3189 3190 /* *************************************************************** 3191 * The interrupts need to be enabled for the PF<-->VF handshake. 3192 * They are [re]-enabled after the PF<-->VF handshake so that the 3193 * correct OQ tick value is used (i.e. the value retrieved from 3194 * the PF as part of the handshake). 3195 */ 3196 3197 /* Enable Octeon device interrupts */ 3198 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 3199 3200 if (cn23xx_octeon_pfvf_handshake(oct)) 3201 return 1; 3202 3203 /* Here we [re]-enable the interrupts so that the correct OQ tick value 3204 * is used (i.e. the value that was retrieved during the handshake) 3205 */ 3206 3207 /* Enable Octeon device interrupts */ 3208 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 3209 /* *************************************************************** */ 3210 3211 /* Enable the input and output queues for this Octeon device */ 3212 if (oct->fn_list.enable_io_queues(oct)) { 3213 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); 3214 return 1; 3215 } 3216 3217 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); 3218 3219 atomic_set(&oct->status, OCT_DEV_HOST_OK); 3220 3221 /* Send Credit for Octeon Output queues. Credits are always sent after 3222 * the output queue is enabled. 3223 */ 3224 for (j = 0; j < oct->num_oqs; j++) 3225 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); 3226 3227 /* Packets can start arriving on the output queues from this point. */ 3228 3229 atomic_set(&oct->status, OCT_DEV_CORE_OK); 3230 3231 atomic_set(&oct->status, OCT_DEV_RUNNING); 3232 3233 if (liquidio_init_nic_module(oct)) 3234 return 1; 3235 3236 return 0; 3237 } 3238 3239 static int __init liquidio_vf_init(void) 3240 { 3241 octeon_init_device_list(0); 3242 return pci_register_driver(&liquidio_vf_pci_driver); 3243 } 3244 3245 static void __exit liquidio_vf_exit(void) 3246 { 3247 pci_unregister_driver(&liquidio_vf_pci_driver); 3248 3249 pr_info("LiquidIO_VF network module is now unloaded\n"); 3250 } 3251 3252 module_init(liquidio_vf_init); 3253 module_exit(liquidio_vf_exit); 3254