1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/pci.h> 19 #include <net/vxlan.h> 20 #include "liquidio_common.h" 21 #include "octeon_droq.h" 22 #include "octeon_iq.h" 23 #include "response_manager.h" 24 #include "octeon_device.h" 25 #include "octeon_nic.h" 26 #include "octeon_main.h" 27 #include "octeon_network.h" 28 #include "cn23xx_vf_device.h" 29 30 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 31 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); 32 MODULE_LICENSE("GPL"); 33 MODULE_VERSION(LIQUIDIO_VERSION); 34 35 static int debug = -1; 36 module_param(debug, int, 0644); 37 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); 38 39 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 40 41 /* Bit mask values for lio->ifstate */ 42 #define LIO_IFSTATE_DROQ_OPS 0x01 43 #define LIO_IFSTATE_REGISTERED 0x02 44 #define LIO_IFSTATE_RUNNING 0x04 45 #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 46 47 struct liquidio_if_cfg_context { 48 int octeon_id; 49 50 wait_queue_head_t wc; 51 52 int cond; 53 }; 54 55 struct liquidio_if_cfg_resp { 56 u64 rh; 57 struct liquidio_if_cfg_info cfg_info; 58 u64 status; 59 }; 60 61 struct liquidio_rx_ctl_context { 62 int octeon_id; 63 64 wait_queue_head_t wc; 65 66 int cond; 67 }; 68 69 struct oct_timestamp_resp { 70 u64 rh; 71 u64 timestamp; 72 u64 status; 73 }; 74 75 union tx_info { 76 u64 u64; 77 struct { 78 #ifdef __BIG_ENDIAN_BITFIELD 79 u16 gso_size; 80 u16 gso_segs; 81 u32 reserved; 82 #else 83 u32 reserved; 84 u16 gso_segs; 85 u16 gso_size; 86 #endif 87 } s; 88 }; 89 90 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS) 91 92 #define OCTNIC_GSO_MAX_HEADER_SIZE 128 93 #define OCTNIC_GSO_MAX_SIZE \ 94 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 95 96 struct octnic_gather { 97 /* List manipulation. Next and prev pointers. */ 98 struct list_head list; 99 100 /* Size of the gather component at sg in bytes. */ 101 int sg_size; 102 103 /* Number of bytes that sg was adjusted to make it 8B-aligned. */ 104 int adjust; 105 106 /* Gather component that can accommodate max sized fragment list 107 * received from the IP layer. 108 */ 109 struct octeon_sg_entry *sg; 110 }; 111 112 struct octeon_device_priv { 113 /* Tasklet structures for this device. */ 114 struct tasklet_struct droq_tasklet; 115 unsigned long napi_mask; 116 }; 117 118 static int 119 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 120 static void liquidio_vf_remove(struct pci_dev *pdev); 121 static int octeon_device_init(struct octeon_device *oct); 122 static int liquidio_stop(struct net_device *netdev); 123 124 static int lio_wait_for_oq_pkts(struct octeon_device *oct) 125 { 126 struct octeon_device_priv *oct_priv = 127 (struct octeon_device_priv *)oct->priv; 128 int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT; 129 int pkt_cnt = 0, pending_pkts; 130 int i; 131 132 do { 133 pending_pkts = 0; 134 135 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 136 if (!(oct->io_qmask.oq & BIT_ULL(i))) 137 continue; 138 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); 139 } 140 if (pkt_cnt > 0) { 141 pending_pkts += pkt_cnt; 142 tasklet_schedule(&oct_priv->droq_tasklet); 143 } 144 pkt_cnt = 0; 145 schedule_timeout_uninterruptible(1); 146 147 } while (retry-- && pending_pkts); 148 149 return pkt_cnt; 150 } 151 152 /** 153 * \brief wait for all pending requests to complete 154 * @param oct Pointer to Octeon device 155 * 156 * Called during shutdown sequence 157 */ 158 static int wait_for_pending_requests(struct octeon_device *oct) 159 { 160 int i, pcount = 0; 161 162 for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) { 163 pcount = atomic_read( 164 &oct->response_list[OCTEON_ORDERED_SC_LIST] 165 .pending_req_count); 166 if (pcount) 167 schedule_timeout_uninterruptible(HZ / 10); 168 else 169 break; 170 } 171 172 if (pcount) 173 return 1; 174 175 return 0; 176 } 177 178 /** 179 * \brief Cause device to go quiet so it can be safely removed/reset/etc 180 * @param oct Pointer to Octeon device 181 */ 182 static void pcierror_quiesce_device(struct octeon_device *oct) 183 { 184 int i; 185 186 /* Disable the input and output queues now. No more packets will 187 * arrive from Octeon, but we should wait for all packet processing 188 * to finish. 189 */ 190 191 /* To allow for in-flight requests */ 192 schedule_timeout_uninterruptible(100); 193 194 if (wait_for_pending_requests(oct)) 195 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 196 197 /* Force all requests waiting to be fetched by OCTEON to complete. */ 198 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 199 struct octeon_instr_queue *iq; 200 201 if (!(oct->io_qmask.iq & BIT_ULL(i))) 202 continue; 203 iq = oct->instr_queue[i]; 204 205 if (atomic_read(&iq->instr_pending)) { 206 spin_lock_bh(&iq->lock); 207 iq->fill_cnt = 0; 208 iq->octeon_read_index = iq->host_write_index; 209 iq->stats.instr_processed += 210 atomic_read(&iq->instr_pending); 211 lio_process_iq_request_list(oct, iq, 0); 212 spin_unlock_bh(&iq->lock); 213 } 214 } 215 216 /* Force all pending ordered list requests to time out. */ 217 lio_process_ordered_list(oct, 1); 218 219 /* We do not need to wait for output queue packets to be processed. */ 220 } 221 222 /** 223 * \brief Cleanup PCI AER uncorrectable error status 224 * @param dev Pointer to PCI device 225 */ 226 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 227 { 228 u32 status, mask; 229 int pos = 0x100; 230 231 pr_info("%s :\n", __func__); 232 233 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 234 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 235 if (dev->error_state == pci_channel_io_normal) 236 status &= ~mask; /* Clear corresponding nonfatal bits */ 237 else 238 status &= mask; /* Clear corresponding fatal bits */ 239 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 240 } 241 242 /** 243 * \brief Stop all PCI IO to a given device 244 * @param dev Pointer to Octeon device 245 */ 246 static void stop_pci_io(struct octeon_device *oct) 247 { 248 struct msix_entry *msix_entries; 249 int i; 250 251 /* No more instructions will be forwarded. */ 252 atomic_set(&oct->status, OCT_DEV_IN_RESET); 253 254 for (i = 0; i < oct->ifcount; i++) 255 netif_device_detach(oct->props[i].netdev); 256 257 /* Disable interrupts */ 258 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 259 260 pcierror_quiesce_device(oct); 261 if (oct->msix_on) { 262 msix_entries = (struct msix_entry *)oct->msix_entries; 263 for (i = 0; i < oct->num_msix_irqs; i++) { 264 /* clear the affinity_cpumask */ 265 irq_set_affinity_hint(msix_entries[i].vector, 266 NULL); 267 free_irq(msix_entries[i].vector, 268 &oct->ioq_vector[i]); 269 } 270 pci_disable_msix(oct->pci_dev); 271 kfree(oct->msix_entries); 272 oct->msix_entries = NULL; 273 octeon_free_ioq_vector(oct); 274 } 275 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 276 lio_get_state_string(&oct->status)); 277 278 /* making it a common function for all OCTEON models */ 279 cleanup_aer_uncorrect_error_status(oct->pci_dev); 280 281 pci_disable_device(oct->pci_dev); 282 } 283 284 /** 285 * \brief called when PCI error is detected 286 * @param pdev Pointer to PCI device 287 * @param state The current pci connection state 288 * 289 * This function is called after a PCI bus error affecting 290 * this device has been detected. 291 */ 292 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, 293 pci_channel_state_t state) 294 { 295 struct octeon_device *oct = pci_get_drvdata(pdev); 296 297 /* Non-correctable Non-fatal errors */ 298 if (state == pci_channel_io_normal) { 299 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); 300 cleanup_aer_uncorrect_error_status(oct->pci_dev); 301 return PCI_ERS_RESULT_CAN_RECOVER; 302 } 303 304 /* Non-correctable Fatal errors */ 305 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); 306 stop_pci_io(oct); 307 308 return PCI_ERS_RESULT_DISCONNECT; 309 } 310 311 /* For PCI-E Advanced Error Recovery (AER) Interface */ 312 static const struct pci_error_handlers liquidio_vf_err_handler = { 313 .error_detected = liquidio_pcie_error_detected, 314 }; 315 316 static const struct pci_device_id liquidio_vf_pci_tbl[] = { 317 { 318 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, 319 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 320 }, 321 { 322 0, 0, 0, 0, 0, 0, 0 323 } 324 }; 325 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); 326 327 static struct pci_driver liquidio_vf_pci_driver = { 328 .name = "LiquidIO_VF", 329 .id_table = liquidio_vf_pci_tbl, 330 .probe = liquidio_vf_probe, 331 .remove = liquidio_vf_remove, 332 .err_handler = &liquidio_vf_err_handler, /* For AER */ 333 }; 334 335 /** 336 * \brief check interface state 337 * @param lio per-network private data 338 * @param state_flag flag state to check 339 */ 340 static int ifstate_check(struct lio *lio, int state_flag) 341 { 342 return atomic_read(&lio->ifstate) & state_flag; 343 } 344 345 /** 346 * \brief set interface state 347 * @param lio per-network private data 348 * @param state_flag flag state to set 349 */ 350 static void ifstate_set(struct lio *lio, int state_flag) 351 { 352 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); 353 } 354 355 /** 356 * \brief clear interface state 357 * @param lio per-network private data 358 * @param state_flag flag state to clear 359 */ 360 static void ifstate_reset(struct lio *lio, int state_flag) 361 { 362 atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); 363 } 364 365 /** 366 * \brief Stop Tx queues 367 * @param netdev network device 368 */ 369 static void txqs_stop(struct net_device *netdev) 370 { 371 if (netif_is_multiqueue(netdev)) { 372 int i; 373 374 for (i = 0; i < netdev->num_tx_queues; i++) 375 netif_stop_subqueue(netdev, i); 376 } else { 377 netif_stop_queue(netdev); 378 } 379 } 380 381 /** 382 * \brief Start Tx queues 383 * @param netdev network device 384 */ 385 static void txqs_start(struct net_device *netdev) 386 { 387 if (netif_is_multiqueue(netdev)) { 388 int i; 389 390 for (i = 0; i < netdev->num_tx_queues; i++) 391 netif_start_subqueue(netdev, i); 392 } else { 393 netif_start_queue(netdev); 394 } 395 } 396 397 /** 398 * \brief Wake Tx queues 399 * @param netdev network device 400 */ 401 static void txqs_wake(struct net_device *netdev) 402 { 403 struct lio *lio = GET_LIO(netdev); 404 405 if (netif_is_multiqueue(netdev)) { 406 int i; 407 408 for (i = 0; i < netdev->num_tx_queues; i++) { 409 int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)] 410 .s.q_no; 411 if (__netif_subqueue_stopped(netdev, i)) { 412 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, 413 tx_restart, 1); 414 netif_wake_subqueue(netdev, i); 415 } 416 } 417 } else { 418 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, 419 tx_restart, 1); 420 netif_wake_queue(netdev); 421 } 422 } 423 424 /** 425 * \brief Start Tx queue 426 * @param netdev network device 427 */ 428 static void start_txq(struct net_device *netdev) 429 { 430 struct lio *lio = GET_LIO(netdev); 431 432 if (lio->linfo.link.s.link_up) { 433 txqs_start(netdev); 434 return; 435 } 436 } 437 438 /** 439 * \brief Wake a queue 440 * @param netdev network device 441 * @param q which queue to wake 442 */ 443 static void wake_q(struct net_device *netdev, int q) 444 { 445 if (netif_is_multiqueue(netdev)) 446 netif_wake_subqueue(netdev, q); 447 else 448 netif_wake_queue(netdev); 449 } 450 451 /** 452 * \brief Stop a queue 453 * @param netdev network device 454 * @param q which queue to stop 455 */ 456 static void stop_q(struct net_device *netdev, int q) 457 { 458 if (netif_is_multiqueue(netdev)) 459 netif_stop_subqueue(netdev, q); 460 else 461 netif_stop_queue(netdev); 462 } 463 464 /** 465 * Remove the node at the head of the list. The list would be empty at 466 * the end of this call if there are no more nodes in the list. 467 */ 468 static struct list_head *list_delete_head(struct list_head *root) 469 { 470 struct list_head *node; 471 472 if ((root->prev == root) && (root->next == root)) 473 node = NULL; 474 else 475 node = root->next; 476 477 if (node) 478 list_del(node); 479 480 return node; 481 } 482 483 /** 484 * \brief Delete gather lists 485 * @param lio per-network private data 486 */ 487 static void delete_glists(struct lio *lio) 488 { 489 struct octnic_gather *g; 490 int i; 491 492 if (!lio->glist) 493 return; 494 495 for (i = 0; i < lio->linfo.num_txpciq; i++) { 496 do { 497 g = (struct octnic_gather *) 498 list_delete_head(&lio->glist[i]); 499 if (g) { 500 if (g->sg) 501 kfree((void *)((unsigned long)g->sg - 502 g->adjust)); 503 kfree(g); 504 } 505 } while (g); 506 } 507 508 kfree(lio->glist); 509 kfree(lio->glist_lock); 510 } 511 512 /** 513 * \brief Setup gather lists 514 * @param lio per-network private data 515 */ 516 static int setup_glists(struct lio *lio, int num_iqs) 517 { 518 struct octnic_gather *g; 519 int i, j; 520 521 lio->glist_lock = 522 kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); 523 if (!lio->glist_lock) 524 return 1; 525 526 lio->glist = 527 kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); 528 if (!lio->glist) { 529 kfree(lio->glist_lock); 530 return 1; 531 } 532 533 for (i = 0; i < num_iqs; i++) { 534 spin_lock_init(&lio->glist_lock[i]); 535 536 INIT_LIST_HEAD(&lio->glist[i]); 537 538 for (j = 0; j < lio->tx_qsize; j++) { 539 g = kzalloc(sizeof(*g), GFP_KERNEL); 540 if (!g) 541 break; 542 543 g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * 544 OCT_SG_ENTRY_SIZE); 545 546 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); 547 if (!g->sg) { 548 kfree(g); 549 break; 550 } 551 552 /* The gather component should be aligned on 64-bit 553 * boundary 554 */ 555 if (((unsigned long)g->sg) & 7) { 556 g->adjust = 8 - (((unsigned long)g->sg) & 7); 557 g->sg = (struct octeon_sg_entry *) 558 ((unsigned long)g->sg + g->adjust); 559 } 560 list_add_tail(&g->list, &lio->glist[i]); 561 } 562 563 if (j != lio->tx_qsize) { 564 delete_glists(lio); 565 return 1; 566 } 567 } 568 569 return 0; 570 } 571 572 /** 573 * \brief Print link information 574 * @param netdev network device 575 */ 576 static void print_link_info(struct net_device *netdev) 577 { 578 struct lio *lio = GET_LIO(netdev); 579 580 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { 581 struct oct_link_info *linfo = &lio->linfo; 582 583 if (linfo->link.s.link_up) { 584 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", 585 linfo->link.s.speed, 586 (linfo->link.s.duplex) ? "Full" : "Half"); 587 } else { 588 netif_info(lio, link, lio->netdev, "Link Down\n"); 589 } 590 } 591 } 592 593 /** 594 * \brief Routine to notify MTU change 595 * @param work work_struct data structure 596 */ 597 static void octnet_link_status_change(struct work_struct *work) 598 { 599 struct cavium_wk *wk = (struct cavium_wk *)work; 600 struct lio *lio = (struct lio *)wk->ctxptr; 601 602 rtnl_lock(); 603 call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); 604 rtnl_unlock(); 605 } 606 607 /** 608 * \brief Sets up the mtu status change work 609 * @param netdev network device 610 */ 611 static int setup_link_status_change_wq(struct net_device *netdev) 612 { 613 struct lio *lio = GET_LIO(netdev); 614 struct octeon_device *oct = lio->oct_dev; 615 616 lio->link_status_wq.wq = alloc_workqueue("link-status", 617 WQ_MEM_RECLAIM, 0); 618 if (!lio->link_status_wq.wq) { 619 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); 620 return -1; 621 } 622 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, 623 octnet_link_status_change); 624 lio->link_status_wq.wk.ctxptr = lio; 625 626 return 0; 627 } 628 629 static void cleanup_link_status_change_wq(struct net_device *netdev) 630 { 631 struct lio *lio = GET_LIO(netdev); 632 633 if (lio->link_status_wq.wq) { 634 cancel_delayed_work_sync(&lio->link_status_wq.wk.work); 635 destroy_workqueue(lio->link_status_wq.wq); 636 } 637 } 638 639 /** 640 * \brief Update link status 641 * @param netdev network device 642 * @param ls link status structure 643 * 644 * Called on receipt of a link status response from the core application to 645 * update each interface's link status. 646 */ 647 static void update_link_status(struct net_device *netdev, 648 union oct_link_status *ls) 649 { 650 struct lio *lio = GET_LIO(netdev); 651 struct octeon_device *oct = lio->oct_dev; 652 653 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { 654 lio->linfo.link.u64 = ls->u64; 655 656 print_link_info(netdev); 657 lio->link_changes++; 658 659 if (lio->linfo.link.s.link_up) { 660 netif_carrier_on(netdev); 661 txqs_wake(netdev); 662 } else { 663 netif_carrier_off(netdev); 664 txqs_stop(netdev); 665 } 666 667 if (lio->linfo.link.s.mtu < netdev->mtu) { 668 dev_warn(&oct->pci_dev->dev, 669 "PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n", 670 netdev->mtu, lio->linfo.link.s.mtu); 671 lio->mtu = lio->linfo.link.s.mtu; 672 netdev->mtu = lio->linfo.link.s.mtu; 673 queue_delayed_work(lio->link_status_wq.wq, 674 &lio->link_status_wq.wk.work, 0); 675 } 676 } 677 } 678 679 static void update_txq_status(struct octeon_device *oct, int iq_num) 680 { 681 struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; 682 struct net_device *netdev; 683 struct lio *lio; 684 685 netdev = oct->props[iq->ifidx].netdev; 686 lio = GET_LIO(netdev); 687 if (netif_is_multiqueue(netdev)) { 688 if (__netif_subqueue_stopped(netdev, iq->q_index) && 689 lio->linfo.link.s.link_up && 690 (!octnet_iq_is_full(oct, iq_num))) { 691 netif_wake_subqueue(netdev, iq->q_index); 692 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, 693 tx_restart, 1); 694 } else { 695 if (!octnet_iq_is_full(oct, lio->txq)) { 696 INCR_INSTRQUEUE_PKT_COUNT( 697 lio->oct_dev, lio->txq, tx_restart, 1); 698 wake_q(netdev, lio->txq); 699 } 700 } 701 } 702 } 703 704 static 705 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) 706 { 707 struct octeon_device *oct = droq->oct_dev; 708 struct octeon_device_priv *oct_priv = 709 (struct octeon_device_priv *)oct->priv; 710 711 if (droq->ops.poll_mode) { 712 droq->ops.napi_fn(droq); 713 } else { 714 if (ret & MSIX_PO_INT) { 715 dev_err(&oct->pci_dev->dev, 716 "should not come here should not get rx when poll mode = 0 for vf\n"); 717 tasklet_schedule(&oct_priv->droq_tasklet); 718 return 1; 719 } 720 /* this will be flushed periodically by check iq db */ 721 if (ret & MSIX_PI_INT) 722 return 0; 723 } 724 return 0; 725 } 726 727 static irqreturn_t 728 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) 729 { 730 struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; 731 struct octeon_device *oct = ioq_vector->oct_dev; 732 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; 733 u64 ret; 734 735 ret = oct->fn_list.msix_interrupt_handler(ioq_vector); 736 737 if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) 738 liquidio_schedule_msix_droq_pkt_handler(droq, ret); 739 740 return IRQ_HANDLED; 741 } 742 743 /** 744 * \brief Setup interrupt for octeon device 745 * @param oct octeon device 746 * 747 * Enable interrupt in Octeon device as given in the PCI interrupt mask. 748 */ 749 static int octeon_setup_interrupt(struct octeon_device *oct) 750 { 751 struct msix_entry *msix_entries; 752 int num_alloc_ioq_vectors; 753 int num_ioq_vectors; 754 int irqret; 755 int i; 756 757 if (oct->msix_on) { 758 oct->num_msix_irqs = oct->sriov_info.rings_per_vf; 759 760 oct->msix_entries = kcalloc( 761 oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); 762 if (!oct->msix_entries) 763 return 1; 764 765 msix_entries = (struct msix_entry *)oct->msix_entries; 766 767 for (i = 0; i < oct->num_msix_irqs; i++) 768 msix_entries[i].entry = i; 769 num_alloc_ioq_vectors = pci_enable_msix_range( 770 oct->pci_dev, msix_entries, 771 oct->num_msix_irqs, 772 oct->num_msix_irqs); 773 if (num_alloc_ioq_vectors < 0) { 774 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); 775 kfree(oct->msix_entries); 776 oct->msix_entries = NULL; 777 return 1; 778 } 779 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); 780 781 num_ioq_vectors = oct->num_msix_irqs; 782 783 for (i = 0; i < num_ioq_vectors; i++) { 784 irqret = request_irq(msix_entries[i].vector, 785 liquidio_msix_intr_handler, 0, 786 "octeon", &oct->ioq_vector[i]); 787 if (irqret) { 788 dev_err(&oct->pci_dev->dev, 789 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", 790 irqret); 791 792 while (i) { 793 i--; 794 irq_set_affinity_hint( 795 msix_entries[i].vector, NULL); 796 free_irq(msix_entries[i].vector, 797 &oct->ioq_vector[i]); 798 } 799 pci_disable_msix(oct->pci_dev); 800 kfree(oct->msix_entries); 801 oct->msix_entries = NULL; 802 return 1; 803 } 804 oct->ioq_vector[i].vector = msix_entries[i].vector; 805 /* assign the cpu mask for this msix interrupt vector */ 806 irq_set_affinity_hint( 807 msix_entries[i].vector, 808 (&oct->ioq_vector[i].affinity_mask)); 809 } 810 dev_dbg(&oct->pci_dev->dev, 811 "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id); 812 } 813 return 0; 814 } 815 816 /** 817 * \brief PCI probe handler 818 * @param pdev PCI device structure 819 * @param ent unused 820 */ 821 static int 822 liquidio_vf_probe(struct pci_dev *pdev, 823 const struct pci_device_id *ent __attribute__((unused))) 824 { 825 struct octeon_device *oct_dev = NULL; 826 827 oct_dev = octeon_allocate_device(pdev->device, 828 sizeof(struct octeon_device_priv)); 829 830 if (!oct_dev) { 831 dev_err(&pdev->dev, "Unable to allocate device\n"); 832 return -ENOMEM; 833 } 834 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 835 836 dev_info(&pdev->dev, "Initializing device %x:%x.\n", 837 (u32)pdev->vendor, (u32)pdev->device); 838 839 /* Assign octeon_device for this device to the private data area. */ 840 pci_set_drvdata(pdev, oct_dev); 841 842 /* set linux specific device pointer */ 843 oct_dev->pci_dev = pdev; 844 845 if (octeon_device_init(oct_dev)) { 846 liquidio_vf_remove(pdev); 847 return -ENOMEM; 848 } 849 850 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); 851 852 return 0; 853 } 854 855 /** 856 * \brief PCI FLR for each Octeon device. 857 * @param oct octeon device 858 */ 859 static void octeon_pci_flr(struct octeon_device *oct) 860 { 861 u16 status; 862 863 pci_save_state(oct->pci_dev); 864 865 pci_cfg_access_lock(oct->pci_dev); 866 867 /* Quiesce the device completely */ 868 pci_write_config_word(oct->pci_dev, PCI_COMMAND, 869 PCI_COMMAND_INTX_DISABLE); 870 871 /* Wait for Transaction Pending bit clean */ 872 msleep(100); 873 pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status); 874 if (status & PCI_EXP_DEVSTA_TRPND) { 875 dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n"); 876 ssleep(5); 877 pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, 878 &status); 879 if (status & PCI_EXP_DEVSTA_TRPND) 880 dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, reset anyway\n"); 881 } 882 pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL, 883 PCI_EXP_DEVCTL_BCR_FLR); 884 mdelay(100); 885 886 pci_cfg_access_unlock(oct->pci_dev); 887 888 pci_restore_state(oct->pci_dev); 889 } 890 891 /** 892 *\brief Destroy resources associated with octeon device 893 * @param pdev PCI device structure 894 * @param ent unused 895 */ 896 static void octeon_destroy_resources(struct octeon_device *oct) 897 { 898 struct msix_entry *msix_entries; 899 int i; 900 901 switch (atomic_read(&oct->status)) { 902 case OCT_DEV_RUNNING: 903 case OCT_DEV_CORE_OK: 904 /* No more instructions will be forwarded. */ 905 atomic_set(&oct->status, OCT_DEV_IN_RESET); 906 907 oct->app_mode = CVM_DRV_INVALID_APP; 908 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", 909 lio_get_state_string(&oct->status)); 910 911 schedule_timeout_uninterruptible(HZ / 10); 912 913 /* fallthrough */ 914 case OCT_DEV_HOST_OK: 915 /* fallthrough */ 916 case OCT_DEV_IO_QUEUES_DONE: 917 if (wait_for_pending_requests(oct)) 918 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 919 920 if (lio_wait_for_instr_fetch(oct)) 921 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 922 923 /* Disable the input and output queues now. No more packets will 924 * arrive from Octeon, but we should wait for all packet 925 * processing to finish. 926 */ 927 oct->fn_list.disable_io_queues(oct); 928 929 if (lio_wait_for_oq_pkts(oct)) 930 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); 931 932 case OCT_DEV_INTR_SET_DONE: 933 /* Disable interrupts */ 934 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); 935 936 if (oct->msix_on) { 937 msix_entries = (struct msix_entry *)oct->msix_entries; 938 for (i = 0; i < oct->num_msix_irqs; i++) { 939 irq_set_affinity_hint(msix_entries[i].vector, 940 NULL); 941 free_irq(msix_entries[i].vector, 942 &oct->ioq_vector[i]); 943 } 944 pci_disable_msix(oct->pci_dev); 945 kfree(oct->msix_entries); 946 oct->msix_entries = NULL; 947 } 948 /* Soft reset the octeon device before exiting */ 949 if (oct->pci_dev->reset_fn) 950 octeon_pci_flr(oct); 951 else 952 cn23xx_vf_ask_pf_to_do_flr(oct); 953 954 /* fallthrough */ 955 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: 956 octeon_free_ioq_vector(oct); 957 958 /* fallthrough */ 959 case OCT_DEV_MBOX_SETUP_DONE: 960 oct->fn_list.free_mbox(oct); 961 962 /* fallthrough */ 963 case OCT_DEV_IN_RESET: 964 case OCT_DEV_DROQ_INIT_DONE: 965 mdelay(100); 966 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 967 if (!(oct->io_qmask.oq & BIT_ULL(i))) 968 continue; 969 octeon_delete_droq(oct, i); 970 } 971 972 /* fallthrough */ 973 case OCT_DEV_RESP_LIST_INIT_DONE: 974 octeon_delete_response_list(oct); 975 976 /* fallthrough */ 977 case OCT_DEV_INSTR_QUEUE_INIT_DONE: 978 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 979 if (!(oct->io_qmask.iq & BIT_ULL(i))) 980 continue; 981 octeon_delete_instr_queue(oct, i); 982 } 983 984 /* fallthrough */ 985 case OCT_DEV_SC_BUFF_POOL_INIT_DONE: 986 octeon_free_sc_buffer_pool(oct); 987 988 /* fallthrough */ 989 case OCT_DEV_DISPATCH_INIT_DONE: 990 octeon_delete_dispatch_list(oct); 991 cancel_delayed_work_sync(&oct->nic_poll_work.work); 992 993 /* fallthrough */ 994 case OCT_DEV_PCI_MAP_DONE: 995 octeon_unmap_pci_barx(oct, 0); 996 octeon_unmap_pci_barx(oct, 1); 997 998 /* fallthrough */ 999 case OCT_DEV_PCI_ENABLE_DONE: 1000 pci_clear_master(oct->pci_dev); 1001 /* Disable the device, releasing the PCI INT */ 1002 pci_disable_device(oct->pci_dev); 1003 1004 /* fallthrough */ 1005 case OCT_DEV_BEGIN_STATE: 1006 /* Nothing to be done here either */ 1007 break; 1008 } 1009 } 1010 1011 /** 1012 * \brief Callback for rx ctrl 1013 * @param status status of request 1014 * @param buf pointer to resp structure 1015 */ 1016 static void rx_ctl_callback(struct octeon_device *oct, 1017 u32 status, void *buf) 1018 { 1019 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1020 struct liquidio_rx_ctl_context *ctx; 1021 1022 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1023 1024 oct = lio_get_device(ctx->octeon_id); 1025 if (status) 1026 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", 1027 CVM_CAST64(status)); 1028 WRITE_ONCE(ctx->cond, 1); 1029 1030 /* This barrier is required to be sure that the response has been 1031 * written fully before waking up the handler 1032 */ 1033 wmb(); 1034 1035 wake_up_interruptible(&ctx->wc); 1036 } 1037 1038 /** 1039 * \brief Send Rx control command 1040 * @param lio per-network private data 1041 * @param start_stop whether to start or stop 1042 */ 1043 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) 1044 { 1045 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1046 int ctx_size = sizeof(struct liquidio_rx_ctl_context); 1047 struct liquidio_rx_ctl_context *ctx; 1048 struct octeon_soft_command *sc; 1049 union octnet_cmd *ncmd; 1050 int retval; 1051 1052 if (oct->props[lio->ifidx].rx_on == start_stop) 1053 return; 1054 1055 sc = (struct octeon_soft_command *) 1056 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 1057 16, ctx_size); 1058 1059 ncmd = (union octnet_cmd *)sc->virtdptr; 1060 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; 1061 1062 WRITE_ONCE(ctx->cond, 0); 1063 ctx->octeon_id = lio_get_device_id(oct); 1064 init_waitqueue_head(&ctx->wc); 1065 1066 ncmd->u64 = 0; 1067 ncmd->s.cmd = OCTNET_CMD_RX_CTL; 1068 ncmd->s.param1 = start_stop; 1069 1070 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); 1071 1072 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 1073 1074 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 1075 OPCODE_NIC_CMD, 0, 0, 0); 1076 1077 sc->callback = rx_ctl_callback; 1078 sc->callback_arg = sc; 1079 sc->wait_time = 5000; 1080 1081 retval = octeon_send_soft_command(oct, sc); 1082 if (retval == IQ_SEND_FAILED) { 1083 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); 1084 } else { 1085 /* Sleep on a wait queue till the cond flag indicates that the 1086 * response arrived or timed-out. 1087 */ 1088 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) 1089 return; 1090 oct->props[lio->ifidx].rx_on = start_stop; 1091 } 1092 1093 octeon_free_soft_command(oct, sc); 1094 } 1095 1096 /** 1097 * \brief Destroy NIC device interface 1098 * @param oct octeon device 1099 * @param ifidx which interface to destroy 1100 * 1101 * Cleanup associated with each interface for an Octeon device when NIC 1102 * module is being unloaded or if initialization fails during load. 1103 */ 1104 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) 1105 { 1106 struct net_device *netdev = oct->props[ifidx].netdev; 1107 struct napi_struct *napi, *n; 1108 struct lio *lio; 1109 1110 if (!netdev) { 1111 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", 1112 __func__, ifidx); 1113 return; 1114 } 1115 1116 lio = GET_LIO(netdev); 1117 1118 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); 1119 1120 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) 1121 liquidio_stop(netdev); 1122 1123 if (oct->props[lio->ifidx].napi_enabled == 1) { 1124 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1125 napi_disable(napi); 1126 1127 oct->props[lio->ifidx].napi_enabled = 0; 1128 1129 oct->droq[0]->ops.poll_mode = 0; 1130 } 1131 1132 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) 1133 unregister_netdev(netdev); 1134 1135 cleanup_link_status_change_wq(netdev); 1136 1137 delete_glists(lio); 1138 1139 free_netdev(netdev); 1140 1141 oct->props[ifidx].gmxport = -1; 1142 1143 oct->props[ifidx].netdev = NULL; 1144 } 1145 1146 /** 1147 * \brief Stop complete NIC functionality 1148 * @param oct octeon device 1149 */ 1150 static int liquidio_stop_nic_module(struct octeon_device *oct) 1151 { 1152 struct lio *lio; 1153 int i, j; 1154 1155 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); 1156 if (!oct->ifcount) { 1157 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); 1158 return 1; 1159 } 1160 1161 spin_lock_bh(&oct->cmd_resp_wqlock); 1162 oct->cmd_resp_state = OCT_DRV_OFFLINE; 1163 spin_unlock_bh(&oct->cmd_resp_wqlock); 1164 1165 for (i = 0; i < oct->ifcount; i++) { 1166 lio = GET_LIO(oct->props[i].netdev); 1167 for (j = 0; j < lio->linfo.num_rxpciq; j++) 1168 octeon_unregister_droq_ops(oct, 1169 lio->linfo.rxpciq[j].s.q_no); 1170 } 1171 1172 for (i = 0; i < oct->ifcount; i++) 1173 liquidio_destroy_nic_device(oct, i); 1174 1175 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); 1176 return 0; 1177 } 1178 1179 /** 1180 * \brief Cleans up resources at unload time 1181 * @param pdev PCI device structure 1182 */ 1183 static void liquidio_vf_remove(struct pci_dev *pdev) 1184 { 1185 struct octeon_device *oct_dev = pci_get_drvdata(pdev); 1186 1187 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); 1188 1189 if (oct_dev->app_mode == CVM_DRV_NIC_APP) 1190 liquidio_stop_nic_module(oct_dev); 1191 1192 /* Reset the octeon device and cleanup all memory allocated for 1193 * the octeon device by driver. 1194 */ 1195 octeon_destroy_resources(oct_dev); 1196 1197 dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); 1198 1199 /* This octeon device has been removed. Update the global 1200 * data structure to reflect this. Free the device structure. 1201 */ 1202 octeon_free_device_mem(oct_dev); 1203 } 1204 1205 /** 1206 * \brief PCI initialization for each Octeon device. 1207 * @param oct octeon device 1208 */ 1209 static int octeon_pci_os_setup(struct octeon_device *oct) 1210 { 1211 #ifdef CONFIG_PCI_IOV 1212 /* setup PCI stuff first */ 1213 if (!oct->pci_dev->physfn) 1214 octeon_pci_flr(oct); 1215 #endif 1216 1217 if (pci_enable_device(oct->pci_dev)) { 1218 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); 1219 return 1; 1220 } 1221 1222 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { 1223 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); 1224 pci_disable_device(oct->pci_dev); 1225 return 1; 1226 } 1227 1228 /* Enable PCI DMA Master. */ 1229 pci_set_master(oct->pci_dev); 1230 1231 return 0; 1232 } 1233 1234 static int skb_iq(struct lio *lio, struct sk_buff *skb) 1235 { 1236 int q = 0; 1237 1238 if (netif_is_multiqueue(lio->netdev)) 1239 q = skb->queue_mapping % lio->linfo.num_txpciq; 1240 1241 return q; 1242 } 1243 1244 /** 1245 * \brief Check Tx queue state for a given network buffer 1246 * @param lio per-network private data 1247 * @param skb network buffer 1248 */ 1249 static int check_txq_state(struct lio *lio, struct sk_buff *skb) 1250 { 1251 int q = 0, iq = 0; 1252 1253 if (netif_is_multiqueue(lio->netdev)) { 1254 q = skb->queue_mapping; 1255 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; 1256 } else { 1257 iq = lio->txq; 1258 q = iq; 1259 } 1260 1261 if (octnet_iq_is_full(lio->oct_dev, iq)) 1262 return 0; 1263 1264 if (__netif_subqueue_stopped(lio->netdev, q)) { 1265 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); 1266 wake_q(lio->netdev, q); 1267 } 1268 1269 return 1; 1270 } 1271 1272 /** 1273 * \brief Unmap and free network buffer 1274 * @param buf buffer 1275 */ 1276 static void free_netbuf(void *buf) 1277 { 1278 struct octnet_buf_free_info *finfo; 1279 struct sk_buff *skb; 1280 struct lio *lio; 1281 1282 finfo = (struct octnet_buf_free_info *)buf; 1283 skb = finfo->skb; 1284 lio = finfo->lio; 1285 1286 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, 1287 DMA_TO_DEVICE); 1288 1289 check_txq_state(lio, skb); 1290 1291 tx_buffer_free(skb); 1292 } 1293 1294 /** 1295 * \brief Unmap and free gather buffer 1296 * @param buf buffer 1297 */ 1298 static void free_netsgbuf(void *buf) 1299 { 1300 struct octnet_buf_free_info *finfo; 1301 struct octnic_gather *g; 1302 struct sk_buff *skb; 1303 int i, frags, iq; 1304 struct lio *lio; 1305 1306 finfo = (struct octnet_buf_free_info *)buf; 1307 skb = finfo->skb; 1308 lio = finfo->lio; 1309 g = finfo->g; 1310 frags = skb_shinfo(skb)->nr_frags; 1311 1312 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1313 g->sg[0].ptr[0], (skb->len - skb->data_len), 1314 DMA_TO_DEVICE); 1315 1316 i = 1; 1317 while (frags--) { 1318 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1319 1320 pci_unmap_page((lio->oct_dev)->pci_dev, 1321 g->sg[(i >> 2)].ptr[(i & 3)], 1322 frag->size, DMA_TO_DEVICE); 1323 i++; 1324 } 1325 1326 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1327 finfo->dptr, g->sg_size, 1328 DMA_TO_DEVICE); 1329 1330 iq = skb_iq(lio, skb); 1331 1332 spin_lock(&lio->glist_lock[iq]); 1333 list_add_tail(&g->list, &lio->glist[iq]); 1334 spin_unlock(&lio->glist_lock[iq]); 1335 1336 check_txq_state(lio, skb); /* mq support: sub-queue state check */ 1337 1338 tx_buffer_free(skb); 1339 } 1340 1341 /** 1342 * \brief Unmap and free gather buffer with response 1343 * @param buf buffer 1344 */ 1345 static void free_netsgbuf_with_resp(void *buf) 1346 { 1347 struct octnet_buf_free_info *finfo; 1348 struct octeon_soft_command *sc; 1349 struct octnic_gather *g; 1350 struct sk_buff *skb; 1351 int i, frags, iq; 1352 struct lio *lio; 1353 1354 sc = (struct octeon_soft_command *)buf; 1355 skb = (struct sk_buff *)sc->callback_arg; 1356 finfo = (struct octnet_buf_free_info *)&skb->cb; 1357 1358 lio = finfo->lio; 1359 g = finfo->g; 1360 frags = skb_shinfo(skb)->nr_frags; 1361 1362 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1363 g->sg[0].ptr[0], (skb->len - skb->data_len), 1364 DMA_TO_DEVICE); 1365 1366 i = 1; 1367 while (frags--) { 1368 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1369 1370 pci_unmap_page((lio->oct_dev)->pci_dev, 1371 g->sg[(i >> 2)].ptr[(i & 3)], 1372 frag->size, DMA_TO_DEVICE); 1373 i++; 1374 } 1375 1376 dma_unmap_single(&lio->oct_dev->pci_dev->dev, 1377 finfo->dptr, g->sg_size, 1378 DMA_TO_DEVICE); 1379 1380 iq = skb_iq(lio, skb); 1381 1382 spin_lock(&lio->glist_lock[iq]); 1383 list_add_tail(&g->list, &lio->glist[iq]); 1384 spin_unlock(&lio->glist_lock[iq]); 1385 1386 /* Don't free the skb yet */ 1387 1388 check_txq_state(lio, skb); 1389 } 1390 1391 /** 1392 * \brief Setup output queue 1393 * @param oct octeon device 1394 * @param q_no which queue 1395 * @param num_descs how many descriptors 1396 * @param desc_size size of each descriptor 1397 * @param app_ctx application context 1398 */ 1399 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, 1400 int desc_size, void *app_ctx) 1401 { 1402 int ret_val; 1403 1404 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1405 /* droq creation and local register settings. */ 1406 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1407 if (ret_val < 0) 1408 return ret_val; 1409 1410 if (ret_val == 1) { 1411 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); 1412 return 0; 1413 } 1414 1415 /* Enable the droq queues */ 1416 octeon_set_droq_pkt_op(oct, q_no, 1); 1417 1418 /* Send Credit for Octeon Output queues. Credits are always 1419 * sent after the output queue is enabled. 1420 */ 1421 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); 1422 1423 return ret_val; 1424 } 1425 1426 /** 1427 * \brief Callback for getting interface configuration 1428 * @param status status of request 1429 * @param buf pointer to resp structure 1430 */ 1431 static void if_cfg_callback(struct octeon_device *oct, 1432 u32 status __attribute__((unused)), void *buf) 1433 { 1434 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1435 struct liquidio_if_cfg_context *ctx; 1436 struct liquidio_if_cfg_resp *resp; 1437 1438 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 1439 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 1440 1441 oct = lio_get_device(ctx->octeon_id); 1442 if (resp->status) 1443 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 1444 CVM_CAST64(resp->status)); 1445 WRITE_ONCE(ctx->cond, 1); 1446 1447 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 1448 resp->cfg_info.liquidio_firmware_version); 1449 1450 /* This barrier is required to be sure that the response has been 1451 * written fully before waking up the handler 1452 */ 1453 wmb(); 1454 1455 wake_up_interruptible(&ctx->wc); 1456 } 1457 1458 /** 1459 * \brief Select queue based on hash 1460 * @param dev Net device 1461 * @param skb sk_buff structure 1462 * @returns selected queue number 1463 */ 1464 static u16 select_q(struct net_device *dev, struct sk_buff *skb, 1465 void *accel_priv __attribute__((unused)), 1466 select_queue_fallback_t fallback __attribute__((unused))) 1467 { 1468 struct lio *lio; 1469 u32 qindex; 1470 1471 lio = GET_LIO(dev); 1472 1473 qindex = skb_tx_hash(dev, skb); 1474 1475 return (u16)(qindex % (lio->linfo.num_txpciq)); 1476 } 1477 1478 /** Routine to push packets arriving on Octeon interface upto network layer. 1479 * @param oct_id - octeon device id. 1480 * @param skbuff - skbuff struct to be passed to network layer. 1481 * @param len - size of total data received. 1482 * @param rh - Control header associated with the packet 1483 * @param param - additional control data with the packet 1484 * @param arg - farg registered in droq_ops 1485 */ 1486 static void 1487 liquidio_push_packet(u32 octeon_id __attribute__((unused)), 1488 void *skbuff, 1489 u32 len, 1490 union octeon_rh *rh, 1491 void *param, 1492 void *arg) 1493 { 1494 struct napi_struct *napi = param; 1495 struct octeon_droq *droq = 1496 container_of(param, struct octeon_droq, napi); 1497 struct net_device *netdev = (struct net_device *)arg; 1498 struct sk_buff *skb = (struct sk_buff *)skbuff; 1499 u16 vtag = 0; 1500 1501 if (netdev) { 1502 struct lio *lio = GET_LIO(netdev); 1503 int packet_was_received; 1504 1505 /* Do not proceed if the interface is not in RUNNING state. */ 1506 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { 1507 recv_buffer_free(skb); 1508 droq->stats.rx_dropped++; 1509 return; 1510 } 1511 1512 skb->dev = netdev; 1513 1514 skb_record_rx_queue(skb, droq->q_no); 1515 if (likely(len > MIN_SKB_SIZE)) { 1516 struct octeon_skb_page_info *pg_info; 1517 unsigned char *va; 1518 1519 pg_info = ((struct octeon_skb_page_info *)(skb->cb)); 1520 if (pg_info->page) { 1521 /* For Paged allocation use the frags */ 1522 va = page_address(pg_info->page) + 1523 pg_info->page_offset; 1524 memcpy(skb->data, va, MIN_SKB_SIZE); 1525 skb_put(skb, MIN_SKB_SIZE); 1526 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1527 pg_info->page, 1528 pg_info->page_offset + 1529 MIN_SKB_SIZE, 1530 len - MIN_SKB_SIZE, 1531 LIO_RXBUFFER_SZ); 1532 } 1533 } else { 1534 struct octeon_skb_page_info *pg_info = 1535 ((struct octeon_skb_page_info *)(skb->cb)); 1536 skb_copy_to_linear_data(skb, 1537 page_address(pg_info->page) + 1538 pg_info->page_offset, len); 1539 skb_put(skb, len); 1540 put_page(pg_info->page); 1541 } 1542 1543 skb_pull(skb, rh->r_dh.len * 8); 1544 skb->protocol = eth_type_trans(skb, skb->dev); 1545 1546 if ((netdev->features & NETIF_F_RXCSUM) && 1547 (((rh->r_dh.encap_on) && 1548 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || 1549 (!(rh->r_dh.encap_on) && 1550 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) 1551 /* checksum has already been verified */ 1552 skb->ip_summed = CHECKSUM_UNNECESSARY; 1553 else 1554 skb->ip_summed = CHECKSUM_NONE; 1555 1556 /* Setting Encapsulation field on basis of status received 1557 * from the firmware 1558 */ 1559 if (rh->r_dh.encap_on) { 1560 skb->encapsulation = 1; 1561 skb->csum_level = 1; 1562 droq->stats.rx_vxlan++; 1563 } 1564 1565 /* inbound VLAN tag */ 1566 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1567 rh->r_dh.vlan) { 1568 u16 priority = rh->r_dh.priority; 1569 u16 vid = rh->r_dh.vlan; 1570 1571 vtag = (priority << VLAN_PRIO_SHIFT) | vid; 1572 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); 1573 } 1574 1575 packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); 1576 1577 if (packet_was_received) { 1578 droq->stats.rx_bytes_received += len; 1579 droq->stats.rx_pkts_received++; 1580 netdev->last_rx = jiffies; 1581 } else { 1582 droq->stats.rx_dropped++; 1583 netif_info(lio, rx_err, lio->netdev, 1584 "droq:%d error rx_dropped:%llu\n", 1585 droq->q_no, droq->stats.rx_dropped); 1586 } 1587 1588 } else { 1589 recv_buffer_free(skb); 1590 } 1591 } 1592 1593 /** 1594 * \brief callback when receive interrupt occurs and we are in NAPI mode 1595 * @param arg pointer to octeon output queue 1596 */ 1597 static void liquidio_vf_napi_drv_callback(void *arg) 1598 { 1599 struct octeon_droq *droq = arg; 1600 1601 napi_schedule_irqoff(&droq->napi); 1602 } 1603 1604 /** 1605 * \brief Entry point for NAPI polling 1606 * @param napi NAPI structure 1607 * @param budget maximum number of items to process 1608 */ 1609 static int liquidio_napi_poll(struct napi_struct *napi, int budget) 1610 { 1611 struct octeon_instr_queue *iq; 1612 struct octeon_device *oct; 1613 struct octeon_droq *droq; 1614 int tx_done = 0, iq_no; 1615 int work_done; 1616 1617 droq = container_of(napi, struct octeon_droq, napi); 1618 oct = droq->oct_dev; 1619 iq_no = droq->q_no; 1620 1621 /* Handle Droq descriptors */ 1622 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, 1623 POLL_EVENT_PROCESS_PKTS, 1624 budget); 1625 1626 /* Flush the instruction queue */ 1627 iq = oct->instr_queue[iq_no]; 1628 if (iq) { 1629 /* Process iq buffers with in the budget limits */ 1630 tx_done = octeon_flush_iq(oct, iq, 1, budget); 1631 /* Update iq read-index rather than waiting for next interrupt. 1632 * Return back if tx_done is false. 1633 */ 1634 update_txq_status(oct, iq_no); 1635 } else { 1636 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", 1637 __func__, iq_no); 1638 } 1639 1640 if ((work_done < budget) && (tx_done)) { 1641 napi_complete(napi); 1642 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, 1643 POLL_EVENT_ENABLE_INTR, 0); 1644 return 0; 1645 } 1646 1647 return (!tx_done) ? (budget) : (work_done); 1648 } 1649 1650 /** 1651 * \brief Setup input and output queues 1652 * @param octeon_dev octeon device 1653 * @param ifidx Interface index 1654 * 1655 * Note: Queues are with respect to the octeon device. Thus 1656 * an input queue is for egress packets, and output queues 1657 * are for ingress packets. 1658 */ 1659 static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) 1660 { 1661 struct octeon_droq_ops droq_ops; 1662 struct net_device *netdev; 1663 static int cpu_id_modulus; 1664 struct octeon_droq *droq; 1665 struct napi_struct *napi; 1666 static int cpu_id; 1667 int num_tx_descs; 1668 struct lio *lio; 1669 int retval = 0; 1670 int q, q_no; 1671 1672 netdev = octeon_dev->props[ifidx].netdev; 1673 1674 lio = GET_LIO(netdev); 1675 1676 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); 1677 1678 droq_ops.fptr = liquidio_push_packet; 1679 droq_ops.farg = netdev; 1680 1681 droq_ops.poll_mode = 1; 1682 droq_ops.napi_fn = liquidio_vf_napi_drv_callback; 1683 cpu_id = 0; 1684 cpu_id_modulus = num_present_cpus(); 1685 1686 /* set up DROQs. */ 1687 for (q = 0; q < lio->linfo.num_rxpciq; q++) { 1688 q_no = lio->linfo.rxpciq[q].s.q_no; 1689 1690 retval = octeon_setup_droq( 1691 octeon_dev, q_no, 1692 CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), 1693 lio->ifidx), 1694 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), 1695 lio->ifidx), 1696 NULL); 1697 if (retval) { 1698 dev_err(&octeon_dev->pci_dev->dev, 1699 "%s : Runtime DROQ(RxQ) creation failed.\n", 1700 __func__); 1701 return 1; 1702 } 1703 1704 droq = octeon_dev->droq[q_no]; 1705 napi = &droq->napi; 1706 netif_napi_add(netdev, napi, liquidio_napi_poll, 64); 1707 1708 /* designate a CPU for this droq */ 1709 droq->cpu_id = cpu_id; 1710 cpu_id++; 1711 if (cpu_id >= cpu_id_modulus) 1712 cpu_id = 0; 1713 1714 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); 1715 } 1716 1717 /* 23XX VF can send/recv control messages (via the first VF-owned 1718 * droq) from the firmware even if the ethX interface is down, 1719 * so that's why poll_mode must be off for the first droq. 1720 */ 1721 octeon_dev->droq[0]->ops.poll_mode = 0; 1722 1723 /* set up IQs. */ 1724 for (q = 0; q < lio->linfo.num_txpciq; q++) { 1725 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( 1726 octeon_get_conf(octeon_dev), lio->ifidx); 1727 retval = octeon_setup_iq(octeon_dev, ifidx, q, 1728 lio->linfo.txpciq[q], num_tx_descs, 1729 netdev_get_tx_queue(netdev, q)); 1730 if (retval) { 1731 dev_err(&octeon_dev->pci_dev->dev, 1732 " %s : Runtime IQ(TxQ) creation failed.\n", 1733 __func__); 1734 return 1; 1735 } 1736 } 1737 1738 return 0; 1739 } 1740 1741 /** 1742 * \brief Net device open for LiquidIO 1743 * @param netdev network device 1744 */ 1745 static int liquidio_open(struct net_device *netdev) 1746 { 1747 struct lio *lio = GET_LIO(netdev); 1748 struct octeon_device *oct = lio->oct_dev; 1749 struct napi_struct *napi, *n; 1750 1751 if (!oct->props[lio->ifidx].napi_enabled) { 1752 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) 1753 napi_enable(napi); 1754 1755 oct->props[lio->ifidx].napi_enabled = 1; 1756 1757 oct->droq[0]->ops.poll_mode = 1; 1758 } 1759 1760 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1761 1762 /* Ready for link status updates */ 1763 lio->intf_open = 1; 1764 1765 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); 1766 start_txq(netdev); 1767 1768 /* tell Octeon to start forwarding packets to host */ 1769 send_rx_ctrl_cmd(lio, 1); 1770 1771 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); 1772 1773 return 0; 1774 } 1775 1776 /** 1777 * \brief Net device stop for LiquidIO 1778 * @param netdev network device 1779 */ 1780 static int liquidio_stop(struct net_device *netdev) 1781 { 1782 struct lio *lio = GET_LIO(netdev); 1783 struct octeon_device *oct = lio->oct_dev; 1784 1785 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); 1786 /* Inform that netif carrier is down */ 1787 lio->intf_open = 0; 1788 lio->linfo.link.s.link_up = 0; 1789 1790 netif_carrier_off(netdev); 1791 lio->link_changes++; 1792 1793 /* tell Octeon to stop forwarding packets to host */ 1794 send_rx_ctrl_cmd(lio, 0); 1795 1796 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1797 1798 txqs_stop(netdev); 1799 1800 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 1801 1802 return 0; 1803 } 1804 1805 /** 1806 * \brief Converts a mask based on net device flags 1807 * @param netdev network device 1808 * 1809 * This routine generates a octnet_ifflags mask from the net device flags 1810 * received from the OS. 1811 */ 1812 static enum octnet_ifflags get_new_flags(struct net_device *netdev) 1813 { 1814 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; 1815 1816 if (netdev->flags & IFF_PROMISC) 1817 f |= OCTNET_IFFLAG_PROMISC; 1818 1819 if (netdev->flags & IFF_ALLMULTI) 1820 f |= OCTNET_IFFLAG_ALLMULTI; 1821 1822 if (netdev->flags & IFF_MULTICAST) { 1823 f |= OCTNET_IFFLAG_MULTICAST; 1824 1825 /* Accept all multicast addresses if there are more than we 1826 * can handle 1827 */ 1828 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) 1829 f |= OCTNET_IFFLAG_ALLMULTI; 1830 } 1831 1832 if (netdev->flags & IFF_BROADCAST) 1833 f |= OCTNET_IFFLAG_BROADCAST; 1834 1835 return f; 1836 } 1837 1838 static void liquidio_set_uc_list(struct net_device *netdev) 1839 { 1840 struct lio *lio = GET_LIO(netdev); 1841 struct octeon_device *oct = lio->oct_dev; 1842 struct octnic_ctrl_pkt nctrl; 1843 struct netdev_hw_addr *ha; 1844 u64 *mac; 1845 1846 if (lio->netdev_uc_count == netdev_uc_count(netdev)) 1847 return; 1848 1849 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { 1850 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); 1851 return; 1852 } 1853 1854 lio->netdev_uc_count = netdev_uc_count(netdev); 1855 1856 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1857 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; 1858 nctrl.ncmd.s.more = lio->netdev_uc_count; 1859 nctrl.ncmd.s.param1 = oct->vf_num; 1860 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1861 nctrl.netpndev = (u64)netdev; 1862 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1863 1864 /* copy all the addresses into the udd */ 1865 mac = &nctrl.udd[0]; 1866 netdev_for_each_uc_addr(ha, netdev) { 1867 ether_addr_copy(((u8 *)mac) + 2, ha->addr); 1868 mac++; 1869 } 1870 1871 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1872 } 1873 1874 /** 1875 * \brief Net device set_multicast_list 1876 * @param netdev network device 1877 */ 1878 static void liquidio_set_mcast_list(struct net_device *netdev) 1879 { 1880 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 1881 struct lio *lio = GET_LIO(netdev); 1882 struct octeon_device *oct = lio->oct_dev; 1883 struct octnic_ctrl_pkt nctrl; 1884 struct netdev_hw_addr *ha; 1885 u64 *mc; 1886 int ret; 1887 1888 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1889 1890 /* Create a ctrl pkt command to be sent to core app. */ 1891 nctrl.ncmd.u64 = 0; 1892 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; 1893 nctrl.ncmd.s.param1 = get_new_flags(netdev); 1894 nctrl.ncmd.s.param2 = mc_count; 1895 nctrl.ncmd.s.more = mc_count; 1896 nctrl.netpndev = (u64)netdev; 1897 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1898 1899 /* copy all the addresses into the udd */ 1900 mc = &nctrl.udd[0]; 1901 netdev_for_each_mc_addr(ha, netdev) { 1902 *mc = 0; 1903 ether_addr_copy(((u8 *)mc) + 2, ha->addr); 1904 /* no need to swap bytes */ 1905 if (++mc > &nctrl.udd[mc_count]) 1906 break; 1907 } 1908 1909 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1910 1911 /* Apparently, any activity in this call from the kernel has to 1912 * be atomic. So we won't wait for response. 1913 */ 1914 nctrl.wait_time = 0; 1915 1916 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1917 if (ret < 0) { 1918 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", 1919 ret); 1920 } 1921 1922 liquidio_set_uc_list(netdev); 1923 } 1924 1925 /** 1926 * \brief Net device set_mac_address 1927 * @param netdev network device 1928 */ 1929 static int liquidio_set_mac(struct net_device *netdev, void *p) 1930 { 1931 struct sockaddr *addr = (struct sockaddr *)p; 1932 struct lio *lio = GET_LIO(netdev); 1933 struct octeon_device *oct = lio->oct_dev; 1934 struct octnic_ctrl_pkt nctrl; 1935 int ret = 0; 1936 1937 if (!is_valid_ether_addr(addr->sa_data)) 1938 return -EADDRNOTAVAIL; 1939 1940 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 1941 return 0; 1942 1943 if (lio->linfo.macaddr_is_admin_asgnd) 1944 return -EPERM; 1945 1946 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 1947 1948 nctrl.ncmd.u64 = 0; 1949 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 1950 nctrl.ncmd.s.param1 = 0; 1951 nctrl.ncmd.s.more = 1; 1952 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 1953 nctrl.netpndev = (u64)netdev; 1954 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 1955 nctrl.wait_time = 100; 1956 1957 nctrl.udd[0] = 0; 1958 /* The MAC Address is presented in network byte order. */ 1959 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); 1960 1961 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 1962 if (ret < 0) { 1963 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); 1964 return -ENOMEM; 1965 } 1966 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1967 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); 1968 1969 return 0; 1970 } 1971 1972 /** 1973 * \brief Net device get_stats 1974 * @param netdev network device 1975 */ 1976 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) 1977 { 1978 struct lio *lio = GET_LIO(netdev); 1979 struct net_device_stats *stats = &netdev->stats; 1980 u64 pkts = 0, drop = 0, bytes = 0; 1981 struct oct_droq_stats *oq_stats; 1982 struct oct_iq_stats *iq_stats; 1983 struct octeon_device *oct; 1984 int i, iq_no, oq_no; 1985 1986 oct = lio->oct_dev; 1987 1988 for (i = 0; i < lio->linfo.num_txpciq; i++) { 1989 iq_no = lio->linfo.txpciq[i].s.q_no; 1990 iq_stats = &oct->instr_queue[iq_no]->stats; 1991 pkts += iq_stats->tx_done; 1992 drop += iq_stats->tx_dropped; 1993 bytes += iq_stats->tx_tot_bytes; 1994 } 1995 1996 stats->tx_packets = pkts; 1997 stats->tx_bytes = bytes; 1998 stats->tx_dropped = drop; 1999 2000 pkts = 0; 2001 drop = 0; 2002 bytes = 0; 2003 2004 for (i = 0; i < lio->linfo.num_rxpciq; i++) { 2005 oq_no = lio->linfo.rxpciq[i].s.q_no; 2006 oq_stats = &oct->droq[oq_no]->stats; 2007 pkts += oq_stats->rx_pkts_received; 2008 drop += (oq_stats->rx_dropped + 2009 oq_stats->dropped_nodispatch + 2010 oq_stats->dropped_toomany + 2011 oq_stats->dropped_nomem); 2012 bytes += oq_stats->rx_bytes_received; 2013 } 2014 2015 stats->rx_bytes = bytes; 2016 stats->rx_packets = pkts; 2017 stats->rx_dropped = drop; 2018 2019 return stats; 2020 } 2021 2022 /** 2023 * \brief Net device change_mtu 2024 * @param netdev network device 2025 */ 2026 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) 2027 { 2028 struct lio *lio = GET_LIO(netdev); 2029 struct octeon_device *oct = lio->oct_dev; 2030 2031 lio->mtu = new_mtu; 2032 2033 netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", 2034 netdev->mtu, new_mtu); 2035 dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", 2036 netdev->name, netdev->mtu, new_mtu); 2037 2038 netdev->mtu = new_mtu; 2039 2040 return 0; 2041 } 2042 2043 /** 2044 * \brief Handler for SIOCSHWTSTAMP ioctl 2045 * @param netdev network device 2046 * @param ifr interface request 2047 * @param cmd command 2048 */ 2049 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2050 { 2051 struct lio *lio = GET_LIO(netdev); 2052 struct hwtstamp_config conf; 2053 2054 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2055 return -EFAULT; 2056 2057 if (conf.flags) 2058 return -EINVAL; 2059 2060 switch (conf.tx_type) { 2061 case HWTSTAMP_TX_ON: 2062 case HWTSTAMP_TX_OFF: 2063 break; 2064 default: 2065 return -ERANGE; 2066 } 2067 2068 switch (conf.rx_filter) { 2069 case HWTSTAMP_FILTER_NONE: 2070 break; 2071 case HWTSTAMP_FILTER_ALL: 2072 case HWTSTAMP_FILTER_SOME: 2073 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2074 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2075 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2076 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2077 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2078 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2079 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2080 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2081 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2082 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2083 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2084 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2085 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2086 break; 2087 default: 2088 return -ERANGE; 2089 } 2090 2091 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2092 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2093 2094 else 2095 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2096 2097 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2098 } 2099 2100 /** 2101 * \brief ioctl handler 2102 * @param netdev network device 2103 * @param ifr interface request 2104 * @param cmd command 2105 */ 2106 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2107 { 2108 switch (cmd) { 2109 case SIOCSHWTSTAMP: 2110 return hwtstamp_ioctl(netdev, ifr); 2111 default: 2112 return -EOPNOTSUPP; 2113 } 2114 } 2115 2116 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) 2117 { 2118 struct sk_buff *skb = (struct sk_buff *)buf; 2119 struct octnet_buf_free_info *finfo; 2120 struct oct_timestamp_resp *resp; 2121 struct octeon_soft_command *sc; 2122 struct lio *lio; 2123 2124 finfo = (struct octnet_buf_free_info *)skb->cb; 2125 lio = finfo->lio; 2126 sc = finfo->sc; 2127 oct = lio->oct_dev; 2128 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2129 2130 if (status != OCTEON_REQUEST_DONE) { 2131 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", 2132 CVM_CAST64(status)); 2133 resp->timestamp = 0; 2134 } 2135 2136 octeon_swap_8B_data(&resp->timestamp, 1); 2137 2138 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { 2139 struct skb_shared_hwtstamps ts; 2140 u64 ns = resp->timestamp; 2141 2142 netif_info(lio, tx_done, lio->netdev, 2143 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2144 skb, (unsigned long long)ns); 2145 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2146 skb_tstamp_tx(skb, &ts); 2147 } 2148 2149 octeon_free_soft_command(oct, sc); 2150 tx_buffer_free(skb); 2151 } 2152 2153 /* \brief Send a data packet that will be timestamped 2154 * @param oct octeon device 2155 * @param ndata pointer to network data 2156 * @param finfo pointer to private network data 2157 */ 2158 static int send_nic_timestamp_pkt(struct octeon_device *oct, 2159 struct octnic_data_pkt *ndata, 2160 struct octnet_buf_free_info *finfo) 2161 { 2162 struct octeon_soft_command *sc; 2163 int ring_doorbell; 2164 struct lio *lio; 2165 int retval; 2166 u32 len; 2167 2168 lio = finfo->lio; 2169 2170 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2171 sizeof(struct oct_timestamp_resp)); 2172 finfo->sc = sc; 2173 2174 if (!sc) { 2175 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2176 return IQ_SEND_FAILED; 2177 } 2178 2179 if (ndata->reqtype == REQTYPE_NORESP_NET) 2180 ndata->reqtype = REQTYPE_RESP_NET; 2181 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2182 ndata->reqtype = REQTYPE_RESP_NET_SG; 2183 2184 sc->callback = handle_timestamp; 2185 sc->callback_arg = finfo->skb; 2186 sc->iq_no = ndata->q_no; 2187 2188 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; 2189 2190 ring_doorbell = 1; 2191 2192 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2193 sc, len, ndata->reqtype); 2194 2195 if (retval == IQ_SEND_FAILED) { 2196 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2197 retval); 2198 octeon_free_soft_command(oct, sc); 2199 } else { 2200 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2201 } 2202 2203 return retval; 2204 } 2205 2206 /** \brief Transmit networks packets to the Octeon interface 2207 * @param skbuff skbuff struct to be passed to network layer. 2208 * @param netdev pointer to network device 2209 * @returns whether the packet was transmitted to the device okay or not 2210 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2211 */ 2212 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2213 { 2214 struct octnet_buf_free_info *finfo; 2215 union octnic_cmd_setup cmdsetup; 2216 struct octnic_data_pkt ndata; 2217 struct octeon_instr_irh *irh; 2218 struct oct_iq_stats *stats; 2219 struct octeon_device *oct; 2220 int q_idx = 0, iq_no = 0; 2221 union tx_info *tx_info; 2222 struct lio *lio; 2223 int status = 0; 2224 u64 dptr = 0; 2225 u32 tag = 0; 2226 int j; 2227 2228 lio = GET_LIO(netdev); 2229 oct = lio->oct_dev; 2230 2231 if (netif_is_multiqueue(netdev)) { 2232 q_idx = skb->queue_mapping; 2233 q_idx = (q_idx % (lio->linfo.num_txpciq)); 2234 tag = q_idx; 2235 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2236 } else { 2237 iq_no = lio->txq; 2238 } 2239 2240 stats = &oct->instr_queue[iq_no]->stats; 2241 2242 /* Check for all conditions in which the current packet cannot be 2243 * transmitted. 2244 */ 2245 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2246 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { 2247 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", 2248 lio->linfo.link.s.link_up); 2249 goto lio_xmit_failed; 2250 } 2251 2252 /* Use space in skb->cb to store info used to unmap and 2253 * free the buffers. 2254 */ 2255 finfo = (struct octnet_buf_free_info *)skb->cb; 2256 finfo->lio = lio; 2257 finfo->skb = skb; 2258 finfo->sc = NULL; 2259 2260 /* Prepare the attributes for the data to be passed to OSI. */ 2261 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2262 2263 ndata.buf = finfo; 2264 2265 ndata.q_no = iq_no; 2266 2267 if (netif_is_multiqueue(netdev)) { 2268 if (octnet_iq_is_full(oct, ndata.q_no)) { 2269 /* defer sending if queue is full */ 2270 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2271 ndata.q_no); 2272 stats->tx_iq_busy++; 2273 return NETDEV_TX_BUSY; 2274 } 2275 } else { 2276 if (octnet_iq_is_full(oct, lio->txq)) { 2277 /* defer sending if queue is full */ 2278 stats->tx_iq_busy++; 2279 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2280 ndata.q_no); 2281 return NETDEV_TX_BUSY; 2282 } 2283 } 2284 2285 ndata.datasize = skb->len; 2286 2287 cmdsetup.u64 = 0; 2288 cmdsetup.s.iq_no = iq_no; 2289 2290 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2291 if (skb->encapsulation) { 2292 cmdsetup.s.tnl_csum = 1; 2293 stats->tx_vxlan++; 2294 } else { 2295 cmdsetup.s.transport_csum = 1; 2296 } 2297 } 2298 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2299 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2300 cmdsetup.s.timestamp = 1; 2301 } 2302 2303 if (!skb_shinfo(skb)->nr_frags) { 2304 cmdsetup.s.u.datasize = skb->len; 2305 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2306 /* Offload checksum calculation for TCP/UDP packets */ 2307 dptr = dma_map_single(&oct->pci_dev->dev, 2308 skb->data, 2309 skb->len, 2310 DMA_TO_DEVICE); 2311 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2312 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2313 __func__); 2314 return NETDEV_TX_BUSY; 2315 } 2316 2317 ndata.cmd.cmd3.dptr = dptr; 2318 finfo->dptr = dptr; 2319 ndata.reqtype = REQTYPE_NORESP_NET; 2320 2321 } else { 2322 struct skb_frag_struct *frag; 2323 struct octnic_gather *g; 2324 int i, frags; 2325 2326 spin_lock(&lio->glist_lock[q_idx]); 2327 g = (struct octnic_gather *)list_delete_head( 2328 &lio->glist[q_idx]); 2329 spin_unlock(&lio->glist_lock[q_idx]); 2330 2331 if (!g) { 2332 netif_info(lio, tx_err, lio->netdev, 2333 "Transmit scatter gather: glist null!\n"); 2334 goto lio_xmit_failed; 2335 } 2336 2337 cmdsetup.s.gather = 1; 2338 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2339 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2340 2341 memset(g->sg, 0, g->sg_size); 2342 2343 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2344 skb->data, 2345 (skb->len - skb->data_len), 2346 DMA_TO_DEVICE); 2347 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2348 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2349 __func__); 2350 return NETDEV_TX_BUSY; 2351 } 2352 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2353 2354 frags = skb_shinfo(skb)->nr_frags; 2355 i = 1; 2356 while (frags--) { 2357 frag = &skb_shinfo(skb)->frags[i - 1]; 2358 2359 g->sg[(i >> 2)].ptr[(i & 3)] = 2360 dma_map_page(&oct->pci_dev->dev, 2361 frag->page.p, 2362 frag->page_offset, 2363 frag->size, 2364 DMA_TO_DEVICE); 2365 if (dma_mapping_error(&oct->pci_dev->dev, 2366 g->sg[i >> 2].ptr[i & 3])) { 2367 dma_unmap_single(&oct->pci_dev->dev, 2368 g->sg[0].ptr[0], 2369 skb->len - skb->data_len, 2370 DMA_TO_DEVICE); 2371 for (j = 1; j < i; j++) { 2372 frag = &skb_shinfo(skb)->frags[j - 1]; 2373 dma_unmap_page(&oct->pci_dev->dev, 2374 g->sg[j >> 2].ptr[j & 3], 2375 frag->size, 2376 DMA_TO_DEVICE); 2377 } 2378 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2379 __func__); 2380 return NETDEV_TX_BUSY; 2381 } 2382 2383 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); 2384 i++; 2385 } 2386 2387 dptr = dma_map_single(&oct->pci_dev->dev, 2388 g->sg, g->sg_size, 2389 DMA_TO_DEVICE); 2390 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2391 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n", 2392 __func__); 2393 dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], 2394 skb->len - skb->data_len, 2395 DMA_TO_DEVICE); 2396 for (j = 1; j <= frags; j++) { 2397 frag = &skb_shinfo(skb)->frags[j - 1]; 2398 dma_unmap_page(&oct->pci_dev->dev, 2399 g->sg[j >> 2].ptr[j & 3], 2400 frag->size, DMA_TO_DEVICE); 2401 } 2402 return NETDEV_TX_BUSY; 2403 } 2404 2405 ndata.cmd.cmd3.dptr = dptr; 2406 finfo->dptr = dptr; 2407 finfo->g = g; 2408 2409 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2410 } 2411 2412 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2413 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2414 2415 if (skb_shinfo(skb)->gso_size) { 2416 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2417 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2418 } 2419 2420 /* HW insert VLAN tag */ 2421 if (skb_vlan_tag_present(skb)) { 2422 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; 2423 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; 2424 } 2425 2426 if (unlikely(cmdsetup.s.timestamp)) 2427 status = send_nic_timestamp_pkt(oct, &ndata, finfo); 2428 else 2429 status = octnet_send_nic_data_pkt(oct, &ndata); 2430 if (status == IQ_SEND_FAILED) 2431 goto lio_xmit_failed; 2432 2433 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2434 2435 if (status == IQ_SEND_STOP) { 2436 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", 2437 iq_no); 2438 stop_q(lio->netdev, q_idx); 2439 } 2440 2441 netif_trans_update(netdev); 2442 2443 if (skb_shinfo(skb)->gso_size) 2444 stats->tx_done += skb_shinfo(skb)->gso_segs; 2445 else 2446 stats->tx_done++; 2447 stats->tx_tot_bytes += skb->len; 2448 2449 return NETDEV_TX_OK; 2450 2451 lio_xmit_failed: 2452 stats->tx_dropped++; 2453 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2454 iq_no, stats->tx_dropped); 2455 if (dptr) 2456 dma_unmap_single(&oct->pci_dev->dev, dptr, 2457 ndata.datasize, DMA_TO_DEVICE); 2458 tx_buffer_free(skb); 2459 return NETDEV_TX_OK; 2460 } 2461 2462 /** \brief Network device Tx timeout 2463 * @param netdev pointer to network device 2464 */ 2465 static void liquidio_tx_timeout(struct net_device *netdev) 2466 { 2467 struct lio *lio; 2468 2469 lio = GET_LIO(netdev); 2470 2471 netif_info(lio, tx_err, lio->netdev, 2472 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2473 netdev->stats.tx_dropped); 2474 netif_trans_update(netdev); 2475 txqs_wake(netdev); 2476 } 2477 2478 static int 2479 liquidio_vlan_rx_add_vid(struct net_device *netdev, 2480 __be16 proto __attribute__((unused)), u16 vid) 2481 { 2482 struct lio *lio = GET_LIO(netdev); 2483 struct octeon_device *oct = lio->oct_dev; 2484 struct octnic_ctrl_pkt nctrl; 2485 int ret = 0; 2486 2487 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2488 2489 nctrl.ncmd.u64 = 0; 2490 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2491 nctrl.ncmd.s.param1 = vid; 2492 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2493 nctrl.wait_time = 100; 2494 nctrl.netpndev = (u64)netdev; 2495 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2496 2497 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2498 if (ret < 0) { 2499 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2500 ret); 2501 } 2502 2503 return ret; 2504 } 2505 2506 static int 2507 liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2508 __be16 proto __attribute__((unused)), u16 vid) 2509 { 2510 struct lio *lio = GET_LIO(netdev); 2511 struct octeon_device *oct = lio->oct_dev; 2512 struct octnic_ctrl_pkt nctrl; 2513 int ret = 0; 2514 2515 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2516 2517 nctrl.ncmd.u64 = 0; 2518 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2519 nctrl.ncmd.s.param1 = vid; 2520 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2521 nctrl.wait_time = 100; 2522 nctrl.netpndev = (u64)netdev; 2523 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2524 2525 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2526 if (ret < 0) { 2527 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2528 ret); 2529 } 2530 return ret; 2531 } 2532 2533 /** Sending command to enable/disable RX checksum offload 2534 * @param netdev pointer to network device 2535 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 2536 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 2537 * OCTNET_CMD_RXCSUM_DISABLE 2538 * @returns SUCCESS or FAILURE 2539 */ 2540 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2541 u8 rx_cmd) 2542 { 2543 struct lio *lio = GET_LIO(netdev); 2544 struct octeon_device *oct = lio->oct_dev; 2545 struct octnic_ctrl_pkt nctrl; 2546 int ret = 0; 2547 2548 nctrl.ncmd.u64 = 0; 2549 nctrl.ncmd.s.cmd = command; 2550 nctrl.ncmd.s.param1 = rx_cmd; 2551 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2552 nctrl.wait_time = 100; 2553 nctrl.netpndev = (u64)netdev; 2554 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2555 2556 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2557 if (ret < 0) { 2558 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", 2559 ret); 2560 } 2561 return ret; 2562 } 2563 2564 /** Sending command to add/delete VxLAN UDP port to firmware 2565 * @param netdev pointer to network device 2566 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 2567 * @param vxlan_port VxLAN port to be added or deleted 2568 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 2569 * OCTNET_CMD_VXLAN_PORT_DEL 2570 * @returns SUCCESS or FAILURE 2571 */ 2572 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2573 u16 vxlan_port, u8 vxlan_cmd_bit) 2574 { 2575 struct lio *lio = GET_LIO(netdev); 2576 struct octeon_device *oct = lio->oct_dev; 2577 struct octnic_ctrl_pkt nctrl; 2578 int ret = 0; 2579 2580 nctrl.ncmd.u64 = 0; 2581 nctrl.ncmd.s.cmd = command; 2582 nctrl.ncmd.s.more = vxlan_cmd_bit; 2583 nctrl.ncmd.s.param1 = vxlan_port; 2584 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2585 nctrl.wait_time = 100; 2586 nctrl.netpndev = (u64)netdev; 2587 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2588 2589 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2590 if (ret < 0) { 2591 dev_err(&oct->pci_dev->dev, 2592 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", 2593 ret); 2594 } 2595 return ret; 2596 } 2597 2598 /** \brief Net device fix features 2599 * @param netdev pointer to network device 2600 * @param request features requested 2601 * @returns updated features list 2602 */ 2603 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2604 netdev_features_t request) 2605 { 2606 struct lio *lio = netdev_priv(netdev); 2607 2608 if ((request & NETIF_F_RXCSUM) && 2609 !(lio->dev_capability & NETIF_F_RXCSUM)) 2610 request &= ~NETIF_F_RXCSUM; 2611 2612 if ((request & NETIF_F_HW_CSUM) && 2613 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2614 request &= ~NETIF_F_HW_CSUM; 2615 2616 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2617 request &= ~NETIF_F_TSO; 2618 2619 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2620 request &= ~NETIF_F_TSO6; 2621 2622 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2623 request &= ~NETIF_F_LRO; 2624 2625 /* Disable LRO if RXCSUM is off */ 2626 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2627 (lio->dev_capability & NETIF_F_LRO)) 2628 request &= ~NETIF_F_LRO; 2629 2630 return request; 2631 } 2632 2633 /** \brief Net device set features 2634 * @param netdev pointer to network device 2635 * @param features features to enable/disable 2636 */ 2637 static int liquidio_set_features(struct net_device *netdev, 2638 netdev_features_t features) 2639 { 2640 struct lio *lio = netdev_priv(netdev); 2641 2642 if (!((netdev->features ^ features) & NETIF_F_LRO)) 2643 return 0; 2644 2645 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) 2646 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2647 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2648 else if (!(features & NETIF_F_LRO) && 2649 (lio->dev_capability & NETIF_F_LRO)) 2650 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2651 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2652 if (!(netdev->features & NETIF_F_RXCSUM) && 2653 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2654 (features & NETIF_F_RXCSUM)) 2655 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2656 OCTNET_CMD_RXCSUM_ENABLE); 2657 else if ((netdev->features & NETIF_F_RXCSUM) && 2658 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2659 !(features & NETIF_F_RXCSUM)) 2660 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2661 OCTNET_CMD_RXCSUM_DISABLE); 2662 2663 return 0; 2664 } 2665 2666 static void liquidio_add_vxlan_port(struct net_device *netdev, 2667 struct udp_tunnel_info *ti) 2668 { 2669 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2670 return; 2671 2672 liquidio_vxlan_port_command(netdev, 2673 OCTNET_CMD_VXLAN_PORT_CONFIG, 2674 htons(ti->port), 2675 OCTNET_CMD_VXLAN_PORT_ADD); 2676 } 2677 2678 static void liquidio_del_vxlan_port(struct net_device *netdev, 2679 struct udp_tunnel_info *ti) 2680 { 2681 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 2682 return; 2683 2684 liquidio_vxlan_port_command(netdev, 2685 OCTNET_CMD_VXLAN_PORT_CONFIG, 2686 htons(ti->port), 2687 OCTNET_CMD_VXLAN_PORT_DEL); 2688 } 2689 2690 static const struct net_device_ops lionetdevops = { 2691 .ndo_open = liquidio_open, 2692 .ndo_stop = liquidio_stop, 2693 .ndo_start_xmit = liquidio_xmit, 2694 .ndo_get_stats = liquidio_get_stats, 2695 .ndo_set_mac_address = liquidio_set_mac, 2696 .ndo_set_rx_mode = liquidio_set_mcast_list, 2697 .ndo_tx_timeout = liquidio_tx_timeout, 2698 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 2699 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 2700 .ndo_change_mtu = liquidio_change_mtu, 2701 .ndo_do_ioctl = liquidio_ioctl, 2702 .ndo_fix_features = liquidio_fix_features, 2703 .ndo_set_features = liquidio_set_features, 2704 .ndo_udp_tunnel_add = liquidio_add_vxlan_port, 2705 .ndo_udp_tunnel_del = liquidio_del_vxlan_port, 2706 .ndo_select_queue = select_q, 2707 }; 2708 2709 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 2710 { 2711 struct octeon_device *oct = (struct octeon_device *)buf; 2712 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 2713 union oct_link_status *ls; 2714 int gmxport = 0; 2715 int i; 2716 2717 if (recv_pkt->buffer_size[0] != sizeof(*ls)) { 2718 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 2719 recv_pkt->buffer_size[0], 2720 recv_pkt->rh.r_nic_info.gmxport); 2721 goto nic_info_err; 2722 } 2723 2724 gmxport = recv_pkt->rh.r_nic_info.gmxport; 2725 ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); 2726 2727 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 2728 2729 for (i = 0; i < oct->ifcount; i++) { 2730 if (oct->props[i].gmxport == gmxport) { 2731 update_link_status(oct->props[i].netdev, ls); 2732 break; 2733 } 2734 } 2735 2736 nic_info_err: 2737 for (i = 0; i < recv_pkt->buffer_count; i++) 2738 recv_buffer_free(recv_pkt->buffer_ptr[i]); 2739 octeon_free_recv_info(recv_info); 2740 return 0; 2741 } 2742 2743 /** 2744 * \brief Setup network interfaces 2745 * @param octeon_dev octeon device 2746 * 2747 * Called during init time for each device. It assumes the NIC 2748 * is already up and running. The link information for each 2749 * interface is passed in link_info. 2750 */ 2751 static int setup_nic_devices(struct octeon_device *octeon_dev) 2752 { 2753 int retval, num_iqueues, num_oqueues; 2754 struct liquidio_if_cfg_context *ctx; 2755 u32 resp_size, ctx_size, data_size; 2756 struct liquidio_if_cfg_resp *resp; 2757 struct octeon_soft_command *sc; 2758 union oct_nic_if_cfg if_cfg; 2759 struct octdev_props *props; 2760 struct net_device *netdev; 2761 struct lio_version *vdata; 2762 struct lio *lio = NULL; 2763 u8 mac[ETH_ALEN], i, j; 2764 u32 ifidx_or_pfnum; 2765 2766 ifidx_or_pfnum = octeon_dev->pf_num; 2767 2768 /* This is to handle link status changes */ 2769 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, 2770 lio_nic_info, octeon_dev); 2771 2772 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 2773 * They are handled directly. 2774 */ 2775 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 2776 free_netbuf); 2777 2778 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 2779 free_netsgbuf); 2780 2781 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 2782 free_netsgbuf_with_resp); 2783 2784 for (i = 0; i < octeon_dev->ifcount; i++) { 2785 resp_size = sizeof(struct liquidio_if_cfg_resp); 2786 ctx_size = sizeof(struct liquidio_if_cfg_context); 2787 data_size = sizeof(struct lio_version); 2788 sc = (struct octeon_soft_command *) 2789 octeon_alloc_soft_command(octeon_dev, data_size, 2790 resp_size, ctx_size); 2791 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 2792 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; 2793 vdata = (struct lio_version *)sc->virtdptr; 2794 2795 *((u64 *)vdata) = 0; 2796 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 2797 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 2798 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 2799 2800 WRITE_ONCE(ctx->cond, 0); 2801 ctx->octeon_id = lio_get_device_id(octeon_dev); 2802 init_waitqueue_head(&ctx->wc); 2803 2804 if_cfg.u64 = 0; 2805 2806 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; 2807 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; 2808 if_cfg.s.base_queue = 0; 2809 2810 sc->iq_no = 0; 2811 2812 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 2813 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 2814 0); 2815 2816 sc->callback = if_cfg_callback; 2817 sc->callback_arg = sc; 2818 sc->wait_time = 5000; 2819 2820 retval = octeon_send_soft_command(octeon_dev, sc); 2821 if (retval == IQ_SEND_FAILED) { 2822 dev_err(&octeon_dev->pci_dev->dev, 2823 "iq/oq config failed status: %x\n", retval); 2824 /* Soft instr is freed by driver in case of failure. */ 2825 goto setup_nic_dev_fail; 2826 } 2827 2828 /* Sleep on a wait queue till the cond flag indicates that the 2829 * response arrived or timed-out. 2830 */ 2831 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { 2832 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); 2833 goto setup_nic_wait_intr; 2834 } 2835 2836 retval = resp->status; 2837 if (retval) { 2838 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 2839 goto setup_nic_dev_fail; 2840 } 2841 2842 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 2843 (sizeof(struct liquidio_if_cfg_info)) >> 3); 2844 2845 num_iqueues = hweight64(resp->cfg_info.iqmask); 2846 num_oqueues = hweight64(resp->cfg_info.oqmask); 2847 2848 if (!(num_iqueues) || !(num_oqueues)) { 2849 dev_err(&octeon_dev->pci_dev->dev, 2850 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 2851 resp->cfg_info.iqmask, resp->cfg_info.oqmask); 2852 goto setup_nic_dev_fail; 2853 } 2854 dev_dbg(&octeon_dev->pci_dev->dev, 2855 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 2856 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 2857 num_iqueues, num_oqueues); 2858 2859 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 2860 2861 if (!netdev) { 2862 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 2863 goto setup_nic_dev_fail; 2864 } 2865 2866 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 2867 2868 /* Associate the routines that will handle different 2869 * netdev tasks. 2870 */ 2871 netdev->netdev_ops = &lionetdevops; 2872 2873 lio = GET_LIO(netdev); 2874 2875 memset(lio, 0, sizeof(struct lio)); 2876 2877 lio->ifidx = ifidx_or_pfnum; 2878 2879 props = &octeon_dev->props[i]; 2880 props->gmxport = resp->cfg_info.linfo.gmxport; 2881 props->netdev = netdev; 2882 2883 lio->linfo.num_rxpciq = num_oqueues; 2884 lio->linfo.num_txpciq = num_iqueues; 2885 2886 for (j = 0; j < num_oqueues; j++) { 2887 lio->linfo.rxpciq[j].u64 = 2888 resp->cfg_info.linfo.rxpciq[j].u64; 2889 } 2890 for (j = 0; j < num_iqueues; j++) { 2891 lio->linfo.txpciq[j].u64 = 2892 resp->cfg_info.linfo.txpciq[j].u64; 2893 } 2894 2895 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 2896 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 2897 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 2898 lio->linfo.macaddr_is_admin_asgnd = 2899 resp->cfg_info.linfo.macaddr_is_admin_asgnd; 2900 2901 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 2902 2903 lio->dev_capability = NETIF_F_HIGHDMA 2904 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 2905 | NETIF_F_SG | NETIF_F_RXCSUM 2906 | NETIF_F_TSO | NETIF_F_TSO6 2907 | NETIF_F_GRO 2908 | NETIF_F_LRO; 2909 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 2910 2911 /* Copy of transmit encapsulation capabilities: 2912 * TSO, TSO6, Checksums for this device 2913 */ 2914 lio->enc_dev_capability = NETIF_F_IP_CSUM 2915 | NETIF_F_IPV6_CSUM 2916 | NETIF_F_GSO_UDP_TUNNEL 2917 | NETIF_F_HW_CSUM | NETIF_F_SG 2918 | NETIF_F_RXCSUM 2919 | NETIF_F_TSO | NETIF_F_TSO6 2920 | NETIF_F_LRO; 2921 2922 netdev->hw_enc_features = 2923 (lio->enc_dev_capability & ~NETIF_F_LRO); 2924 netdev->vlan_features = lio->dev_capability; 2925 /* Add any unchangeable hw features */ 2926 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 2927 NETIF_F_HW_VLAN_CTAG_RX | 2928 NETIF_F_HW_VLAN_CTAG_TX; 2929 2930 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 2931 2932 netdev->hw_features = lio->dev_capability; 2933 2934 /* MTU range: 68 - 16000 */ 2935 netdev->min_mtu = LIO_MIN_MTU_SIZE; 2936 netdev->max_mtu = LIO_MAX_MTU_SIZE; 2937 2938 /* Point to the properties for octeon device to which this 2939 * interface belongs. 2940 */ 2941 lio->oct_dev = octeon_dev; 2942 lio->octprops = props; 2943 lio->netdev = netdev; 2944 2945 dev_dbg(&octeon_dev->pci_dev->dev, 2946 "if%d gmx: %d hw_addr: 0x%llx\n", i, 2947 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 2948 2949 /* 64-bit swap required on LE machines */ 2950 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 2951 for (j = 0; j < ETH_ALEN; j++) 2952 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 2953 2954 /* Copy MAC Address to OS network device structure */ 2955 ether_addr_copy(netdev->dev_addr, mac); 2956 2957 if (setup_io_queues(octeon_dev, i)) { 2958 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 2959 goto setup_nic_dev_fail; 2960 } 2961 2962 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 2963 2964 /* For VFs, enable Octeon device interrupts here, 2965 * as this is contingent upon IO queue setup 2966 */ 2967 octeon_dev->fn_list.enable_interrupt(octeon_dev, 2968 OCTEON_ALL_INTR); 2969 2970 /* By default all interfaces on a single Octeon uses the same 2971 * tx and rx queues 2972 */ 2973 lio->txq = lio->linfo.txpciq[0].s.q_no; 2974 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 2975 2976 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 2977 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 2978 2979 if (setup_glists(lio, num_iqueues)) { 2980 dev_err(&octeon_dev->pci_dev->dev, 2981 "Gather list allocation failed\n"); 2982 goto setup_nic_dev_fail; 2983 } 2984 2985 /* Register ethtool support */ 2986 liquidio_set_ethtool_ops(netdev); 2987 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) 2988 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 2989 else 2990 octeon_dev->priv_flags = 0x0; 2991 2992 if (netdev->features & NETIF_F_LRO) 2993 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2994 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2995 2996 if ((debug != -1) && (debug & NETIF_MSG_HW)) 2997 liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE, 2998 0); 2999 3000 if (setup_link_status_change_wq(netdev)) 3001 goto setup_nic_dev_fail; 3002 3003 /* Register the network device with the OS */ 3004 if (register_netdev(netdev)) { 3005 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3006 goto setup_nic_dev_fail; 3007 } 3008 3009 dev_dbg(&octeon_dev->pci_dev->dev, 3010 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3011 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3012 netif_carrier_off(netdev); 3013 lio->link_changes++; 3014 3015 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3016 3017 /* Sending command to firmware to enable Rx checksum offload 3018 * by default at the time of setup of Liquidio driver for 3019 * this device 3020 */ 3021 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3022 OCTNET_CMD_RXCSUM_ENABLE); 3023 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3024 OCTNET_CMD_TXCSUM_ENABLE); 3025 3026 dev_dbg(&octeon_dev->pci_dev->dev, 3027 "NIC ifidx:%d Setup successful\n", i); 3028 3029 octeon_free_soft_command(octeon_dev, sc); 3030 } 3031 3032 return 0; 3033 3034 setup_nic_dev_fail: 3035 3036 octeon_free_soft_command(octeon_dev, sc); 3037 3038 setup_nic_wait_intr: 3039 3040 while (i--) { 3041 dev_err(&octeon_dev->pci_dev->dev, 3042 "NIC ifidx:%d Setup failed\n", i); 3043 liquidio_destroy_nic_device(octeon_dev, i); 3044 } 3045 return -ENODEV; 3046 } 3047 3048 /** 3049 * \brief initialize the NIC 3050 * @param oct octeon device 3051 * 3052 * This initialization routine is called once the Octeon device application is 3053 * up and running 3054 */ 3055 static int liquidio_init_nic_module(struct octeon_device *oct) 3056 { 3057 struct oct_intrmod_cfg *intrmod_cfg; 3058 int num_nic_ports = 1; 3059 int i, retval = 0; 3060 3061 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3062 3063 /* only default iq and oq were initialized 3064 * initialize the rest as well run port_config command for each port 3065 */ 3066 oct->ifcount = num_nic_ports; 3067 memset(oct->props, 0, 3068 sizeof(struct octdev_props) * num_nic_ports); 3069 3070 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3071 oct->props[i].gmxport = -1; 3072 3073 retval = setup_nic_devices(oct); 3074 if (retval) { 3075 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3076 goto octnet_init_failure; 3077 } 3078 3079 /* Initialize interrupt moderation params */ 3080 intrmod_cfg = &((struct octeon_device *)oct)->intrmod; 3081 intrmod_cfg->rx_enable = 1; 3082 intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; 3083 intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; 3084 intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; 3085 intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER; 3086 intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER; 3087 intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER; 3088 intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER; 3089 intrmod_cfg->tx_enable = 1; 3090 intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER; 3091 intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER; 3092 intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); 3093 intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); 3094 intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); 3095 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3096 3097 return retval; 3098 3099 octnet_init_failure: 3100 3101 oct->ifcount = 0; 3102 3103 return retval; 3104 } 3105 3106 /** 3107 * \brief Device initialization for each Octeon device that is probed 3108 * @param octeon_dev octeon device 3109 */ 3110 static int octeon_device_init(struct octeon_device *oct) 3111 { 3112 u32 rev_id; 3113 int j; 3114 3115 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE); 3116 3117 /* Enable access to the octeon device and make its DMA capability 3118 * known to the OS. 3119 */ 3120 if (octeon_pci_os_setup(oct)) 3121 return 1; 3122 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE); 3123 3124 oct->chip_id = OCTEON_CN23XX_VF_VID; 3125 pci_read_config_dword(oct->pci_dev, 8, &rev_id); 3126 oct->rev_id = rev_id & 0xff; 3127 3128 if (cn23xx_setup_octeon_vf_device(oct)) 3129 return 1; 3130 3131 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE); 3132 3133 oct->app_mode = CVM_DRV_NIC_APP; 3134 3135 /* Initialize the dispatch mechanism used to push packets arriving on 3136 * Octeon Output queues. 3137 */ 3138 if (octeon_init_dispatch_list(oct)) 3139 return 1; 3140 3141 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE); 3142 3143 if (octeon_set_io_queues_off(oct)) { 3144 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); 3145 return 1; 3146 } 3147 3148 if (oct->fn_list.setup_device_regs(oct)) { 3149 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n"); 3150 return 1; 3151 } 3152 3153 /* Initialize soft command buffer pool */ 3154 if (octeon_setup_sc_buffer_pool(oct)) { 3155 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); 3156 return 1; 3157 } 3158 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); 3159 3160 /* Setup the data structures that manage this Octeon's Input queues. */ 3161 if (octeon_setup_instr_queues(oct)) { 3162 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); 3163 return 1; 3164 } 3165 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); 3166 3167 /* Initialize lists to manage the requests of different types that 3168 * arrive from user & kernel applications for this octeon device. 3169 */ 3170 if (octeon_setup_response_list(oct)) { 3171 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); 3172 return 1; 3173 } 3174 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); 3175 3176 if (octeon_setup_output_queues(oct)) { 3177 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); 3178 return 1; 3179 } 3180 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); 3181 3182 if (oct->fn_list.setup_mbox(oct)) { 3183 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); 3184 return 1; 3185 } 3186 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); 3187 3188 if (octeon_allocate_ioq_vector(oct)) { 3189 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); 3190 return 1; 3191 } 3192 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); 3193 3194 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n", 3195 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); 3196 3197 /* Setup the interrupt handler and record the INT SUM register address*/ 3198 if (octeon_setup_interrupt(oct)) 3199 return 1; 3200 3201 if (cn23xx_octeon_pfvf_handshake(oct)) 3202 return 1; 3203 3204 /* Enable Octeon device interrupts */ 3205 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); 3206 3207 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); 3208 3209 /* Enable the input and output queues for this Octeon device */ 3210 if (oct->fn_list.enable_io_queues(oct)) { 3211 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); 3212 return 1; 3213 } 3214 3215 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); 3216 3217 atomic_set(&oct->status, OCT_DEV_HOST_OK); 3218 3219 /* Send Credit for Octeon Output queues. Credits are always sent after 3220 * the output queue is enabled. 3221 */ 3222 for (j = 0; j < oct->num_oqs; j++) 3223 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); 3224 3225 /* Packets can start arriving on the output queues from this point. */ 3226 3227 atomic_set(&oct->status, OCT_DEV_CORE_OK); 3228 3229 atomic_set(&oct->status, OCT_DEV_RUNNING); 3230 3231 if (liquidio_init_nic_module(oct)) 3232 return 1; 3233 3234 return 0; 3235 } 3236 3237 static int __init liquidio_vf_init(void) 3238 { 3239 octeon_init_device_list(0); 3240 return pci_register_driver(&liquidio_vf_pci_driver); 3241 } 3242 3243 static void __exit liquidio_vf_exit(void) 3244 { 3245 pci_unregister_driver(&liquidio_vf_pci_driver); 3246 3247 pr_info("LiquidIO_VF network module is now unloaded\n"); 3248 } 3249 3250 module_init(liquidio_vf_init); 3251 module_exit(liquidio_vf_exit); 3252