/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"

/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250

#define OCTNIC_MAX_SG MAX_SKB_FRAGS

/**
 * lio_delete_glists - Delete gather lists
 * @lio: per-network private data
 */
void lio_delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->oct_dev->num_iqs; i++) {
		do {
			g = (struct octnic_gather *)
			    lio_list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * lio_setup_glists - Setup gather lists
 * @oct: octeon_device
 * @lio: per-network private data
 * @num_iqs: count of iqs to allocate
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist =
	    kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		lio_delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}

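/**
 * liquidio_set_feature - send a feature-control command to the firmware
 * @netdev: network device
 * @cmd: command code (one of the OCTNET_CMD_* values)
 * @param1: command parameter
 *
 * Returns 0 on success, or a negative errno if the control packet could
 * not be sent (positive firmware errors are mapped to -EIO).
 */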
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}
	return ret;
}

void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}

int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return 0;
	}

	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);

	return netif_xmit_stopped(txq);
}

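/**
 * liquidio_link_ctrl_cmd_completion - completion callback for link control commands
 * @nctrl_ptr: pointer to the control packet that was sent to the firmware
 *
 * Logs the outcome of the command that completed; nothing is logged if the
 * firmware reported a non-zero status.
 */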
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	if (nctrl->sc_status)
		return;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
	case OCTNET_CMD_SET_UC_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				netif_info(lio, probe, lio->netdev,
					   "MAC Address %pM is configured for VF %d\n",
					   mac, vfidx);
		} else {
			netif_info(lio, probe, lio->netdev,
				   "MACAddr changed to %pM\n",
				   mac);
		}
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter enabled\n", netdev->name);
		else
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter disabled\n", netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);

		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

	case OCTNET_CMD_QUEUE_COUNT_CTL:
		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
			   nctrl->ncmd.s.param1);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}

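/**
 * octeon_pf_changed_vf_macaddr - update the VF netdev after the PF changed its MAC
 * @oct: octeon device (VF)
 * @mac: new MAC address assigned by the PF
 *
 * Updates the local netdev state and fires NETDEV_CHANGEADDR; the firmware
 * already knows about the change because the PF made it.
 */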
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
	bool macaddr_changed = false;
	struct net_device *netdev;
	struct lio *lio;

	rtnl_lock();

	netdev = oct->props[0].netdev;
	lio = GET_LIO(netdev);

	lio->linfo.macaddr_is_admin_asgnd = true;

	if (!ether_addr_equal(netdev->dev_addr, mac)) {
		macaddr_changed = true;
		ether_addr_copy(netdev->dev_addr, mac);
		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
	}

	rtnl_unlock();

	if (macaddr_changed)
		dev_info(&oct->pci_dev->dev,
			 "PF changed VF's MAC address to %pM\n", mac);

	/* no need to notify the firmware of the macaddr change because
	 * the PF did that already
	 */
}

void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq)
{
	struct net_device *netdev = oct->props[0].netdev;
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];

	queue_delayed_work(wq->wq, &wq->wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}

static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	int q_no = wk->ctxul;
	struct octeon_droq *droq = oct->droq[q_no];

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
		return;

	if (octeon_retry_droq_refill(droq))
		octeon_schedule_rxq_oom_work(oct, droq);
}

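/**
 * setup_rx_oom_poll_fn - create the per-rxq out-of-memory poll workqueues
 * @netdev: network device
 *
 * One delayed-work item per output queue retries DROQ buffer refills that
 * failed under memory pressure (see octnet_poll_check_rxq_oom_status).
 *
 * Returns 0 on success or -ENOMEM if a workqueue could not be allocated.
 */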
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q, q_no;

	for (q = 0; q < oct->num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		wq = &lio->rxq_status_wq[q_no];
		wq->wq = alloc_workqueue("rxq-oom-status",
					 WQ_MEM_RECLAIM, 0);
		if (!wq->wq) {
			dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&wq->wk.work,
				  octnet_poll_check_rxq_oom_status);
		wq->wk.ctxptr = lio;
		wq->wk.ctxul = q_no;
	}

	return 0;
}

void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q_no;

	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		wq = &lio->rxq_status_wq[q_no];
		if (wq->wq) {
			cancel_delayed_work_sync(&wq->wk.work);
			flush_workqueue(wq->wq);
			destroy_workqueue(wq->wq);
			wq->wq = NULL;
		}
	}
}

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    lio->linfo.link.s.link_up &&
	    (!octnet_iq_is_full(oct, iq_num))) {
		netif_wake_subqueue(netdev, iq->q_index);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
					  tx_restart, 1);
	}
}

/**
 * octeon_setup_droq - Setup output queue
 * @oct: octeon device
 * @q_no: which queue
 * @num_descs: how many descriptors
 * @desc_size: size of each descriptor
 * @app_ctx: application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/**
 * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
 * @octeon_id: octeon device id.
 * @skbuff: skbuff struct to be passed to network layer.
 * @len: size of total data received.
 * @rh: Control header associated with the packet
 * @param: additional control data with the packet
 * @arg: farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 __maybe_unused octeon_id,
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
	    container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));

			skb_copy_to_linear_data(skb,
						page_address(pg_info->page) +
						pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
			CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}

/**
 * napi_schedule_wrapper - wrapper for calling napi_schedule
 * @param: parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}

/**
 * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
 * @arg: pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
	    droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
		smp_call_function_single_async(droq->cpu_id, &droq->csd);
	}
}

/**
 * liquidio_napi_poll - Entry point for NAPI polling
 * @napi: NAPI structure
 * @budget: maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;
	struct octeon_droq *droq;
	int tx_done = 0, iq_no;
	int work_done;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* TODO: move this check to inside octeon_flush_iq,
		 * once check_db_timeout is removed
		 */
		if (atomic_read(&iq->instr_pending))
			/* Process iq buffers within the budget limit */
			tx_done = octeon_flush_iq(oct, iq, budget);
		else
			tx_done = 1;
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return if tx_done is false.
		 */
		/* sub-queue status update */
		lio_update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

#define MAX_REG_CNT 2000000U
	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		napi_complete_done(napi, work_done);

		octeon_enable_irq(droq->oct_dev, droq->q_no);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}

/**
 * liquidio_setup_io_queues - Setup input and output queues
 * @octeon_dev: octeon device
 * @ifidx: Interface index
 * @num_iqs: input io queue count
 * @num_oqs: output io queue count
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx),
		    NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so poll_mode must be off for the
		 * first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}

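/**
 * liquidio_schedule_msix_droq_pkt_handler - schedule droq processing from MSI-X
 * @droq: octeon output queue that raised the interrupt
 * @ret: interrupt status mask returned by the hardware-specific handler
 *
 * In poll mode the queue's NAPI callback is invoked directly; otherwise
 * packet-output work is deferred to the droq tasklet.
 */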
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			if (OCTEON_CN23XX_VF(oct))
				dev_err(&oct->pci_dev->dev,
					"should not get rx interrupt when poll mode = 0 for vf\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}

	return 0;
}

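/**
 * liquidio_msix_intr_handler - MSI-X interrupt handler for an ioq vector
 * @irq: unused
 * @dev: pointer to the octeon_ioq_vector that raised the interrupt
 */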
irqreturn_t
liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
 * @oct: octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct octeon_droq *droq;
	u64 oq_no;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= BIT_ULL(oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

/**
 * liquidio_legacy_intr_handler - Interrupt handler for octeon
 * @irq: unused
 * @dev: octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * octeon_setup_interrupt - Setup interrupt for octeon device
 * @oct: octeon device
 * @num_ioqs: number of queues
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption: PF MSI-X vector numbers start at pf_srn and
		 * run through trs, not from 0; change this code if that is
		 * not the case.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "Unable to allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"request_irq failed for MSI-X interrupt, error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0 ; i < num_ioq_vectors ; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"request_irq failed for MSI-X interrupt, error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector,
						NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask);
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}

/**
 * liquidio_change_mtu - Net device change_mtu
 * @netdev: network device
 * @new_mtu: the new max transmit unit size
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int ret = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}
	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed out.
	 */
	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;

	if (sc->sc_status) {
		WRITE_ONCE(sc->caller_is_done, true);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;
	lio->mtu = new_mtu;

	WRITE_ONCE(sc->caller_is_done, true);
	return 0;
}

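/**
 * lio_wait_for_clean_oq - wait for pending output-queue packets to drain
 * @oct: octeon device
 *
 * Polls the droq pending counts for up to 100 scheduling intervals.
 *
 * Returns the number of packets still pending when the wait ends,
 * i.e. 0 once all output queues have drained.
 */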
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
	int retry = 100, pending_pkts = 0;
	int idx;

	do {
		pending_pkts = 0;

		for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
			if (!(oct->io_qmask.oq & BIT_ULL(idx)))
				continue;
			pending_pkts +=
				atomic_read(&oct->droq[idx]->pkts_pending);
		}

		if (pending_pkts > 0)
			schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pending_pkts;
}

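/**
 * octnet_nic_stats_callback - completion callback for the port-stats request
 * @oct_dev: octeon device
 * @status: status of the soft command (e.g. OCTEON_REQUEST_TIMEOUT)
 * @ptr: the soft command whose response buffer holds the firmware stats
 *
 * Byte-swaps the response and copies it into oct_dev->link_stats, then
 * marks resp->status so the caller can tell success (1) from failure (-1).
 */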
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
	    (struct oct_nic_stats_resp *)sc->virtrptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
		rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
		tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		resp->status = -1;
	}
}

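/**
 * lio_fetch_vf_stats - query the firmware for per-VF spoof-check counters
 * @lio: per-network private data
 *
 * Sends OPCODE_NIC_VF_PORT_STATS and logs a warning if the firmware
 * reported any spoofed packets.
 */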
static int lio_fetch_vf_stats(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_vf_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_vf_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		retval = -ENOMEM;
		goto lio_fetch_vf_stats_exit;
	}

	resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_vf_stats_exit;
	}

	retval =
		wait_for_sc_completion_timeout(oct_dev, sc,
					       (2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"sc OPCODE_NIC_VF_PORT_STATS command failed\n");
		goto lio_fetch_vf_stats_exit;
	}

	if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
				    (sizeof(u64)) >> 3);

		if (resp->spoofmac_cnt != 0) {
			dev_warn(&oct_dev->pci_dev->dev,
				 "%llu Spoofed packets detected\n",
				 resp->spoofmac_cnt);
		}
	}
	WRITE_ONCE(sc->caller_is_done, true);

lio_fetch_vf_stats_exit:
	return retval;
}

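/**
 * lio_fetch_stats - periodic work item that refreshes the link statistics
 * @work: work_struct embedded in lio->stats_wk
 *
 * Requests OPCODE_NIC_PORT_STATS from the firmware and reschedules itself
 * while the interface is running. On the PF it also polls the VF
 * spoof-check counters every LIO_VFSTATS_POLL invocations.
 */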
void lio_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = wk->ctxptr;
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_stats_resp *resp;
	unsigned long time_in_jiffies;
	int retval;

	if (OCTEON_CN23XX_PF(oct_dev)) {
		/* report spoofchk every 2 seconds */
		if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
		    (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
		    oct_dev->sriov_info.num_vfs_alloced) {
			lio_fetch_vf_stats(lio);
		}

		oct_dev->vfstats_poll++;
	}

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		goto lio_fetch_stats_exit;
	}

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_stats_exit;
	}

	retval = wait_for_sc_completion_timeout(oct_dev, sc,
						(2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		goto lio_fetch_stats_exit;
	}

	octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
	WRITE_ONCE(sc->caller_is_done, true);

lio_fetch_stats_exit:
	time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
	if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
		schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);

	return;
}

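/**
 * liquidio_set_speed - change the link speed setting via the firmware/U-Boot
 * @lio: per-network private data
 * @speed: requested speed setting (e.g. 10 or 25, in Gbps)
 *
 * Only supported on CN23XX PF devices. Returns 0 on success or a
 * negative errno.
 */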
int liquidio_set_speed(struct lio *lio, int speed)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	if (oct->speed_setting == speed)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
			__func__);
		return -EOPNOTSUPP;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
	ncmd->s.param1 = speed;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EBUSY;
	} else {
		/* Wait for response or timeout */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;

		if (retval) {
			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
				__func__, retval);
			WRITE_ONCE(sc->caller_is_done, true);

			return -EIO;
		}

		var = be32_to_cpu((__force __be32)resp->speed);
		if (var != speed) {
			dev_err(&oct->pci_dev->dev,
				"%s: setting failed speed= %x, expect %x\n",
				__func__, var, speed);
		}

		oct->speed_setting = var;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

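/**
 * liquidio_get_speed - read the current link speed setting from the firmware/U-Boot
 * @lio: per-network private data
 *
 * Updates oct->speed_setting; if the boot variable is unreadable (0xffff),
 * a default is chosen based on the NIC subsystem id. Returns 0 on success
 * or a negative errno.
 */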
int liquidio_get_speed(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EIO;
	} else {
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&oct->pci_dev->dev,
				"%s failed retval=%d\n", __func__, retval);
			retval = -EIO;
		} else {
			u32 var;

			var = be32_to_cpu((__force __be32)resp->speed);
			oct->speed_setting = var;
			if (var == 0xffff) {
				/* unable to access boot variables
				 * get the default value based on the NIC type
				 */
				if (oct->subsystem_id ==
				    OCTEON_CN2350_25GB_SUBSYS_ID ||
				    oct->subsystem_id ==
				    OCTEON_CN2360_25GB_SUBSYS_ID) {
					oct->no_speed_setting = 1;
					oct->speed_setting = 25;
				} else {
					oct->speed_setting = 10;
				}
			}
		}
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

int liquidio_set_fec(struct lio *lio, int on_off)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	if (oct->props[lio->ifidx].fec == on_off)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
			__func__);
		return -1;
	}

	if (oct->speed_boot != 25) {
		dev_err(&oct->pci_dev->dev,
			"Set FEC only when link speed is 25G during insmod\n");
		return -1;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_SET;
	ncmd->s.param1 = on_off;
	/* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return (-EIO);

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (var != on_off) {
		dev_err(&oct->pci_dev->dev,
			"Setting failed fec= %x, expect %x\n",
			var, on_off);
	}

	/* Record the setting actually reported by the firmware, even when
	 * it matches the requested value.
	 */
	oct->props[lio->ifidx].fec = var;
	if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
		oct->props[lio->ifidx].fec = 1;
	else
		oct->props[lio->ifidx].fec = 0;

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}

int liquidio_get_fec(struct lio *lio)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev,
			 "%s: Failed to send soft command\n", __func__);
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
		oct->props[lio->ifidx].fec = 1;
	else
		oct->props[lio->ifidx].fec = 0;

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}