// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
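	/* Worked example (illustrative figures, not from the datasheet):
	 * pba is in KB, so IGC_PBA_34K yields a 34 KB Rx FIFO, i.e.
	 * (34 << 10) = 34816 bytes.  With a standard 1500-byte MTU,
	 * max_frame_size is 1522 bytes, so hwm lands one maximum Tx frame
	 * plus one jumbo Rx frame below the top of the FIFO before being
	 * rounded down to the 16-byte granularity the hardware expects.
	 */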
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_offload_apply(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}
}

static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
{
	struct igc_rx_buffer *bi;
	u16 i;

	for (i = 0; i < ring->count; i++) {
		bi = &ring->rx_buffer_info[i];
		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *ring)
{
	if (ring->xsk_pool)
		igc_clean_rx_ring_xsk_pool(ring);
	else
		igc_clean_rx_ring_page_shared(ring);

	clear_ring_uses_large_buffer(ring);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

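	/* Illustrative default spread: assuming IGC_RETA_SIZE is 128 and
	 * four RSS queues are in use, the fill below maps indirection
	 * entries 0-31 to queue 0, 32-63 to queue 1, and so on, giving an
	 * even static distribution across the queues.
	 */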
	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
				(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *         matching the filter are enqueued onto 'queue'.  Otherwise, queue
 *         assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	u32 launchtime;

	/* FIXME: when using ETF together with taprio, we may have a
	 * case where 'delta' is larger than the cycle_time, this may
	 * cause problems if we don't read the current value of
	 * IGC_BASET, as the value written into the launchtime
	 * descriptor field may be misinterpreted.
	 */
	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);

	return cpu_to_le32(launchtime);
}
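/* Worked example for the launchtime math above (illustrative values): with
 * base_time = 0 and cycle_time = 1 ms, a requested txtime of 2.5 ms leaves a
 * remainder of 0.5 ms (500000 ns), i.e. the offset of the transmit slot
 * within the current cycle that gets programmed into the descriptor.
 */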
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    struct igc_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid Tx time available.  Invalid times
	 * should have been handled by the upper layers.
	 */
	if (tx_ring->launchtime_enable) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
		ktime_t txtime = first->skb->tstamp;

		skb_txtime_consumed(first->skb);
		context_desc->launch_time = igc_tx_launchtime(adapter,
							      txtime);
	} else {
		context_desc->launch_time = 0;
	}
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: ensure the queue stop above is visible before we
	 * re-check for free descriptors below.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
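/* Example expansion (for illustration only): with _flag == BIT(1) and
 * _result == BIT(6), IGC_SET_FLAG() multiplies the masked input by
 * BIT(6) / BIT(1) == 32, moving the flag bit into the result position
 * without a conditional branch; when _flag is the larger value it divides
 * by the inverse ratio instead.
 */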
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
				 IGC_ADVTXD_DCMD_VLE);

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = igc_tx_cmd_type(skb, tx_flags);
	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}
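	/* For reference (typical values): an untagged TCPv4 frame has
	 * l4_offset = 14 (Ethernet) + 20 (IPv4) = 34 bytes, so with a
	 * 20-byte TCP header (doff == 5) *hdr_len works out to 54 bytes.
	 */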
	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
			type_tucmd, mss_l4len_idx);

	return 1;
}

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	u32 tx_flags = 0;
	unsigned short f;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
				&skb_shinfo(skb)->frags[f]));
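	/* Example (illustrative): a linear header plus three page-sized
	 * fragments gives count = 4, so the check below reserves 4 + 3
	 * descriptors; the extra three cover the context descriptor and
	 * the two-descriptor gap that keeps tail from touching head.
	 */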
	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->type = IGC_TX_BUFFER_TYPE_SKB;
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGC_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}
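/* For instance (illustrative): with four Tx queues configured, an skb
 * carrying queue_mapping 5 is folded onto ring 1 by the modulo above.
 */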
static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

static void igc_rx_vlan(struct igc_ring *rx_ring,
			union igc_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	igc_rx_vlan(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl |= IGC_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~IGC_CTRL_VME;
	}
	wr32(IGC_CTRL, ctrl);
}

static void igc_restore_vlan(struct igc_adapter *adapter)
{
	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
			       unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
	buffer->page_offset ^= truesize;
#else
	buffer->page_offset += truesize;
#endif
}
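/* On systems with PAGE_SIZE < 8192 the XOR above ping-pongs the offset
 * between the two halves of the page (e.g. 0 <-> 2048 for 2K buffers), so
 * the half just handed to the stack can be aged out while the other half
 * is handed back to the hardware.
 */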
static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(ring) / 2;
#else
	truesize = ring_uses_build_skb(ring) ?
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	igc_rx_buffer_flip(rx_buffer, truesize);
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	igc_rx_buffer_flip(rx_buffer, truesize);
	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	void *va = xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
		igc_rx_buffer_flip(rx_buffer, truesize);
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
				  int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}
/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer,
			      int rx_buffer_pgcnt)
{
	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	struct igc_adapter *adapter = rx_ring->q_vector->adapter;

	if (ring_uses_build_skb(rx_ring))
		return IGC_SKB_PAD;
	if (igc_xdp_is_enabled(adapter))
		return XDP_PACKET_HEADROOM;

	return 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;
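	/* i now counts up from -count to zero, so the refill loop below can
	 * detect wraparound with a cheap "!i" test instead of comparing
	 * against rx_ring->count on every iteration.
	 */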
bi->dma, 2012 bi->page_offset, bufsz, 2013 DMA_FROM_DEVICE); 2014 2015 /* Refresh the desc even if buffer_addrs didn't change 2016 * because each write-back erases this info. 2017 */ 2018 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 2019 2020 rx_desc++; 2021 bi++; 2022 i++; 2023 if (unlikely(!i)) { 2024 rx_desc = IGC_RX_DESC(rx_ring, 0); 2025 bi = rx_ring->rx_buffer_info; 2026 i -= rx_ring->count; 2027 } 2028 2029 /* clear the length for the next_to_use descriptor */ 2030 rx_desc->wb.upper.length = 0; 2031 2032 cleaned_count--; 2033 } while (cleaned_count); 2034 2035 i += rx_ring->count; 2036 2037 if (rx_ring->next_to_use != i) { 2038 /* record the next descriptor to use */ 2039 rx_ring->next_to_use = i; 2040 2041 /* update next to alloc since we have filled the ring */ 2042 rx_ring->next_to_alloc = i; 2043 2044 /* Force memory writes to complete before letting h/w 2045 * know there are new descriptors to fetch. (Only 2046 * applicable for weak-ordered memory model archs, 2047 * such as IA-64). 2048 */ 2049 wmb(); 2050 writel(i, rx_ring->tail); 2051 } 2052 } 2053 2054 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) 2055 { 2056 union igc_adv_rx_desc *desc; 2057 u16 i = ring->next_to_use; 2058 struct igc_rx_buffer *bi; 2059 dma_addr_t dma; 2060 bool ok = true; 2061 2062 if (!count) 2063 return ok; 2064 2065 desc = IGC_RX_DESC(ring, i); 2066 bi = &ring->rx_buffer_info[i]; 2067 i -= ring->count; 2068 2069 do { 2070 bi->xdp = xsk_buff_alloc(ring->xsk_pool); 2071 if (!bi->xdp) { 2072 ok = false; 2073 break; 2074 } 2075 2076 dma = xsk_buff_xdp_get_dma(bi->xdp); 2077 desc->read.pkt_addr = cpu_to_le64(dma); 2078 2079 desc++; 2080 bi++; 2081 i++; 2082 if (unlikely(!i)) { 2083 desc = IGC_RX_DESC(ring, 0); 2084 bi = ring->rx_buffer_info; 2085 i -= ring->count; 2086 } 2087 2088 /* Clear the length for the next_to_use descriptor. */ 2089 desc->wb.upper.length = 0; 2090 2091 count--; 2092 } while (count); 2093 2094 i += ring->count; 2095 2096 if (ring->next_to_use != i) { 2097 ring->next_to_use = i; 2098 2099 /* Force memory writes to complete before letting h/w 2100 * know there are new descriptors to fetch. (Only 2101 * applicable for weak-ordered memory model archs, 2102 * such as IA-64). 2103 */ 2104 wmb(); 2105 writel(i, ring->tail); 2106 } 2107 2108 return ok; 2109 } 2110 2111 static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, 2112 struct xdp_frame *xdpf, 2113 struct igc_ring *ring) 2114 { 2115 dma_addr_t dma; 2116 2117 dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); 2118 if (dma_mapping_error(ring->dev, dma)) { 2119 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); 2120 return -ENOMEM; 2121 } 2122 2123 buffer->type = IGC_TX_BUFFER_TYPE_XDP; 2124 buffer->xdpf = xdpf; 2125 buffer->protocol = 0; 2126 buffer->bytecount = xdpf->len; 2127 buffer->gso_segs = 1; 2128 buffer->time_stamp = jiffies; 2129 dma_unmap_len_set(buffer, len, xdpf->len); 2130 dma_unmap_addr_set(buffer, dma, dma); 2131 return 0; 2132 } 2133 2134 /* This function requires __netif_tx_lock is held by the caller. 
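 *
 * A minimal sketch of the expected calling pattern (mirroring
 * igc_xdp_xmit_back() below; nq and cpu are the caller's queue and CPU):
 *
 *	nq = txring_txq(ring);
 *	__netif_tx_lock(nq, cpu);
 *	err = igc_xdp_init_tx_descriptor(ring, xdpf);
 *	__netif_tx_unlock(nq);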
*/
2135 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2136 				      struct xdp_frame *xdpf)
2137 {
2138 	struct igc_tx_buffer *buffer;
2139 	union igc_adv_tx_desc *desc;
2140 	u32 cmd_type, olinfo_status;
2141 	int err;
2142 
2143 	if (!igc_desc_unused(ring))
2144 		return -EBUSY;
2145 
2146 	buffer = &ring->tx_buffer_info[ring->next_to_use];
2147 	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
2148 	if (err)
2149 		return err;
2150 
2151 	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2152 		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2153 		   buffer->bytecount;
2154 	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2155 
2156 	desc = IGC_TX_DESC(ring, ring->next_to_use);
2157 	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2158 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2159 	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
2160 
2161 	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
2162 
2163 	buffer->next_to_watch = desc;
2164 
2165 	ring->next_to_use++;
2166 	if (ring->next_to_use == ring->count)
2167 		ring->next_to_use = 0;
2168 
2169 	return 0;
2170 }
2171 
2172 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
2173 					    int cpu)
2174 {
2175 	int index = cpu;
2176 
2177 	if (unlikely(index < 0))
2178 		index = 0;
2179 
2180 	while (index >= adapter->num_tx_queues)
2181 		index -= adapter->num_tx_queues;
2182 
2183 	return adapter->tx_ring[index];
2184 }
2185 
2186 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2187 {
2188 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2189 	int cpu = smp_processor_id();
2190 	struct netdev_queue *nq;
2191 	struct igc_ring *ring;
2192 	int res;
2193 
2194 	if (unlikely(!xdpf))
2195 		return -EFAULT;
2196 
2197 	ring = igc_xdp_get_tx_ring(adapter, cpu);
2198 	nq = txring_txq(ring);
2199 
2200 	__netif_tx_lock(nq, cpu);
2201 	res = igc_xdp_init_tx_descriptor(ring, xdpf);
2202 	__netif_tx_unlock(nq);
2203 	return res;
2204 }
2205 
2206 /* This function assumes rcu_read_lock() is held by the caller. */
2207 static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2208 			      struct bpf_prog *prog,
2209 			      struct xdp_buff *xdp)
2210 {
2211 	u32 act = bpf_prog_run_xdp(prog, xdp);
2212 
2213 	switch (act) {
2214 	case XDP_PASS:
2215 		return IGC_XDP_PASS;
2216 	case XDP_TX:
2217 		if (igc_xdp_xmit_back(adapter, xdp) < 0)
2218 			goto out_failure;
2219 		return IGC_XDP_TX;
2220 	case XDP_REDIRECT:
2221 		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2222 			goto out_failure;
2223 		return IGC_XDP_REDIRECT;
2225 	default:
2226 		bpf_warn_invalid_xdp_action(act);
2227 		fallthrough;
2228 	case XDP_ABORTED:
2229 out_failure:
2230 		trace_xdp_exception(adapter->netdev, prog, act);
2231 		fallthrough;
2232 	case XDP_DROP:
2233 		return IGC_XDP_CONSUMED;
2234 	}
2235 }
2236 
2237 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2238 					struct xdp_buff *xdp)
2239 {
2240 	struct bpf_prog *prog;
2241 	int res;
2242 
2243 	prog = READ_ONCE(adapter->xdp_prog);
2244 	if (!prog) {
2245 		res = IGC_XDP_PASS;
2246 		goto out;
2247 	}
2248 
2249 	res = __igc_xdp_run_prog(adapter, prog, xdp);
2250 
2251 out:
2252 	return ERR_PTR(-res);
2253 }
2254 
2255 /* This function assumes __netif_tx_lock is held by the caller. */
2256 static void igc_flush_tx_descriptors(struct igc_ring *ring)
2257 {
2258 	/* Once tail pointer is updated, hardware can fetch the descriptors
2259 	 * any time so we issue a write membar here to ensure all memory
2260 	 * writes are complete before the tail pointer is updated.
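	 *
	 * In other words, the wmb()/writel() pair below is the usual
	 * descriptor publish sequence: complete all descriptor memory
	 * writes first, then move the tail so hardware may begin fetching.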
2261 */ 2262 wmb(); 2263 writel(ring->next_to_use, ring->tail); 2264 } 2265 2266 static void igc_finalize_xdp(struct igc_adapter *adapter, int status) 2267 { 2268 int cpu = smp_processor_id(); 2269 struct netdev_queue *nq; 2270 struct igc_ring *ring; 2271 2272 if (status & IGC_XDP_TX) { 2273 ring = igc_xdp_get_tx_ring(adapter, cpu); 2274 nq = txring_txq(ring); 2275 2276 __netif_tx_lock(nq, cpu); 2277 igc_flush_tx_descriptors(ring); 2278 __netif_tx_unlock(nq); 2279 } 2280 2281 if (status & IGC_XDP_REDIRECT) 2282 xdp_do_flush(); 2283 } 2284 2285 static void igc_update_rx_stats(struct igc_q_vector *q_vector, 2286 unsigned int packets, unsigned int bytes) 2287 { 2288 struct igc_ring *ring = q_vector->rx.ring; 2289 2290 u64_stats_update_begin(&ring->rx_syncp); 2291 ring->rx_stats.packets += packets; 2292 ring->rx_stats.bytes += bytes; 2293 u64_stats_update_end(&ring->rx_syncp); 2294 2295 q_vector->rx.total_packets += packets; 2296 q_vector->rx.total_bytes += bytes; 2297 } 2298 2299 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) 2300 { 2301 unsigned int total_bytes = 0, total_packets = 0; 2302 struct igc_adapter *adapter = q_vector->adapter; 2303 struct igc_ring *rx_ring = q_vector->rx.ring; 2304 struct sk_buff *skb = rx_ring->skb; 2305 u16 cleaned_count = igc_desc_unused(rx_ring); 2306 int xdp_status = 0, rx_buffer_pgcnt; 2307 2308 while (likely(total_packets < budget)) { 2309 union igc_adv_rx_desc *rx_desc; 2310 struct igc_rx_buffer *rx_buffer; 2311 unsigned int size, truesize; 2312 ktime_t timestamp = 0; 2313 struct xdp_buff xdp; 2314 int pkt_offset = 0; 2315 void *pktbuf; 2316 2317 /* return some buffers to hardware, one at a time is too slow */ 2318 if (cleaned_count >= IGC_RX_BUFFER_WRITE) { 2319 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2320 cleaned_count = 0; 2321 } 2322 2323 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); 2324 size = le16_to_cpu(rx_desc->wb.upper.length); 2325 if (!size) 2326 break; 2327 2328 /* This memory barrier is needed to keep us from reading 2329 * any other fields out of the rx_desc until we know the 2330 * descriptor has been written back 2331 */ 2332 dma_rmb(); 2333 2334 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2335 truesize = igc_get_rx_frame_truesize(rx_ring, size); 2336 2337 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; 2338 2339 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { 2340 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2341 pktbuf); 2342 pkt_offset = IGC_TS_HDR_LEN; 2343 size -= IGC_TS_HDR_LEN; 2344 } 2345 2346 if (!skb) { 2347 xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); 2348 xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), 2349 igc_rx_offset(rx_ring) + pkt_offset, size, false); 2350 2351 skb = igc_xdp_run_prog(adapter, &xdp); 2352 } 2353 2354 if (IS_ERR(skb)) { 2355 unsigned int xdp_res = -PTR_ERR(skb); 2356 2357 switch (xdp_res) { 2358 case IGC_XDP_CONSUMED: 2359 rx_buffer->pagecnt_bias++; 2360 break; 2361 case IGC_XDP_TX: 2362 case IGC_XDP_REDIRECT: 2363 igc_rx_buffer_flip(rx_buffer, truesize); 2364 xdp_status |= xdp_res; 2365 break; 2366 } 2367 2368 total_packets++; 2369 total_bytes += size; 2370 } else if (skb) 2371 igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 2372 else if (ring_uses_build_skb(rx_ring)) 2373 skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); 2374 else 2375 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, 2376 timestamp); 2377 2378 /* exit if we failed to retrieve a buffer */ 2379 if (!skb) { 2380 
rx_ring->rx_stats.alloc_failed++; 2381 rx_buffer->pagecnt_bias++; 2382 break; 2383 } 2384 2385 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2386 cleaned_count++; 2387 2388 /* fetch next buffer in frame if non-eop */ 2389 if (igc_is_non_eop(rx_ring, rx_desc)) 2390 continue; 2391 2392 /* verify the packet layout is correct */ 2393 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 2394 skb = NULL; 2395 continue; 2396 } 2397 2398 /* probably a little skewed due to removing CRC */ 2399 total_bytes += skb->len; 2400 2401 /* populate checksum, VLAN, and protocol */ 2402 igc_process_skb_fields(rx_ring, rx_desc, skb); 2403 2404 napi_gro_receive(&q_vector->napi, skb); 2405 2406 /* reset skb pointer */ 2407 skb = NULL; 2408 2409 /* update budget accounting */ 2410 total_packets++; 2411 } 2412 2413 if (xdp_status) 2414 igc_finalize_xdp(adapter, xdp_status); 2415 2416 /* place incomplete frames back on ring for completion */ 2417 rx_ring->skb = skb; 2418 2419 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2420 2421 if (cleaned_count) 2422 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2423 2424 return total_packets; 2425 } 2426 2427 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, 2428 struct xdp_buff *xdp) 2429 { 2430 unsigned int metasize = xdp->data - xdp->data_meta; 2431 unsigned int datasize = xdp->data_end - xdp->data; 2432 unsigned int totalsize = metasize + datasize; 2433 struct sk_buff *skb; 2434 2435 skb = __napi_alloc_skb(&ring->q_vector->napi, 2436 xdp->data_end - xdp->data_hard_start, 2437 GFP_ATOMIC | __GFP_NOWARN); 2438 if (unlikely(!skb)) 2439 return NULL; 2440 2441 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); 2442 memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); 2443 if (metasize) 2444 skb_metadata_set(skb, metasize); 2445 2446 return skb; 2447 } 2448 2449 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, 2450 union igc_adv_rx_desc *desc, 2451 struct xdp_buff *xdp, 2452 ktime_t timestamp) 2453 { 2454 struct igc_ring *ring = q_vector->rx.ring; 2455 struct sk_buff *skb; 2456 2457 skb = igc_construct_skb_zc(ring, xdp); 2458 if (!skb) { 2459 ring->rx_stats.alloc_failed++; 2460 return; 2461 } 2462 2463 if (timestamp) 2464 skb_hwtstamps(skb)->hwtstamp = timestamp; 2465 2466 if (igc_cleanup_headers(ring, desc, skb)) 2467 return; 2468 2469 igc_process_skb_fields(ring, desc, skb); 2470 napi_gro_receive(&q_vector->napi, skb); 2471 } 2472 2473 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) 2474 { 2475 struct igc_adapter *adapter = q_vector->adapter; 2476 struct igc_ring *ring = q_vector->rx.ring; 2477 u16 cleaned_count = igc_desc_unused(ring); 2478 int total_bytes = 0, total_packets = 0; 2479 u16 ntc = ring->next_to_clean; 2480 struct bpf_prog *prog; 2481 bool failure = false; 2482 int xdp_status = 0; 2483 2484 rcu_read_lock(); 2485 2486 prog = READ_ONCE(adapter->xdp_prog); 2487 2488 while (likely(total_packets < budget)) { 2489 union igc_adv_rx_desc *desc; 2490 struct igc_rx_buffer *bi; 2491 ktime_t timestamp = 0; 2492 unsigned int size; 2493 int res; 2494 2495 desc = IGC_RX_DESC(ring, ntc); 2496 size = le16_to_cpu(desc->wb.upper.length); 2497 if (!size) 2498 break; 2499 2500 /* This memory barrier is needed to keep us from reading 2501 * any other fields out of the rx_desc until we know the 2502 * descriptor has been written back 2503 */ 2504 dma_rmb(); 2505 2506 bi = &ring->rx_buffer_info[ntc]; 2507 2508 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { 2509 timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, 2510 bi->xdp->data); 2511 2512 bi->xdp->data += IGC_TS_HDR_LEN; 2513 2514 /* HW timestamp has been copied into local variable. Metadata 2515 * length when XDP program is called should be 0. 2516 */ 2517 bi->xdp->data_meta += IGC_TS_HDR_LEN; 2518 size -= IGC_TS_HDR_LEN; 2519 } 2520 2521 bi->xdp->data_end = bi->xdp->data + size; 2522 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); 2523 2524 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); 2525 switch (res) { 2526 case IGC_XDP_PASS: 2527 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); 2528 fallthrough; 2529 case IGC_XDP_CONSUMED: 2530 xsk_buff_free(bi->xdp); 2531 break; 2532 case IGC_XDP_TX: 2533 case IGC_XDP_REDIRECT: 2534 xdp_status |= res; 2535 break; 2536 } 2537 2538 bi->xdp = NULL; 2539 total_bytes += size; 2540 total_packets++; 2541 cleaned_count++; 2542 ntc++; 2543 if (ntc == ring->count) 2544 ntc = 0; 2545 } 2546 2547 ring->next_to_clean = ntc; 2548 rcu_read_unlock(); 2549 2550 if (cleaned_count >= IGC_RX_BUFFER_WRITE) 2551 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); 2552 2553 if (xdp_status) 2554 igc_finalize_xdp(adapter, xdp_status); 2555 2556 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2557 2558 if (xsk_uses_need_wakeup(ring->xsk_pool)) { 2559 if (failure || ring->next_to_clean == ring->next_to_use) 2560 xsk_set_rx_need_wakeup(ring->xsk_pool); 2561 else 2562 xsk_clear_rx_need_wakeup(ring->xsk_pool); 2563 return total_packets; 2564 } 2565 2566 return failure ? budget : total_packets; 2567 } 2568 2569 static void igc_update_tx_stats(struct igc_q_vector *q_vector, 2570 unsigned int packets, unsigned int bytes) 2571 { 2572 struct igc_ring *ring = q_vector->tx.ring; 2573 2574 u64_stats_update_begin(&ring->tx_syncp); 2575 ring->tx_stats.bytes += bytes; 2576 ring->tx_stats.packets += packets; 2577 u64_stats_update_end(&ring->tx_syncp); 2578 2579 q_vector->tx.total_bytes += bytes; 2580 q_vector->tx.total_packets += packets; 2581 } 2582 2583 static void igc_xdp_xmit_zc(struct igc_ring *ring) 2584 { 2585 struct xsk_buff_pool *pool = ring->xsk_pool; 2586 struct netdev_queue *nq = txring_txq(ring); 2587 union igc_adv_tx_desc *tx_desc = NULL; 2588 int cpu = smp_processor_id(); 2589 u16 ntu = ring->next_to_use; 2590 struct xdp_desc xdp_desc; 2591 u16 budget; 2592 2593 if (!netif_carrier_ok(ring->netdev)) 2594 return; 2595 2596 __netif_tx_lock(nq, cpu); 2597 2598 budget = igc_desc_unused(ring); 2599 2600 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { 2601 u32 cmd_type, olinfo_status; 2602 struct igc_tx_buffer *bi; 2603 dma_addr_t dma; 2604 2605 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2606 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2607 xdp_desc.len; 2608 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; 2609 2610 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2611 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); 2612 2613 tx_desc = IGC_TX_DESC(ring, ntu); 2614 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2615 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2616 tx_desc->read.buffer_addr = cpu_to_le64(dma); 2617 2618 bi = &ring->tx_buffer_info[ntu]; 2619 bi->type = IGC_TX_BUFFER_TYPE_XSK; 2620 bi->protocol = 0; 2621 bi->bytecount = xdp_desc.len; 2622 bi->gso_segs = 1; 2623 bi->time_stamp = jiffies; 2624 bi->next_to_watch = tx_desc; 2625 2626 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); 2627 2628 ntu++; 2629 if (ntu == ring->count) 2630 ntu = 0; 2631 } 2632 2633 ring->next_to_use = ntu; 2634 if (tx_desc) 
{ 2635 igc_flush_tx_descriptors(ring); 2636 xsk_tx_release(pool); 2637 } 2638 2639 __netif_tx_unlock(nq); 2640 } 2641 2642 /** 2643 * igc_clean_tx_irq - Reclaim resources after transmit completes 2644 * @q_vector: pointer to q_vector containing needed info 2645 * @napi_budget: Used to determine if we are in netpoll 2646 * 2647 * returns true if ring is completely cleaned 2648 */ 2649 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 2650 { 2651 struct igc_adapter *adapter = q_vector->adapter; 2652 unsigned int total_bytes = 0, total_packets = 0; 2653 unsigned int budget = q_vector->tx.work_limit; 2654 struct igc_ring *tx_ring = q_vector->tx.ring; 2655 unsigned int i = tx_ring->next_to_clean; 2656 struct igc_tx_buffer *tx_buffer; 2657 union igc_adv_tx_desc *tx_desc; 2658 u32 xsk_frames = 0; 2659 2660 if (test_bit(__IGC_DOWN, &adapter->state)) 2661 return true; 2662 2663 tx_buffer = &tx_ring->tx_buffer_info[i]; 2664 tx_desc = IGC_TX_DESC(tx_ring, i); 2665 i -= tx_ring->count; 2666 2667 do { 2668 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 2669 2670 /* if next_to_watch is not set then there is no work pending */ 2671 if (!eop_desc) 2672 break; 2673 2674 /* prevent any other reads prior to eop_desc */ 2675 smp_rmb(); 2676 2677 /* if DD is not set pending work has not been completed */ 2678 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 2679 break; 2680 2681 /* clear next_to_watch to prevent false hangs */ 2682 tx_buffer->next_to_watch = NULL; 2683 2684 /* update the statistics for this packet */ 2685 total_bytes += tx_buffer->bytecount; 2686 total_packets += tx_buffer->gso_segs; 2687 2688 switch (tx_buffer->type) { 2689 case IGC_TX_BUFFER_TYPE_XSK: 2690 xsk_frames++; 2691 break; 2692 case IGC_TX_BUFFER_TYPE_XDP: 2693 xdp_return_frame(tx_buffer->xdpf); 2694 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2695 break; 2696 case IGC_TX_BUFFER_TYPE_SKB: 2697 napi_consume_skb(tx_buffer->skb, napi_budget); 2698 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2699 break; 2700 default: 2701 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 2702 break; 2703 } 2704 2705 /* clear last DMA location and unmap remaining buffers */ 2706 while (tx_desc != eop_desc) { 2707 tx_buffer++; 2708 tx_desc++; 2709 i++; 2710 if (unlikely(!i)) { 2711 i -= tx_ring->count; 2712 tx_buffer = tx_ring->tx_buffer_info; 2713 tx_desc = IGC_TX_DESC(tx_ring, 0); 2714 } 2715 2716 /* unmap any remaining paged data */ 2717 if (dma_unmap_len(tx_buffer, len)) 2718 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2719 } 2720 2721 /* move us one more past the eop_desc for start of next pkt */ 2722 tx_buffer++; 2723 tx_desc++; 2724 i++; 2725 if (unlikely(!i)) { 2726 i -= tx_ring->count; 2727 tx_buffer = tx_ring->tx_buffer_info; 2728 tx_desc = IGC_TX_DESC(tx_ring, 0); 2729 } 2730 2731 /* issue prefetch for next Tx descriptor */ 2732 prefetch(tx_desc); 2733 2734 /* update budget accounting */ 2735 budget--; 2736 } while (likely(budget)); 2737 2738 netdev_tx_completed_queue(txring_txq(tx_ring), 2739 total_packets, total_bytes); 2740 2741 i += tx_ring->count; 2742 tx_ring->next_to_clean = i; 2743 2744 igc_update_tx_stats(q_vector, total_packets, total_bytes); 2745 2746 if (tx_ring->xsk_pool) { 2747 if (xsk_frames) 2748 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 2749 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) 2750 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); 2751 igc_xdp_xmit_zc(tx_ring); 2752 } 2753 2754 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 2755 struct 
igc_hw *hw = &adapter->hw; 2756 2757 /* Detect a transmit hang in hardware, this serializes the 2758 * check with the clearing of time_stamp and movement of i 2759 */ 2760 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 2761 if (tx_buffer->next_to_watch && 2762 time_after(jiffies, tx_buffer->time_stamp + 2763 (adapter->tx_timeout_factor * HZ)) && 2764 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { 2765 /* detected Tx unit hang */ 2766 netdev_err(tx_ring->netdev, 2767 "Detected Tx Unit Hang\n" 2768 " Tx Queue <%d>\n" 2769 " TDH <%x>\n" 2770 " TDT <%x>\n" 2771 " next_to_use <%x>\n" 2772 " next_to_clean <%x>\n" 2773 "buffer_info[next_to_clean]\n" 2774 " time_stamp <%lx>\n" 2775 " next_to_watch <%p>\n" 2776 " jiffies <%lx>\n" 2777 " desc.status <%x>\n", 2778 tx_ring->queue_index, 2779 rd32(IGC_TDH(tx_ring->reg_idx)), 2780 readl(tx_ring->tail), 2781 tx_ring->next_to_use, 2782 tx_ring->next_to_clean, 2783 tx_buffer->time_stamp, 2784 tx_buffer->next_to_watch, 2785 jiffies, 2786 tx_buffer->next_to_watch->wb.status); 2787 netif_stop_subqueue(tx_ring->netdev, 2788 tx_ring->queue_index); 2789 2790 /* we are about to reset, no point in enabling stuff */ 2791 return true; 2792 } 2793 } 2794 2795 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 2796 if (unlikely(total_packets && 2797 netif_carrier_ok(tx_ring->netdev) && 2798 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 2799 /* Make sure that anybody stopping the queue after this 2800 * sees the new next_to_clean. 2801 */ 2802 smp_mb(); 2803 if (__netif_subqueue_stopped(tx_ring->netdev, 2804 tx_ring->queue_index) && 2805 !(test_bit(__IGC_DOWN, &adapter->state))) { 2806 netif_wake_subqueue(tx_ring->netdev, 2807 tx_ring->queue_index); 2808 2809 u64_stats_update_begin(&tx_ring->tx_syncp); 2810 tx_ring->tx_stats.restart_queue++; 2811 u64_stats_update_end(&tx_ring->tx_syncp); 2812 } 2813 } 2814 2815 return !!budget; 2816 } 2817 2818 static int igc_find_mac_filter(struct igc_adapter *adapter, 2819 enum igc_mac_filter_type type, const u8 *addr) 2820 { 2821 struct igc_hw *hw = &adapter->hw; 2822 int max_entries = hw->mac.rar_entry_count; 2823 u32 ral, rah; 2824 int i; 2825 2826 for (i = 0; i < max_entries; i++) { 2827 ral = rd32(IGC_RAL(i)); 2828 rah = rd32(IGC_RAH(i)); 2829 2830 if (!(rah & IGC_RAH_AV)) 2831 continue; 2832 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) 2833 continue; 2834 if ((rah & IGC_RAH_RAH_MASK) != 2835 le16_to_cpup((__le16 *)(addr + 4))) 2836 continue; 2837 if (ral != le32_to_cpup((__le32 *)(addr))) 2838 continue; 2839 2840 return i; 2841 } 2842 2843 return -1; 2844 } 2845 2846 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) 2847 { 2848 struct igc_hw *hw = &adapter->hw; 2849 int max_entries = hw->mac.rar_entry_count; 2850 u32 rah; 2851 int i; 2852 2853 for (i = 0; i < max_entries; i++) { 2854 rah = rd32(IGC_RAH(i)); 2855 2856 if (!(rah & IGC_RAH_AV)) 2857 return i; 2858 } 2859 2860 return -1; 2861 } 2862 2863 /** 2864 * igc_add_mac_filter() - Add MAC address filter 2865 * @adapter: Pointer to adapter where the filter should be added 2866 * @type: MAC address filter type (source or destination) 2867 * @addr: MAC address 2868 * @queue: If non-negative, queue assignment feature is enabled and frames 2869 * matching the filter are enqueued onto 'queue'. Otherwise, queue 2870 * assignment is disabled. 2871 * 2872 * Return: 0 in case of success, negative errno code otherwise. 
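 *
 * Example (mirroring igc_uc_sync() below): add a destination address
 * filter with queue assignment disabled:
 *
 *	err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);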
2873 */ 2874 static int igc_add_mac_filter(struct igc_adapter *adapter, 2875 enum igc_mac_filter_type type, const u8 *addr, 2876 int queue) 2877 { 2878 struct net_device *dev = adapter->netdev; 2879 int index; 2880 2881 index = igc_find_mac_filter(adapter, type, addr); 2882 if (index >= 0) 2883 goto update_filter; 2884 2885 index = igc_get_avail_mac_filter_slot(adapter); 2886 if (index < 0) 2887 return -ENOSPC; 2888 2889 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", 2890 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2891 addr, queue); 2892 2893 update_filter: 2894 igc_set_mac_filter_hw(adapter, index, type, addr, queue); 2895 return 0; 2896 } 2897 2898 /** 2899 * igc_del_mac_filter() - Delete MAC address filter 2900 * @adapter: Pointer to adapter where the filter should be deleted from 2901 * @type: MAC address filter type (source or destination) 2902 * @addr: MAC address 2903 */ 2904 static void igc_del_mac_filter(struct igc_adapter *adapter, 2905 enum igc_mac_filter_type type, const u8 *addr) 2906 { 2907 struct net_device *dev = adapter->netdev; 2908 int index; 2909 2910 index = igc_find_mac_filter(adapter, type, addr); 2911 if (index < 0) 2912 return; 2913 2914 if (index == 0) { 2915 /* If this is the default filter, we don't actually delete it. 2916 * We just reset to its default value i.e. disable queue 2917 * assignment. 2918 */ 2919 netdev_dbg(dev, "Disable default MAC filter queue assignment"); 2920 2921 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); 2922 } else { 2923 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", 2924 index, 2925 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2926 addr); 2927 2928 igc_clear_mac_filter_hw(adapter, index); 2929 } 2930 } 2931 2932 /** 2933 * igc_add_vlan_prio_filter() - Add VLAN priority filter 2934 * @adapter: Pointer to adapter where the filter should be added 2935 * @prio: VLAN priority value 2936 * @queue: Queue number which matching frames are assigned to 2937 * 2938 * Return: 0 in case of success, negative errno code otherwise. 
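 *
 * Example (as done by igc_enable_nfc_rule() below), deriving @prio from
 * a VLAN TCI; vlan_tci and queue are the caller's values:
 *
 *	prio = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 *	err = igc_add_vlan_prio_filter(adapter, prio, queue);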
2939 */ 2940 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, 2941 int queue) 2942 { 2943 struct net_device *dev = adapter->netdev; 2944 struct igc_hw *hw = &adapter->hw; 2945 u32 vlanpqf; 2946 2947 vlanpqf = rd32(IGC_VLANPQF); 2948 2949 if (vlanpqf & IGC_VLANPQF_VALID(prio)) { 2950 netdev_dbg(dev, "VLAN priority filter already in use\n"); 2951 return -EEXIST; 2952 } 2953 2954 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); 2955 vlanpqf |= IGC_VLANPQF_VALID(prio); 2956 2957 wr32(IGC_VLANPQF, vlanpqf); 2958 2959 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", 2960 prio, queue); 2961 return 0; 2962 } 2963 2964 /** 2965 * igc_del_vlan_prio_filter() - Delete VLAN priority filter 2966 * @adapter: Pointer to adapter where the filter should be deleted from 2967 * @prio: VLAN priority value 2968 */ 2969 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) 2970 { 2971 struct igc_hw *hw = &adapter->hw; 2972 u32 vlanpqf; 2973 2974 vlanpqf = rd32(IGC_VLANPQF); 2975 2976 vlanpqf &= ~IGC_VLANPQF_VALID(prio); 2977 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); 2978 2979 wr32(IGC_VLANPQF, vlanpqf); 2980 2981 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", 2982 prio); 2983 } 2984 2985 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) 2986 { 2987 struct igc_hw *hw = &adapter->hw; 2988 int i; 2989 2990 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 2991 u32 etqf = rd32(IGC_ETQF(i)); 2992 2993 if (!(etqf & IGC_ETQF_FILTER_ENABLE)) 2994 return i; 2995 } 2996 2997 return -1; 2998 } 2999 3000 /** 3001 * igc_add_etype_filter() - Add ethertype filter 3002 * @adapter: Pointer to adapter where the filter should be added 3003 * @etype: Ethertype value 3004 * @queue: If non-negative, queue assignment feature is enabled and frames 3005 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3006 * assignment is disabled. 3007 * 3008 * Return: 0 in case of success, negative errno code otherwise. 
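 *
 * Example (as done by igc_enable_nfc_rule() below):
 *
 *	err = igc_add_etype_filter(adapter, rule->filter.etype, rule->action);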
3009 */ 3010 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, 3011 int queue) 3012 { 3013 struct igc_hw *hw = &adapter->hw; 3014 int index; 3015 u32 etqf; 3016 3017 index = igc_get_avail_etype_filter_slot(adapter); 3018 if (index < 0) 3019 return -ENOSPC; 3020 3021 etqf = rd32(IGC_ETQF(index)); 3022 3023 etqf &= ~IGC_ETQF_ETYPE_MASK; 3024 etqf |= etype; 3025 3026 if (queue >= 0) { 3027 etqf &= ~IGC_ETQF_QUEUE_MASK; 3028 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); 3029 etqf |= IGC_ETQF_QUEUE_ENABLE; 3030 } 3031 3032 etqf |= IGC_ETQF_FILTER_ENABLE; 3033 3034 wr32(IGC_ETQF(index), etqf); 3035 3036 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", 3037 etype, queue); 3038 return 0; 3039 } 3040 3041 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) 3042 { 3043 struct igc_hw *hw = &adapter->hw; 3044 int i; 3045 3046 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3047 u32 etqf = rd32(IGC_ETQF(i)); 3048 3049 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) 3050 return i; 3051 } 3052 3053 return -1; 3054 } 3055 3056 /** 3057 * igc_del_etype_filter() - Delete ethertype filter 3058 * @adapter: Pointer to adapter where the filter should be deleted from 3059 * @etype: Ethertype value 3060 */ 3061 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) 3062 { 3063 struct igc_hw *hw = &adapter->hw; 3064 int index; 3065 3066 index = igc_find_etype_filter(adapter, etype); 3067 if (index < 0) 3068 return; 3069 3070 wr32(IGC_ETQF(index), 0); 3071 3072 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", 3073 etype); 3074 } 3075 3076 static int igc_enable_nfc_rule(struct igc_adapter *adapter, 3077 const struct igc_nfc_rule *rule) 3078 { 3079 int err; 3080 3081 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3082 err = igc_add_etype_filter(adapter, rule->filter.etype, 3083 rule->action); 3084 if (err) 3085 return err; 3086 } 3087 3088 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { 3089 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3090 rule->filter.src_addr, rule->action); 3091 if (err) 3092 return err; 3093 } 3094 3095 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { 3096 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3097 rule->filter.dst_addr, rule->action); 3098 if (err) 3099 return err; 3100 } 3101 3102 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3103 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3104 VLAN_PRIO_SHIFT; 3105 3106 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); 3107 if (err) 3108 return err; 3109 } 3110 3111 return 0; 3112 } 3113 3114 static void igc_disable_nfc_rule(struct igc_adapter *adapter, 3115 const struct igc_nfc_rule *rule) 3116 { 3117 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) 3118 igc_del_etype_filter(adapter, rule->filter.etype); 3119 3120 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3121 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3122 VLAN_PRIO_SHIFT; 3123 3124 igc_del_vlan_prio_filter(adapter, prio); 3125 } 3126 3127 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3128 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3129 rule->filter.src_addr); 3130 3131 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3132 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3133 rule->filter.dst_addr); 3134 } 3135 3136 /** 3137 * igc_get_nfc_rule() - Get NFC rule 3138 * @adapter: Pointer to adapter 3139 * @location: Rule location 
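 *
 * The rule list is kept sorted by location (see igc_add_nfc_rule()), so
 * the walk below can stop as soon as it passes @location.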
3140 * 3141 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3142 * 3143 * Return: Pointer to NFC rule at @location. If not found, NULL. 3144 */ 3145 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, 3146 u32 location) 3147 { 3148 struct igc_nfc_rule *rule; 3149 3150 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { 3151 if (rule->location == location) 3152 return rule; 3153 if (rule->location > location) 3154 break; 3155 } 3156 3157 return NULL; 3158 } 3159 3160 /** 3161 * igc_del_nfc_rule() - Delete NFC rule 3162 * @adapter: Pointer to adapter 3163 * @rule: Pointer to rule to be deleted 3164 * 3165 * Disable NFC rule in hardware and delete it from adapter. 3166 * 3167 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3168 */ 3169 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3170 { 3171 igc_disable_nfc_rule(adapter, rule); 3172 3173 list_del(&rule->list); 3174 adapter->nfc_rule_count--; 3175 3176 kfree(rule); 3177 } 3178 3179 static void igc_flush_nfc_rules(struct igc_adapter *adapter) 3180 { 3181 struct igc_nfc_rule *rule, *tmp; 3182 3183 mutex_lock(&adapter->nfc_rule_lock); 3184 3185 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) 3186 igc_del_nfc_rule(adapter, rule); 3187 3188 mutex_unlock(&adapter->nfc_rule_lock); 3189 } 3190 3191 /** 3192 * igc_add_nfc_rule() - Add NFC rule 3193 * @adapter: Pointer to adapter 3194 * @rule: Pointer to rule to be added 3195 * 3196 * Enable NFC rule in hardware and add it to adapter. 3197 * 3198 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3199 * 3200 * Return: 0 on success, negative errno on failure. 3201 */ 3202 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3203 { 3204 struct igc_nfc_rule *pred, *cur; 3205 int err; 3206 3207 err = igc_enable_nfc_rule(adapter, rule); 3208 if (err) 3209 return err; 3210 3211 pred = NULL; 3212 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { 3213 if (cur->location >= rule->location) 3214 break; 3215 pred = cur; 3216 } 3217 3218 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); 3219 adapter->nfc_rule_count++; 3220 return 0; 3221 } 3222 3223 static void igc_restore_nfc_rules(struct igc_adapter *adapter) 3224 { 3225 struct igc_nfc_rule *rule; 3226 3227 mutex_lock(&adapter->nfc_rule_lock); 3228 3229 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) 3230 igc_enable_nfc_rule(adapter, rule); 3231 3232 mutex_unlock(&adapter->nfc_rule_lock); 3233 } 3234 3235 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 3236 { 3237 struct igc_adapter *adapter = netdev_priv(netdev); 3238 3239 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); 3240 } 3241 3242 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) 3243 { 3244 struct igc_adapter *adapter = netdev_priv(netdev); 3245 3246 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); 3247 return 0; 3248 } 3249 3250 /** 3251 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3252 * @netdev: network interface device structure 3253 * 3254 * The set_rx_mode entry point is called whenever the unicast or multicast 3255 * address lists or the network interface flags are updated. This routine is 3256 * responsible for configuring the hardware for proper unicast, multicast, 3257 * promiscuous mode, and all-multi behavior. 
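 *
 * If programming the multicast table or the unicast RAR entries fails for
 * lack of space, the corresponding promiscuous bit (MPE or UPE) is set
 * instead so that matching traffic is still received.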
3258  */
3259 static void igc_set_rx_mode(struct net_device *netdev)
3260 {
3261 	struct igc_adapter *adapter = netdev_priv(netdev);
3262 	struct igc_hw *hw = &adapter->hw;
3263 	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3264 	int count;
3265 
3266 	/* Check for Promiscuous and All Multicast modes */
3267 	if (netdev->flags & IFF_PROMISC) {
3268 		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
3269 	} else {
3270 		if (netdev->flags & IFF_ALLMULTI) {
3271 			rctl |= IGC_RCTL_MPE;
3272 		} else {
3273 			/* Write addresses to the MTA, if the attempt fails
3274 			 * then we should just turn on promiscuous mode so
3275 			 * that we can at least receive multicast traffic
3276 			 */
3277 			count = igc_write_mc_addr_list(netdev);
3278 			if (count < 0)
3279 				rctl |= IGC_RCTL_MPE;
3280 		}
3281 	}
3282 
3283 	/* Write addresses to available RAR registers, if there is not
3284 	 * sufficient space to store all the addresses then enable
3285 	 * unicast promiscuous mode
3286 	 */
3287 	if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3288 		rctl |= IGC_RCTL_UPE;
3289 
3290 	/* update state of unicast and multicast */
3291 	rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3292 	wr32(IGC_RCTL, rctl);
3293 
3294 #if (PAGE_SIZE < 8192)
3295 	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
3296 		rlpml = IGC_MAX_FRAME_BUILD_SKB;
3297 #endif
3298 	wr32(IGC_RLPML, rlpml);
3299 }
3300 
3301 /**
3302  * igc_configure - configure the hardware for RX and TX
3303  * @adapter: private board structure
3304  */
3305 static void igc_configure(struct igc_adapter *adapter)
3306 {
3307 	struct net_device *netdev = adapter->netdev;
3308 	int i = 0;
3309 
3310 	igc_get_hw_control(adapter);
3311 	igc_set_rx_mode(netdev);
3312 
3313 	igc_restore_vlan(adapter);
3314 
3315 	igc_setup_tctl(adapter);
3316 	igc_setup_mrqc(adapter);
3317 	igc_setup_rctl(adapter);
3318 
3319 	igc_set_default_mac_filter(adapter);
3320 	igc_restore_nfc_rules(adapter);
3321 
3322 	igc_configure_tx(adapter);
3323 	igc_configure_rx(adapter);
3324 
3325 	igc_rx_fifo_flush_base(&adapter->hw);
3326 
3327 	/* call igc_desc_unused which always leaves
3328 	 * at least 1 descriptor unused to make sure
3329 	 * next_to_use != next_to_clean
3330 	 */
3331 	for (i = 0; i < adapter->num_rx_queues; i++) {
3332 		struct igc_ring *ring = adapter->rx_ring[i];
3333 
3334 		if (ring->xsk_pool)
3335 			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3336 		else
3337 			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
3338 	}
3339 }
3340 
3341 /**
3342  * igc_write_ivar - configure ivar for given MSI-X vector
3343  * @hw: pointer to the HW structure
3344  * @msix_vector: vector number we are allocating to a given ring
3345  * @index: row index of IVAR register to write within IVAR table
3346  * @offset: column offset in IVAR, should be a multiple of 8
3347  *
3348  * The IVAR table consists of 2 columns,
3349  * each containing a cause allocation for an Rx and Tx ring, and a
3350  * variable number of rows depending on the number of queues supported.
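 *
 * igc_assign_vector() below derives @index and @offset from the ring's
 * reg_idx; for an Rx queue, for example:
 *
 *	igc_write_ivar(hw, msix_vector, rx_queue >> 1,
 *		       (rx_queue & 0x1) << 4);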
3351 */ 3352 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 3353 int index, int offset) 3354 { 3355 u32 ivar = array_rd32(IGC_IVAR0, index); 3356 3357 /* clear any bits that are currently set */ 3358 ivar &= ~((u32)0xFF << offset); 3359 3360 /* write vector and valid bit */ 3361 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 3362 3363 array_wr32(IGC_IVAR0, index, ivar); 3364 } 3365 3366 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 3367 { 3368 struct igc_adapter *adapter = q_vector->adapter; 3369 struct igc_hw *hw = &adapter->hw; 3370 int rx_queue = IGC_N0_QUEUE; 3371 int tx_queue = IGC_N0_QUEUE; 3372 3373 if (q_vector->rx.ring) 3374 rx_queue = q_vector->rx.ring->reg_idx; 3375 if (q_vector->tx.ring) 3376 tx_queue = q_vector->tx.ring->reg_idx; 3377 3378 switch (hw->mac.type) { 3379 case igc_i225: 3380 if (rx_queue > IGC_N0_QUEUE) 3381 igc_write_ivar(hw, msix_vector, 3382 rx_queue >> 1, 3383 (rx_queue & 0x1) << 4); 3384 if (tx_queue > IGC_N0_QUEUE) 3385 igc_write_ivar(hw, msix_vector, 3386 tx_queue >> 1, 3387 ((tx_queue & 0x1) << 4) + 8); 3388 q_vector->eims_value = BIT(msix_vector); 3389 break; 3390 default: 3391 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 3392 break; 3393 } 3394 3395 /* add q_vector eims value to global eims_enable_mask */ 3396 adapter->eims_enable_mask |= q_vector->eims_value; 3397 3398 /* configure q_vector to set itr on first interrupt */ 3399 q_vector->set_itr = 1; 3400 } 3401 3402 /** 3403 * igc_configure_msix - Configure MSI-X hardware 3404 * @adapter: Pointer to adapter structure 3405 * 3406 * igc_configure_msix sets up the hardware to properly 3407 * generate MSI-X interrupts. 3408 */ 3409 static void igc_configure_msix(struct igc_adapter *adapter) 3410 { 3411 struct igc_hw *hw = &adapter->hw; 3412 int i, vector = 0; 3413 u32 tmp; 3414 3415 adapter->eims_enable_mask = 0; 3416 3417 /* set vector for other causes, i.e. link changes */ 3418 switch (hw->mac.type) { 3419 case igc_i225: 3420 /* Turn on MSI-X capability first, or our settings 3421 * won't stick. And it will take days to debug. 
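		 *
		 * That is, GPIE must select MSI-X mode before the
		 * IVAR_MISC and per-queue IVAR writes that follow, or
		 * those writes will not stick.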
3422 */ 3423 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 3424 IGC_GPIE_PBA | IGC_GPIE_EIAME | 3425 IGC_GPIE_NSICR); 3426 3427 /* enable msix_other interrupt */ 3428 adapter->eims_other = BIT(vector); 3429 tmp = (vector++ | IGC_IVAR_VALID) << 8; 3430 3431 wr32(IGC_IVAR_MISC, tmp); 3432 break; 3433 default: 3434 /* do nothing, since nothing else supports MSI-X */ 3435 break; 3436 } /* switch (hw->mac.type) */ 3437 3438 adapter->eims_enable_mask |= adapter->eims_other; 3439 3440 for (i = 0; i < adapter->num_q_vectors; i++) 3441 igc_assign_vector(adapter->q_vector[i], vector++); 3442 3443 wrfl(); 3444 } 3445 3446 /** 3447 * igc_irq_enable - Enable default interrupt generation settings 3448 * @adapter: board private structure 3449 */ 3450 static void igc_irq_enable(struct igc_adapter *adapter) 3451 { 3452 struct igc_hw *hw = &adapter->hw; 3453 3454 if (adapter->msix_entries) { 3455 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 3456 u32 regval = rd32(IGC_EIAC); 3457 3458 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 3459 regval = rd32(IGC_EIAM); 3460 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 3461 wr32(IGC_EIMS, adapter->eims_enable_mask); 3462 wr32(IGC_IMS, ims); 3463 } else { 3464 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3465 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3466 } 3467 } 3468 3469 /** 3470 * igc_irq_disable - Mask off interrupt generation on the NIC 3471 * @adapter: board private structure 3472 */ 3473 static void igc_irq_disable(struct igc_adapter *adapter) 3474 { 3475 struct igc_hw *hw = &adapter->hw; 3476 3477 if (adapter->msix_entries) { 3478 u32 regval = rd32(IGC_EIAM); 3479 3480 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 3481 wr32(IGC_EIMC, adapter->eims_enable_mask); 3482 regval = rd32(IGC_EIAC); 3483 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 3484 } 3485 3486 wr32(IGC_IAM, 0); 3487 wr32(IGC_IMC, ~0); 3488 wrfl(); 3489 3490 if (adapter->msix_entries) { 3491 int vector = 0, i; 3492 3493 synchronize_irq(adapter->msix_entries[vector++].vector); 3494 3495 for (i = 0; i < adapter->num_q_vectors; i++) 3496 synchronize_irq(adapter->msix_entries[vector++].vector); 3497 } else { 3498 synchronize_irq(adapter->pdev->irq); 3499 } 3500 } 3501 3502 void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 3503 const u32 max_rss_queues) 3504 { 3505 /* Determine if we need to pair queues. */ 3506 /* If rss_queues > half of max_rss_queues, pair the queues in 3507 * order to conserve interrupts due to limited supply. 3508 */ 3509 if (adapter->rss_queues > (max_rss_queues / 2)) 3510 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 3511 else 3512 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 3513 } 3514 3515 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 3516 { 3517 return IGC_MAX_RX_QUEUES; 3518 } 3519 3520 static void igc_init_queue_configuration(struct igc_adapter *adapter) 3521 { 3522 u32 max_rss_queues; 3523 3524 max_rss_queues = igc_get_max_rss_queues(adapter); 3525 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 3526 3527 igc_set_flag_queue_pairs(adapter, max_rss_queues); 3528 } 3529 3530 /** 3531 * igc_reset_q_vector - Reset config for interrupt vector 3532 * @adapter: board private structure to initialize 3533 * @v_idx: Index of vector to be reset 3534 * 3535 * If NAPI is enabled it will delete any references to the 3536 * NAPI struct. This is preparation for igc_free_q_vector. 
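 *
 * Teardown pairs this with igc_free_q_vector(); the loop in
 * igc_free_q_vectors() below is illustrative:
 *
 *	while (v_idx--) {
 *		igc_reset_q_vector(adapter, v_idx);
 *		igc_free_q_vector(adapter, v_idx);
 *	}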
3537 */ 3538 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 3539 { 3540 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3541 3542 /* if we're coming from igc_set_interrupt_capability, the vectors are 3543 * not yet allocated 3544 */ 3545 if (!q_vector) 3546 return; 3547 3548 if (q_vector->tx.ring) 3549 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 3550 3551 if (q_vector->rx.ring) 3552 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 3553 3554 netif_napi_del(&q_vector->napi); 3555 } 3556 3557 /** 3558 * igc_free_q_vector - Free memory allocated for specific interrupt vector 3559 * @adapter: board private structure to initialize 3560 * @v_idx: Index of vector to be freed 3561 * 3562 * This function frees the memory allocated to the q_vector. 3563 */ 3564 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 3565 { 3566 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3567 3568 adapter->q_vector[v_idx] = NULL; 3569 3570 /* igc_get_stats64() might access the rings on this vector, 3571 * we must wait a grace period before freeing it. 3572 */ 3573 if (q_vector) 3574 kfree_rcu(q_vector, rcu); 3575 } 3576 3577 /** 3578 * igc_free_q_vectors - Free memory allocated for interrupt vectors 3579 * @adapter: board private structure to initialize 3580 * 3581 * This function frees the memory allocated to the q_vectors. In addition if 3582 * NAPI is enabled it will delete any references to the NAPI struct prior 3583 * to freeing the q_vector. 3584 */ 3585 static void igc_free_q_vectors(struct igc_adapter *adapter) 3586 { 3587 int v_idx = adapter->num_q_vectors; 3588 3589 adapter->num_tx_queues = 0; 3590 adapter->num_rx_queues = 0; 3591 adapter->num_q_vectors = 0; 3592 3593 while (v_idx--) { 3594 igc_reset_q_vector(adapter, v_idx); 3595 igc_free_q_vector(adapter, v_idx); 3596 } 3597 } 3598 3599 /** 3600 * igc_update_itr - update the dynamic ITR value based on statistics 3601 * @q_vector: pointer to q_vector 3602 * @ring_container: ring info to update the itr for 3603 * 3604 * Stores a new ITR value based on packets and byte 3605 * counts during the last interrupt. The advantage of per interrupt 3606 * computation is faster updates and more accurate ITR for the current 3607 * traffic pattern. Constants in this function were computed 3608 * based on theoretical maximum wire speed and thresholds were set based 3609 * on testing data as well as attempting to minimize response time 3610 * while increasing bulk throughput. 3611 * NOTE: These calculations are only valid when operating in a single- 3612 * queue environment. 
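 *
 * As a worked example of the thresholds below: 8 packets totalling
 * 70000 bytes in one interval (8750 bytes/packet) classify the ring as
 * bulk_latency from any starting state.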
3613 */ 3614 static void igc_update_itr(struct igc_q_vector *q_vector, 3615 struct igc_ring_container *ring_container) 3616 { 3617 unsigned int packets = ring_container->total_packets; 3618 unsigned int bytes = ring_container->total_bytes; 3619 u8 itrval = ring_container->itr; 3620 3621 /* no packets, exit with status unchanged */ 3622 if (packets == 0) 3623 return; 3624 3625 switch (itrval) { 3626 case lowest_latency: 3627 /* handle TSO and jumbo frames */ 3628 if (bytes / packets > 8000) 3629 itrval = bulk_latency; 3630 else if ((packets < 5) && (bytes > 512)) 3631 itrval = low_latency; 3632 break; 3633 case low_latency: /* 50 usec aka 20000 ints/s */ 3634 if (bytes > 10000) { 3635 /* this if handles the TSO accounting */ 3636 if (bytes / packets > 8000) 3637 itrval = bulk_latency; 3638 else if ((packets < 10) || ((bytes / packets) > 1200)) 3639 itrval = bulk_latency; 3640 else if ((packets > 35)) 3641 itrval = lowest_latency; 3642 } else if (bytes / packets > 2000) { 3643 itrval = bulk_latency; 3644 } else if (packets <= 2 && bytes < 512) { 3645 itrval = lowest_latency; 3646 } 3647 break; 3648 case bulk_latency: /* 250 usec aka 4000 ints/s */ 3649 if (bytes > 25000) { 3650 if (packets > 35) 3651 itrval = low_latency; 3652 } else if (bytes < 1500) { 3653 itrval = low_latency; 3654 } 3655 break; 3656 } 3657 3658 /* clear work counters since we have the values we need */ 3659 ring_container->total_bytes = 0; 3660 ring_container->total_packets = 0; 3661 3662 /* write updated itr to ring container */ 3663 ring_container->itr = itrval; 3664 } 3665 3666 static void igc_set_itr(struct igc_q_vector *q_vector) 3667 { 3668 struct igc_adapter *adapter = q_vector->adapter; 3669 u32 new_itr = q_vector->itr_val; 3670 u8 current_itr = 0; 3671 3672 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3673 switch (adapter->link_speed) { 3674 case SPEED_10: 3675 case SPEED_100: 3676 current_itr = 0; 3677 new_itr = IGC_4K_ITR; 3678 goto set_itr_now; 3679 default: 3680 break; 3681 } 3682 3683 igc_update_itr(q_vector, &q_vector->tx); 3684 igc_update_itr(q_vector, &q_vector->rx); 3685 3686 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 3687 3688 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3689 if (current_itr == lowest_latency && 3690 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 3691 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 3692 current_itr = low_latency; 3693 3694 switch (current_itr) { 3695 /* counts and packets in update_itr are dependent on these numbers */ 3696 case lowest_latency: 3697 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 3698 break; 3699 case low_latency: 3700 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 3701 break; 3702 case bulk_latency: 3703 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 3704 break; 3705 default: 3706 break; 3707 } 3708 3709 set_itr_now: 3710 if (new_itr != q_vector->itr_val) { 3711 /* this attempts to bias the interrupt rate towards Bulk 3712 * by adding intermediate steps when interrupt rate is 3713 * increasing 3714 */ 3715 new_itr = new_itr > q_vector->itr_val ? 3716 max((new_itr * q_vector->itr_val) / 3717 (new_itr + (q_vector->itr_val >> 2)), 3718 new_itr) : new_itr; 3719 /* Don't write the value here; it resets the adapter's 3720 * internal timer, and causes us to delay far longer than 3721 * we should between interrupts. Instead, we write the ITR 3722 * value at the beginning of the next interrupt so the timing 3723 * ends up being correct. 
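		 *
		 * The stored itr_val is picked up via q_vector->set_itr
		 * and programmed into EITR from the interrupt path.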
3724 		 */
3725 		q_vector->itr_val = new_itr;
3726 		q_vector->set_itr = 1;
3727 	}
3728 }
3729 
3730 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3731 {
3732 	int v_idx = adapter->num_q_vectors;
3733 
3734 	if (adapter->msix_entries) {
3735 		pci_disable_msix(adapter->pdev);
3736 		kfree(adapter->msix_entries);
3737 		adapter->msix_entries = NULL;
3738 	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3739 		pci_disable_msi(adapter->pdev);
3740 	}
3741 
3742 	while (v_idx--)
3743 		igc_reset_q_vector(adapter, v_idx);
3744 }
3745 
3746 /**
3747  * igc_set_interrupt_capability - set MSI or MSI-X if supported
3748  * @adapter: Pointer to adapter structure
3749  * @msix: boolean value for MSI-X capability
3750  *
3751  * Attempt to configure interrupts using the best available
3752  * capabilities of the hardware and kernel.
3753  */
3754 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3755 					 bool msix)
3756 {
3757 	int numvecs, i;
3758 	int err;
3759 
3760 	if (!msix)
3761 		goto msi_only;
3762 	adapter->flags |= IGC_FLAG_HAS_MSIX;
3763 
3764 	/* Number of supported queues. */
3765 	adapter->num_rx_queues = adapter->rss_queues;
3766 
3767 	adapter->num_tx_queues = adapter->rss_queues;
3768 
3769 	/* start with one vector for every Rx queue */
3770 	numvecs = adapter->num_rx_queues;
3771 
3772 	/* if Tx handler is separate add 1 for every Tx queue */
3773 	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3774 		numvecs += adapter->num_tx_queues;
3775 
3776 	/* store the number of vectors reserved for queues */
3777 	adapter->num_q_vectors = numvecs;
3778 
3779 	/* add 1 vector for link status interrupts */
3780 	numvecs++;
3781 
3782 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3783 					GFP_KERNEL);
3784 
3785 	if (!adapter->msix_entries)
3786 		return;
3787 
3788 	/* populate entry values */
3789 	for (i = 0; i < numvecs; i++)
3790 		adapter->msix_entries[i].entry = i;
3791 
3792 	err = pci_enable_msix_range(adapter->pdev,
3793 				    adapter->msix_entries,
3794 				    numvecs,
3795 				    numvecs);
3796 	if (err > 0)
3797 		return;
3798 
3799 	kfree(adapter->msix_entries);
3800 	adapter->msix_entries = NULL;
3801 
3802 	igc_reset_interrupt_capability(adapter);
3803 
3804 msi_only:
3805 	adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3806 
3807 	adapter->rss_queues = 1;
3808 	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3809 	adapter->num_rx_queues = 1;
3810 	adapter->num_tx_queues = 1;
3811 	adapter->num_q_vectors = 1;
3812 	if (!pci_enable_msi(adapter->pdev))
3813 		adapter->flags |= IGC_FLAG_HAS_MSI;
3814 }
3815 
3816 /**
3817  * igc_update_ring_itr - update the dynamic ITR value based on packet size
3818  * @q_vector: pointer to q_vector
3819  *
3820  * Stores a new ITR value based strictly on packet size. This
3821  * algorithm is less sophisticated than that used in igc_update_itr,
3822  * due to the difficulty of synchronizing statistics across multiple
3823  * receive rings. The divisors and thresholds used by this function
3824  * were determined based on theoretical maximum wire speed and testing
3825  * data, in order to minimize response time while increasing bulk
3826  * throughput.
3827  * NOTE: This function is called only when operating in a multiqueue
3828  * receive environment.
3829  */
3830 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3831 {
3832 	struct igc_adapter *adapter = q_vector->adapter;
3833 	int new_val = q_vector->itr_val;
3834 	int avg_wire_size = 0;
3835 	unsigned int packets;
3836 
3837 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
3838 	 * ints/sec - ITR timer value of 120 ticks.
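	 *
	 * For the variable-rate path below: full-sized 1500-byte frames
	 * average 1524 bytes on the wire after the 24-byte
	 * CRC/preamble/gap adjustment, which is above the mid-size band
	 * and so yields new_val = 1524 / 2 = 762.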
3839 */ 3840 switch (adapter->link_speed) { 3841 case SPEED_10: 3842 case SPEED_100: 3843 new_val = IGC_4K_ITR; 3844 goto set_itr_val; 3845 default: 3846 break; 3847 } 3848 3849 packets = q_vector->rx.total_packets; 3850 if (packets) 3851 avg_wire_size = q_vector->rx.total_bytes / packets; 3852 3853 packets = q_vector->tx.total_packets; 3854 if (packets) 3855 avg_wire_size = max_t(u32, avg_wire_size, 3856 q_vector->tx.total_bytes / packets); 3857 3858 /* if avg_wire_size isn't set no work was done */ 3859 if (!avg_wire_size) 3860 goto clear_counts; 3861 3862 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 3863 avg_wire_size += 24; 3864 3865 /* Don't starve jumbo frames */ 3866 avg_wire_size = min(avg_wire_size, 3000); 3867 3868 /* Give a little boost to mid-size frames */ 3869 if (avg_wire_size > 300 && avg_wire_size < 1200) 3870 new_val = avg_wire_size / 3; 3871 else 3872 new_val = avg_wire_size / 2; 3873 3874 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3875 if (new_val < IGC_20K_ITR && 3876 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 3877 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 3878 new_val = IGC_20K_ITR; 3879 3880 set_itr_val: 3881 if (new_val != q_vector->itr_val) { 3882 q_vector->itr_val = new_val; 3883 q_vector->set_itr = 1; 3884 } 3885 clear_counts: 3886 q_vector->rx.total_bytes = 0; 3887 q_vector->rx.total_packets = 0; 3888 q_vector->tx.total_bytes = 0; 3889 q_vector->tx.total_packets = 0; 3890 } 3891 3892 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 3893 { 3894 struct igc_adapter *adapter = q_vector->adapter; 3895 struct igc_hw *hw = &adapter->hw; 3896 3897 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 3898 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 3899 if (adapter->num_q_vectors == 1) 3900 igc_set_itr(q_vector); 3901 else 3902 igc_update_ring_itr(q_vector); 3903 } 3904 3905 if (!test_bit(__IGC_DOWN, &adapter->state)) { 3906 if (adapter->msix_entries) 3907 wr32(IGC_EIMS, q_vector->eims_value); 3908 else 3909 igc_irq_enable(adapter); 3910 } 3911 } 3912 3913 static void igc_add_ring(struct igc_ring *ring, 3914 struct igc_ring_container *head) 3915 { 3916 head->ring = ring; 3917 head->count++; 3918 } 3919 3920 /** 3921 * igc_cache_ring_register - Descriptor ring to register mapping 3922 * @adapter: board private structure to initialize 3923 * 3924 * Once we know the feature-set enabled for the device, we'll cache 3925 * the register offset the descriptor ring is assigned to. 3926 */ 3927 static void igc_cache_ring_register(struct igc_adapter *adapter) 3928 { 3929 int i = 0, j = 0; 3930 3931 switch (adapter->hw.mac.type) { 3932 case igc_i225: 3933 default: 3934 for (; i < adapter->num_rx_queues; i++) 3935 adapter->rx_ring[i]->reg_idx = i; 3936 for (; j < adapter->num_tx_queues; j++) 3937 adapter->tx_ring[j]->reg_idx = j; 3938 break; 3939 } 3940 } 3941 3942 /** 3943 * igc_poll - NAPI Rx polling callback 3944 * @napi: napi polling structure 3945 * @budget: count of how many packets we should handle 3946 */ 3947 static int igc_poll(struct napi_struct *napi, int budget) 3948 { 3949 struct igc_q_vector *q_vector = container_of(napi, 3950 struct igc_q_vector, 3951 napi); 3952 struct igc_ring *rx_ring = q_vector->rx.ring; 3953 bool clean_complete = true; 3954 int work_done = 0; 3955 3956 if (q_vector->tx.ring) 3957 clean_complete = igc_clean_tx_irq(q_vector, budget); 3958 3959 if (rx_ring) { 3960 int cleaned = rx_ring->xsk_pool ? 
3961 igc_clean_rx_irq_zc(q_vector, budget) : 3962 igc_clean_rx_irq(q_vector, budget); 3963 3964 work_done += cleaned; 3965 if (cleaned >= budget) 3966 clean_complete = false; 3967 } 3968 3969 /* If all work not completed, return budget and keep polling */ 3970 if (!clean_complete) 3971 return budget; 3972 3973 /* Exit the polling mode, but don't re-enable interrupts if stack might 3974 * poll us due to busy-polling 3975 */ 3976 if (likely(napi_complete_done(napi, work_done))) 3977 igc_ring_irq_enable(q_vector); 3978 3979 return min(work_done, budget - 1); 3980 } 3981 3982 /** 3983 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 3984 * @adapter: board private structure to initialize 3985 * @v_count: q_vectors allocated on adapter, used for ring interleaving 3986 * @v_idx: index of vector in adapter struct 3987 * @txr_count: total number of Tx rings to allocate 3988 * @txr_idx: index of first Tx ring to allocate 3989 * @rxr_count: total number of Rx rings to allocate 3990 * @rxr_idx: index of first Rx ring to allocate 3991 * 3992 * We allocate one q_vector. If allocation fails we return -ENOMEM. 3993 */ 3994 static int igc_alloc_q_vector(struct igc_adapter *adapter, 3995 unsigned int v_count, unsigned int v_idx, 3996 unsigned int txr_count, unsigned int txr_idx, 3997 unsigned int rxr_count, unsigned int rxr_idx) 3998 { 3999 struct igc_q_vector *q_vector; 4000 struct igc_ring *ring; 4001 int ring_count; 4002 4003 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 4004 if (txr_count > 1 || rxr_count > 1) 4005 return -ENOMEM; 4006 4007 ring_count = txr_count + rxr_count; 4008 4009 /* allocate q_vector and rings */ 4010 q_vector = adapter->q_vector[v_idx]; 4011 if (!q_vector) 4012 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 4013 GFP_KERNEL); 4014 else 4015 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 4016 if (!q_vector) 4017 return -ENOMEM; 4018 4019 /* initialize NAPI */ 4020 netif_napi_add(adapter->netdev, &q_vector->napi, 4021 igc_poll, 64); 4022 4023 /* tie q_vector and adapter together */ 4024 adapter->q_vector[v_idx] = q_vector; 4025 q_vector->adapter = adapter; 4026 4027 /* initialize work limits */ 4028 q_vector->tx.work_limit = adapter->tx_work_limit; 4029 4030 /* initialize ITR configuration */ 4031 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 4032 q_vector->itr_val = IGC_START_ITR; 4033 4034 /* initialize pointer to rings */ 4035 ring = q_vector->ring; 4036 4037 /* initialize ITR */ 4038 if (rxr_count) { 4039 /* rx or rx/tx vector */ 4040 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 4041 q_vector->itr_val = adapter->rx_itr_setting; 4042 } else { 4043 /* tx only vector */ 4044 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 4045 q_vector->itr_val = adapter->tx_itr_setting; 4046 } 4047 4048 if (txr_count) { 4049 /* assign generic ring traits */ 4050 ring->dev = &adapter->pdev->dev; 4051 ring->netdev = adapter->netdev; 4052 4053 /* configure backlink on ring */ 4054 ring->q_vector = q_vector; 4055 4056 /* update q_vector Tx values */ 4057 igc_add_ring(ring, &q_vector->tx); 4058 4059 /* apply Tx specific ring traits */ 4060 ring->count = adapter->tx_ring_count; 4061 ring->queue_index = txr_idx; 4062 4063 /* assign ring to adapter */ 4064 adapter->tx_ring[txr_idx] = ring; 4065 4066 /* push pointer to next ring */ 4067 ring++; 4068 } 4069 4070 if (rxr_count) { 4071 /* assign generic ring traits */ 4072 ring->dev = &adapter->pdev->dev; 4073 ring->netdev = adapter->netdev; 4074 4075 /* 
configure backlink on ring */ 4076 ring->q_vector = q_vector; 4077 4078 /* update q_vector Rx values */ 4079 igc_add_ring(ring, &q_vector->rx); 4080 4081 /* apply Rx specific ring traits */ 4082 ring->count = adapter->rx_ring_count; 4083 ring->queue_index = rxr_idx; 4084 4085 /* assign ring to adapter */ 4086 adapter->rx_ring[rxr_idx] = ring; 4087 } 4088 4089 return 0; 4090 } 4091 4092 /** 4093 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 4094 * @adapter: board private structure to initialize 4095 * 4096 * We allocate one q_vector per queue interrupt. If allocation fails we 4097 * return -ENOMEM. 4098 */ 4099 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 4100 { 4101 int rxr_remaining = adapter->num_rx_queues; 4102 int txr_remaining = adapter->num_tx_queues; 4103 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 4104 int q_vectors = adapter->num_q_vectors; 4105 int err; 4106 4107 if (q_vectors >= (rxr_remaining + txr_remaining)) { 4108 for (; rxr_remaining; v_idx++) { 4109 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4110 0, 0, 1, rxr_idx); 4111 4112 if (err) 4113 goto err_out; 4114 4115 /* update counts and index */ 4116 rxr_remaining--; 4117 rxr_idx++; 4118 } 4119 } 4120 4121 for (; v_idx < q_vectors; v_idx++) { 4122 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 4123 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 4124 4125 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4126 tqpv, txr_idx, rqpv, rxr_idx); 4127 4128 if (err) 4129 goto err_out; 4130 4131 /* update counts and index */ 4132 rxr_remaining -= rqpv; 4133 txr_remaining -= tqpv; 4134 rxr_idx++; 4135 txr_idx++; 4136 } 4137 4138 return 0; 4139 4140 err_out: 4141 adapter->num_tx_queues = 0; 4142 adapter->num_rx_queues = 0; 4143 adapter->num_q_vectors = 0; 4144 4145 while (v_idx--) 4146 igc_free_q_vector(adapter, v_idx); 4147 4148 return -ENOMEM; 4149 } 4150 4151 /** 4152 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 4153 * @adapter: Pointer to adapter structure 4154 * @msix: boolean for MSI-X capability 4155 * 4156 * This function initializes the interrupts and allocates all of the queues. 4157 */ 4158 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 4159 { 4160 struct net_device *dev = adapter->netdev; 4161 int err = 0; 4162 4163 igc_set_interrupt_capability(adapter, msix); 4164 4165 err = igc_alloc_q_vectors(adapter); 4166 if (err) { 4167 netdev_err(dev, "Unable to allocate memory for vectors\n"); 4168 goto err_alloc_q_vectors; 4169 } 4170 4171 igc_cache_ring_register(adapter); 4172 4173 return 0; 4174 4175 err_alloc_q_vectors: 4176 igc_reset_interrupt_capability(adapter); 4177 return err; 4178 } 4179 4180 /** 4181 * igc_sw_init - Initialize general software structures (struct igc_adapter) 4182 * @adapter: board private structure to initialize 4183 * 4184 * igc_sw_init initializes the Adapter private data structure. 4185 * Fields are initialized based on PCI device information and 4186 * OS network device settings (MTU size). 
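 *
 * Worked example (illustrative arithmetic only): with the default
 * 1500-byte MTU, max_frame_size below becomes
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes.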
4187 */ 4188 static int igc_sw_init(struct igc_adapter *adapter) 4189 { 4190 struct net_device *netdev = adapter->netdev; 4191 struct pci_dev *pdev = adapter->pdev; 4192 struct igc_hw *hw = &adapter->hw; 4193 4194 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 4195 4196 /* set default ring sizes */ 4197 adapter->tx_ring_count = IGC_DEFAULT_TXD; 4198 adapter->rx_ring_count = IGC_DEFAULT_RXD; 4199 4200 /* set default ITR values */ 4201 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 4202 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 4203 4204 /* set default work limits */ 4205 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 4206 4207 /* adjust max frame to be at least the size of a standard frame */ 4208 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 4209 VLAN_HLEN; 4210 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4211 4212 mutex_init(&adapter->nfc_rule_lock); 4213 INIT_LIST_HEAD(&adapter->nfc_rule_list); 4214 adapter->nfc_rule_count = 0; 4215 4216 spin_lock_init(&adapter->stats64_lock); 4217 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 4218 adapter->flags |= IGC_FLAG_HAS_MSIX; 4219 4220 igc_init_queue_configuration(adapter); 4221 4222 /* This call may decrease the number of queues */ 4223 if (igc_init_interrupt_scheme(adapter, true)) { 4224 netdev_err(netdev, "Unable to allocate memory for queues\n"); 4225 return -ENOMEM; 4226 } 4227 4228 /* Explicitly disable IRQ since the NIC can be in any state. */ 4229 igc_irq_disable(adapter); 4230 4231 set_bit(__IGC_DOWN, &adapter->state); 4232 4233 return 0; 4234 } 4235 4236 /** 4237 * igc_up - Open the interface and prepare it to handle traffic 4238 * @adapter: board private structure 4239 */ 4240 void igc_up(struct igc_adapter *adapter) 4241 { 4242 struct igc_hw *hw = &adapter->hw; 4243 int i = 0; 4244 4245 /* hardware has been reset, we need to reload some things */ 4246 igc_configure(adapter); 4247 4248 clear_bit(__IGC_DOWN, &adapter->state); 4249 4250 for (i = 0; i < adapter->num_q_vectors; i++) 4251 napi_enable(&adapter->q_vector[i]->napi); 4252 4253 if (adapter->msix_entries) 4254 igc_configure_msix(adapter); 4255 else 4256 igc_assign_vector(adapter->q_vector[0], 0); 4257 4258 /* Clear any pending interrupts. */ 4259 rd32(IGC_ICR); 4260 igc_irq_enable(adapter); 4261 4262 netif_tx_start_all_queues(adapter->netdev); 4263 4264 /* start the watchdog. */ 4265 hw->mac.get_link_status = true; 4266 schedule_work(&adapter->watchdog_task); 4267 } 4268 4269 /** 4270 * igc_update_stats - Update the board statistics counters 4271 * @adapter: board private structure 4272 */ 4273 void igc_update_stats(struct igc_adapter *adapter) 4274 { 4275 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 4276 struct pci_dev *pdev = adapter->pdev; 4277 struct igc_hw *hw = &adapter->hw; 4278 u64 _bytes, _packets; 4279 u64 bytes, packets; 4280 unsigned int start; 4281 u32 mpc; 4282 int i; 4283 4284 /* Prevent stats update while adapter is being reset, or if the pci 4285 * connection is down. 
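 * (Both checks are needed: link_speed is zeroed by igc_down(), while
 * pci_channel_offline() also covers error recovery and surprise
 * removal.)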
4286 */ 4287 if (adapter->link_speed == 0) 4288 return; 4289 if (pci_channel_offline(pdev)) 4290 return; 4291 4292 packets = 0; 4293 bytes = 0; 4294 4295 rcu_read_lock(); 4296 for (i = 0; i < adapter->num_rx_queues; i++) { 4297 struct igc_ring *ring = adapter->rx_ring[i]; 4298 u32 rqdpc = rd32(IGC_RQDPC(i)); 4299 4300 if (hw->mac.type >= igc_i225) 4301 wr32(IGC_RQDPC(i), 0); 4302 4303 if (rqdpc) { 4304 ring->rx_stats.drops += rqdpc; 4305 net_stats->rx_fifo_errors += rqdpc; 4306 } 4307 4308 do { 4309 start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 4310 _bytes = ring->rx_stats.bytes; 4311 _packets = ring->rx_stats.packets; 4312 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 4313 bytes += _bytes; 4314 packets += _packets; 4315 } 4316 4317 net_stats->rx_bytes = bytes; 4318 net_stats->rx_packets = packets; 4319 4320 packets = 0; 4321 bytes = 0; 4322 for (i = 0; i < adapter->num_tx_queues; i++) { 4323 struct igc_ring *ring = adapter->tx_ring[i]; 4324 4325 do { 4326 start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 4327 _bytes = ring->tx_stats.bytes; 4328 _packets = ring->tx_stats.packets; 4329 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 4330 bytes += _bytes; 4331 packets += _packets; 4332 } 4333 net_stats->tx_bytes = bytes; 4334 net_stats->tx_packets = packets; 4335 rcu_read_unlock(); 4336 4337 /* read stats registers */ 4338 adapter->stats.crcerrs += rd32(IGC_CRCERRS); 4339 adapter->stats.gprc += rd32(IGC_GPRC); 4340 adapter->stats.gorc += rd32(IGC_GORCL); 4341 rd32(IGC_GORCH); /* clear GORCL */ 4342 adapter->stats.bprc += rd32(IGC_BPRC); 4343 adapter->stats.mprc += rd32(IGC_MPRC); 4344 adapter->stats.roc += rd32(IGC_ROC); 4345 4346 adapter->stats.prc64 += rd32(IGC_PRC64); 4347 adapter->stats.prc127 += rd32(IGC_PRC127); 4348 adapter->stats.prc255 += rd32(IGC_PRC255); 4349 adapter->stats.prc511 += rd32(IGC_PRC511); 4350 adapter->stats.prc1023 += rd32(IGC_PRC1023); 4351 adapter->stats.prc1522 += rd32(IGC_PRC1522); 4352 adapter->stats.tlpic += rd32(IGC_TLPIC); 4353 adapter->stats.rlpic += rd32(IGC_RLPIC); 4354 adapter->stats.hgptc += rd32(IGC_HGPTC); 4355 4356 mpc = rd32(IGC_MPC); 4357 adapter->stats.mpc += mpc; 4358 net_stats->rx_fifo_errors += mpc; 4359 adapter->stats.scc += rd32(IGC_SCC); 4360 adapter->stats.ecol += rd32(IGC_ECOL); 4361 adapter->stats.mcc += rd32(IGC_MCC); 4362 adapter->stats.latecol += rd32(IGC_LATECOL); 4363 adapter->stats.dc += rd32(IGC_DC); 4364 adapter->stats.rlec += rd32(IGC_RLEC); 4365 adapter->stats.xonrxc += rd32(IGC_XONRXC); 4366 adapter->stats.xontxc += rd32(IGC_XONTXC); 4367 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 4368 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 4369 adapter->stats.fcruc += rd32(IGC_FCRUC); 4370 adapter->stats.gptc += rd32(IGC_GPTC); 4371 adapter->stats.gotc += rd32(IGC_GOTCL); 4372 rd32(IGC_GOTCH); /* clear GOTCL */ 4373 adapter->stats.rnbc += rd32(IGC_RNBC); 4374 adapter->stats.ruc += rd32(IGC_RUC); 4375 adapter->stats.rfc += rd32(IGC_RFC); 4376 adapter->stats.rjc += rd32(IGC_RJC); 4377 adapter->stats.tor += rd32(IGC_TORH); 4378 adapter->stats.tot += rd32(IGC_TOTH); 4379 adapter->stats.tpr += rd32(IGC_TPR); 4380 4381 adapter->stats.ptc64 += rd32(IGC_PTC64); 4382 adapter->stats.ptc127 += rd32(IGC_PTC127); 4383 adapter->stats.ptc255 += rd32(IGC_PTC255); 4384 adapter->stats.ptc511 += rd32(IGC_PTC511); 4385 adapter->stats.ptc1023 += rd32(IGC_PTC1023); 4386 adapter->stats.ptc1522 += rd32(IGC_PTC1522); 4387 4388 adapter->stats.mptc += rd32(IGC_MPTC); 4389 adapter->stats.bptc += rd32(IGC_BPTC); 4390 4391 
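	/* Illustrative sketch (not driver code): the octet counters read
	 * above are clear-on-read 64-bit register pairs, so accumulating
	 * the full value would look like
	 *
	 *	u64 gorc = rd32(IGC_GORCL);
	 *	gorc |= (u64)rd32(IGC_GORCH) << 32;
	 *
	 * here only the low half is accumulated and the high half is
	 * read back just to clear the pair for the next interval.
	 */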
adapter->stats.tpt += rd32(IGC_TPT); 4392 adapter->stats.colc += rd32(IGC_COLC); 4393 adapter->stats.colc += rd32(IGC_RERC); 4394 4395 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 4396 4397 adapter->stats.tsctc += rd32(IGC_TSCTC); 4398 4399 adapter->stats.iac += rd32(IGC_IAC); 4400 4401 /* Fill out the OS statistics structure */ 4402 net_stats->multicast = adapter->stats.mprc; 4403 net_stats->collisions = adapter->stats.colc; 4404 4405 /* Rx Errors */ 4406 4407 /* RLEC on some newer hardware can be incorrect so build 4408 * our own version based on RUC and ROC 4409 */ 4410 net_stats->rx_errors = adapter->stats.rxerrc + 4411 adapter->stats.crcerrs + adapter->stats.algnerrc + 4412 adapter->stats.ruc + adapter->stats.roc + 4413 adapter->stats.cexterr; 4414 net_stats->rx_length_errors = adapter->stats.ruc + 4415 adapter->stats.roc; 4416 net_stats->rx_crc_errors = adapter->stats.crcerrs; 4417 net_stats->rx_frame_errors = adapter->stats.algnerrc; 4418 net_stats->rx_missed_errors = adapter->stats.mpc; 4419 4420 /* Tx Errors */ 4421 net_stats->tx_errors = adapter->stats.ecol + 4422 adapter->stats.latecol; 4423 net_stats->tx_aborted_errors = adapter->stats.ecol; 4424 net_stats->tx_window_errors = adapter->stats.latecol; 4425 net_stats->tx_carrier_errors = adapter->stats.tncrs; 4426 4427 /* Tx Dropped needs to be maintained elsewhere */ 4428 4429 /* Management Stats */ 4430 adapter->stats.mgptc += rd32(IGC_MGTPTC); 4431 adapter->stats.mgprc += rd32(IGC_MGTPRC); 4432 adapter->stats.mgpdc += rd32(IGC_MGTPDC); 4433 } 4434 4435 /** 4436 * igc_down - Close the interface 4437 * @adapter: board private structure 4438 */ 4439 void igc_down(struct igc_adapter *adapter) 4440 { 4441 struct net_device *netdev = adapter->netdev; 4442 struct igc_hw *hw = &adapter->hw; 4443 u32 tctl, rctl; 4444 int i = 0; 4445 4446 set_bit(__IGC_DOWN, &adapter->state); 4447 4448 igc_ptp_suspend(adapter); 4449 4450 /* disable receives in the hardware */ 4451 rctl = rd32(IGC_RCTL); 4452 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); 4453 /* flush and sleep below */ 4454 4455 /* set trans_start so we don't get spurious watchdogs during reset */ 4456 netif_trans_update(netdev); 4457 4458 netif_carrier_off(netdev); 4459 netif_tx_stop_all_queues(netdev); 4460 4461 /* disable transmits in the hardware */ 4462 tctl = rd32(IGC_TCTL); 4463 tctl &= ~IGC_TCTL_EN; 4464 wr32(IGC_TCTL, tctl); 4465 /* flush both disables and wait for them to finish */ 4466 wrfl(); 4467 usleep_range(10000, 20000); 4468 4469 igc_irq_disable(adapter); 4470 4471 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4472 4473 for (i = 0; i < adapter->num_q_vectors; i++) { 4474 if (adapter->q_vector[i]) { 4475 napi_synchronize(&adapter->q_vector[i]->napi); 4476 napi_disable(&adapter->q_vector[i]->napi); 4477 } 4478 } 4479 4480 del_timer_sync(&adapter->watchdog_timer); 4481 del_timer_sync(&adapter->phy_info_timer); 4482 4483 /* record the stats before reset */ 4484 spin_lock(&adapter->stats64_lock); 4485 igc_update_stats(adapter); 4486 spin_unlock(&adapter->stats64_lock); 4487 4488 adapter->link_speed = 0; 4489 adapter->link_duplex = 0; 4490 4491 if (!pci_channel_offline(adapter->pdev)) 4492 igc_reset(adapter); 4493 4494 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 4495 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 4496 4497 igc_clean_all_tx_rings(adapter); 4498 igc_clean_all_rx_rings(adapter); 4499 } 4500 4501 void igc_reinit_locked(struct igc_adapter *adapter) 4502 { 4503 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4504 usleep_range(1000, 2000); 4505
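	/* __IGC_RESETTING serializes this reinit against the other paths
	 * that take or test the same bit, e.g. igc_change_mtu() and
	 * igc_get_stats64() below.
	 */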
igc_down(adapter); 4506 igc_up(adapter); 4507 clear_bit(__IGC_RESETTING, &adapter->state); 4508 } 4509 4510 static void igc_reset_task(struct work_struct *work) 4511 { 4512 struct igc_adapter *adapter; 4513 4514 adapter = container_of(work, struct igc_adapter, reset_task); 4515 4516 rtnl_lock(); 4517 /* If we're already down or resetting, just bail */ 4518 if (test_bit(__IGC_DOWN, &adapter->state) || 4519 test_bit(__IGC_RESETTING, &adapter->state)) { 4520 rtnl_unlock(); 4521 return; 4522 } 4523 4524 igc_rings_dump(adapter); 4525 igc_regs_dump(adapter); 4526 netdev_err(adapter->netdev, "Reset adapter\n"); 4527 igc_reinit_locked(adapter); 4528 rtnl_unlock(); 4529 } 4530 4531 /** 4532 * igc_change_mtu - Change the Maximum Transfer Unit 4533 * @netdev: network interface device structure 4534 * @new_mtu: new value for maximum frame size 4535 * 4536 * Returns 0 on success, negative on failure 4537 */ 4538 static int igc_change_mtu(struct net_device *netdev, int new_mtu) 4539 { 4540 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4541 struct igc_adapter *adapter = netdev_priv(netdev); 4542 4543 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { 4544 netdev_dbg(netdev, "Jumbo frames not supported with XDP\n"); 4545 return -EINVAL; 4546 } 4547 4548 /* adjust max frame to be at least the size of a standard frame */ 4549 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 4550 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 4551 4552 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4553 usleep_range(1000, 2000); 4554 4555 /* igc_down has a dependency on max_frame_size */ 4556 adapter->max_frame_size = max_frame; 4557 4558 if (netif_running(netdev)) 4559 igc_down(adapter); 4560 4561 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 4562 netdev->mtu = new_mtu; 4563 4564 if (netif_running(netdev)) 4565 igc_up(adapter); 4566 else 4567 igc_reset(adapter); 4568 4569 clear_bit(__IGC_RESETTING, &adapter->state); 4570 4571 return 0; 4572 } 4573 4574 /** 4575 * igc_get_stats64 - Get System Network Statistics 4576 * @netdev: network interface device structure 4577 * @stats: rtnl_link_stats64 pointer 4578 * 4579 * Copies the device statistics into @stats. 4580 * The statistics are updated here and also from the watchdog task. 4581 */ 4582 static void igc_get_stats64(struct net_device *netdev, 4583 struct rtnl_link_stats64 *stats) 4584 { 4585 struct igc_adapter *adapter = netdev_priv(netdev); 4586 4587 spin_lock(&adapter->stats64_lock); 4588 if (!test_bit(__IGC_RESETTING, &adapter->state)) 4589 igc_update_stats(adapter); 4590 memcpy(stats, &adapter->stats64, sizeof(*stats)); 4591 spin_unlock(&adapter->stats64_lock); 4592 } 4593 4594 static netdev_features_t igc_fix_features(struct net_device *netdev, 4595 netdev_features_t features) 4596 { 4597 /* Since there is no support for separate Rx/Tx vlan accel 4598 * enable/disable, make sure the Tx flag is always in the same state as Rx.
4599 */ 4600 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4601 features |= NETIF_F_HW_VLAN_CTAG_TX; 4602 else 4603 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 4604 4605 return features; 4606 } 4607 4608 static int igc_set_features(struct net_device *netdev, 4609 netdev_features_t features) 4610 { 4611 netdev_features_t changed = netdev->features ^ features; 4612 struct igc_adapter *adapter = netdev_priv(netdev); 4613 4614 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 4615 igc_vlan_mode(netdev, features); 4616 4617 /* Add VLAN support */ 4618 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 4619 return 0; 4620 4621 if (!(features & NETIF_F_NTUPLE)) 4622 igc_flush_nfc_rules(adapter); 4623 4624 netdev->features = features; 4625 4626 if (netif_running(netdev)) 4627 igc_reinit_locked(adapter); 4628 else 4629 igc_reset(adapter); 4630 4631 return 1; 4632 } 4633 4634 static netdev_features_t 4635 igc_features_check(struct sk_buff *skb, struct net_device *dev, 4636 netdev_features_t features) 4637 { 4638 unsigned int network_hdr_len, mac_hdr_len; 4639 4640 /* Make certain the headers can be described by a context descriptor */ 4641 mac_hdr_len = skb_network_header(skb) - skb->data; 4642 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 4643 return features & ~(NETIF_F_HW_CSUM | 4644 NETIF_F_SCTP_CRC | 4645 NETIF_F_HW_VLAN_CTAG_TX | 4646 NETIF_F_TSO | 4647 NETIF_F_TSO6); 4648 4649 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 4650 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 4651 return features & ~(NETIF_F_HW_CSUM | 4652 NETIF_F_SCTP_CRC | 4653 NETIF_F_TSO | 4654 NETIF_F_TSO6); 4655 4656 /* We can only support IPv4 TSO in tunnels if we can mangle the 4657 * inner IP ID field, so strip TSO if MANGLEID is not supported. 4658 */ 4659 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 4660 features &= ~NETIF_F_TSO; 4661 4662 return features; 4663 } 4664 4665 static void igc_tsync_interrupt(struct igc_adapter *adapter) 4666 { 4667 u32 ack, tsauxc, sec, nsec, tsicr; 4668 struct igc_hw *hw = &adapter->hw; 4669 struct ptp_clock_event event; 4670 struct timespec64 ts; 4671 4672 tsicr = rd32(IGC_TSICR); 4673 ack = 0; 4674 4675 if (tsicr & IGC_TSICR_SYS_WRAP) { 4676 event.type = PTP_CLOCK_PPS; 4677 if (adapter->ptp_caps.pps) 4678 ptp_clock_event(adapter->ptp_clock, &event); 4679 ack |= IGC_TSICR_SYS_WRAP; 4680 } 4681 4682 if (tsicr & IGC_TSICR_TXTS) { 4683 /* retrieve hardware timestamp */ 4684 schedule_work(&adapter->ptp_tx_work); 4685 ack |= IGC_TSICR_TXTS; 4686 } 4687 4688 if (tsicr & IGC_TSICR_TT0) { 4689 spin_lock(&adapter->tmreg_lock); 4690 ts = timespec64_add(adapter->perout[0].start, 4691 adapter->perout[0].period); 4692 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 4693 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); 4694 tsauxc = rd32(IGC_TSAUXC); 4695 tsauxc |= IGC_TSAUXC_EN_TT0; 4696 wr32(IGC_TSAUXC, tsauxc); 4697 adapter->perout[0].start = ts; 4698 spin_unlock(&adapter->tmreg_lock); 4699 ack |= IGC_TSICR_TT0; 4700 } 4701 4702 if (tsicr & IGC_TSICR_TT1) { 4703 spin_lock(&adapter->tmreg_lock); 4704 ts = timespec64_add(adapter->perout[1].start, 4705 adapter->perout[1].period); 4706 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 4707 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); 4708 tsauxc = rd32(IGC_TSAUXC); 4709 tsauxc |= IGC_TSAUXC_EN_TT1; 4710 wr32(IGC_TSAUXC, tsauxc); 4711 adapter->perout[1].start = ts; 4712 spin_unlock(&adapter->tmreg_lock); 4713 ack |= IGC_TSICR_TT1; 4714 } 4715 4716 if (tsicr & IGC_TSICR_AUTT0) { 4717 nsec = rd32(IGC_AUXSTMPL0); 4718 
sec = rd32(IGC_AUXSTMPH0); 4719 event.type = PTP_CLOCK_EXTTS; 4720 event.index = 0; 4721 event.timestamp = (u64)sec * NSEC_PER_SEC + nsec; 4722 ptp_clock_event(adapter->ptp_clock, &event); 4723 ack |= IGC_TSICR_AUTT0; 4724 } 4725 4726 if (tsicr & IGC_TSICR_AUTT1) { 4727 nsec = rd32(IGC_AUXSTMPL1); 4728 sec = rd32(IGC_AUXSTMPH1); 4729 event.type = PTP_CLOCK_EXTTS; 4730 event.index = 1; 4731 event.timestamp = (u64)sec * NSEC_PER_SEC + nsec; 4732 ptp_clock_event(adapter->ptp_clock, &event); 4733 ack |= IGC_TSICR_AUTT1; 4734 } 4735 4736 /* acknowledge the interrupts */ 4737 wr32(IGC_TSICR, ack); 4738 } 4739 4740 /** 4741 * igc_msix_other - msix other interrupt handler 4742 * @irq: interrupt number 4743 * @data: pointer to adapter structure 4744 */ 4745 static irqreturn_t igc_msix_other(int irq, void *data) 4746 { 4747 struct igc_adapter *adapter = data; 4748 struct igc_hw *hw = &adapter->hw; 4749 u32 icr = rd32(IGC_ICR); 4750 4751 /* reading ICR causes bit 31 of EICR to be cleared */ 4752 if (icr & IGC_ICR_DRSTA) 4753 schedule_work(&adapter->reset_task); 4754 4755 if (icr & IGC_ICR_DOUTSYNC) { 4756 /* HW is reporting DMA is out of sync */ 4757 adapter->stats.doosync++; 4758 } 4759 4760 if (icr & IGC_ICR_LSC) { 4761 hw->mac.get_link_status = true; 4762 /* guard against interrupt when we're going down */ 4763 if (!test_bit(__IGC_DOWN, &adapter->state)) 4764 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4765 } 4766 4767 if (icr & IGC_ICR_TS) 4768 igc_tsync_interrupt(adapter); 4769 4770 wr32(IGC_EIMS, adapter->eims_other); 4771 4772 return IRQ_HANDLED; 4773 } 4774 4775 static void igc_write_itr(struct igc_q_vector *q_vector) 4776 { 4777 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 4778 4779 if (!q_vector->set_itr) 4780 return; 4781 4782 if (!itr_val) 4783 itr_val = IGC_ITR_VAL_MASK; 4784 4785 itr_val |= IGC_EITR_CNT_IGNR; 4786 4787 writel(itr_val, q_vector->itr_register); 4788 q_vector->set_itr = 0; 4789 } 4790 4791 static irqreturn_t igc_msix_ring(int irq, void *data) 4792 { 4793 struct igc_q_vector *q_vector = data; 4794 4795 /* Write the ITR value calculated from the previous interrupt. */ 4796 igc_write_itr(q_vector); 4797 4798 napi_schedule(&q_vector->napi); 4799 4800 return IRQ_HANDLED; 4801 } 4802 4803 /** 4804 * igc_request_msix - Initialize MSI-X interrupts 4805 * @adapter: Pointer to adapter structure 4806 * 4807 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 4808 * kernel.
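 *
 * Vector 0 carries the "other" interrupt (link state changes, errors,
 * timesync); each queue vector then gets its own IRQ named after the
 * netdev and queue, e.g. "eth0-TxRx-0" for a paired Tx/Rx vector
 * (interface name illustrative).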
4809 */ 4810 static int igc_request_msix(struct igc_adapter *adapter) 4811 { 4812 int i = 0, err = 0, vector = 0, free_vector = 0; 4813 struct net_device *netdev = adapter->netdev; 4814 4815 err = request_irq(adapter->msix_entries[vector].vector, 4816 &igc_msix_other, 0, netdev->name, adapter); 4817 if (err) 4818 goto err_out; 4819 4820 for (i = 0; i < adapter->num_q_vectors; i++) { 4821 struct igc_q_vector *q_vector = adapter->q_vector[i]; 4822 4823 vector++; 4824 4825 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 4826 4827 if (q_vector->rx.ring && q_vector->tx.ring) 4828 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 4829 q_vector->rx.ring->queue_index); 4830 else if (q_vector->tx.ring) 4831 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 4832 q_vector->tx.ring->queue_index); 4833 else if (q_vector->rx.ring) 4834 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 4835 q_vector->rx.ring->queue_index); 4836 else 4837 sprintf(q_vector->name, "%s-unused", netdev->name); 4838 4839 err = request_irq(adapter->msix_entries[vector].vector, 4840 igc_msix_ring, 0, q_vector->name, 4841 q_vector); 4842 if (err) 4843 goto err_free; 4844 } 4845 4846 igc_configure_msix(adapter); 4847 return 0; 4848 4849 err_free: 4850 /* free already assigned IRQs */ 4851 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 4852 4853 vector--; 4854 for (i = 0; i < vector; i++) { 4855 free_irq(adapter->msix_entries[free_vector++].vector, 4856 adapter->q_vector[i]); 4857 } 4858 err_out: 4859 return err; 4860 } 4861 4862 /** 4863 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 4864 * @adapter: Pointer to adapter structure 4865 * 4866 * This function resets the device so that it has 0 rx queues, tx queues, and 4867 * MSI-X interrupts allocated. 4868 */ 4869 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 4870 { 4871 igc_free_q_vectors(adapter); 4872 igc_reset_interrupt_capability(adapter); 4873 } 4874 4875 /* Need to wait a few seconds after link up to get diagnostic information from 4876 * the phy 4877 */ 4878 static void igc_update_phy_info(struct timer_list *t) 4879 { 4880 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 4881 4882 igc_get_phy_info(&adapter->hw); 4883 } 4884 4885 /** 4886 * igc_has_link - check shared code for link and determine up/down 4887 * @adapter: pointer to driver private info 4888 */ 4889 bool igc_has_link(struct igc_adapter *adapter) 4890 { 4891 struct igc_hw *hw = &adapter->hw; 4892 bool link_active = false; 4893 4894 /* get_link_status is set on LSC (link status) interrupt or 4895 * rx sequence error interrupt. 
get_link_status will stay 4896 * true until igc_check_for_link() establishes link, 4897 * for copper adapters ONLY 4898 */ 4899 switch (hw->phy.media_type) { 4900 case igc_media_type_copper: 4901 if (!hw->mac.get_link_status) 4902 return true; 4903 hw->mac.ops.check_for_link(hw); 4904 link_active = !hw->mac.get_link_status; 4905 break; 4906 default: 4907 case igc_media_type_unknown: 4908 break; 4909 } 4910 4911 if (hw->mac.type == igc_i225 && 4912 hw->phy.id == I225_I_PHY_ID) { 4913 if (!netif_carrier_ok(adapter->netdev)) { 4914 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4915 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { 4916 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; 4917 adapter->link_check_timeout = jiffies; 4918 } 4919 } 4920 4921 return link_active; 4922 } 4923 4924 /** 4925 * igc_watchdog - Timer Call-back 4926 * @t: timer for the watchdog 4927 */ 4928 static void igc_watchdog(struct timer_list *t) 4929 { 4930 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); 4931 /* Do the rest outside of interrupt context */ 4932 schedule_work(&adapter->watchdog_task); 4933 } 4934 4935 static void igc_watchdog_task(struct work_struct *work) 4936 { 4937 struct igc_adapter *adapter = container_of(work, 4938 struct igc_adapter, 4939 watchdog_task); 4940 struct net_device *netdev = adapter->netdev; 4941 struct igc_hw *hw = &adapter->hw; 4942 struct igc_phy_info *phy = &hw->phy; 4943 u16 phy_data, retry_count = 20; 4944 u32 link; 4945 int i; 4946 4947 link = igc_has_link(adapter); 4948 4949 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 4950 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 4951 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4952 else 4953 link = false; 4954 } 4955 4956 if (link) { 4957 /* Cancel scheduled suspend requests. */ 4958 pm_runtime_resume(netdev->dev.parent); 4959 4960 if (!netif_carrier_ok(netdev)) { 4961 u32 ctrl; 4962 4963 hw->mac.ops.get_speed_and_duplex(hw, 4964 &adapter->link_speed, 4965 &adapter->link_duplex); 4966 4967 ctrl = rd32(IGC_CTRL); 4968 /* Link status message must follow this format */ 4969 netdev_info(netdev, 4970 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 4971 adapter->link_speed, 4972 adapter->link_duplex == FULL_DUPLEX ? 4973 "Full" : "Half", 4974 (ctrl & IGC_CTRL_TFCE) && 4975 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 4976 (ctrl & IGC_CTRL_RFCE) ? "RX" : 4977 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 4978 4979 /* disable EEE if enabled */ 4980 if ((adapter->flags & IGC_FLAG_EEE) && 4981 adapter->link_duplex == HALF_DUPLEX) { 4982 netdev_info(netdev, 4983 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); 4984 adapter->hw.dev_spec._base.eee_enable = false; 4985 adapter->flags &= ~IGC_FLAG_EEE; 4986 } 4987 4988 /* check if SmartSpeed worked */ 4989 igc_check_downshift(hw); 4990 if (phy->speed_downgraded) 4991 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 4992 4993 /* adjust timeout factor according to speed/duplex */ 4994 adapter->tx_timeout_factor = 1; 4995 switch (adapter->link_speed) { 4996 case SPEED_10: 4997 adapter->tx_timeout_factor = 14; 4998 break; 4999 case SPEED_100: 5000 /* maybe add some timeout factor ?
*/ 5001 break; 5002 } 5003 5004 if (adapter->link_speed != SPEED_1000) 5005 goto no_wait; 5006 5007 /* wait for Remote receiver status OK */ 5008 retry_read_status: 5009 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 5010 &phy_data)) { 5011 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5012 retry_count) { 5013 msleep(100); 5014 retry_count--; 5015 goto retry_read_status; 5016 } else if (!retry_count) { 5017 netdev_err(netdev, "exceeded max 2 second wait for Remote receiver status\n"); 5018 } 5019 } else { 5020 netdev_err(netdev, "failed to read 1000Base-T Status register\n"); 5021 } 5022 no_wait: 5023 netif_carrier_on(netdev); 5024 5025 /* link state has changed, schedule phy info update */ 5026 if (!test_bit(__IGC_DOWN, &adapter->state)) 5027 mod_timer(&adapter->phy_info_timer, 5028 round_jiffies(jiffies + 2 * HZ)); 5029 } 5030 } else { 5031 if (netif_carrier_ok(netdev)) { 5032 adapter->link_speed = 0; 5033 adapter->link_duplex = 0; 5034 5035 /* Link status message must follow this format */ 5036 netdev_info(netdev, "NIC Link is Down\n"); 5037 netif_carrier_off(netdev); 5038 5039 /* link state has changed, schedule phy info update */ 5040 if (!test_bit(__IGC_DOWN, &adapter->state)) 5041 mod_timer(&adapter->phy_info_timer, 5042 round_jiffies(jiffies + 2 * HZ)); 5043 5044 /* link is down, time to check for alternate media */ 5045 if (adapter->flags & IGC_FLAG_MAS_ENABLE) { 5046 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5047 schedule_work(&adapter->reset_task); 5048 /* return immediately */ 5049 return; 5050 } 5051 } 5052 pm_schedule_suspend(netdev->dev.parent, 5053 MSEC_PER_SEC * 5); 5054 5055 /* also check for alternate media here */ 5056 } else if (!netif_carrier_ok(netdev) && 5057 (adapter->flags & IGC_FLAG_MAS_ENABLE)) { 5058 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5059 schedule_work(&adapter->reset_task); 5060 /* return immediately */ 5061 return; 5062 } 5063 } 5064 } 5065 5066 spin_lock(&adapter->stats64_lock); 5067 igc_update_stats(adapter); 5068 spin_unlock(&adapter->stats64_lock); 5069 5070 for (i = 0; i < adapter->num_tx_queues; i++) { 5071 struct igc_ring *tx_ring = adapter->tx_ring[i]; 5072 5073 if (!netif_carrier_ok(netdev)) { 5074 /* We've lost link, so the controller stops DMA, 5075 * but we've got queued Tx work that's never going 5076 * to get done, so reset controller to flush Tx. 5077 * (Do the reset outside of interrupt context).
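 * The test below reads as "at least one descriptor is still in
 * use": igc_desc_unused(tx_ring) + 1 < tx_ring->count.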
5078 */ 5079 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 5080 adapter->tx_timeout_count++; 5081 schedule_work(&adapter->reset_task); 5082 /* return immediately since reset is imminent */ 5083 return; 5084 } 5085 } 5086 5087 /* Force detection of hung controller every watchdog period */ 5088 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5089 } 5090 5091 /* Cause software interrupt to ensure Rx ring is cleaned */ 5092 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5093 u32 eics = 0; 5094 5095 for (i = 0; i < adapter->num_q_vectors; i++) 5096 eics |= adapter->q_vector[i]->eims_value; 5097 wr32(IGC_EICS, eics); 5098 } else { 5099 wr32(IGC_ICS, IGC_ICS_RXDMT0); 5100 } 5101 5102 igc_ptp_tx_hang(adapter); 5103 5104 /* Reset the timer */ 5105 if (!test_bit(__IGC_DOWN, &adapter->state)) { 5106 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 5107 mod_timer(&adapter->watchdog_timer, 5108 round_jiffies(jiffies + HZ)); 5109 else 5110 mod_timer(&adapter->watchdog_timer, 5111 round_jiffies(jiffies + 2 * HZ)); 5112 } 5113 } 5114 5115 /** 5116 * igc_intr_msi - Interrupt Handler 5117 * @irq: interrupt number 5118 * @data: pointer to a network interface device structure 5119 */ 5120 static irqreturn_t igc_intr_msi(int irq, void *data) 5121 { 5122 struct igc_adapter *adapter = data; 5123 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5124 struct igc_hw *hw = &adapter->hw; 5125 /* read ICR disables interrupts using IAM */ 5126 u32 icr = rd32(IGC_ICR); 5127 5128 igc_write_itr(q_vector); 5129 5130 if (icr & IGC_ICR_DRSTA) 5131 schedule_work(&adapter->reset_task); 5132 5133 if (icr & IGC_ICR_DOUTSYNC) { 5134 /* HW is reporting DMA is out of sync */ 5135 adapter->stats.doosync++; 5136 } 5137 5138 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5139 hw->mac.get_link_status = true; 5140 if (!test_bit(__IGC_DOWN, &adapter->state)) 5141 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5142 } 5143 5144 napi_schedule(&q_vector->napi); 5145 5146 return IRQ_HANDLED; 5147 } 5148 5149 /** 5150 * igc_intr - Legacy Interrupt Handler 5151 * @irq: interrupt number 5152 * @data: pointer to a network interface device structure 5153 */ 5154 static irqreturn_t igc_intr(int irq, void *data) 5155 { 5156 struct igc_adapter *adapter = data; 5157 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5158 struct igc_hw *hw = &adapter->hw; 5159 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No 5160 * need for the IMC write 5161 */ 5162 u32 icr = rd32(IGC_ICR); 5163 5164 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5165 * not set, then the adapter didn't send an interrupt 5166 */ 5167 if (!(icr & IGC_ICR_INT_ASSERTED)) 5168 return IRQ_NONE; 5169 5170 igc_write_itr(q_vector); 5171 5172 if (icr & IGC_ICR_DRSTA) 5173 schedule_work(&adapter->reset_task); 5174 5175 if (icr & IGC_ICR_DOUTSYNC) { 5176 /* HW is reporting DMA is out of sync */ 5177 adapter->stats.doosync++; 5178 } 5179 5180 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5181 hw->mac.get_link_status = true; 5182 /* guard against interrupt when we're going down */ 5183 if (!test_bit(__IGC_DOWN, &adapter->state)) 5184 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5185 } 5186 5187 napi_schedule(&q_vector->napi); 5188 5189 return IRQ_HANDLED; 5190 } 5191 5192 static void igc_free_irq(struct igc_adapter *adapter) 5193 { 5194 if (adapter->msix_entries) { 5195 int vector = 0, i; 5196 5197 free_irq(adapter->msix_entries[vector++].vector, adapter); 5198 5199 for (i = 0; i < adapter->num_q_vectors; i++) 5200 free_irq(adapter->msix_entries[vector++].vector, 5201 adapter->q_vector[i]); 5202 } else { 5203 free_irq(adapter->pdev->irq, adapter); 5204 } 5205 } 5206 5207 /** 5208 * igc_request_irq - initialize interrupts 5209 * @adapter: Pointer to adapter structure 5210 * 5211 * Attempts to configure interrupts using the best available 5212 * capabilities of the hardware and kernel. 5213 */ 5214 static int igc_request_irq(struct igc_adapter *adapter) 5215 { 5216 struct net_device *netdev = adapter->netdev; 5217 struct pci_dev *pdev = adapter->pdev; 5218 int err = 0; 5219 5220 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5221 err = igc_request_msix(adapter); 5222 if (!err) 5223 goto request_done; 5224 /* fall back to MSI */ 5225 igc_free_all_tx_resources(adapter); 5226 igc_free_all_rx_resources(adapter); 5227 5228 igc_clear_interrupt_scheme(adapter); 5229 err = igc_init_interrupt_scheme(adapter, false); 5230 if (err) 5231 goto request_done; 5232 igc_setup_all_tx_resources(adapter); 5233 igc_setup_all_rx_resources(adapter); 5234 igc_configure(adapter); 5235 } 5236 5237 igc_assign_vector(adapter->q_vector[0], 0); 5238 5239 if (adapter->flags & IGC_FLAG_HAS_MSI) { 5240 err = request_irq(pdev->irq, &igc_intr_msi, 0, 5241 netdev->name, adapter); 5242 if (!err) 5243 goto request_done; 5244 5245 /* fall back to legacy interrupts */ 5246 igc_reset_interrupt_capability(adapter); 5247 adapter->flags &= ~IGC_FLAG_HAS_MSI; 5248 } 5249 5250 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 5251 netdev->name, adapter); 5252 5253 if (err) 5254 netdev_err(netdev, "Error %d getting interrupt\n", err); 5255 5256 request_done: 5257 return err; 5258 } 5259 5260 /** 5261 * __igc_open - Called when a network interface is made active 5262 * @netdev: network interface device structure 5263 * @resuming: boolean indicating if the device is resuming 5264 * 5265 * Returns 0 on success, negative value on failure 5266 * 5267 * The open entry point is called when a network interface is made 5268 * active by the system (IFF_UP). At this point all resources needed 5269 * for transmit and receive operations are allocated, the interrupt 5270 * handler is registered with the OS, the watchdog timer is started, 5271 * and the stack is notified that the interface is ready. 
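 *
 * The error labels below unwind this setup in reverse order, so each
 * failure point frees only what had already been allocated.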
5272 */ 5273 static int __igc_open(struct net_device *netdev, bool resuming) 5274 { 5275 struct igc_adapter *adapter = netdev_priv(netdev); 5276 struct pci_dev *pdev = adapter->pdev; 5277 struct igc_hw *hw = &adapter->hw; 5278 int err = 0; 5279 int i = 0; 5280 5281 /* disallow open during test */ 5282 5283 if (test_bit(__IGC_TESTING, &adapter->state)) { 5284 WARN_ON(resuming); 5285 return -EBUSY; 5286 } 5287 5288 if (!resuming) 5289 pm_runtime_get_sync(&pdev->dev); 5290 5291 netif_carrier_off(netdev); 5292 5293 /* allocate transmit descriptors */ 5294 err = igc_setup_all_tx_resources(adapter); 5295 if (err) 5296 goto err_setup_tx; 5297 5298 /* allocate receive descriptors */ 5299 err = igc_setup_all_rx_resources(adapter); 5300 if (err) 5301 goto err_setup_rx; 5302 5303 igc_power_up_link(adapter); 5304 5305 igc_configure(adapter); 5306 5307 err = igc_request_irq(adapter); 5308 if (err) 5309 goto err_req_irq; 5310 5311 /* Notify the stack of the actual queue counts. */ 5312 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 5313 if (err) 5314 goto err_set_queues; 5315 5316 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 5317 if (err) 5318 goto err_set_queues; 5319 5320 clear_bit(__IGC_DOWN, &adapter->state); 5321 5322 for (i = 0; i < adapter->num_q_vectors; i++) 5323 napi_enable(&adapter->q_vector[i]->napi); 5324 5325 /* Clear any pending interrupts. */ 5326 rd32(IGC_ICR); 5327 igc_irq_enable(adapter); 5328 5329 if (!resuming) 5330 pm_runtime_put(&pdev->dev); 5331 5332 netif_tx_start_all_queues(netdev); 5333 5334 /* start the watchdog. */ 5335 hw->mac.get_link_status = true; 5336 schedule_work(&adapter->watchdog_task); 5337 5338 return IGC_SUCCESS; 5339 5340 err_set_queues: 5341 igc_free_irq(adapter); 5342 err_req_irq: 5343 igc_release_hw_control(adapter); 5344 igc_power_down_phy_copper_base(&adapter->hw); 5345 igc_free_all_rx_resources(adapter); 5346 err_setup_rx: 5347 igc_free_all_tx_resources(adapter); 5348 err_setup_tx: 5349 igc_reset(adapter); 5350 if (!resuming) 5351 pm_runtime_put(&pdev->dev); 5352 5353 return err; 5354 } 5355 5356 int igc_open(struct net_device *netdev) 5357 { 5358 return __igc_open(netdev, false); 5359 } 5360 5361 /** 5362 * __igc_close - Disables a network interface 5363 * @netdev: network interface device structure 5364 * @suspending: boolean indicating the device is suspending 5365 * 5366 * Returns 0, this is not allowed to fail 5367 * 5368 * The close entry point is called when an interface is de-activated 5369 * by the OS. The hardware is still under the driver's control, but 5370 * needs to be disabled. A global MAC reset is issued to stop the 5371 * hardware, and all transmit and receive resources are freed. 
5372 */ 5373 static int __igc_close(struct net_device *netdev, bool suspending) 5374 { 5375 struct igc_adapter *adapter = netdev_priv(netdev); 5376 struct pci_dev *pdev = adapter->pdev; 5377 5378 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 5379 5380 if (!suspending) 5381 pm_runtime_get_sync(&pdev->dev); 5382 5383 igc_down(adapter); 5384 5385 igc_release_hw_control(adapter); 5386 5387 igc_free_irq(adapter); 5388 5389 igc_free_all_tx_resources(adapter); 5390 igc_free_all_rx_resources(adapter); 5391 5392 if (!suspending) 5393 pm_runtime_put_sync(&pdev->dev); 5394 5395 return 0; 5396 } 5397 5398 int igc_close(struct net_device *netdev) 5399 { 5400 if (netif_device_present(netdev) || netdev->dismantle) 5401 return __igc_close(netdev, false); 5402 return 0; 5403 } 5404 5405 /** 5406 * igc_ioctl - Access the hwtstamp interface 5407 * @netdev: network interface device structure 5408 * @ifr: interface request data 5409 * @cmd: ioctl command 5410 **/ 5411 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5412 { 5413 switch (cmd) { 5414 case SIOCGHWTSTAMP: 5415 return igc_ptp_get_ts_config(netdev, ifr); 5416 case SIOCSHWTSTAMP: 5417 return igc_ptp_set_ts_config(netdev, ifr); 5418 default: 5419 return -EOPNOTSUPP; 5420 } 5421 } 5422 5423 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, 5424 bool enable) 5425 { 5426 struct igc_ring *ring; 5427 int i; 5428 5429 if (queue < 0 || queue >= adapter->num_tx_queues) 5430 return -EINVAL; 5431 5432 ring = adapter->tx_ring[queue]; 5433 ring->launchtime_enable = enable; 5434 5435 if (adapter->base_time) 5436 return 0; 5437 5438 adapter->cycle_time = NSEC_PER_SEC; 5439 5440 for (i = 0; i < adapter->num_tx_queues; i++) { 5441 ring = adapter->tx_ring[i]; 5442 ring->start_time = 0; 5443 ring->end_time = NSEC_PER_SEC; 5444 } 5445 5446 return 0; 5447 } 5448 5449 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 5450 { 5451 struct timespec64 b; 5452 5453 b = ktime_to_timespec64(base_time); 5454 5455 return timespec64_compare(now, &b) > 0; 5456 } 5457 5458 static bool validate_schedule(struct igc_adapter *adapter, 5459 const struct tc_taprio_qopt_offload *qopt) 5460 { 5461 int queue_uses[IGC_MAX_TX_QUEUES] = { }; 5462 struct timespec64 now; 5463 size_t n; 5464 5465 if (qopt->cycle_time_extension) 5466 return false; 5467 5468 igc_ptp_read(adapter, &now); 5469 5470 /* If we program the controller's BASET registers with a time 5471 * in the future, it will hold all the packets until that 5472 * time, causing a lot of TX Hangs, so to avoid that, we 5473 * reject schedules that would start in the future. 5474 */ 5475 if (!is_base_time_past(qopt->base_time, &now)) 5476 return false; 5477 5478 for (n = 0; n < qopt->num_entries; n++) { 5479 const struct tc_taprio_sched_entry *e; 5480 int i; 5481 5482 e = &qopt->entries[n]; 5483 5484 /* i225 only supports "global" frame preemption 5485 * settings. 
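 * Schedule entries may therefore only open and close gates
 * (TC_TAPRIO_CMD_SET_GATES); per-entry preemption commands are
 * rejected below.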
5486 */ 5487 if (e->command != TC_TAPRIO_CMD_SET_GATES) 5488 return false; 5489 5490 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { 5491 if (e->gate_mask & BIT(i)) 5492 queue_uses[i]++; 5493 5494 if (queue_uses[i] > 1) 5495 return false; 5496 } 5497 } 5498 5499 return true; 5500 } 5501 5502 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, 5503 struct tc_etf_qopt_offload *qopt) 5504 { 5505 struct igc_hw *hw = &adapter->hw; 5506 int err; 5507 5508 if (hw->mac.type != igc_i225) 5509 return -EOPNOTSUPP; 5510 5511 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); 5512 if (err) 5513 return err; 5514 5515 return igc_tsn_offload_apply(adapter); 5516 } 5517 5518 static int igc_save_qbv_schedule(struct igc_adapter *adapter, 5519 struct tc_taprio_qopt_offload *qopt) 5520 { 5521 u32 start_time = 0, end_time = 0; 5522 size_t n; 5523 5524 if (!qopt->enable) { 5525 adapter->base_time = 0; 5526 return 0; 5527 } 5528 5529 if (adapter->base_time) 5530 return -EALREADY; 5531 5532 if (!validate_schedule(adapter, qopt)) 5533 return -EINVAL; 5534 5535 adapter->cycle_time = qopt->cycle_time; 5536 adapter->base_time = qopt->base_time; 5537 5538 /* FIXME: be a little smarter about cases when the gate for a 5539 * queue stays open for more than one entry. 5540 */ 5541 for (n = 0; n < qopt->num_entries; n++) { 5542 struct tc_taprio_sched_entry *e = &qopt->entries[n]; 5543 int i; 5544 5545 end_time += e->interval; 5546 5547 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { 5548 struct igc_ring *ring = adapter->tx_ring[i]; 5549 5550 if (!(e->gate_mask & BIT(i))) 5551 continue; 5552 5553 ring->start_time = start_time; 5554 ring->end_time = end_time; 5555 } 5556 5557 start_time += e->interval; 5558 } 5559 5560 return 0; 5561 } 5562 5563 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, 5564 struct tc_taprio_qopt_offload *qopt) 5565 { 5566 struct igc_hw *hw = &adapter->hw; 5567 int err; 5568 5569 if (hw->mac.type != igc_i225) 5570 return -EOPNOTSUPP; 5571 5572 err = igc_save_qbv_schedule(adapter, qopt); 5573 if (err) 5574 return err; 5575 5576 return igc_tsn_offload_apply(adapter); 5577 } 5578 5579 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, 5580 void *type_data) 5581 { 5582 struct igc_adapter *adapter = netdev_priv(dev); 5583 5584 switch (type) { 5585 case TC_SETUP_QDISC_TAPRIO: 5586 return igc_tsn_enable_qbv_scheduling(adapter, type_data); 5587 5588 case TC_SETUP_QDISC_ETF: 5589 return igc_tsn_enable_launchtime(adapter, type_data); 5590 5591 default: 5592 return -EOPNOTSUPP; 5593 } 5594 } 5595 5596 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) 5597 { 5598 struct igc_adapter *adapter = netdev_priv(dev); 5599 5600 switch (bpf->command) { 5601 case XDP_SETUP_PROG: 5602 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); 5603 case XDP_SETUP_XSK_POOL: 5604 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, 5605 bpf->xsk.queue_id); 5606 default: 5607 return -EOPNOTSUPP; 5608 } 5609 } 5610 5611 static int igc_xdp_xmit(struct net_device *dev, int num_frames, 5612 struct xdp_frame **frames, u32 flags) 5613 { 5614 struct igc_adapter *adapter = netdev_priv(dev); 5615 int cpu = smp_processor_id(); 5616 struct netdev_queue *nq; 5617 struct igc_ring *ring; 5618 int i, drops; 5619 5620 if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) 5621 return -ENETDOWN; 5622 5623 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 5624 return -EINVAL; 5625 5626 ring = igc_xdp_get_tx_ring(adapter, cpu); 5627 nq = txring_txq(ring); 5628 5629 
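	/* The ring returned by igc_xdp_get_tx_ring() is a regular Tx ring
	 * shared with the stack, so descriptor writes must happen under
	 * the corresponding netdev_queue lock.
	 */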
__netif_tx_lock(nq, cpu); 5630 5631 drops = 0; 5632 for (i = 0; i < num_frames; i++) { 5633 int err; 5634 struct xdp_frame *xdpf = frames[i]; 5635 5636 err = igc_xdp_init_tx_descriptor(ring, xdpf); 5637 if (err) { 5638 xdp_return_frame_rx_napi(xdpf); 5639 drops++; 5640 } 5641 } 5642 5643 if (flags & XDP_XMIT_FLUSH) 5644 igc_flush_tx_descriptors(ring); 5645 5646 __netif_tx_unlock(nq); 5647 5648 return num_frames - drops; 5649 } 5650 5651 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, 5652 struct igc_q_vector *q_vector) 5653 { 5654 struct igc_hw *hw = &adapter->hw; 5655 u32 eics = 0; 5656 5657 eics |= q_vector->eims_value; 5658 wr32(IGC_EICS, eics); 5659 } 5660 5661 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 5662 { 5663 struct igc_adapter *adapter = netdev_priv(dev); 5664 struct igc_q_vector *q_vector; 5665 struct igc_ring *ring; 5666 5667 if (test_bit(__IGC_DOWN, &adapter->state)) 5668 return -ENETDOWN; 5669 5670 if (!igc_xdp_is_enabled(adapter)) 5671 return -ENXIO; 5672 5673 if (queue_id >= adapter->num_rx_queues) 5674 return -EINVAL; 5675 5676 ring = adapter->rx_ring[queue_id]; 5677 5678 if (!ring->xsk_pool) 5679 return -ENXIO; 5680 5681 q_vector = adapter->q_vector[queue_id]; 5682 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 5683 igc_trigger_rxtxq_interrupt(adapter, q_vector); 5684 5685 return 0; 5686 } 5687 5688 static const struct net_device_ops igc_netdev_ops = { 5689 .ndo_open = igc_open, 5690 .ndo_stop = igc_close, 5691 .ndo_start_xmit = igc_xmit_frame, 5692 .ndo_set_rx_mode = igc_set_rx_mode, 5693 .ndo_set_mac_address = igc_set_mac, 5694 .ndo_change_mtu = igc_change_mtu, 5695 .ndo_get_stats64 = igc_get_stats64, 5696 .ndo_fix_features = igc_fix_features, 5697 .ndo_set_features = igc_set_features, 5698 .ndo_features_check = igc_features_check, 5699 .ndo_do_ioctl = igc_ioctl, 5700 .ndo_setup_tc = igc_setup_tc, 5701 .ndo_bpf = igc_bpf, 5702 .ndo_xdp_xmit = igc_xdp_xmit, 5703 .ndo_xsk_wakeup = igc_xsk_wakeup, 5704 }; 5705 5706 /* PCIe configuration access */ 5707 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 5708 { 5709 struct igc_adapter *adapter = hw->back; 5710 5711 pci_read_config_word(adapter->pdev, reg, value); 5712 } 5713 5714 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 5715 { 5716 struct igc_adapter *adapter = hw->back; 5717 5718 pci_write_config_word(adapter->pdev, reg, *value); 5719 } 5720 5721 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 5722 { 5723 struct igc_adapter *adapter = hw->back; 5724 5725 if (!pci_is_pcie(adapter->pdev)) 5726 return -IGC_ERR_CONFIG; 5727 5728 pcie_capability_read_word(adapter->pdev, reg, value); 5729 5730 return IGC_SUCCESS; 5731 } 5732 5733 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 5734 { 5735 struct igc_adapter *adapter = hw->back; 5736 5737 if (!pci_is_pcie(adapter->pdev)) 5738 return -IGC_ERR_CONFIG; 5739 5740 pcie_capability_write_word(adapter->pdev, reg, *value); 5741 5742 return IGC_SUCCESS; 5743 } 5744 5745 u32 igc_rd32(struct igc_hw *hw, u32 reg) 5746 { 5747 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 5748 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 5749 u32 value = 0; 5750 5751 value = readl(&hw_addr[reg]); 5752 5753 /* reads should not return all F's */ 5754 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 5755 struct net_device *netdev = igc->netdev; 5756 5757 hw->hw_addr = NULL; 5758 netif_device_detach(netdev); 5759 netdev_err(netdev, "PCIe link lost, device now detached\n"); 
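	/* An all-ones read with register 0 also unreadable means the MMIO
	 * mapping is gone; warn only if the PCI core still sees the
	 * device, since all-ones is expected after surprise removal.
	 */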
5760 WARN(pci_device_is_present(igc->pdev), 5761 "igc: Failed to read reg 0x%x!\n", reg); 5762 } 5763 5764 return value; 5765 } 5766 5767 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) 5768 { 5769 struct igc_mac_info *mac = &adapter->hw.mac; 5770 5771 mac->autoneg = false; 5772 5773 /* Make sure dplx is at most 1 bit and lsb of speed is not set 5774 * for the switch() below to work 5775 */ 5776 if ((spd & 1) || (dplx & ~1)) 5777 goto err_inval; 5778 5779 switch (spd + dplx) { 5780 case SPEED_10 + DUPLEX_HALF: 5781 mac->forced_speed_duplex = ADVERTISE_10_HALF; 5782 break; 5783 case SPEED_10 + DUPLEX_FULL: 5784 mac->forced_speed_duplex = ADVERTISE_10_FULL; 5785 break; 5786 case SPEED_100 + DUPLEX_HALF: 5787 mac->forced_speed_duplex = ADVERTISE_100_HALF; 5788 break; 5789 case SPEED_100 + DUPLEX_FULL: 5790 mac->forced_speed_duplex = ADVERTISE_100_FULL; 5791 break; 5792 case SPEED_1000 + DUPLEX_FULL: 5793 mac->autoneg = true; 5794 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 5795 break; 5796 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5797 goto err_inval; 5798 case SPEED_2500 + DUPLEX_FULL: 5799 mac->autoneg = true; 5800 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 5801 break; 5802 case SPEED_2500 + DUPLEX_HALF: /* not supported */ 5803 default: 5804 goto err_inval; 5805 } 5806 5807 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 5808 adapter->hw.phy.mdix = AUTO_ALL_MODES; 5809 5810 return 0; 5811 5812 err_inval: 5813 netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); 5814 return -EINVAL; 5815 } 5816 5817 /** 5818 * igc_probe - Device Initialization Routine 5819 * @pdev: PCI device information struct 5820 * @ent: entry in igc_pci_tbl 5821 * 5822 * Returns 0 on success, negative on failure 5823 * 5824 * igc_probe initializes an adapter identified by a pci_dev structure. 5825 * The OS initialization, configuring the adapter private structure, 5826 * and a hardware reset occur. 
5827 */ 5828 static int igc_probe(struct pci_dev *pdev, 5829 const struct pci_device_id *ent) 5830 { 5831 struct igc_adapter *adapter; 5832 struct net_device *netdev; 5833 struct igc_hw *hw; 5834 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 5835 int err, pci_using_dac; 5836 5837 err = pci_enable_device_mem(pdev); 5838 if (err) 5839 return err; 5840 5841 pci_using_dac = 0; 5842 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5843 if (!err) { 5844 pci_using_dac = 1; 5845 } else { 5846 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 5847 if (err) { 5848 dev_err(&pdev->dev, 5849 "No usable DMA configuration, aborting\n"); 5850 goto err_dma; 5851 } 5852 } 5853 5854 err = pci_request_mem_regions(pdev, igc_driver_name); 5855 if (err) 5856 goto err_pci_reg; 5857 5858 pci_enable_pcie_error_reporting(pdev); 5859 5860 pci_set_master(pdev); 5861 5862 err = -ENOMEM; 5863 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 5864 IGC_MAX_TX_QUEUES); 5865 5866 if (!netdev) 5867 goto err_alloc_etherdev; 5868 5869 SET_NETDEV_DEV(netdev, &pdev->dev); 5870 5871 pci_set_drvdata(pdev, netdev); 5872 adapter = netdev_priv(netdev); 5873 adapter->netdev = netdev; 5874 adapter->pdev = pdev; 5875 hw = &adapter->hw; 5876 hw->back = adapter; 5877 adapter->port_num = hw->bus.func; 5878 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 5879 5880 err = pci_save_state(pdev); 5881 if (err) 5882 goto err_ioremap; 5883 5884 err = -EIO; 5885 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 5886 pci_resource_len(pdev, 0)); 5887 if (!adapter->io_addr) 5888 goto err_ioremap; 5889 5890 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 5891 hw->hw_addr = adapter->io_addr; 5892 5893 netdev->netdev_ops = &igc_netdev_ops; 5894 igc_ethtool_set_ops(netdev); 5895 netdev->watchdog_timeo = 5 * HZ; 5896 5897 netdev->mem_start = pci_resource_start(pdev, 0); 5898 netdev->mem_end = pci_resource_end(pdev, 0); 5899 5900 /* PCI config space info */ 5901 hw->vendor_id = pdev->vendor; 5902 hw->device_id = pdev->device; 5903 hw->revision_id = pdev->revision; 5904 hw->subsystem_vendor_id = pdev->subsystem_vendor; 5905 hw->subsystem_device_id = pdev->subsystem_device; 5906 5907 /* Copy the default MAC and PHY function pointers */ 5908 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 5909 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 5910 5911 /* Initialize skew-specific constants */ 5912 err = ei->get_invariants(hw); 5913 if (err) 5914 goto err_sw_init; 5915 5916 /* Add supported features to the features list*/ 5917 netdev->features |= NETIF_F_SG; 5918 netdev->features |= NETIF_F_TSO; 5919 netdev->features |= NETIF_F_TSO6; 5920 netdev->features |= NETIF_F_TSO_ECN; 5921 netdev->features |= NETIF_F_RXCSUM; 5922 netdev->features |= NETIF_F_HW_CSUM; 5923 netdev->features |= NETIF_F_SCTP_CRC; 5924 netdev->features |= NETIF_F_HW_TC; 5925 5926 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 5927 NETIF_F_GSO_GRE_CSUM | \ 5928 NETIF_F_GSO_IPXIP4 | \ 5929 NETIF_F_GSO_IPXIP6 | \ 5930 NETIF_F_GSO_UDP_TUNNEL | \ 5931 NETIF_F_GSO_UDP_TUNNEL_CSUM) 5932 5933 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; 5934 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; 5935 5936 /* setup the private structure */ 5937 err = igc_sw_init(adapter); 5938 if (err) 5939 goto err_sw_init; 5940 5941 /* copy netdev features into list of user selectable features */ 5942 netdev->hw_features |= NETIF_F_NTUPLE; 5943 netdev->hw_features |= 
	/* setup the private structure */
	err = igc_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= netdev->features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	if (igc_get_flash_presence_i225(hw)) {
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);

	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	/* advertise 10/100 half and full duplex, 1000 full and 2500 full */
	hw->phy.autoneg_advertised = 0xaf;

	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);

	igc_ptp_init(adapter);

	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", IFNAMSIZ);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* keep a copy of the static board info so it can be referenced later */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
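/* A sketch of the acquire/release pairing behind the err_* labels in
 * igc_probe() above (the standard kernel goto-unwind idiom):
 *
 *	pci_enable_device_mem()   <->  pci_disable_device()
 *	pci_request_mem_regions() <->  pci_release_mem_regions()
 *	alloc_etherdev_mq()       <->  free_netdev()
 *	ioremap()                 <->  iounmap()
 *	igc_get_hw_control()      <->  igc_release_hw_control()
 *
 * Each err_* label releases only what was acquired before the failing
 * call, in reverse order of acquisition, then falls through to the
 * labels below it.
 */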
/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
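/* The teardown order in igc_remove() is deliberate: __IGC_DOWN is set
 * and the timers and work items are synchronously cancelled before
 * unregister_netdev(), so a late watchdog or reset task cannot re-arm
 * itself against a netdev that is being torn down.
 */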
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, true);
}

static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}
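/* Sizing example for the copy above: a 66-byte wake packet yields
 * skb->len == 66 from skb_put(), while the memcpy_fromio() length is
 * rounded up to 68 so every MMIO read stays 32-bit aligned. The extra
 * bytes land in tailroom, since the skb was allocated with the full
 * IGC_WUPM_BYTES.
 */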
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 val;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}

static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}

static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, false);
}

static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	/* defer to the suspend scheduled above instead of idling now */
	return -EBUSY;
}
#endif /* CONFIG_PM */

static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igc_resume routine.
 **/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
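/* Recovery flow for the pci_error_handlers below: the AER core calls
 * igc_io_error_detected() first (detach, request a slot reset), then
 * igc_io_slot_reset() above (re-enable the device and restore state);
 * only when slot_reset returns PCI_ERS_RESULT_RECOVERED does the core
 * proceed to igc_io_resume() to reopen the interface.
 */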
/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igc_resume routine.
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif

static struct pci_driver igc_driver = {
	.name = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe = igc_probe,
	.remove = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};

/**
 * igc_reinit_queues - reinitialize the interrupt scheme and queues
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative on failure.
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}

/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}

static void igc_disable_rx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 rxdctl;

	rxdctl = rd32(IGC_RXDCTL(idx));
	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGC_RXDCTL_SWFLUSH;
	wr32(IGC_RXDCTL(idx), rxdctl);
}

void igc_disable_rx_ring(struct igc_ring *ring)
{
	igc_disable_rx_ring_hw(ring);
	igc_clean_rx_ring(ring);
}

void igc_enable_rx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_rx_ring(adapter, ring);

	if (ring->xsk_pool)
		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
	else
		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}

static void igc_disable_tx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 txdctl;

	txdctl = rd32(IGC_TXDCTL(idx));
	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
	txdctl |= IGC_TXDCTL_SWFLUSH;
	wr32(IGC_TXDCTL(idx), txdctl);
}

void igc_disable_tx_ring(struct igc_ring *ring)
{
	igc_disable_tx_ring_hw(ring);
	igc_clean_tx_ring(ring);
}
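/* Both *_ring_hw helpers above follow the same sequence: clear the
 * queue ENABLE bit and set SWFLUSH so the hardware drains descriptors
 * it has already fetched, after which the corresponding
 * igc_clean_*_ring() call can safely reclaim the software-side buffers.
 */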
void igc_enable_tx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_tx_ring(adapter, ring);
}

/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);
	return ret;
}

module_init(igc_init_module);

/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);
/* igc_main.c */