// SPDX-License-Identifier: GPL-2.0
/* Copyright (c)  2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
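	 * (Illustrative summary: the mark works out to the packet-buffer
	 *  size in bytes, (pba << 10), minus room for one maximum-sized Tx
	 *  frame plus one jumbo Rx frame, and is then rounded down to the
	 *  16-byte granularity the hardware expects, with the low-water mark
	 *  one 16-byte step below it.)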
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_offload_apply(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
165 */ 166 static void igc_get_hw_control(struct igc_adapter *adapter) 167 { 168 struct igc_hw *hw = &adapter->hw; 169 u32 ctrl_ext; 170 171 /* Let firmware know the driver has taken over */ 172 ctrl_ext = rd32(IGC_CTRL_EXT); 173 wr32(IGC_CTRL_EXT, 174 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 175 } 176 177 static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) 178 { 179 dma_unmap_single(dev, dma_unmap_addr(buf, dma), 180 dma_unmap_len(buf, len), DMA_TO_DEVICE); 181 182 dma_unmap_len_set(buf, len, 0); 183 } 184 185 /** 186 * igc_clean_tx_ring - Free Tx Buffers 187 * @tx_ring: ring to be cleaned 188 */ 189 static void igc_clean_tx_ring(struct igc_ring *tx_ring) 190 { 191 u16 i = tx_ring->next_to_clean; 192 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 193 u32 xsk_frames = 0; 194 195 while (i != tx_ring->next_to_use) { 196 union igc_adv_tx_desc *eop_desc, *tx_desc; 197 198 switch (tx_buffer->type) { 199 case IGC_TX_BUFFER_TYPE_XSK: 200 xsk_frames++; 201 break; 202 case IGC_TX_BUFFER_TYPE_XDP: 203 xdp_return_frame(tx_buffer->xdpf); 204 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 205 break; 206 case IGC_TX_BUFFER_TYPE_SKB: 207 dev_kfree_skb_any(tx_buffer->skb); 208 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 209 break; 210 default: 211 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 212 break; 213 } 214 215 /* check for eop_desc to determine the end of the packet */ 216 eop_desc = tx_buffer->next_to_watch; 217 tx_desc = IGC_TX_DESC(tx_ring, i); 218 219 /* unmap remaining buffers */ 220 while (tx_desc != eop_desc) { 221 tx_buffer++; 222 tx_desc++; 223 i++; 224 if (unlikely(i == tx_ring->count)) { 225 i = 0; 226 tx_buffer = tx_ring->tx_buffer_info; 227 tx_desc = IGC_TX_DESC(tx_ring, 0); 228 } 229 230 /* unmap any remaining paged data */ 231 if (dma_unmap_len(tx_buffer, len)) 232 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 233 } 234 235 tx_buffer->next_to_watch = NULL; 236 237 /* move us one more past the eop_desc for start of next pkt */ 238 tx_buffer++; 239 i++; 240 if (unlikely(i == tx_ring->count)) { 241 i = 0; 242 tx_buffer = tx_ring->tx_buffer_info; 243 } 244 } 245 246 if (tx_ring->xsk_pool && xsk_frames) 247 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 248 249 /* reset BQL for queue */ 250 netdev_tx_reset_queue(txring_txq(tx_ring)); 251 252 /* reset next_to_use and next_to_clean */ 253 tx_ring->next_to_use = 0; 254 tx_ring->next_to_clean = 0; 255 } 256 257 /** 258 * igc_free_tx_resources - Free Tx Resources per Queue 259 * @tx_ring: Tx descriptor ring for a specific queue 260 * 261 * Free all transmit software resources 262 */ 263 void igc_free_tx_resources(struct igc_ring *tx_ring) 264 { 265 igc_clean_tx_ring(tx_ring); 266 267 vfree(tx_ring->tx_buffer_info); 268 tx_ring->tx_buffer_info = NULL; 269 270 /* if not set, then don't free */ 271 if (!tx_ring->desc) 272 return; 273 274 dma_free_coherent(tx_ring->dev, tx_ring->size, 275 tx_ring->desc, tx_ring->dma); 276 277 tx_ring->desc = NULL; 278 } 279 280 /** 281 * igc_free_all_tx_resources - Free Tx Resources for All Queues 282 * @adapter: board private structure 283 * 284 * Free all transmit software resources 285 */ 286 static void igc_free_all_tx_resources(struct igc_adapter *adapter) 287 { 288 int i; 289 290 for (i = 0; i < adapter->num_tx_queues; i++) 291 igc_free_tx_resources(adapter->tx_ring[i]); 292 } 293 294 /** 295 * igc_clean_all_tx_rings - Free Tx Buffers for all queues 296 * @adapter: board private structure 297 */ 298 static void igc_clean_all_tx_rings(struct igc_adapter 
*adapter) 299 { 300 int i; 301 302 for (i = 0; i < adapter->num_tx_queues; i++) 303 if (adapter->tx_ring[i]) 304 igc_clean_tx_ring(adapter->tx_ring[i]); 305 } 306 307 /** 308 * igc_setup_tx_resources - allocate Tx resources (Descriptors) 309 * @tx_ring: tx descriptor ring (for a specific queue) to setup 310 * 311 * Return 0 on success, negative on failure 312 */ 313 int igc_setup_tx_resources(struct igc_ring *tx_ring) 314 { 315 struct net_device *ndev = tx_ring->netdev; 316 struct device *dev = tx_ring->dev; 317 int size = 0; 318 319 size = sizeof(struct igc_tx_buffer) * tx_ring->count; 320 tx_ring->tx_buffer_info = vzalloc(size); 321 if (!tx_ring->tx_buffer_info) 322 goto err; 323 324 /* round up to nearest 4K */ 325 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); 326 tx_ring->size = ALIGN(tx_ring->size, 4096); 327 328 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 329 &tx_ring->dma, GFP_KERNEL); 330 331 if (!tx_ring->desc) 332 goto err; 333 334 tx_ring->next_to_use = 0; 335 tx_ring->next_to_clean = 0; 336 337 return 0; 338 339 err: 340 vfree(tx_ring->tx_buffer_info); 341 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); 342 return -ENOMEM; 343 } 344 345 /** 346 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues 347 * @adapter: board private structure 348 * 349 * Return 0 on success, negative on failure 350 */ 351 static int igc_setup_all_tx_resources(struct igc_adapter *adapter) 352 { 353 struct net_device *dev = adapter->netdev; 354 int i, err = 0; 355 356 for (i = 0; i < adapter->num_tx_queues; i++) { 357 err = igc_setup_tx_resources(adapter->tx_ring[i]); 358 if (err) { 359 netdev_err(dev, "Error on Tx queue %u setup\n", i); 360 for (i--; i >= 0; i--) 361 igc_free_tx_resources(adapter->tx_ring[i]); 362 break; 363 } 364 } 365 366 return err; 367 } 368 369 static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) 370 { 371 u16 i = rx_ring->next_to_clean; 372 373 dev_kfree_skb(rx_ring->skb); 374 rx_ring->skb = NULL; 375 376 /* Free all the Rx ring sk_buffs */ 377 while (i != rx_ring->next_to_alloc) { 378 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 379 380 /* Invalidate cache lines that may have been written to by 381 * device so that we avoid corrupting memory. 
382 */ 383 dma_sync_single_range_for_cpu(rx_ring->dev, 384 buffer_info->dma, 385 buffer_info->page_offset, 386 igc_rx_bufsz(rx_ring), 387 DMA_FROM_DEVICE); 388 389 /* free resources associated with mapping */ 390 dma_unmap_page_attrs(rx_ring->dev, 391 buffer_info->dma, 392 igc_rx_pg_size(rx_ring), 393 DMA_FROM_DEVICE, 394 IGC_RX_DMA_ATTR); 395 __page_frag_cache_drain(buffer_info->page, 396 buffer_info->pagecnt_bias); 397 398 i++; 399 if (i == rx_ring->count) 400 i = 0; 401 } 402 } 403 404 static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) 405 { 406 struct igc_rx_buffer *bi; 407 u16 i; 408 409 for (i = 0; i < ring->count; i++) { 410 bi = &ring->rx_buffer_info[i]; 411 if (!bi->xdp) 412 continue; 413 414 xsk_buff_free(bi->xdp); 415 bi->xdp = NULL; 416 } 417 } 418 419 /** 420 * igc_clean_rx_ring - Free Rx Buffers per Queue 421 * @ring: ring to free buffers from 422 */ 423 static void igc_clean_rx_ring(struct igc_ring *ring) 424 { 425 if (ring->xsk_pool) 426 igc_clean_rx_ring_xsk_pool(ring); 427 else 428 igc_clean_rx_ring_page_shared(ring); 429 430 clear_ring_uses_large_buffer(ring); 431 432 ring->next_to_alloc = 0; 433 ring->next_to_clean = 0; 434 ring->next_to_use = 0; 435 } 436 437 /** 438 * igc_clean_all_rx_rings - Free Rx Buffers for all queues 439 * @adapter: board private structure 440 */ 441 static void igc_clean_all_rx_rings(struct igc_adapter *adapter) 442 { 443 int i; 444 445 for (i = 0; i < adapter->num_rx_queues; i++) 446 if (adapter->rx_ring[i]) 447 igc_clean_rx_ring(adapter->rx_ring[i]); 448 } 449 450 /** 451 * igc_free_rx_resources - Free Rx Resources 452 * @rx_ring: ring to clean the resources from 453 * 454 * Free all receive software resources 455 */ 456 void igc_free_rx_resources(struct igc_ring *rx_ring) 457 { 458 igc_clean_rx_ring(rx_ring); 459 460 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 461 462 vfree(rx_ring->rx_buffer_info); 463 rx_ring->rx_buffer_info = NULL; 464 465 /* if not set, then don't free */ 466 if (!rx_ring->desc) 467 return; 468 469 dma_free_coherent(rx_ring->dev, rx_ring->size, 470 rx_ring->desc, rx_ring->dma); 471 472 rx_ring->desc = NULL; 473 } 474 475 /** 476 * igc_free_all_rx_resources - Free Rx Resources for All Queues 477 * @adapter: board private structure 478 * 479 * Free all receive software resources 480 */ 481 static void igc_free_all_rx_resources(struct igc_adapter *adapter) 482 { 483 int i; 484 485 for (i = 0; i < adapter->num_rx_queues; i++) 486 igc_free_rx_resources(adapter->rx_ring[i]); 487 } 488 489 /** 490 * igc_setup_rx_resources - allocate Rx resources (Descriptors) 491 * @rx_ring: rx descriptor ring (for a specific queue) to setup 492 * 493 * Returns 0 on success, negative on failure 494 */ 495 int igc_setup_rx_resources(struct igc_ring *rx_ring) 496 { 497 struct net_device *ndev = rx_ring->netdev; 498 struct device *dev = rx_ring->dev; 499 u8 index = rx_ring->queue_index; 500 int size, desc_len, res; 501 502 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, 503 rx_ring->q_vector->napi.napi_id); 504 if (res < 0) { 505 netdev_err(ndev, "Failed to register xdp_rxq index %u\n", 506 index); 507 return res; 508 } 509 510 size = sizeof(struct igc_rx_buffer) * rx_ring->count; 511 rx_ring->rx_buffer_info = vzalloc(size); 512 if (!rx_ring->rx_buffer_info) 513 goto err; 514 515 desc_len = sizeof(union igc_adv_rx_desc); 516 517 /* Round up to nearest 4K */ 518 rx_ring->size = rx_ring->count * desc_len; 519 rx_ring->size = ALIGN(rx_ring->size, 4096); 520 521 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 522 &rx_ring->dma, 
GFP_KERNEL); 523 524 if (!rx_ring->desc) 525 goto err; 526 527 rx_ring->next_to_alloc = 0; 528 rx_ring->next_to_clean = 0; 529 rx_ring->next_to_use = 0; 530 531 return 0; 532 533 err: 534 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 535 vfree(rx_ring->rx_buffer_info); 536 rx_ring->rx_buffer_info = NULL; 537 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); 538 return -ENOMEM; 539 } 540 541 /** 542 * igc_setup_all_rx_resources - wrapper to allocate Rx resources 543 * (Descriptors) for all queues 544 * @adapter: board private structure 545 * 546 * Return 0 on success, negative on failure 547 */ 548 static int igc_setup_all_rx_resources(struct igc_adapter *adapter) 549 { 550 struct net_device *dev = adapter->netdev; 551 int i, err = 0; 552 553 for (i = 0; i < adapter->num_rx_queues; i++) { 554 err = igc_setup_rx_resources(adapter->rx_ring[i]); 555 if (err) { 556 netdev_err(dev, "Error on Rx queue %u setup\n", i); 557 for (i--; i >= 0; i--) 558 igc_free_rx_resources(adapter->rx_ring[i]); 559 break; 560 } 561 } 562 563 return err; 564 } 565 566 static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, 567 struct igc_ring *ring) 568 { 569 if (!igc_xdp_is_enabled(adapter) || 570 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) 571 return NULL; 572 573 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); 574 } 575 576 /** 577 * igc_configure_rx_ring - Configure a receive ring after Reset 578 * @adapter: board private structure 579 * @ring: receive ring to be configured 580 * 581 * Configure the Rx unit of the MAC after a reset. 582 */ 583 static void igc_configure_rx_ring(struct igc_adapter *adapter, 584 struct igc_ring *ring) 585 { 586 struct igc_hw *hw = &adapter->hw; 587 union igc_adv_rx_desc *rx_desc; 588 int reg_idx = ring->reg_idx; 589 u32 srrctl = 0, rxdctl = 0; 590 u64 rdba = ring->dma; 591 u32 buf_size; 592 593 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 594 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 595 if (ring->xsk_pool) { 596 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 597 MEM_TYPE_XSK_BUFF_POOL, 598 NULL)); 599 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); 600 } else { 601 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 602 MEM_TYPE_PAGE_SHARED, 603 NULL)); 604 } 605 606 if (igc_xdp_is_enabled(adapter)) 607 set_ring_uses_large_buffer(ring); 608 609 /* disable the queue */ 610 wr32(IGC_RXDCTL(reg_idx), 0); 611 612 /* Set DMA base address registers */ 613 wr32(IGC_RDBAL(reg_idx), 614 rdba & 0x00000000ffffffffULL); 615 wr32(IGC_RDBAH(reg_idx), rdba >> 32); 616 wr32(IGC_RDLEN(reg_idx), 617 ring->count * sizeof(union igc_adv_rx_desc)); 618 619 /* initialize head and tail */ 620 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); 621 wr32(IGC_RDH(reg_idx), 0); 622 writel(0, ring->tail); 623 624 /* reset next-to- use/clean to place SW in sync with hardware */ 625 ring->next_to_clean = 0; 626 ring->next_to_use = 0; 627 628 if (ring->xsk_pool) 629 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); 630 else if (ring_uses_large_buffer(ring)) 631 buf_size = IGC_RXBUFFER_3072; 632 else 633 buf_size = IGC_RXBUFFER_2048; 634 635 srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; 636 srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; 637 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 638 639 wr32(IGC_SRRCTL(reg_idx), srrctl); 640 641 rxdctl |= IGC_RX_PTHRESH; 642 rxdctl |= IGC_RX_HTHRESH << 8; 643 rxdctl |= IGC_RX_WTHRESH << 16; 644 645 /* initialize rx_buffer_info */ 646 memset(ring->rx_buffer_info, 0, 647 sizeof(struct 
igc_rx_buffer) * ring->count); 648 649 /* initialize Rx descriptor 0 */ 650 rx_desc = IGC_RX_DESC(ring, 0); 651 rx_desc->wb.upper.length = 0; 652 653 /* enable receive descriptor fetching */ 654 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 655 656 wr32(IGC_RXDCTL(reg_idx), rxdctl); 657 } 658 659 /** 660 * igc_configure_rx - Configure receive Unit after Reset 661 * @adapter: board private structure 662 * 663 * Configure the Rx unit of the MAC after a reset. 664 */ 665 static void igc_configure_rx(struct igc_adapter *adapter) 666 { 667 int i; 668 669 /* Setup the HW Rx Head and Tail Descriptor Pointers and 670 * the Base and Length of the Rx Descriptor Ring 671 */ 672 for (i = 0; i < adapter->num_rx_queues; i++) 673 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); 674 } 675 676 /** 677 * igc_configure_tx_ring - Configure transmit ring after Reset 678 * @adapter: board private structure 679 * @ring: tx ring to configure 680 * 681 * Configure a transmit ring after a reset. 682 */ 683 static void igc_configure_tx_ring(struct igc_adapter *adapter, 684 struct igc_ring *ring) 685 { 686 struct igc_hw *hw = &adapter->hw; 687 int reg_idx = ring->reg_idx; 688 u64 tdba = ring->dma; 689 u32 txdctl = 0; 690 691 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 692 693 /* disable the queue */ 694 wr32(IGC_TXDCTL(reg_idx), 0); 695 wrfl(); 696 mdelay(10); 697 698 wr32(IGC_TDLEN(reg_idx), 699 ring->count * sizeof(union igc_adv_tx_desc)); 700 wr32(IGC_TDBAL(reg_idx), 701 tdba & 0x00000000ffffffffULL); 702 wr32(IGC_TDBAH(reg_idx), tdba >> 32); 703 704 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); 705 wr32(IGC_TDH(reg_idx), 0); 706 writel(0, ring->tail); 707 708 txdctl |= IGC_TX_PTHRESH; 709 txdctl |= IGC_TX_HTHRESH << 8; 710 txdctl |= IGC_TX_WTHRESH << 16; 711 712 txdctl |= IGC_TXDCTL_QUEUE_ENABLE; 713 wr32(IGC_TXDCTL(reg_idx), txdctl); 714 } 715 716 /** 717 * igc_configure_tx - Configure transmit Unit after Reset 718 * @adapter: board private structure 719 * 720 * Configure the Tx unit of the MAC after a reset. 721 */ 722 static void igc_configure_tx(struct igc_adapter *adapter) 723 { 724 int i; 725 726 for (i = 0; i < adapter->num_tx_queues; i++) 727 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); 728 } 729 730 /** 731 * igc_setup_mrqc - configure the multiple receive queue control registers 732 * @adapter: Board private structure 733 */ 734 static void igc_setup_mrqc(struct igc_adapter *adapter) 735 { 736 struct igc_hw *hw = &adapter->hw; 737 u32 j, num_rx_queues; 738 u32 mrqc, rxcsum; 739 u32 rss_key[10]; 740 741 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 742 for (j = 0; j < 10; j++) 743 wr32(IGC_RSSRK(j), rss_key[j]); 744 745 num_rx_queues = adapter->rss_queues; 746 747 if (adapter->rss_indir_tbl_init != num_rx_queues) { 748 for (j = 0; j < IGC_RETA_SIZE; j++) 749 adapter->rss_indir_tbl[j] = 750 (j * num_rx_queues) / IGC_RETA_SIZE; 751 adapter->rss_indir_tbl_init = num_rx_queues; 752 } 753 igc_write_rss_indir_tbl(adapter); 754 755 /* Disable raw packet checksumming so that RSS hash is placed in 756 * descriptor on writeback. 
	 * No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	   assignment is disabled.
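 *
 * (Illustrative note: the 6-byte address is split across two registers, as
 *  the le32_to_cpup()/le16_to_cpup() calls below show; for example
 *  00:11:22:33:44:55 is written as RAL = 0x33221100 with the low 16 bits of
 *  RAH = 0x5544, and RAH additionally carries the address-valid, queue
 *  select and source/destination select bits.)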
867 */ 868 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, 869 enum igc_mac_filter_type type, 870 const u8 *addr, int queue) 871 { 872 struct net_device *dev = adapter->netdev; 873 struct igc_hw *hw = &adapter->hw; 874 u32 ral, rah; 875 876 if (WARN_ON(index >= hw->mac.rar_entry_count)) 877 return; 878 879 ral = le32_to_cpup((__le32 *)(addr)); 880 rah = le16_to_cpup((__le16 *)(addr + 4)); 881 882 if (type == IGC_MAC_FILTER_TYPE_SRC) { 883 rah &= ~IGC_RAH_ASEL_MASK; 884 rah |= IGC_RAH_ASEL_SRC_ADDR; 885 } 886 887 if (queue >= 0) { 888 rah &= ~IGC_RAH_QSEL_MASK; 889 rah |= (queue << IGC_RAH_QSEL_SHIFT); 890 rah |= IGC_RAH_QSEL_ENABLE; 891 } 892 893 rah |= IGC_RAH_AV; 894 895 wr32(IGC_RAL(index), ral); 896 wr32(IGC_RAH(index), rah); 897 898 netdev_dbg(dev, "MAC address filter set in HW: index %d", index); 899 } 900 901 /** 902 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware 903 * @adapter: Pointer to adapter where the filter should be cleared 904 * @index: Filter index 905 */ 906 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) 907 { 908 struct net_device *dev = adapter->netdev; 909 struct igc_hw *hw = &adapter->hw; 910 911 if (WARN_ON(index >= hw->mac.rar_entry_count)) 912 return; 913 914 wr32(IGC_RAL(index), 0); 915 wr32(IGC_RAH(index), 0); 916 917 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); 918 } 919 920 /* Set default MAC address for the PF in the first RAR entry */ 921 static void igc_set_default_mac_filter(struct igc_adapter *adapter) 922 { 923 struct net_device *dev = adapter->netdev; 924 u8 *addr = adapter->hw.mac.addr; 925 926 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); 927 928 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); 929 } 930 931 /** 932 * igc_set_mac - Change the Ethernet Address of the NIC 933 * @netdev: network interface device structure 934 * @p: pointer to an address structure 935 * 936 * Returns 0 on success, negative on failure 937 */ 938 static int igc_set_mac(struct net_device *netdev, void *p) 939 { 940 struct igc_adapter *adapter = netdev_priv(netdev); 941 struct igc_hw *hw = &adapter->hw; 942 struct sockaddr *addr = p; 943 944 if (!is_valid_ether_addr(addr->sa_data)) 945 return -EADDRNOTAVAIL; 946 947 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 948 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 949 950 /* set the correct pool for the new PF MAC address in entry 0 */ 951 igc_set_default_mac_filter(adapter); 952 953 return 0; 954 } 955 956 /** 957 * igc_write_mc_addr_list - write multicast addresses to MTA 958 * @netdev: network interface device structure 959 * 960 * Writes multicast address list to the MTA hash table. 961 * Returns: -ENOMEM on failure 962 * 0 on no addresses written 963 * X on writing X addresses to MTA 964 **/ 965 static int igc_write_mc_addr_list(struct net_device *netdev) 966 { 967 struct igc_adapter *adapter = netdev_priv(netdev); 968 struct igc_hw *hw = &adapter->hw; 969 struct netdev_hw_addr *ha; 970 u8 *mta_list; 971 int i; 972 973 if (netdev_mc_empty(netdev)) { 974 /* nothing to program, so clear mc list */ 975 igc_update_mc_addr_list(hw, NULL, 0); 976 return 0; 977 } 978 979 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); 980 if (!mta_list) 981 return -ENOMEM; 982 983 /* The shared function expects a packed array of only addresses. 
	 */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	u32 launchtime;

	/* FIXME: when using ETF together with taprio, we may have a
	 * case where 'delta' is larger than the cycle_time, this may
	 * cause problems if we don't read the current value of
	 * IGC_BASET, as the value written into the launchtime
	 * descriptor field may be misinterpreted.
	 */
	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);

	return cpu_to_le32(launchtime);
}

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    struct igc_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid Tx time available. Invalid times
	 * should have been handled by the upper layers.
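	 *
	 * (Illustrative example: igc_tx_launchtime() above reduces the
	 *  requested time modulo the taprio cycle, so with base_time = 0,
	 *  cycle_time = 1 ms and txtime = 2.5 ms the descriptor is
	 *  programmed with a 0.5 ms (500000 ns) offset into the current
	 *  cycle.)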
	 */
	if (tx_ring->launchtime_enable) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
		ktime_t txtime = first->skb->tstamp;

		skb_txtime_consumed(first->skb);
		context_desc->launch_time = igc_tx_launchtime(adapter,
							      txtime);
	} else {
		context_desc->launch_time = 0;
	}
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* memory barrier: make the queue stop visible before re-checking
	 * for free descriptors below
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ?
\ 1127 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ 1128 ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) 1129 1130 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 1131 { 1132 /* set type for advanced descriptor with frame checksum insertion */ 1133 u32 cmd_type = IGC_ADVTXD_DTYP_DATA | 1134 IGC_ADVTXD_DCMD_DEXT | 1135 IGC_ADVTXD_DCMD_IFCS; 1136 1137 /* set HW vlan bit if vlan is present */ 1138 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, 1139 IGC_ADVTXD_DCMD_VLE); 1140 1141 /* set segmentation bits for TSO */ 1142 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, 1143 (IGC_ADVTXD_DCMD_TSE)); 1144 1145 /* set timestamp bit if present */ 1146 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, 1147 (IGC_ADVTXD_MAC_TSTAMP)); 1148 1149 /* insert frame checksum */ 1150 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); 1151 1152 return cmd_type; 1153 } 1154 1155 static void igc_tx_olinfo_status(struct igc_ring *tx_ring, 1156 union igc_adv_tx_desc *tx_desc, 1157 u32 tx_flags, unsigned int paylen) 1158 { 1159 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; 1160 1161 /* insert L4 checksum */ 1162 olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * 1163 ((IGC_TXD_POPTS_TXSM << 8) / 1164 IGC_TX_FLAGS_CSUM); 1165 1166 /* insert IPv4 checksum */ 1167 olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * 1168 (((IGC_TXD_POPTS_IXSM << 8)) / 1169 IGC_TX_FLAGS_IPV4); 1170 1171 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 1172 } 1173 1174 static int igc_tx_map(struct igc_ring *tx_ring, 1175 struct igc_tx_buffer *first, 1176 const u8 hdr_len) 1177 { 1178 struct sk_buff *skb = first->skb; 1179 struct igc_tx_buffer *tx_buffer; 1180 union igc_adv_tx_desc *tx_desc; 1181 u32 tx_flags = first->tx_flags; 1182 skb_frag_t *frag; 1183 u16 i = tx_ring->next_to_use; 1184 unsigned int data_len, size; 1185 dma_addr_t dma; 1186 u32 cmd_type; 1187 1188 cmd_type = igc_tx_cmd_type(skb, tx_flags); 1189 tx_desc = IGC_TX_DESC(tx_ring, i); 1190 1191 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); 1192 1193 size = skb_headlen(skb); 1194 data_len = skb->data_len; 1195 1196 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 1197 1198 tx_buffer = first; 1199 1200 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1201 if (dma_mapping_error(tx_ring->dev, dma)) 1202 goto dma_error; 1203 1204 /* record length, and DMA address */ 1205 dma_unmap_len_set(tx_buffer, len, size); 1206 dma_unmap_addr_set(tx_buffer, dma, dma); 1207 1208 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1209 1210 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { 1211 tx_desc->read.cmd_type_len = 1212 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); 1213 1214 i++; 1215 tx_desc++; 1216 if (i == tx_ring->count) { 1217 tx_desc = IGC_TX_DESC(tx_ring, 0); 1218 i = 0; 1219 } 1220 tx_desc->read.olinfo_status = 0; 1221 1222 dma += IGC_MAX_DATA_PER_TXD; 1223 size -= IGC_MAX_DATA_PER_TXD; 1224 1225 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1226 } 1227 1228 if (likely(!data_len)) 1229 break; 1230 1231 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 1232 1233 i++; 1234 tx_desc++; 1235 if (i == tx_ring->count) { 1236 tx_desc = IGC_TX_DESC(tx_ring, 0); 1237 i = 0; 1238 } 1239 tx_desc->read.olinfo_status = 0; 1240 1241 size = skb_frag_size(frag); 1242 data_len -= size; 1243 1244 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, 1245 size, DMA_TO_DEVICE); 1246 1247 tx_buffer = &tx_ring->tx_buffer_info[i]; 1248 } 1249 1250 /* write last descriptor with RS and EOP 
bits */ 1251 cmd_type |= size | IGC_TXD_DCMD; 1252 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 1253 1254 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 1255 1256 /* set the timestamp */ 1257 first->time_stamp = jiffies; 1258 1259 skb_tx_timestamp(skb); 1260 1261 /* Force memory writes to complete before letting h/w know there 1262 * are new descriptors to fetch. (Only applicable for weak-ordered 1263 * memory model archs, such as IA-64). 1264 * 1265 * We also need this memory barrier to make certain all of the 1266 * status bits have been updated before next_to_watch is written. 1267 */ 1268 wmb(); 1269 1270 /* set next_to_watch value indicating a packet is present */ 1271 first->next_to_watch = tx_desc; 1272 1273 i++; 1274 if (i == tx_ring->count) 1275 i = 0; 1276 1277 tx_ring->next_to_use = i; 1278 1279 /* Make sure there is space in the ring for the next send. */ 1280 igc_maybe_stop_tx(tx_ring, DESC_NEEDED); 1281 1282 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 1283 writel(i, tx_ring->tail); 1284 } 1285 1286 return 0; 1287 dma_error: 1288 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); 1289 tx_buffer = &tx_ring->tx_buffer_info[i]; 1290 1291 /* clear dma mappings for failed tx_buffer_info map */ 1292 while (tx_buffer != first) { 1293 if (dma_unmap_len(tx_buffer, len)) 1294 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1295 1296 if (i-- == 0) 1297 i += tx_ring->count; 1298 tx_buffer = &tx_ring->tx_buffer_info[i]; 1299 } 1300 1301 if (dma_unmap_len(tx_buffer, len)) 1302 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1303 1304 dev_kfree_skb_any(tx_buffer->skb); 1305 tx_buffer->skb = NULL; 1306 1307 tx_ring->next_to_use = i; 1308 1309 return -1; 1310 } 1311 1312 static int igc_tso(struct igc_ring *tx_ring, 1313 struct igc_tx_buffer *first, 1314 u8 *hdr_len) 1315 { 1316 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 1317 struct sk_buff *skb = first->skb; 1318 union { 1319 struct iphdr *v4; 1320 struct ipv6hdr *v6; 1321 unsigned char *hdr; 1322 } ip; 1323 union { 1324 struct tcphdr *tcp; 1325 struct udphdr *udp; 1326 unsigned char *hdr; 1327 } l4; 1328 u32 paylen, l4_offset; 1329 int err; 1330 1331 if (skb->ip_summed != CHECKSUM_PARTIAL) 1332 return 0; 1333 1334 if (!skb_is_gso(skb)) 1335 return 0; 1336 1337 err = skb_cow_head(skb, 0); 1338 if (err < 0) 1339 return err; 1340 1341 ip.hdr = skb_network_header(skb); 1342 l4.hdr = skb_checksum_start(skb); 1343 1344 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1345 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 1346 1347 /* initialize outer IP header fields */ 1348 if (ip.v4->version == 4) { 1349 unsigned char *csum_start = skb_checksum_start(skb); 1350 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 1351 1352 /* IP header will have to cancel out any data that 1353 * is not a part of the outer IP header 1354 */ 1355 ip.v4->check = csum_fold(csum_partial(trans_start, 1356 csum_start - trans_start, 1357 0)); 1358 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; 1359 1360 ip.v4->tot_len = 0; 1361 first->tx_flags |= IGC_TX_FLAGS_TSO | 1362 IGC_TX_FLAGS_CSUM | 1363 IGC_TX_FLAGS_IPV4; 1364 } else { 1365 ip.v6->payload_len = 0; 1366 first->tx_flags |= IGC_TX_FLAGS_TSO | 1367 IGC_TX_FLAGS_CSUM; 1368 } 1369 1370 /* determine offset of inner transport header */ 1371 l4_offset = l4.hdr - skb->data; 1372 1373 /* remove payload length from inner checksum */ 1374 paylen = skb->len - l4_offset; 1375 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { 1376 /* compute length of segmentation header */ 1377 *hdr_len = (l4.tcp->doff * 4) + 
l4_offset; 1378 csum_replace_by_diff(&l4.tcp->check, 1379 (__force __wsum)htonl(paylen)); 1380 } else { 1381 /* compute length of segmentation header */ 1382 *hdr_len = sizeof(*l4.udp) + l4_offset; 1383 csum_replace_by_diff(&l4.udp->check, 1384 (__force __wsum)htonl(paylen)); 1385 } 1386 1387 /* update gso size and bytecount with header size */ 1388 first->gso_segs = skb_shinfo(skb)->gso_segs; 1389 first->bytecount += (first->gso_segs - 1) * *hdr_len; 1390 1391 /* MSS L4LEN IDX */ 1392 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; 1393 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; 1394 1395 /* VLAN MACLEN IPLEN */ 1396 vlan_macip_lens = l4.hdr - ip.hdr; 1397 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; 1398 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1399 1400 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, 1401 type_tucmd, mss_l4len_idx); 1402 1403 return 1; 1404 } 1405 1406 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, 1407 struct igc_ring *tx_ring) 1408 { 1409 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 1410 __be16 protocol = vlan_get_protocol(skb); 1411 struct igc_tx_buffer *first; 1412 u32 tx_flags = 0; 1413 unsigned short f; 1414 u8 hdr_len = 0; 1415 int tso = 0; 1416 1417 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, 1418 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, 1419 * + 2 desc gap to keep tail from touching head, 1420 * + 1 desc for context descriptor, 1421 * otherwise try next time 1422 */ 1423 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 1424 count += TXD_USE_COUNT(skb_frag_size( 1425 &skb_shinfo(skb)->frags[f])); 1426 1427 if (igc_maybe_stop_tx(tx_ring, count + 3)) { 1428 /* this is a hard error */ 1429 return NETDEV_TX_BUSY; 1430 } 1431 1432 /* record the location of the first descriptor for this packet */ 1433 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 1434 first->type = IGC_TX_BUFFER_TYPE_SKB; 1435 first->skb = skb; 1436 first->bytecount = skb->len; 1437 first->gso_segs = 1; 1438 1439 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1440 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 1441 1442 /* FIXME: add support for retrieving timestamps from 1443 * the other timer registers before skipping the 1444 * timestamping request. 
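	 * (Clarifying note: as written below, only one Tx timestamp request
	 *  can be in flight at a time; __IGC_PTP_TX_IN_PROGRESS guards the
	 *  single ptp_tx_skb slot, and requests arriving while it is set
	 *  are counted in tx_hwtstamp_skipped instead.)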
1445 */ 1446 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && 1447 !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, 1448 &adapter->state)) { 1449 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1450 tx_flags |= IGC_TX_FLAGS_TSTAMP; 1451 1452 adapter->ptp_tx_skb = skb_get(skb); 1453 adapter->ptp_tx_start = jiffies; 1454 } else { 1455 adapter->tx_hwtstamp_skipped++; 1456 } 1457 } 1458 1459 if (skb_vlan_tag_present(skb)) { 1460 tx_flags |= IGC_TX_FLAGS_VLAN; 1461 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); 1462 } 1463 1464 /* record initial flags and protocol */ 1465 first->tx_flags = tx_flags; 1466 first->protocol = protocol; 1467 1468 tso = igc_tso(tx_ring, first, &hdr_len); 1469 if (tso < 0) 1470 goto out_drop; 1471 else if (!tso) 1472 igc_tx_csum(tx_ring, first); 1473 1474 igc_tx_map(tx_ring, first, hdr_len); 1475 1476 return NETDEV_TX_OK; 1477 1478 out_drop: 1479 dev_kfree_skb_any(first->skb); 1480 first->skb = NULL; 1481 1482 return NETDEV_TX_OK; 1483 } 1484 1485 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, 1486 struct sk_buff *skb) 1487 { 1488 unsigned int r_idx = skb->queue_mapping; 1489 1490 if (r_idx >= adapter->num_tx_queues) 1491 r_idx = r_idx % adapter->num_tx_queues; 1492 1493 return adapter->tx_ring[r_idx]; 1494 } 1495 1496 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, 1497 struct net_device *netdev) 1498 { 1499 struct igc_adapter *adapter = netdev_priv(netdev); 1500 1501 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 1502 * in order to meet this minimum size requirement. 1503 */ 1504 if (skb->len < 17) { 1505 if (skb_padto(skb, 17)) 1506 return NETDEV_TX_OK; 1507 skb->len = 17; 1508 } 1509 1510 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); 1511 } 1512 1513 static void igc_rx_checksum(struct igc_ring *ring, 1514 union igc_adv_rx_desc *rx_desc, 1515 struct sk_buff *skb) 1516 { 1517 skb_checksum_none_assert(skb); 1518 1519 /* Ignore Checksum bit is set */ 1520 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) 1521 return; 1522 1523 /* Rx checksum disabled via ethtool */ 1524 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 1525 return; 1526 1527 /* TCP/UDP checksum error bit is set */ 1528 if (igc_test_staterr(rx_desc, 1529 IGC_RXDEXT_STATERR_L4E | 1530 IGC_RXDEXT_STATERR_IPE)) { 1531 /* work around errata with sctp packets where the TCPE aka 1532 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 1533 * packets (aka let the stack check the crc32c) 1534 */ 1535 if (!(skb->len == 60 && 1536 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { 1537 u64_stats_update_begin(&ring->rx_syncp); 1538 ring->rx_stats.csum_err++; 1539 u64_stats_update_end(&ring->rx_syncp); 1540 } 1541 /* let the stack verify checksum errors */ 1542 return; 1543 } 1544 /* It must be a TCP or UDP packet with a valid checksum */ 1545 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | 1546 IGC_RXD_STAT_UDPCS)) 1547 skb->ip_summed = CHECKSUM_UNNECESSARY; 1548 1549 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", 1550 le32_to_cpu(rx_desc->wb.upper.status_error)); 1551 } 1552 1553 static inline void igc_rx_hash(struct igc_ring *ring, 1554 union igc_adv_rx_desc *rx_desc, 1555 struct sk_buff *skb) 1556 { 1557 if (ring->netdev->features & NETIF_F_RXHASH) 1558 skb_set_hash(skb, 1559 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 1560 PKT_HASH_TYPE_L3); 1561 } 1562 1563 static void igc_rx_vlan(struct igc_ring *rx_ring, 1564 union igc_adv_rx_desc *rx_desc, 1565 struct sk_buff *skb) 1566 { 1567 
struct net_device *dev = rx_ring->netdev; 1568 u16 vid; 1569 1570 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1571 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { 1572 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && 1573 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) 1574 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); 1575 else 1576 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1577 1578 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1579 } 1580 } 1581 1582 /** 1583 * igc_process_skb_fields - Populate skb header fields from Rx descriptor 1584 * @rx_ring: rx descriptor ring packet is being transacted on 1585 * @rx_desc: pointer to the EOP Rx descriptor 1586 * @skb: pointer to current skb being populated 1587 * 1588 * This function checks the ring, descriptor, and packet information in order 1589 * to populate the hash, checksum, VLAN, protocol, and other fields within the 1590 * skb. 1591 */ 1592 static void igc_process_skb_fields(struct igc_ring *rx_ring, 1593 union igc_adv_rx_desc *rx_desc, 1594 struct sk_buff *skb) 1595 { 1596 igc_rx_hash(rx_ring, rx_desc, skb); 1597 1598 igc_rx_checksum(rx_ring, rx_desc, skb); 1599 1600 igc_rx_vlan(rx_ring, rx_desc, skb); 1601 1602 skb_record_rx_queue(skb, rx_ring->queue_index); 1603 1604 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1605 } 1606 1607 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) 1608 { 1609 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1610 struct igc_adapter *adapter = netdev_priv(netdev); 1611 struct igc_hw *hw = &adapter->hw; 1612 u32 ctrl; 1613 1614 ctrl = rd32(IGC_CTRL); 1615 1616 if (enable) { 1617 /* enable VLAN tag insert/strip */ 1618 ctrl |= IGC_CTRL_VME; 1619 } else { 1620 /* disable VLAN tag insert/strip */ 1621 ctrl &= ~IGC_CTRL_VME; 1622 } 1623 wr32(IGC_CTRL, ctrl); 1624 } 1625 1626 static void igc_restore_vlan(struct igc_adapter *adapter) 1627 { 1628 igc_vlan_mode(adapter->netdev, adapter->netdev->features); 1629 } 1630 1631 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, 1632 const unsigned int size, 1633 int *rx_buffer_pgcnt) 1634 { 1635 struct igc_rx_buffer *rx_buffer; 1636 1637 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 1638 *rx_buffer_pgcnt = 1639 #if (PAGE_SIZE < 8192) 1640 page_count(rx_buffer->page); 1641 #else 1642 0; 1643 #endif 1644 prefetchw(rx_buffer->page); 1645 1646 /* we are reusing so sync this buffer for CPU use */ 1647 dma_sync_single_range_for_cpu(rx_ring->dev, 1648 rx_buffer->dma, 1649 rx_buffer->page_offset, 1650 size, 1651 DMA_FROM_DEVICE); 1652 1653 rx_buffer->pagecnt_bias--; 1654 1655 return rx_buffer; 1656 } 1657 1658 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, 1659 unsigned int truesize) 1660 { 1661 #if (PAGE_SIZE < 8192) 1662 buffer->page_offset ^= truesize; 1663 #else 1664 buffer->page_offset += truesize; 1665 #endif 1666 } 1667 1668 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, 1669 unsigned int size) 1670 { 1671 unsigned int truesize; 1672 1673 #if (PAGE_SIZE < 8192) 1674 truesize = igc_rx_pg_size(ring) / 2; 1675 #else 1676 truesize = ring_uses_build_skb(ring) ? 
1677 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1678 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1679 SKB_DATA_ALIGN(size); 1680 #endif 1681 return truesize; 1682 } 1683 1684 /** 1685 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff 1686 * @rx_ring: rx descriptor ring to transact packets on 1687 * @rx_buffer: buffer containing page to add 1688 * @skb: sk_buff to place the data into 1689 * @size: size of buffer to be added 1690 * 1691 * This function will add the data contained in rx_buffer->page to the skb. 1692 */ 1693 static void igc_add_rx_frag(struct igc_ring *rx_ring, 1694 struct igc_rx_buffer *rx_buffer, 1695 struct sk_buff *skb, 1696 unsigned int size) 1697 { 1698 unsigned int truesize; 1699 1700 #if (PAGE_SIZE < 8192) 1701 truesize = igc_rx_pg_size(rx_ring) / 2; 1702 #else 1703 truesize = ring_uses_build_skb(rx_ring) ? 1704 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1705 SKB_DATA_ALIGN(size); 1706 #endif 1707 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1708 rx_buffer->page_offset, size, truesize); 1709 1710 igc_rx_buffer_flip(rx_buffer, truesize); 1711 } 1712 1713 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, 1714 struct igc_rx_buffer *rx_buffer, 1715 union igc_adv_rx_desc *rx_desc, 1716 unsigned int size) 1717 { 1718 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 1719 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1720 struct sk_buff *skb; 1721 1722 /* prefetch first cache line of first page */ 1723 net_prefetch(va); 1724 1725 /* build an skb around the page buffer */ 1726 skb = build_skb(va - IGC_SKB_PAD, truesize); 1727 if (unlikely(!skb)) 1728 return NULL; 1729 1730 /* update pointers within the skb to store the data */ 1731 skb_reserve(skb, IGC_SKB_PAD); 1732 __skb_put(skb, size); 1733 1734 igc_rx_buffer_flip(rx_buffer, truesize); 1735 return skb; 1736 } 1737 1738 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, 1739 struct igc_rx_buffer *rx_buffer, 1740 struct xdp_buff *xdp, 1741 ktime_t timestamp) 1742 { 1743 unsigned int size = xdp->data_end - xdp->data; 1744 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1745 void *va = xdp->data; 1746 unsigned int headlen; 1747 struct sk_buff *skb; 1748 1749 /* prefetch first cache line of first page */ 1750 net_prefetch(va); 1751 1752 /* allocate a skb to store the frags */ 1753 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); 1754 if (unlikely(!skb)) 1755 return NULL; 1756 1757 if (timestamp) 1758 skb_hwtstamps(skb)->hwtstamp = timestamp; 1759 1760 /* Determine available headroom for copy */ 1761 headlen = size; 1762 if (headlen > IGC_RX_HDR_LEN) 1763 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); 1764 1765 /* align pull length to size of long to optimize memcpy performance */ 1766 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 1767 1768 /* update all of the pointers */ 1769 size -= headlen; 1770 if (size) { 1771 skb_add_rx_frag(skb, 0, rx_buffer->page, 1772 (va + headlen) - page_address(rx_buffer->page), 1773 size, truesize); 1774 igc_rx_buffer_flip(rx_buffer, truesize); 1775 } else { 1776 rx_buffer->pagecnt_bias++; 1777 } 1778 1779 return skb; 1780 } 1781 1782 /** 1783 * igc_reuse_rx_page - page flip buffer and store it back on the ring 1784 * @rx_ring: rx descriptor ring to store buffers on 1785 * @old_buff: donor buffer to have page reused 1786 * 1787 * Synchronizes page for reuse by the adapter 1788 */ 1789 static void igc_reuse_rx_page(struct igc_ring *rx_ring, 1790 struct 
igc_rx_buffer *old_buff) 1791 { 1792 u16 nta = rx_ring->next_to_alloc; 1793 struct igc_rx_buffer *new_buff; 1794 1795 new_buff = &rx_ring->rx_buffer_info[nta]; 1796 1797 /* update, and store next to alloc */ 1798 nta++; 1799 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 1800 1801 /* Transfer page from old buffer to new buffer. 1802 * Move each member individually to avoid possible store 1803 * forwarding stalls. 1804 */ 1805 new_buff->dma = old_buff->dma; 1806 new_buff->page = old_buff->page; 1807 new_buff->page_offset = old_buff->page_offset; 1808 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1809 } 1810 1811 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, 1812 int rx_buffer_pgcnt) 1813 { 1814 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1815 struct page *page = rx_buffer->page; 1816 1817 /* avoid re-using remote and pfmemalloc pages */ 1818 if (!dev_page_is_reusable(page)) 1819 return false; 1820 1821 #if (PAGE_SIZE < 8192) 1822 /* if we are only owner of page we can reuse it */ 1823 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 1824 return false; 1825 #else 1826 #define IGC_LAST_OFFSET \ 1827 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) 1828 1829 if (rx_buffer->page_offset > IGC_LAST_OFFSET) 1830 return false; 1831 #endif 1832 1833 /* If we have drained the page fragment pool we need to update 1834 * the pagecnt_bias and page count so that we fully restock the 1835 * number of references the driver holds. 1836 */ 1837 if (unlikely(pagecnt_bias == 1)) { 1838 page_ref_add(page, USHRT_MAX - 1); 1839 rx_buffer->pagecnt_bias = USHRT_MAX; 1840 } 1841 1842 return true; 1843 } 1844 1845 /** 1846 * igc_is_non_eop - process handling of non-EOP buffers 1847 * @rx_ring: Rx ring being processed 1848 * @rx_desc: Rx descriptor for current buffer 1849 * 1850 * This function updates next to clean. If the buffer is an EOP buffer 1851 * this function exits returning false, otherwise it will place the 1852 * sk_buff in the next buffer to be chained and return true indicating 1853 * that this is in fact a non-EOP buffer. 1854 */ 1855 static bool igc_is_non_eop(struct igc_ring *rx_ring, 1856 union igc_adv_rx_desc *rx_desc) 1857 { 1858 u32 ntc = rx_ring->next_to_clean + 1; 1859 1860 /* fetch, update, and store next to clean */ 1861 ntc = (ntc < rx_ring->count) ? ntc : 0; 1862 rx_ring->next_to_clean = ntc; 1863 1864 prefetch(IGC_RX_DESC(rx_ring, ntc)); 1865 1866 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) 1867 return false; 1868 1869 return true; 1870 } 1871 1872 /** 1873 * igc_cleanup_headers - Correct corrupted or empty headers 1874 * @rx_ring: rx descriptor ring packet is being transacted on 1875 * @rx_desc: pointer to the EOP Rx descriptor 1876 * @skb: pointer to current skb being fixed 1877 * 1878 * Address the case where we are pulling data in on pages only 1879 * and as such no data is present in the skb header. 1880 * 1881 * In addition if skb is not at least 60 bytes we need to pad it so that 1882 * it is large enough to qualify as a valid Ethernet frame. 1883 * 1884 * Returns true if an error was encountered and skb was freed. 
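 *
 * (Clarifying note: per the body below, a frame flagged with the RXE error
 *  bit is dropped unless the interface has NETIF_F_RXALL set, in which case
 *  it is still passed up; undersized frames are padded to the minimum
 *  Ethernet length by eth_skb_pad().)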
1885 */ 1886 static bool igc_cleanup_headers(struct igc_ring *rx_ring, 1887 union igc_adv_rx_desc *rx_desc, 1888 struct sk_buff *skb) 1889 { 1890 /* XDP packets use error pointer so abort at this point */ 1891 if (IS_ERR(skb)) 1892 return true; 1893 1894 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { 1895 struct net_device *netdev = rx_ring->netdev; 1896 1897 if (!(netdev->features & NETIF_F_RXALL)) { 1898 dev_kfree_skb_any(skb); 1899 return true; 1900 } 1901 } 1902 1903 /* if eth_skb_pad returns an error the skb was freed */ 1904 if (eth_skb_pad(skb)) 1905 return true; 1906 1907 return false; 1908 } 1909 1910 static void igc_put_rx_buffer(struct igc_ring *rx_ring, 1911 struct igc_rx_buffer *rx_buffer, 1912 int rx_buffer_pgcnt) 1913 { 1914 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 1915 /* hand second half of page back to the ring */ 1916 igc_reuse_rx_page(rx_ring, rx_buffer); 1917 } else { 1918 /* We are not reusing the buffer so unmap it and free 1919 * any references we are holding to it 1920 */ 1921 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 1922 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 1923 IGC_RX_DMA_ATTR); 1924 __page_frag_cache_drain(rx_buffer->page, 1925 rx_buffer->pagecnt_bias); 1926 } 1927 1928 /* clear contents of rx_buffer */ 1929 rx_buffer->page = NULL; 1930 } 1931 1932 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) 1933 { 1934 struct igc_adapter *adapter = rx_ring->q_vector->adapter; 1935 1936 if (ring_uses_build_skb(rx_ring)) 1937 return IGC_SKB_PAD; 1938 if (igc_xdp_is_enabled(adapter)) 1939 return XDP_PACKET_HEADROOM; 1940 1941 return 0; 1942 } 1943 1944 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, 1945 struct igc_rx_buffer *bi) 1946 { 1947 struct page *page = bi->page; 1948 dma_addr_t dma; 1949 1950 /* since we are recycling buffers we should seldom need to alloc */ 1951 if (likely(page)) 1952 return true; 1953 1954 /* alloc new page for storage */ 1955 page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); 1956 if (unlikely(!page)) { 1957 rx_ring->rx_stats.alloc_failed++; 1958 return false; 1959 } 1960 1961 /* map page for use */ 1962 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 1963 igc_rx_pg_size(rx_ring), 1964 DMA_FROM_DEVICE, 1965 IGC_RX_DMA_ATTR); 1966 1967 /* if mapping failed free memory back to system since 1968 * there isn't much point in holding memory we can't use 1969 */ 1970 if (dma_mapping_error(rx_ring->dev, dma)) { 1971 __free_page(page); 1972 1973 rx_ring->rx_stats.alloc_failed++; 1974 return false; 1975 } 1976 1977 bi->dma = dma; 1978 bi->page = page; 1979 bi->page_offset = igc_rx_offset(rx_ring); 1980 page_ref_add(page, USHRT_MAX - 1); 1981 bi->pagecnt_bias = USHRT_MAX; 1982 1983 return true; 1984 } 1985 1986 /** 1987 * igc_alloc_rx_buffers - Replace used receive buffers; packet split 1988 * @rx_ring: rx descriptor ring 1989 * @cleaned_count: number of buffers to clean 1990 */ 1991 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) 1992 { 1993 union igc_adv_rx_desc *rx_desc; 1994 u16 i = rx_ring->next_to_use; 1995 struct igc_rx_buffer *bi; 1996 u16 bufsz; 1997 1998 /* nothing to do */ 1999 if (!cleaned_count) 2000 return; 2001 2002 rx_desc = IGC_RX_DESC(rx_ring, i); 2003 bi = &rx_ring->rx_buffer_info[i]; 2004 i -= rx_ring->count; 2005 2006 bufsz = igc_rx_bufsz(rx_ring); 2007 2008 do { 2009 if (!igc_alloc_mapped_page(rx_ring, bi)) 2010 break; 2011 2012 /* sync the buffer for use by the device */ 2013 dma_sync_single_range_for_device(rx_ring->dev, 
bi->dma, 2014 bi->page_offset, bufsz, 2015 DMA_FROM_DEVICE); 2016 2017 /* Refresh the desc even if buffer_addrs didn't change 2018 * because each write-back erases this info. 2019 */ 2020 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 2021 2022 rx_desc++; 2023 bi++; 2024 i++; 2025 if (unlikely(!i)) { 2026 rx_desc = IGC_RX_DESC(rx_ring, 0); 2027 bi = rx_ring->rx_buffer_info; 2028 i -= rx_ring->count; 2029 } 2030 2031 /* clear the length for the next_to_use descriptor */ 2032 rx_desc->wb.upper.length = 0; 2033 2034 cleaned_count--; 2035 } while (cleaned_count); 2036 2037 i += rx_ring->count; 2038 2039 if (rx_ring->next_to_use != i) { 2040 /* record the next descriptor to use */ 2041 rx_ring->next_to_use = i; 2042 2043 /* update next to alloc since we have filled the ring */ 2044 rx_ring->next_to_alloc = i; 2045 2046 /* Force memory writes to complete before letting h/w 2047 * know there are new descriptors to fetch. (Only 2048 * applicable for weak-ordered memory model archs, 2049 * such as IA-64). 2050 */ 2051 wmb(); 2052 writel(i, rx_ring->tail); 2053 } 2054 } 2055 2056 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) 2057 { 2058 union igc_adv_rx_desc *desc; 2059 u16 i = ring->next_to_use; 2060 struct igc_rx_buffer *bi; 2061 dma_addr_t dma; 2062 bool ok = true; 2063 2064 if (!count) 2065 return ok; 2066 2067 desc = IGC_RX_DESC(ring, i); 2068 bi = &ring->rx_buffer_info[i]; 2069 i -= ring->count; 2070 2071 do { 2072 bi->xdp = xsk_buff_alloc(ring->xsk_pool); 2073 if (!bi->xdp) { 2074 ok = false; 2075 break; 2076 } 2077 2078 dma = xsk_buff_xdp_get_dma(bi->xdp); 2079 desc->read.pkt_addr = cpu_to_le64(dma); 2080 2081 desc++; 2082 bi++; 2083 i++; 2084 if (unlikely(!i)) { 2085 desc = IGC_RX_DESC(ring, 0); 2086 bi = ring->rx_buffer_info; 2087 i -= ring->count; 2088 } 2089 2090 /* Clear the length for the next_to_use descriptor. */ 2091 desc->wb.upper.length = 0; 2092 2093 count--; 2094 } while (count); 2095 2096 i += ring->count; 2097 2098 if (ring->next_to_use != i) { 2099 ring->next_to_use = i; 2100 2101 /* Force memory writes to complete before letting h/w 2102 * know there are new descriptors to fetch. (Only 2103 * applicable for weak-ordered memory model archs, 2104 * such as IA-64). 2105 */ 2106 wmb(); 2107 writel(i, ring->tail); 2108 } 2109 2110 return ok; 2111 } 2112 2113 static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, 2114 struct xdp_frame *xdpf, 2115 struct igc_ring *ring) 2116 { 2117 dma_addr_t dma; 2118 2119 dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); 2120 if (dma_mapping_error(ring->dev, dma)) { 2121 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); 2122 return -ENOMEM; 2123 } 2124 2125 buffer->type = IGC_TX_BUFFER_TYPE_XDP; 2126 buffer->xdpf = xdpf; 2127 buffer->protocol = 0; 2128 buffer->bytecount = xdpf->len; 2129 buffer->gso_segs = 1; 2130 buffer->time_stamp = jiffies; 2131 dma_unmap_len_set(buffer, len, xdpf->len); 2132 dma_unmap_addr_set(buffer, dma, dma); 2133 return 0; 2134 } 2135 2136 /* This function requires __netif_tx_lock is held by the caller. 
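 * The lock serializes updates to ring->next_to_use and the BQL accounting
 * done via netdev_tx_sent_queue().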
*/ 2137 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, 2138 struct xdp_frame *xdpf) 2139 { 2140 struct igc_tx_buffer *buffer; 2141 union igc_adv_tx_desc *desc; 2142 u32 cmd_type, olinfo_status; 2143 int err; 2144 2145 if (!igc_desc_unused(ring)) 2146 return -EBUSY; 2147 2148 buffer = &ring->tx_buffer_info[ring->next_to_use]; 2149 err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); 2150 if (err) 2151 return err; 2152 2153 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2154 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2155 buffer->bytecount; 2156 olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; 2157 2158 desc = IGC_TX_DESC(ring, ring->next_to_use); 2159 desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2160 desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2161 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); 2162 2163 netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); 2164 2165 buffer->next_to_watch = desc; 2166 2167 ring->next_to_use++; 2168 if (ring->next_to_use == ring->count) 2169 ring->next_to_use = 0; 2170 2171 return 0; 2172 } 2173 2174 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, 2175 int cpu) 2176 { 2177 int index = cpu; 2178 2179 if (unlikely(index < 0)) 2180 index = 0; 2181 2182 while (index >= adapter->num_tx_queues) 2183 index -= adapter->num_tx_queues; 2184 2185 return adapter->tx_ring[index]; 2186 } 2187 2188 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) 2189 { 2190 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2191 int cpu = smp_processor_id(); 2192 struct netdev_queue *nq; 2193 struct igc_ring *ring; 2194 int res; 2195 2196 if (unlikely(!xdpf)) 2197 return -EFAULT; 2198 2199 ring = igc_xdp_get_tx_ring(adapter, cpu); 2200 nq = txring_txq(ring); 2201 2202 __netif_tx_lock(nq, cpu); 2203 res = igc_xdp_init_tx_descriptor(ring, xdpf); 2204 __netif_tx_unlock(nq); 2205 return res; 2206 } 2207 2208 /* This function assumes rcu_read_lock() is held by the caller. */ 2209 static int __igc_xdp_run_prog(struct igc_adapter *adapter, 2210 struct bpf_prog *prog, 2211 struct xdp_buff *xdp) 2212 { 2213 u32 act = bpf_prog_run_xdp(prog, xdp); 2214 2215 switch (act) { 2216 case XDP_PASS: 2217 return IGC_XDP_PASS; 2218 case XDP_TX: 2219 if (igc_xdp_xmit_back(adapter, xdp) < 0) 2220 goto out_failure; 2221 return IGC_XDP_TX; 2222 case XDP_REDIRECT: 2223 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) 2224 goto out_failure; 2225 return IGC_XDP_REDIRECT; 2227 default: 2228 bpf_warn_invalid_xdp_action(act); 2229 fallthrough; 2230 case XDP_ABORTED: 2231 out_failure: 2232 trace_xdp_exception(adapter->netdev, prog, act); 2233 fallthrough; 2234 case XDP_DROP: 2235 return IGC_XDP_CONSUMED; 2236 } 2237 } 2238 2239 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, 2240 struct xdp_buff *xdp) 2241 { 2242 struct bpf_prog *prog; 2243 int res; 2244 2245 prog = READ_ONCE(adapter->xdp_prog); 2246 if (!prog) { 2247 res = IGC_XDP_PASS; 2248 goto out; 2249 } 2250 2251 res = __igc_xdp_run_prog(adapter, prog, xdp); 2252 2253 out: 2254 return ERR_PTR(-res); 2255 } 2256 2257 /* This function assumes __netif_tx_lock is held by the caller. */ 2258 static void igc_flush_tx_descriptors(struct igc_ring *ring) 2259 { 2260 /* Once tail pointer is updated, hardware can fetch the descriptors 2261 * any time so we issue a write membar here to ensure all memory 2262 * writes are complete before the tail pointer is updated.
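 * Both the XDP_TX path (igc_finalize_xdp) and the AF_XDP zero-copy transmit
 * path (igc_xdp_xmit_zc) rely on this helper to publish their descriptors.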
2263 */ 2264 wmb(); 2265 writel(ring->next_to_use, ring->tail); 2266 } 2267 2268 static void igc_finalize_xdp(struct igc_adapter *adapter, int status) 2269 { 2270 int cpu = smp_processor_id(); 2271 struct netdev_queue *nq; 2272 struct igc_ring *ring; 2273 2274 if (status & IGC_XDP_TX) { 2275 ring = igc_xdp_get_tx_ring(adapter, cpu); 2276 nq = txring_txq(ring); 2277 2278 __netif_tx_lock(nq, cpu); 2279 igc_flush_tx_descriptors(ring); 2280 __netif_tx_unlock(nq); 2281 } 2282 2283 if (status & IGC_XDP_REDIRECT) 2284 xdp_do_flush(); 2285 } 2286 2287 static void igc_update_rx_stats(struct igc_q_vector *q_vector, 2288 unsigned int packets, unsigned int bytes) 2289 { 2290 struct igc_ring *ring = q_vector->rx.ring; 2291 2292 u64_stats_update_begin(&ring->rx_syncp); 2293 ring->rx_stats.packets += packets; 2294 ring->rx_stats.bytes += bytes; 2295 u64_stats_update_end(&ring->rx_syncp); 2296 2297 q_vector->rx.total_packets += packets; 2298 q_vector->rx.total_bytes += bytes; 2299 } 2300 2301 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) 2302 { 2303 unsigned int total_bytes = 0, total_packets = 0; 2304 struct igc_adapter *adapter = q_vector->adapter; 2305 struct igc_ring *rx_ring = q_vector->rx.ring; 2306 struct sk_buff *skb = rx_ring->skb; 2307 u16 cleaned_count = igc_desc_unused(rx_ring); 2308 int xdp_status = 0, rx_buffer_pgcnt; 2309 2310 while (likely(total_packets < budget)) { 2311 union igc_adv_rx_desc *rx_desc; 2312 struct igc_rx_buffer *rx_buffer; 2313 unsigned int size, truesize; 2314 ktime_t timestamp = 0; 2315 struct xdp_buff xdp; 2316 int pkt_offset = 0; 2317 void *pktbuf; 2318 2319 /* return some buffers to hardware, one at a time is too slow */ 2320 if (cleaned_count >= IGC_RX_BUFFER_WRITE) { 2321 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2322 cleaned_count = 0; 2323 } 2324 2325 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); 2326 size = le16_to_cpu(rx_desc->wb.upper.length); 2327 if (!size) 2328 break; 2329 2330 /* This memory barrier is needed to keep us from reading 2331 * any other fields out of the rx_desc until we know the 2332 * descriptor has been written back 2333 */ 2334 dma_rmb(); 2335 2336 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2337 truesize = igc_get_rx_frame_truesize(rx_ring, size); 2338 2339 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; 2340 2341 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { 2342 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2343 pktbuf); 2344 pkt_offset = IGC_TS_HDR_LEN; 2345 size -= IGC_TS_HDR_LEN; 2346 } 2347 2348 if (!skb) { 2349 xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); 2350 xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), 2351 igc_rx_offset(rx_ring) + pkt_offset, size, false); 2352 2353 skb = igc_xdp_run_prog(adapter, &xdp); 2354 } 2355 2356 if (IS_ERR(skb)) { 2357 unsigned int xdp_res = -PTR_ERR(skb); 2358 2359 switch (xdp_res) { 2360 case IGC_XDP_CONSUMED: 2361 rx_buffer->pagecnt_bias++; 2362 break; 2363 case IGC_XDP_TX: 2364 case IGC_XDP_REDIRECT: 2365 igc_rx_buffer_flip(rx_buffer, truesize); 2366 xdp_status |= xdp_res; 2367 break; 2368 } 2369 2370 total_packets++; 2371 total_bytes += size; 2372 } else if (skb) 2373 igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 2374 else if (ring_uses_build_skb(rx_ring)) 2375 skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); 2376 else 2377 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, 2378 timestamp); 2379 2380 /* exit if we failed to retrieve a buffer */ 2381 if (!skb) { 2382 
rx_ring->rx_stats.alloc_failed++; 2383 rx_buffer->pagecnt_bias++; 2384 break; 2385 } 2386 2387 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2388 cleaned_count++; 2389 2390 /* fetch next buffer in frame if non-eop */ 2391 if (igc_is_non_eop(rx_ring, rx_desc)) 2392 continue; 2393 2394 /* verify the packet layout is correct */ 2395 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 2396 skb = NULL; 2397 continue; 2398 } 2399 2400 /* probably a little skewed due to removing CRC */ 2401 total_bytes += skb->len; 2402 2403 /* populate checksum, VLAN, and protocol */ 2404 igc_process_skb_fields(rx_ring, rx_desc, skb); 2405 2406 napi_gro_receive(&q_vector->napi, skb); 2407 2408 /* reset skb pointer */ 2409 skb = NULL; 2410 2411 /* update budget accounting */ 2412 total_packets++; 2413 } 2414 2415 if (xdp_status) 2416 igc_finalize_xdp(adapter, xdp_status); 2417 2418 /* place incomplete frames back on ring for completion */ 2419 rx_ring->skb = skb; 2420 2421 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2422 2423 if (cleaned_count) 2424 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2425 2426 return total_packets; 2427 } 2428 2429 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, 2430 struct xdp_buff *xdp) 2431 { 2432 unsigned int metasize = xdp->data - xdp->data_meta; 2433 unsigned int datasize = xdp->data_end - xdp->data; 2434 unsigned int totalsize = metasize + datasize; 2435 struct sk_buff *skb; 2436 2437 skb = __napi_alloc_skb(&ring->q_vector->napi, 2438 xdp->data_end - xdp->data_hard_start, 2439 GFP_ATOMIC | __GFP_NOWARN); 2440 if (unlikely(!skb)) 2441 return NULL; 2442 2443 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); 2444 memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); 2445 if (metasize) 2446 skb_metadata_set(skb, metasize); 2447 2448 return skb; 2449 } 2450 2451 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, 2452 union igc_adv_rx_desc *desc, 2453 struct xdp_buff *xdp, 2454 ktime_t timestamp) 2455 { 2456 struct igc_ring *ring = q_vector->rx.ring; 2457 struct sk_buff *skb; 2458 2459 skb = igc_construct_skb_zc(ring, xdp); 2460 if (!skb) { 2461 ring->rx_stats.alloc_failed++; 2462 return; 2463 } 2464 2465 if (timestamp) 2466 skb_hwtstamps(skb)->hwtstamp = timestamp; 2467 2468 if (igc_cleanup_headers(ring, desc, skb)) 2469 return; 2470 2471 igc_process_skb_fields(ring, desc, skb); 2472 napi_gro_receive(&q_vector->napi, skb); 2473 } 2474 2475 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) 2476 { 2477 struct igc_adapter *adapter = q_vector->adapter; 2478 struct igc_ring *ring = q_vector->rx.ring; 2479 u16 cleaned_count = igc_desc_unused(ring); 2480 int total_bytes = 0, total_packets = 0; 2481 u16 ntc = ring->next_to_clean; 2482 struct bpf_prog *prog; 2483 bool failure = false; 2484 int xdp_status = 0; 2485 2486 rcu_read_lock(); 2487 2488 prog = READ_ONCE(adapter->xdp_prog); 2489 2490 while (likely(total_packets < budget)) { 2491 union igc_adv_rx_desc *desc; 2492 struct igc_rx_buffer *bi; 2493 ktime_t timestamp = 0; 2494 unsigned int size; 2495 int res; 2496 2497 desc = IGC_RX_DESC(ring, ntc); 2498 size = le16_to_cpu(desc->wb.upper.length); 2499 if (!size) 2500 break; 2501 2502 /* This memory barrier is needed to keep us from reading 2503 * any other fields out of the rx_desc until we know the 2504 * descriptor has been written back 2505 */ 2506 dma_rmb(); 2507 2508 bi = &ring->rx_buffer_info[ntc]; 2509 2510 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { 2511 timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, 2512 bi->xdp->data); 2513 2514 bi->xdp->data += IGC_TS_HDR_LEN; 2515 2516 /* HW timestamp has been copied into local variable. Metadata 2517 * length when XDP program is called should be 0. 2518 */ 2519 bi->xdp->data_meta += IGC_TS_HDR_LEN; 2520 size -= IGC_TS_HDR_LEN; 2521 } 2522 2523 bi->xdp->data_end = bi->xdp->data + size; 2524 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); 2525 2526 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); 2527 switch (res) { 2528 case IGC_XDP_PASS: 2529 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); 2530 fallthrough; 2531 case IGC_XDP_CONSUMED: 2532 xsk_buff_free(bi->xdp); 2533 break; 2534 case IGC_XDP_TX: 2535 case IGC_XDP_REDIRECT: 2536 xdp_status |= res; 2537 break; 2538 } 2539 2540 bi->xdp = NULL; 2541 total_bytes += size; 2542 total_packets++; 2543 cleaned_count++; 2544 ntc++; 2545 if (ntc == ring->count) 2546 ntc = 0; 2547 } 2548 2549 ring->next_to_clean = ntc; 2550 rcu_read_unlock(); 2551 2552 if (cleaned_count >= IGC_RX_BUFFER_WRITE) 2553 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); 2554 2555 if (xdp_status) 2556 igc_finalize_xdp(adapter, xdp_status); 2557 2558 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2559 2560 if (xsk_uses_need_wakeup(ring->xsk_pool)) { 2561 if (failure || ring->next_to_clean == ring->next_to_use) 2562 xsk_set_rx_need_wakeup(ring->xsk_pool); 2563 else 2564 xsk_clear_rx_need_wakeup(ring->xsk_pool); 2565 return total_packets; 2566 } 2567 2568 return failure ? budget : total_packets; 2569 } 2570 2571 static void igc_update_tx_stats(struct igc_q_vector *q_vector, 2572 unsigned int packets, unsigned int bytes) 2573 { 2574 struct igc_ring *ring = q_vector->tx.ring; 2575 2576 u64_stats_update_begin(&ring->tx_syncp); 2577 ring->tx_stats.bytes += bytes; 2578 ring->tx_stats.packets += packets; 2579 u64_stats_update_end(&ring->tx_syncp); 2580 2581 q_vector->tx.total_bytes += bytes; 2582 q_vector->tx.total_packets += packets; 2583 } 2584 2585 static void igc_xdp_xmit_zc(struct igc_ring *ring) 2586 { 2587 struct xsk_buff_pool *pool = ring->xsk_pool; 2588 struct netdev_queue *nq = txring_txq(ring); 2589 union igc_adv_tx_desc *tx_desc = NULL; 2590 int cpu = smp_processor_id(); 2591 u16 ntu = ring->next_to_use; 2592 struct xdp_desc xdp_desc; 2593 u16 budget; 2594 2595 if (!netif_carrier_ok(ring->netdev)) 2596 return; 2597 2598 __netif_tx_lock(nq, cpu); 2599 2600 budget = igc_desc_unused(ring); 2601 2602 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { 2603 u32 cmd_type, olinfo_status; 2604 struct igc_tx_buffer *bi; 2605 dma_addr_t dma; 2606 2607 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2608 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2609 xdp_desc.len; 2610 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; 2611 2612 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2613 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); 2614 2615 tx_desc = IGC_TX_DESC(ring, ntu); 2616 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2617 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2618 tx_desc->read.buffer_addr = cpu_to_le64(dma); 2619 2620 bi = &ring->tx_buffer_info[ntu]; 2621 bi->type = IGC_TX_BUFFER_TYPE_XSK; 2622 bi->protocol = 0; 2623 bi->bytecount = xdp_desc.len; 2624 bi->gso_segs = 1; 2625 bi->time_stamp = jiffies; 2626 bi->next_to_watch = tx_desc; 2627 2628 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); 2629 2630 ntu++; 2631 if (ntu == ring->count) 2632 ntu = 0; 2633 } 2634 2635 ring->next_to_use = ntu; 2636 if (tx_desc) 
{ 2637 igc_flush_tx_descriptors(ring); 2638 xsk_tx_release(pool); 2639 } 2640 2641 __netif_tx_unlock(nq); 2642 } 2643 2644 /** 2645 * igc_clean_tx_irq - Reclaim resources after transmit completes 2646 * @q_vector: pointer to q_vector containing needed info 2647 * @napi_budget: Used to determine if we are in netpoll 2648 * 2649 * returns true if ring is completely cleaned 2650 */ 2651 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 2652 { 2653 struct igc_adapter *adapter = q_vector->adapter; 2654 unsigned int total_bytes = 0, total_packets = 0; 2655 unsigned int budget = q_vector->tx.work_limit; 2656 struct igc_ring *tx_ring = q_vector->tx.ring; 2657 unsigned int i = tx_ring->next_to_clean; 2658 struct igc_tx_buffer *tx_buffer; 2659 union igc_adv_tx_desc *tx_desc; 2660 u32 xsk_frames = 0; 2661 2662 if (test_bit(__IGC_DOWN, &adapter->state)) 2663 return true; 2664 2665 tx_buffer = &tx_ring->tx_buffer_info[i]; 2666 tx_desc = IGC_TX_DESC(tx_ring, i); 2667 i -= tx_ring->count; 2668 2669 do { 2670 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 2671 2672 /* if next_to_watch is not set then there is no work pending */ 2673 if (!eop_desc) 2674 break; 2675 2676 /* prevent any other reads prior to eop_desc */ 2677 smp_rmb(); 2678 2679 /* if DD is not set pending work has not been completed */ 2680 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 2681 break; 2682 2683 /* clear next_to_watch to prevent false hangs */ 2684 tx_buffer->next_to_watch = NULL; 2685 2686 /* update the statistics for this packet */ 2687 total_bytes += tx_buffer->bytecount; 2688 total_packets += tx_buffer->gso_segs; 2689 2690 switch (tx_buffer->type) { 2691 case IGC_TX_BUFFER_TYPE_XSK: 2692 xsk_frames++; 2693 break; 2694 case IGC_TX_BUFFER_TYPE_XDP: 2695 xdp_return_frame(tx_buffer->xdpf); 2696 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2697 break; 2698 case IGC_TX_BUFFER_TYPE_SKB: 2699 napi_consume_skb(tx_buffer->skb, napi_budget); 2700 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2701 break; 2702 default: 2703 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 2704 break; 2705 } 2706 2707 /* clear last DMA location and unmap remaining buffers */ 2708 while (tx_desc != eop_desc) { 2709 tx_buffer++; 2710 tx_desc++; 2711 i++; 2712 if (unlikely(!i)) { 2713 i -= tx_ring->count; 2714 tx_buffer = tx_ring->tx_buffer_info; 2715 tx_desc = IGC_TX_DESC(tx_ring, 0); 2716 } 2717 2718 /* unmap any remaining paged data */ 2719 if (dma_unmap_len(tx_buffer, len)) 2720 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2721 } 2722 2723 /* move us one more past the eop_desc for start of next pkt */ 2724 tx_buffer++; 2725 tx_desc++; 2726 i++; 2727 if (unlikely(!i)) { 2728 i -= tx_ring->count; 2729 tx_buffer = tx_ring->tx_buffer_info; 2730 tx_desc = IGC_TX_DESC(tx_ring, 0); 2731 } 2732 2733 /* issue prefetch for next Tx descriptor */ 2734 prefetch(tx_desc); 2735 2736 /* update budget accounting */ 2737 budget--; 2738 } while (likely(budget)); 2739 2740 netdev_tx_completed_queue(txring_txq(tx_ring), 2741 total_packets, total_bytes); 2742 2743 i += tx_ring->count; 2744 tx_ring->next_to_clean = i; 2745 2746 igc_update_tx_stats(q_vector, total_packets, total_bytes); 2747 2748 if (tx_ring->xsk_pool) { 2749 if (xsk_frames) 2750 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 2751 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) 2752 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); 2753 igc_xdp_xmit_zc(tx_ring); 2754 } 2755 2756 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 2757 struct 
igc_hw *hw = &adapter->hw; 2758 2759 /* Detect a transmit hang in hardware, this serializes the 2760 * check with the clearing of time_stamp and movement of i 2761 */ 2762 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 2763 if (tx_buffer->next_to_watch && 2764 time_after(jiffies, tx_buffer->time_stamp + 2765 (adapter->tx_timeout_factor * HZ)) && 2766 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { 2767 /* detected Tx unit hang */ 2768 netdev_err(tx_ring->netdev, 2769 "Detected Tx Unit Hang\n" 2770 " Tx Queue <%d>\n" 2771 " TDH <%x>\n" 2772 " TDT <%x>\n" 2773 " next_to_use <%x>\n" 2774 " next_to_clean <%x>\n" 2775 "buffer_info[next_to_clean]\n" 2776 " time_stamp <%lx>\n" 2777 " next_to_watch <%p>\n" 2778 " jiffies <%lx>\n" 2779 " desc.status <%x>\n", 2780 tx_ring->queue_index, 2781 rd32(IGC_TDH(tx_ring->reg_idx)), 2782 readl(tx_ring->tail), 2783 tx_ring->next_to_use, 2784 tx_ring->next_to_clean, 2785 tx_buffer->time_stamp, 2786 tx_buffer->next_to_watch, 2787 jiffies, 2788 tx_buffer->next_to_watch->wb.status); 2789 netif_stop_subqueue(tx_ring->netdev, 2790 tx_ring->queue_index); 2791 2792 /* we are about to reset, no point in enabling stuff */ 2793 return true; 2794 } 2795 } 2796 2797 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 2798 if (unlikely(total_packets && 2799 netif_carrier_ok(tx_ring->netdev) && 2800 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 2801 /* Make sure that anybody stopping the queue after this 2802 * sees the new next_to_clean. 2803 */ 2804 smp_mb(); 2805 if (__netif_subqueue_stopped(tx_ring->netdev, 2806 tx_ring->queue_index) && 2807 !(test_bit(__IGC_DOWN, &adapter->state))) { 2808 netif_wake_subqueue(tx_ring->netdev, 2809 tx_ring->queue_index); 2810 2811 u64_stats_update_begin(&tx_ring->tx_syncp); 2812 tx_ring->tx_stats.restart_queue++; 2813 u64_stats_update_end(&tx_ring->tx_syncp); 2814 } 2815 } 2816 2817 return !!budget; 2818 } 2819 2820 static int igc_find_mac_filter(struct igc_adapter *adapter, 2821 enum igc_mac_filter_type type, const u8 *addr) 2822 { 2823 struct igc_hw *hw = &adapter->hw; 2824 int max_entries = hw->mac.rar_entry_count; 2825 u32 ral, rah; 2826 int i; 2827 2828 for (i = 0; i < max_entries; i++) { 2829 ral = rd32(IGC_RAL(i)); 2830 rah = rd32(IGC_RAH(i)); 2831 2832 if (!(rah & IGC_RAH_AV)) 2833 continue; 2834 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) 2835 continue; 2836 if ((rah & IGC_RAH_RAH_MASK) != 2837 le16_to_cpup((__le16 *)(addr + 4))) 2838 continue; 2839 if (ral != le32_to_cpup((__le32 *)(addr))) 2840 continue; 2841 2842 return i; 2843 } 2844 2845 return -1; 2846 } 2847 2848 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) 2849 { 2850 struct igc_hw *hw = &adapter->hw; 2851 int max_entries = hw->mac.rar_entry_count; 2852 u32 rah; 2853 int i; 2854 2855 for (i = 0; i < max_entries; i++) { 2856 rah = rd32(IGC_RAH(i)); 2857 2858 if (!(rah & IGC_RAH_AV)) 2859 return i; 2860 } 2861 2862 return -1; 2863 } 2864 2865 /** 2866 * igc_add_mac_filter() - Add MAC address filter 2867 * @adapter: Pointer to adapter where the filter should be added 2868 * @type: MAC address filter type (source or destination) 2869 * @addr: MAC address 2870 * @queue: If non-negative, queue assignment feature is enabled and frames 2871 * matching the filter are enqueued onto 'queue'. Otherwise, queue 2872 * assignment is disabled. 2873 * 2874 * Return: 0 in case of success, negative errno code otherwise. 
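 * An existing filter for the same address and type is updated in place;
 * otherwise the first free RAR slot is used, and -ENOSPC is returned when
 * no slot is available.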
2875 */ 2876 static int igc_add_mac_filter(struct igc_adapter *adapter, 2877 enum igc_mac_filter_type type, const u8 *addr, 2878 int queue) 2879 { 2880 struct net_device *dev = adapter->netdev; 2881 int index; 2882 2883 index = igc_find_mac_filter(adapter, type, addr); 2884 if (index >= 0) 2885 goto update_filter; 2886 2887 index = igc_get_avail_mac_filter_slot(adapter); 2888 if (index < 0) 2889 return -ENOSPC; 2890 2891 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", 2892 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2893 addr, queue); 2894 2895 update_filter: 2896 igc_set_mac_filter_hw(adapter, index, type, addr, queue); 2897 return 0; 2898 } 2899 2900 /** 2901 * igc_del_mac_filter() - Delete MAC address filter 2902 * @adapter: Pointer to adapter where the filter should be deleted from 2903 * @type: MAC address filter type (source or destination) 2904 * @addr: MAC address 2905 */ 2906 static void igc_del_mac_filter(struct igc_adapter *adapter, 2907 enum igc_mac_filter_type type, const u8 *addr) 2908 { 2909 struct net_device *dev = adapter->netdev; 2910 int index; 2911 2912 index = igc_find_mac_filter(adapter, type, addr); 2913 if (index < 0) 2914 return; 2915 2916 if (index == 0) { 2917 /* If this is the default filter, we don't actually delete it. 2918 * We just reset to its default value i.e. disable queue 2919 * assignment. 2920 */ 2921 netdev_dbg(dev, "Disable default MAC filter queue assignment"); 2922 2923 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); 2924 } else { 2925 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", 2926 index, 2927 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2928 addr); 2929 2930 igc_clear_mac_filter_hw(adapter, index); 2931 } 2932 } 2933 2934 /** 2935 * igc_add_vlan_prio_filter() - Add VLAN priority filter 2936 * @adapter: Pointer to adapter where the filter should be added 2937 * @prio: VLAN priority value 2938 * @queue: Queue number which matching frames are assigned to 2939 * 2940 * Return: 0 in case of success, negative errno code otherwise. 
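 * Only one queue can be assigned to a given VLAN priority; -EEXIST is
 * returned if the priority is already in use.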
2941 */ 2942 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, 2943 int queue) 2944 { 2945 struct net_device *dev = adapter->netdev; 2946 struct igc_hw *hw = &adapter->hw; 2947 u32 vlanpqf; 2948 2949 vlanpqf = rd32(IGC_VLANPQF); 2950 2951 if (vlanpqf & IGC_VLANPQF_VALID(prio)) { 2952 netdev_dbg(dev, "VLAN priority filter already in use\n"); 2953 return -EEXIST; 2954 } 2955 2956 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); 2957 vlanpqf |= IGC_VLANPQF_VALID(prio); 2958 2959 wr32(IGC_VLANPQF, vlanpqf); 2960 2961 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", 2962 prio, queue); 2963 return 0; 2964 } 2965 2966 /** 2967 * igc_del_vlan_prio_filter() - Delete VLAN priority filter 2968 * @adapter: Pointer to adapter where the filter should be deleted from 2969 * @prio: VLAN priority value 2970 */ 2971 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) 2972 { 2973 struct igc_hw *hw = &adapter->hw; 2974 u32 vlanpqf; 2975 2976 vlanpqf = rd32(IGC_VLANPQF); 2977 2978 vlanpqf &= ~IGC_VLANPQF_VALID(prio); 2979 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); 2980 2981 wr32(IGC_VLANPQF, vlanpqf); 2982 2983 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", 2984 prio); 2985 } 2986 2987 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) 2988 { 2989 struct igc_hw *hw = &adapter->hw; 2990 int i; 2991 2992 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 2993 u32 etqf = rd32(IGC_ETQF(i)); 2994 2995 if (!(etqf & IGC_ETQF_FILTER_ENABLE)) 2996 return i; 2997 } 2998 2999 return -1; 3000 } 3001 3002 /** 3003 * igc_add_etype_filter() - Add ethertype filter 3004 * @adapter: Pointer to adapter where the filter should be added 3005 * @etype: Ethertype value 3006 * @queue: If non-negative, queue assignment feature is enabled and frames 3007 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3008 * assignment is disabled. 3009 * 3010 * Return: 0 in case of success, negative errno code otherwise. 
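 * The filter occupies one of the MAX_ETYPE_FILTER ETQF registers; -ENOSPC is
 * returned when all of them are already enabled.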
3011 */ 3012 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, 3013 int queue) 3014 { 3015 struct igc_hw *hw = &adapter->hw; 3016 int index; 3017 u32 etqf; 3018 3019 index = igc_get_avail_etype_filter_slot(adapter); 3020 if (index < 0) 3021 return -ENOSPC; 3022 3023 etqf = rd32(IGC_ETQF(index)); 3024 3025 etqf &= ~IGC_ETQF_ETYPE_MASK; 3026 etqf |= etype; 3027 3028 if (queue >= 0) { 3029 etqf &= ~IGC_ETQF_QUEUE_MASK; 3030 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); 3031 etqf |= IGC_ETQF_QUEUE_ENABLE; 3032 } 3033 3034 etqf |= IGC_ETQF_FILTER_ENABLE; 3035 3036 wr32(IGC_ETQF(index), etqf); 3037 3038 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", 3039 etype, queue); 3040 return 0; 3041 } 3042 3043 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) 3044 { 3045 struct igc_hw *hw = &adapter->hw; 3046 int i; 3047 3048 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3049 u32 etqf = rd32(IGC_ETQF(i)); 3050 3051 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) 3052 return i; 3053 } 3054 3055 return -1; 3056 } 3057 3058 /** 3059 * igc_del_etype_filter() - Delete ethertype filter 3060 * @adapter: Pointer to adapter where the filter should be deleted from 3061 * @etype: Ethertype value 3062 */ 3063 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) 3064 { 3065 struct igc_hw *hw = &adapter->hw; 3066 int index; 3067 3068 index = igc_find_etype_filter(adapter, etype); 3069 if (index < 0) 3070 return; 3071 3072 wr32(IGC_ETQF(index), 0); 3073 3074 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", 3075 etype); 3076 } 3077 3078 static int igc_enable_nfc_rule(struct igc_adapter *adapter, 3079 const struct igc_nfc_rule *rule) 3080 { 3081 int err; 3082 3083 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3084 err = igc_add_etype_filter(adapter, rule->filter.etype, 3085 rule->action); 3086 if (err) 3087 return err; 3088 } 3089 3090 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { 3091 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3092 rule->filter.src_addr, rule->action); 3093 if (err) 3094 return err; 3095 } 3096 3097 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { 3098 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3099 rule->filter.dst_addr, rule->action); 3100 if (err) 3101 return err; 3102 } 3103 3104 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3105 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3106 VLAN_PRIO_SHIFT; 3107 3108 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); 3109 if (err) 3110 return err; 3111 } 3112 3113 return 0; 3114 } 3115 3116 static void igc_disable_nfc_rule(struct igc_adapter *adapter, 3117 const struct igc_nfc_rule *rule) 3118 { 3119 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) 3120 igc_del_etype_filter(adapter, rule->filter.etype); 3121 3122 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3123 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3124 VLAN_PRIO_SHIFT; 3125 3126 igc_del_vlan_prio_filter(adapter, prio); 3127 } 3128 3129 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3130 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3131 rule->filter.src_addr); 3132 3133 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3134 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3135 rule->filter.dst_addr); 3136 } 3137 3138 /** 3139 * igc_get_nfc_rule() - Get NFC rule 3140 * @adapter: Pointer to adapter 3141 * @location: Rule location 
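 * (the rule list is kept sorted by location, so the lookup stops early)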
3142 * 3143 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3144 * 3145 * Return: Pointer to NFC rule at @location. If not found, NULL. 3146 */ 3147 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, 3148 u32 location) 3149 { 3150 struct igc_nfc_rule *rule; 3151 3152 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { 3153 if (rule->location == location) 3154 return rule; 3155 if (rule->location > location) 3156 break; 3157 } 3158 3159 return NULL; 3160 } 3161 3162 /** 3163 * igc_del_nfc_rule() - Delete NFC rule 3164 * @adapter: Pointer to adapter 3165 * @rule: Pointer to rule to be deleted 3166 * 3167 * Disable NFC rule in hardware and delete it from adapter. 3168 * 3169 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3170 */ 3171 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3172 { 3173 igc_disable_nfc_rule(adapter, rule); 3174 3175 list_del(&rule->list); 3176 adapter->nfc_rule_count--; 3177 3178 kfree(rule); 3179 } 3180 3181 static void igc_flush_nfc_rules(struct igc_adapter *adapter) 3182 { 3183 struct igc_nfc_rule *rule, *tmp; 3184 3185 mutex_lock(&adapter->nfc_rule_lock); 3186 3187 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) 3188 igc_del_nfc_rule(adapter, rule); 3189 3190 mutex_unlock(&adapter->nfc_rule_lock); 3191 } 3192 3193 /** 3194 * igc_add_nfc_rule() - Add NFC rule 3195 * @adapter: Pointer to adapter 3196 * @rule: Pointer to rule to be added 3197 * 3198 * Enable NFC rule in hardware and add it to adapter. 3199 * 3200 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3201 * 3202 * Return: 0 on success, negative errno on failure. 3203 */ 3204 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3205 { 3206 struct igc_nfc_rule *pred, *cur; 3207 int err; 3208 3209 err = igc_enable_nfc_rule(adapter, rule); 3210 if (err) 3211 return err; 3212 3213 pred = NULL; 3214 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { 3215 if (cur->location >= rule->location) 3216 break; 3217 pred = cur; 3218 } 3219 3220 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); 3221 adapter->nfc_rule_count++; 3222 return 0; 3223 } 3224 3225 static void igc_restore_nfc_rules(struct igc_adapter *adapter) 3226 { 3227 struct igc_nfc_rule *rule; 3228 3229 mutex_lock(&adapter->nfc_rule_lock); 3230 3231 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) 3232 igc_enable_nfc_rule(adapter, rule); 3233 3234 mutex_unlock(&adapter->nfc_rule_lock); 3235 } 3236 3237 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 3238 { 3239 struct igc_adapter *adapter = netdev_priv(netdev); 3240 3241 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); 3242 } 3243 3244 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) 3245 { 3246 struct igc_adapter *adapter = netdev_priv(netdev); 3247 3248 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); 3249 return 0; 3250 } 3251 3252 /** 3253 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3254 * @netdev: network interface device structure 3255 * 3256 * The set_rx_mode entry point is called whenever the unicast or multicast 3257 * address lists or the network interface flags are updated. This routine is 3258 * responsible for configuring the hardware for proper unicast, multicast, 3259 * promiscuous mode, and all-multi behavior. 
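 * The resulting configuration is written to the RCTL register, and RLPML is
 * sized according to the largest frame the receive path is expected to see.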
3260 */ 3261 static void igc_set_rx_mode(struct net_device *netdev) 3262 { 3263 struct igc_adapter *adapter = netdev_priv(netdev); 3264 struct igc_hw *hw = &adapter->hw; 3265 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; 3266 int count; 3267 3268 /* Check for Promiscuous and All Multicast modes */ 3269 if (netdev->flags & IFF_PROMISC) { 3270 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; 3271 } else { 3272 if (netdev->flags & IFF_ALLMULTI) { 3273 rctl |= IGC_RCTL_MPE; 3274 } else { 3275 /* Write addresses to the MTA, if the attempt fails 3276 * then we should just turn on promiscuous mode so 3277 * that we can at least receive multicast traffic 3278 */ 3279 count = igc_write_mc_addr_list(netdev); 3280 if (count < 0) 3281 rctl |= IGC_RCTL_MPE; 3282 } 3283 } 3284 3285 /* Write addresses to available RAR registers, if there is not 3286 * sufficient space to store all the addresses then enable 3287 * unicast promiscuous mode 3288 */ 3289 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) 3290 rctl |= IGC_RCTL_UPE; 3291 3292 /* update state of unicast and multicast */ 3293 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 3294 wr32(IGC_RCTL, rctl); 3295 3296 #if (PAGE_SIZE < 8192) 3297 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) 3298 rlpml = IGC_MAX_FRAME_BUILD_SKB; 3299 #endif 3300 wr32(IGC_RLPML, rlpml); 3301 } 3302 3303 /** 3304 * igc_configure - configure the hardware for RX and TX 3305 * @adapter: private board structure 3306 */ 3307 static void igc_configure(struct igc_adapter *adapter) 3308 { 3309 struct net_device *netdev = adapter->netdev; 3310 int i = 0; 3311 3312 igc_get_hw_control(adapter); 3313 igc_set_rx_mode(netdev); 3314 3315 igc_restore_vlan(adapter); 3316 3317 igc_setup_tctl(adapter); 3318 igc_setup_mrqc(adapter); 3319 igc_setup_rctl(adapter); 3320 3321 igc_set_default_mac_filter(adapter); 3322 igc_restore_nfc_rules(adapter); 3323 3324 igc_configure_tx(adapter); 3325 igc_configure_rx(adapter); 3326 3327 igc_rx_fifo_flush_base(&adapter->hw); 3328 3329 /* call igc_desc_unused which always leaves 3330 * at least 1 descriptor unused to make sure 3331 * next_to_use != next_to_clean 3332 */ 3333 for (i = 0; i < adapter->num_rx_queues; i++) { 3334 struct igc_ring *ring = adapter->rx_ring[i]; 3335 3336 if (ring->xsk_pool) 3337 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 3338 else 3339 igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 3340 } 3341 } 3342 3343 /** 3344 * igc_write_ivar - configure ivar for given MSI-X vector 3345 * @hw: pointer to the HW structure 3346 * @msix_vector: vector number we are allocating to a given ring 3347 * @index: row index of IVAR register to write within IVAR table 3348 * @offset: column offset in IVAR, should be multiple of 8 3349 * 3350 * The IVAR table consists of 2 columns, 3351 * each containing a cause allocation for an Rx and Tx ring, and a 3352 * variable number of rows depending on the number of queues supported.
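 * For example, igc_assign_vector() below maps Rx queue n to row n >> 1 at
 * column offset (n & 0x1) << 4, and Tx queue n to the same row at that
 * offset plus 8.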
3353 */ 3354 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 3355 int index, int offset) 3356 { 3357 u32 ivar = array_rd32(IGC_IVAR0, index); 3358 3359 /* clear any bits that are currently set */ 3360 ivar &= ~((u32)0xFF << offset); 3361 3362 /* write vector and valid bit */ 3363 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 3364 3365 array_wr32(IGC_IVAR0, index, ivar); 3366 } 3367 3368 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 3369 { 3370 struct igc_adapter *adapter = q_vector->adapter; 3371 struct igc_hw *hw = &adapter->hw; 3372 int rx_queue = IGC_N0_QUEUE; 3373 int tx_queue = IGC_N0_QUEUE; 3374 3375 if (q_vector->rx.ring) 3376 rx_queue = q_vector->rx.ring->reg_idx; 3377 if (q_vector->tx.ring) 3378 tx_queue = q_vector->tx.ring->reg_idx; 3379 3380 switch (hw->mac.type) { 3381 case igc_i225: 3382 if (rx_queue > IGC_N0_QUEUE) 3383 igc_write_ivar(hw, msix_vector, 3384 rx_queue >> 1, 3385 (rx_queue & 0x1) << 4); 3386 if (tx_queue > IGC_N0_QUEUE) 3387 igc_write_ivar(hw, msix_vector, 3388 tx_queue >> 1, 3389 ((tx_queue & 0x1) << 4) + 8); 3390 q_vector->eims_value = BIT(msix_vector); 3391 break; 3392 default: 3393 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 3394 break; 3395 } 3396 3397 /* add q_vector eims value to global eims_enable_mask */ 3398 adapter->eims_enable_mask |= q_vector->eims_value; 3399 3400 /* configure q_vector to set itr on first interrupt */ 3401 q_vector->set_itr = 1; 3402 } 3403 3404 /** 3405 * igc_configure_msix - Configure MSI-X hardware 3406 * @adapter: Pointer to adapter structure 3407 * 3408 * igc_configure_msix sets up the hardware to properly 3409 * generate MSI-X interrupts. 3410 */ 3411 static void igc_configure_msix(struct igc_adapter *adapter) 3412 { 3413 struct igc_hw *hw = &adapter->hw; 3414 int i, vector = 0; 3415 u32 tmp; 3416 3417 adapter->eims_enable_mask = 0; 3418 3419 /* set vector for other causes, i.e. link changes */ 3420 switch (hw->mac.type) { 3421 case igc_i225: 3422 /* Turn on MSI-X capability first, or our settings 3423 * won't stick. And it will take days to debug. 
3424 */ 3425 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 3426 IGC_GPIE_PBA | IGC_GPIE_EIAME | 3427 IGC_GPIE_NSICR); 3428 3429 /* enable msix_other interrupt */ 3430 adapter->eims_other = BIT(vector); 3431 tmp = (vector++ | IGC_IVAR_VALID) << 8; 3432 3433 wr32(IGC_IVAR_MISC, tmp); 3434 break; 3435 default: 3436 /* do nothing, since nothing else supports MSI-X */ 3437 break; 3438 } /* switch (hw->mac.type) */ 3439 3440 adapter->eims_enable_mask |= adapter->eims_other; 3441 3442 for (i = 0; i < adapter->num_q_vectors; i++) 3443 igc_assign_vector(adapter->q_vector[i], vector++); 3444 3445 wrfl(); 3446 } 3447 3448 /** 3449 * igc_irq_enable - Enable default interrupt generation settings 3450 * @adapter: board private structure 3451 */ 3452 static void igc_irq_enable(struct igc_adapter *adapter) 3453 { 3454 struct igc_hw *hw = &adapter->hw; 3455 3456 if (adapter->msix_entries) { 3457 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 3458 u32 regval = rd32(IGC_EIAC); 3459 3460 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 3461 regval = rd32(IGC_EIAM); 3462 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 3463 wr32(IGC_EIMS, adapter->eims_enable_mask); 3464 wr32(IGC_IMS, ims); 3465 } else { 3466 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3467 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3468 } 3469 } 3470 3471 /** 3472 * igc_irq_disable - Mask off interrupt generation on the NIC 3473 * @adapter: board private structure 3474 */ 3475 static void igc_irq_disable(struct igc_adapter *adapter) 3476 { 3477 struct igc_hw *hw = &adapter->hw; 3478 3479 if (adapter->msix_entries) { 3480 u32 regval = rd32(IGC_EIAM); 3481 3482 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 3483 wr32(IGC_EIMC, adapter->eims_enable_mask); 3484 regval = rd32(IGC_EIAC); 3485 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 3486 } 3487 3488 wr32(IGC_IAM, 0); 3489 wr32(IGC_IMC, ~0); 3490 wrfl(); 3491 3492 if (adapter->msix_entries) { 3493 int vector = 0, i; 3494 3495 synchronize_irq(adapter->msix_entries[vector++].vector); 3496 3497 for (i = 0; i < adapter->num_q_vectors; i++) 3498 synchronize_irq(adapter->msix_entries[vector++].vector); 3499 } else { 3500 synchronize_irq(adapter->pdev->irq); 3501 } 3502 } 3503 3504 void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 3505 const u32 max_rss_queues) 3506 { 3507 /* Determine if we need to pair queues. */ 3508 /* If rss_queues > half of max_rss_queues, pair the queues in 3509 * order to conserve interrupts due to limited supply. 3510 */ 3511 if (adapter->rss_queues > (max_rss_queues / 2)) 3512 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 3513 else 3514 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 3515 } 3516 3517 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 3518 { 3519 return IGC_MAX_RX_QUEUES; 3520 } 3521 3522 static void igc_init_queue_configuration(struct igc_adapter *adapter) 3523 { 3524 u32 max_rss_queues; 3525 3526 max_rss_queues = igc_get_max_rss_queues(adapter); 3527 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 3528 3529 igc_set_flag_queue_pairs(adapter, max_rss_queues); 3530 } 3531 3532 /** 3533 * igc_reset_q_vector - Reset config for interrupt vector 3534 * @adapter: board private structure to initialize 3535 * @v_idx: Index of vector to be reset 3536 * 3537 * If NAPI is enabled it will delete any references to the 3538 * NAPI struct. This is preparation for igc_free_q_vector. 
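 * The adapter's tx_ring/rx_ring references for this vector are cleared as
 * well, so the rings can no longer be reached once the vector is freed.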
3539 */ 3540 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 3541 { 3542 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3543 3544 /* if we're coming from igc_set_interrupt_capability, the vectors are 3545 * not yet allocated 3546 */ 3547 if (!q_vector) 3548 return; 3549 3550 if (q_vector->tx.ring) 3551 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 3552 3553 if (q_vector->rx.ring) 3554 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 3555 3556 netif_napi_del(&q_vector->napi); 3557 } 3558 3559 /** 3560 * igc_free_q_vector - Free memory allocated for specific interrupt vector 3561 * @adapter: board private structure to initialize 3562 * @v_idx: Index of vector to be freed 3563 * 3564 * This function frees the memory allocated to the q_vector. 3565 */ 3566 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 3567 { 3568 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3569 3570 adapter->q_vector[v_idx] = NULL; 3571 3572 /* igc_get_stats64() might access the rings on this vector, 3573 * we must wait a grace period before freeing it. 3574 */ 3575 if (q_vector) 3576 kfree_rcu(q_vector, rcu); 3577 } 3578 3579 /** 3580 * igc_free_q_vectors - Free memory allocated for interrupt vectors 3581 * @adapter: board private structure to initialize 3582 * 3583 * This function frees the memory allocated to the q_vectors. In addition if 3584 * NAPI is enabled it will delete any references to the NAPI struct prior 3585 * to freeing the q_vector. 3586 */ 3587 static void igc_free_q_vectors(struct igc_adapter *adapter) 3588 { 3589 int v_idx = adapter->num_q_vectors; 3590 3591 adapter->num_tx_queues = 0; 3592 adapter->num_rx_queues = 0; 3593 adapter->num_q_vectors = 0; 3594 3595 while (v_idx--) { 3596 igc_reset_q_vector(adapter, v_idx); 3597 igc_free_q_vector(adapter, v_idx); 3598 } 3599 } 3600 3601 /** 3602 * igc_update_itr - update the dynamic ITR value based on statistics 3603 * @q_vector: pointer to q_vector 3604 * @ring_container: ring info to update the itr for 3605 * 3606 * Stores a new ITR value based on packets and byte 3607 * counts during the last interrupt. The advantage of per interrupt 3608 * computation is faster updates and more accurate ITR for the current 3609 * traffic pattern. Constants in this function were computed 3610 * based on theoretical maximum wire speed and thresholds were set based 3611 * on testing data as well as attempting to minimize response time 3612 * while increasing bulk throughput. 3613 * NOTE: These calculations are only valid when operating in a single- 3614 * queue environment. 
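 * The latency class stored here is translated into an interrupt rate by
 * igc_set_itr(): lowest_latency -> IGC_70K_ITR, low_latency -> IGC_20K_ITR
 * and bulk_latency -> IGC_4K_ITR.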
3615 */ 3616 static void igc_update_itr(struct igc_q_vector *q_vector, 3617 struct igc_ring_container *ring_container) 3618 { 3619 unsigned int packets = ring_container->total_packets; 3620 unsigned int bytes = ring_container->total_bytes; 3621 u8 itrval = ring_container->itr; 3622 3623 /* no packets, exit with status unchanged */ 3624 if (packets == 0) 3625 return; 3626 3627 switch (itrval) { 3628 case lowest_latency: 3629 /* handle TSO and jumbo frames */ 3630 if (bytes / packets > 8000) 3631 itrval = bulk_latency; 3632 else if ((packets < 5) && (bytes > 512)) 3633 itrval = low_latency; 3634 break; 3635 case low_latency: /* 50 usec aka 20000 ints/s */ 3636 if (bytes > 10000) { 3637 /* this if handles the TSO accounting */ 3638 if (bytes / packets > 8000) 3639 itrval = bulk_latency; 3640 else if ((packets < 10) || ((bytes / packets) > 1200)) 3641 itrval = bulk_latency; 3642 else if ((packets > 35)) 3643 itrval = lowest_latency; 3644 } else if (bytes / packets > 2000) { 3645 itrval = bulk_latency; 3646 } else if (packets <= 2 && bytes < 512) { 3647 itrval = lowest_latency; 3648 } 3649 break; 3650 case bulk_latency: /* 250 usec aka 4000 ints/s */ 3651 if (bytes > 25000) { 3652 if (packets > 35) 3653 itrval = low_latency; 3654 } else if (bytes < 1500) { 3655 itrval = low_latency; 3656 } 3657 break; 3658 } 3659 3660 /* clear work counters since we have the values we need */ 3661 ring_container->total_bytes = 0; 3662 ring_container->total_packets = 0; 3663 3664 /* write updated itr to ring container */ 3665 ring_container->itr = itrval; 3666 } 3667 3668 static void igc_set_itr(struct igc_q_vector *q_vector) 3669 { 3670 struct igc_adapter *adapter = q_vector->adapter; 3671 u32 new_itr = q_vector->itr_val; 3672 u8 current_itr = 0; 3673 3674 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3675 switch (adapter->link_speed) { 3676 case SPEED_10: 3677 case SPEED_100: 3678 current_itr = 0; 3679 new_itr = IGC_4K_ITR; 3680 goto set_itr_now; 3681 default: 3682 break; 3683 } 3684 3685 igc_update_itr(q_vector, &q_vector->tx); 3686 igc_update_itr(q_vector, &q_vector->rx); 3687 3688 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 3689 3690 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3691 if (current_itr == lowest_latency && 3692 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 3693 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 3694 current_itr = low_latency; 3695 3696 switch (current_itr) { 3697 /* counts and packets in update_itr are dependent on these numbers */ 3698 case lowest_latency: 3699 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 3700 break; 3701 case low_latency: 3702 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 3703 break; 3704 case bulk_latency: 3705 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 3706 break; 3707 default: 3708 break; 3709 } 3710 3711 set_itr_now: 3712 if (new_itr != q_vector->itr_val) { 3713 /* this attempts to bias the interrupt rate towards Bulk 3714 * by adding intermediate steps when interrupt rate is 3715 * increasing 3716 */ 3717 new_itr = new_itr > q_vector->itr_val ? 3718 max((new_itr * q_vector->itr_val) / 3719 (new_itr + (q_vector->itr_val >> 2)), 3720 new_itr) : new_itr; 3721 /* Don't write the value here; it resets the adapter's 3722 * internal timer, and causes us to delay far longer than 3723 * we should between interrupts. Instead, we write the ITR 3724 * value at the beginning of the next interrupt so the timing 3725 * ends up being correct. 
3726 */ 3727 q_vector->itr_val = new_itr; 3728 q_vector->set_itr = 1; 3729 } 3730 } 3731 3732 static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 3733 { 3734 int v_idx = adapter->num_q_vectors; 3735 3736 if (adapter->msix_entries) { 3737 pci_disable_msix(adapter->pdev); 3738 kfree(adapter->msix_entries); 3739 adapter->msix_entries = NULL; 3740 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 3741 pci_disable_msi(adapter->pdev); 3742 } 3743 3744 while (v_idx--) 3745 igc_reset_q_vector(adapter, v_idx); 3746 } 3747 3748 /** 3749 * igc_set_interrupt_capability - set MSI or MSI-X if supported 3750 * @adapter: Pointer to adapter structure 3751 * @msix: boolean value for MSI-X capability 3752 * 3753 * Attempt to configure interrupts using the best available 3754 * capabilities of the hardware and kernel. 3755 */ 3756 static void igc_set_interrupt_capability(struct igc_adapter *adapter, 3757 bool msix) 3758 { 3759 int numvecs, i; 3760 int err; 3761 3762 if (!msix) 3763 goto msi_only; 3764 adapter->flags |= IGC_FLAG_HAS_MSIX; 3765 3766 /* Number of supported queues. */ 3767 adapter->num_rx_queues = adapter->rss_queues; 3768 3769 adapter->num_tx_queues = adapter->rss_queues; 3770 3771 /* start with one vector for every Rx queue */ 3772 numvecs = adapter->num_rx_queues; 3773 3774 /* if Tx handler is separate add 1 for every Tx queue */ 3775 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 3776 numvecs += adapter->num_tx_queues; 3777 3778 /* store the number of vectors reserved for queues */ 3779 adapter->num_q_vectors = numvecs; 3780 3781 /* add 1 vector for link status interrupts */ 3782 numvecs++; 3783 3784 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 3785 GFP_KERNEL); 3786 3787 if (!adapter->msix_entries) 3788 return; 3789 3790 /* populate entry values */ 3791 for (i = 0; i < numvecs; i++) 3792 adapter->msix_entries[i].entry = i; 3793 3794 err = pci_enable_msix_range(adapter->pdev, 3795 adapter->msix_entries, 3796 numvecs, 3797 numvecs); 3798 if (err > 0) 3799 return; 3800 3801 kfree(adapter->msix_entries); 3802 adapter->msix_entries = NULL; 3803 3804 igc_reset_interrupt_capability(adapter); 3805 3806 msi_only: 3807 adapter->flags &= ~IGC_FLAG_HAS_MSIX; 3808 3809 adapter->rss_queues = 1; 3810 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 3811 adapter->num_rx_queues = 1; 3812 adapter->num_tx_queues = 1; 3813 adapter->num_q_vectors = 1; 3814 if (!pci_enable_msi(adapter->pdev)) 3815 adapter->flags |= IGC_FLAG_HAS_MSI; 3816 } 3817 3818 /** 3819 * igc_update_ring_itr - update the dynamic ITR value based on packet size 3820 * @q_vector: pointer to q_vector 3821 * 3822 * Stores a new ITR value based strictly on packet size. This 3823 * algorithm is less sophisticated than that used in igc_update_itr, 3824 * due to the difficulty of synchronizing statistics across multiple 3825 * receive rings. The divisors and thresholds used by this function 3826 * were determined based on theoretical maximum wire speed and testing 3827 * data, in order to minimize response time while increasing bulk 3828 * throughput. 3829 * NOTE: This function is called only when operating in a multiqueue 3830 * receive environment. 3831 */ 3832 static void igc_update_ring_itr(struct igc_q_vector *q_vector) 3833 { 3834 struct igc_adapter *adapter = q_vector->adapter; 3835 int new_val = q_vector->itr_val; 3836 int avg_wire_size = 0; 3837 unsigned int packets; 3838 3839 /* For non-gigabit speeds, just fix the interrupt rate at 4000 3840 * ints/sec - ITR timer value of 120 ticks.
3841 */ 3842 switch (adapter->link_speed) { 3843 case SPEED_10: 3844 case SPEED_100: 3845 new_val = IGC_4K_ITR; 3846 goto set_itr_val; 3847 default: 3848 break; 3849 } 3850 3851 packets = q_vector->rx.total_packets; 3852 if (packets) 3853 avg_wire_size = q_vector->rx.total_bytes / packets; 3854 3855 packets = q_vector->tx.total_packets; 3856 if (packets) 3857 avg_wire_size = max_t(u32, avg_wire_size, 3858 q_vector->tx.total_bytes / packets); 3859 3860 /* if avg_wire_size isn't set no work was done */ 3861 if (!avg_wire_size) 3862 goto clear_counts; 3863 3864 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 3865 avg_wire_size += 24; 3866 3867 /* Don't starve jumbo frames */ 3868 avg_wire_size = min(avg_wire_size, 3000); 3869 3870 /* Give a little boost to mid-size frames */ 3871 if (avg_wire_size > 300 && avg_wire_size < 1200) 3872 new_val = avg_wire_size / 3; 3873 else 3874 new_val = avg_wire_size / 2; 3875 3876 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3877 if (new_val < IGC_20K_ITR && 3878 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 3879 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 3880 new_val = IGC_20K_ITR; 3881 3882 set_itr_val: 3883 if (new_val != q_vector->itr_val) { 3884 q_vector->itr_val = new_val; 3885 q_vector->set_itr = 1; 3886 } 3887 clear_counts: 3888 q_vector->rx.total_bytes = 0; 3889 q_vector->rx.total_packets = 0; 3890 q_vector->tx.total_bytes = 0; 3891 q_vector->tx.total_packets = 0; 3892 } 3893 3894 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 3895 { 3896 struct igc_adapter *adapter = q_vector->adapter; 3897 struct igc_hw *hw = &adapter->hw; 3898 3899 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 3900 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 3901 if (adapter->num_q_vectors == 1) 3902 igc_set_itr(q_vector); 3903 else 3904 igc_update_ring_itr(q_vector); 3905 } 3906 3907 if (!test_bit(__IGC_DOWN, &adapter->state)) { 3908 if (adapter->msix_entries) 3909 wr32(IGC_EIMS, q_vector->eims_value); 3910 else 3911 igc_irq_enable(adapter); 3912 } 3913 } 3914 3915 static void igc_add_ring(struct igc_ring *ring, 3916 struct igc_ring_container *head) 3917 { 3918 head->ring = ring; 3919 head->count++; 3920 } 3921 3922 /** 3923 * igc_cache_ring_register - Descriptor ring to register mapping 3924 * @adapter: board private structure to initialize 3925 * 3926 * Once we know the feature-set enabled for the device, we'll cache 3927 * the register offset the descriptor ring is assigned to. 3928 */ 3929 static void igc_cache_ring_register(struct igc_adapter *adapter) 3930 { 3931 int i = 0, j = 0; 3932 3933 switch (adapter->hw.mac.type) { 3934 case igc_i225: 3935 default: 3936 for (; i < adapter->num_rx_queues; i++) 3937 adapter->rx_ring[i]->reg_idx = i; 3938 for (; j < adapter->num_tx_queues; j++) 3939 adapter->tx_ring[j]->reg_idx = j; 3940 break; 3941 } 3942 } 3943 3944 /** 3945 * igc_poll - NAPI Rx polling callback 3946 * @napi: napi polling structure 3947 * @budget: count of how many packets we should handle 3948 */ 3949 static int igc_poll(struct napi_struct *napi, int budget) 3950 { 3951 struct igc_q_vector *q_vector = container_of(napi, 3952 struct igc_q_vector, 3953 napi); 3954 struct igc_ring *rx_ring = q_vector->rx.ring; 3955 bool clean_complete = true; 3956 int work_done = 0; 3957 3958 if (q_vector->tx.ring) 3959 clean_complete = igc_clean_tx_irq(q_vector, budget); 3960 3961 if (rx_ring) { 3962 int cleaned = rx_ring->xsk_pool ? 
3963 igc_clean_rx_irq_zc(q_vector, budget) : 3964 igc_clean_rx_irq(q_vector, budget); 3965 3966 work_done += cleaned; 3967 if (cleaned >= budget) 3968 clean_complete = false; 3969 } 3970 3971 /* If all work not completed, return budget and keep polling */ 3972 if (!clean_complete) 3973 return budget; 3974 3975 /* Exit the polling mode, but don't re-enable interrupts if stack might 3976 * poll us due to busy-polling 3977 */ 3978 if (likely(napi_complete_done(napi, work_done))) 3979 igc_ring_irq_enable(q_vector); 3980 3981 return min(work_done, budget - 1); 3982 } 3983 3984 /** 3985 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 3986 * @adapter: board private structure to initialize 3987 * @v_count: q_vectors allocated on adapter, used for ring interleaving 3988 * @v_idx: index of vector in adapter struct 3989 * @txr_count: total number of Tx rings to allocate 3990 * @txr_idx: index of first Tx ring to allocate 3991 * @rxr_count: total number of Rx rings to allocate 3992 * @rxr_idx: index of first Rx ring to allocate 3993 * 3994 * We allocate one q_vector. If allocation fails we return -ENOMEM. 3995 */ 3996 static int igc_alloc_q_vector(struct igc_adapter *adapter, 3997 unsigned int v_count, unsigned int v_idx, 3998 unsigned int txr_count, unsigned int txr_idx, 3999 unsigned int rxr_count, unsigned int rxr_idx) 4000 { 4001 struct igc_q_vector *q_vector; 4002 struct igc_ring *ring; 4003 int ring_count; 4004 4005 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 4006 if (txr_count > 1 || rxr_count > 1) 4007 return -ENOMEM; 4008 4009 ring_count = txr_count + rxr_count; 4010 4011 /* allocate q_vector and rings */ 4012 q_vector = adapter->q_vector[v_idx]; 4013 if (!q_vector) 4014 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 4015 GFP_KERNEL); 4016 else 4017 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 4018 if (!q_vector) 4019 return -ENOMEM; 4020 4021 /* initialize NAPI */ 4022 netif_napi_add(adapter->netdev, &q_vector->napi, 4023 igc_poll, 64); 4024 4025 /* tie q_vector and adapter together */ 4026 adapter->q_vector[v_idx] = q_vector; 4027 q_vector->adapter = adapter; 4028 4029 /* initialize work limits */ 4030 q_vector->tx.work_limit = adapter->tx_work_limit; 4031 4032 /* initialize ITR configuration */ 4033 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 4034 q_vector->itr_val = IGC_START_ITR; 4035 4036 /* initialize pointer to rings */ 4037 ring = q_vector->ring; 4038 4039 /* initialize ITR */ 4040 if (rxr_count) { 4041 /* rx or rx/tx vector */ 4042 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 4043 q_vector->itr_val = adapter->rx_itr_setting; 4044 } else { 4045 /* tx only vector */ 4046 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 4047 q_vector->itr_val = adapter->tx_itr_setting; 4048 } 4049 4050 if (txr_count) { 4051 /* assign generic ring traits */ 4052 ring->dev = &adapter->pdev->dev; 4053 ring->netdev = adapter->netdev; 4054 4055 /* configure backlink on ring */ 4056 ring->q_vector = q_vector; 4057 4058 /* update q_vector Tx values */ 4059 igc_add_ring(ring, &q_vector->tx); 4060 4061 /* apply Tx specific ring traits */ 4062 ring->count = adapter->tx_ring_count; 4063 ring->queue_index = txr_idx; 4064 4065 /* assign ring to adapter */ 4066 adapter->tx_ring[txr_idx] = ring; 4067 4068 /* push pointer to next ring */ 4069 ring++; 4070 } 4071 4072 if (rxr_count) { 4073 /* assign generic ring traits */ 4074 ring->dev = &adapter->pdev->dev; 4075 ring->netdev = adapter->netdev; 4076 4077 /* 
configure backlink on ring */ 4078 ring->q_vector = q_vector; 4079 4080 /* update q_vector Rx values */ 4081 igc_add_ring(ring, &q_vector->rx); 4082 4083 /* apply Rx specific ring traits */ 4084 ring->count = adapter->rx_ring_count; 4085 ring->queue_index = rxr_idx; 4086 4087 /* assign ring to adapter */ 4088 adapter->rx_ring[rxr_idx] = ring; 4089 } 4090 4091 return 0; 4092 } 4093 4094 /** 4095 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 4096 * @adapter: board private structure to initialize 4097 * 4098 * We allocate one q_vector per queue interrupt. If allocation fails we 4099 * return -ENOMEM. 4100 */ 4101 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 4102 { 4103 int rxr_remaining = adapter->num_rx_queues; 4104 int txr_remaining = adapter->num_tx_queues; 4105 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 4106 int q_vectors = adapter->num_q_vectors; 4107 int err; 4108 4109 if (q_vectors >= (rxr_remaining + txr_remaining)) { 4110 for (; rxr_remaining; v_idx++) { 4111 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4112 0, 0, 1, rxr_idx); 4113 4114 if (err) 4115 goto err_out; 4116 4117 /* update counts and index */ 4118 rxr_remaining--; 4119 rxr_idx++; 4120 } 4121 } 4122 4123 for (; v_idx < q_vectors; v_idx++) { 4124 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 4125 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 4126 4127 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4128 tqpv, txr_idx, rqpv, rxr_idx); 4129 4130 if (err) 4131 goto err_out; 4132 4133 /* update counts and index */ 4134 rxr_remaining -= rqpv; 4135 txr_remaining -= tqpv; 4136 rxr_idx++; 4137 txr_idx++; 4138 } 4139 4140 return 0; 4141 4142 err_out: 4143 adapter->num_tx_queues = 0; 4144 adapter->num_rx_queues = 0; 4145 adapter->num_q_vectors = 0; 4146 4147 while (v_idx--) 4148 igc_free_q_vector(adapter, v_idx); 4149 4150 return -ENOMEM; 4151 } 4152 4153 /** 4154 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 4155 * @adapter: Pointer to adapter structure 4156 * @msix: boolean for MSI-X capability 4157 * 4158 * This function initializes the interrupts and allocates all of the queues. 4159 */ 4160 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 4161 { 4162 struct net_device *dev = adapter->netdev; 4163 int err = 0; 4164 4165 igc_set_interrupt_capability(adapter, msix); 4166 4167 err = igc_alloc_q_vectors(adapter); 4168 if (err) { 4169 netdev_err(dev, "Unable to allocate memory for vectors\n"); 4170 goto err_alloc_q_vectors; 4171 } 4172 4173 igc_cache_ring_register(adapter); 4174 4175 return 0; 4176 4177 err_alloc_q_vectors: 4178 igc_reset_interrupt_capability(adapter); 4179 return err; 4180 } 4181 4182 /** 4183 * igc_sw_init - Initialize general software structures (struct igc_adapter) 4184 * @adapter: board private structure to initialize 4185 * 4186 * igc_sw_init initializes the Adapter private data structure. 4187 * Fields are initialized based on PCI device information and 4188 * OS network device settings (MTU size). 
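 * It also allocates the queue/vector interrupt scheme via igc_init_interrupt_scheme() and leaves the adapter in the __IGC_DOWN state with interrupts disabled.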
4189 */ 4190 static int igc_sw_init(struct igc_adapter *adapter) 4191 { 4192 struct net_device *netdev = adapter->netdev; 4193 struct pci_dev *pdev = adapter->pdev; 4194 struct igc_hw *hw = &adapter->hw; 4195 4196 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 4197 4198 /* set default ring sizes */ 4199 adapter->tx_ring_count = IGC_DEFAULT_TXD; 4200 adapter->rx_ring_count = IGC_DEFAULT_RXD; 4201 4202 /* set default ITR values */ 4203 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 4204 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 4205 4206 /* set default work limits */ 4207 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 4208 4209 /* adjust max frame to be at least the size of a standard frame */ 4210 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 4211 VLAN_HLEN; 4212 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4213 4214 mutex_init(&adapter->nfc_rule_lock); 4215 INIT_LIST_HEAD(&adapter->nfc_rule_list); 4216 adapter->nfc_rule_count = 0; 4217 4218 spin_lock_init(&adapter->stats64_lock); 4219 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 4220 adapter->flags |= IGC_FLAG_HAS_MSIX; 4221 4222 igc_init_queue_configuration(adapter); 4223 4224 /* This call may decrease the number of queues */ 4225 if (igc_init_interrupt_scheme(adapter, true)) { 4226 netdev_err(netdev, "Unable to allocate memory for queues\n"); 4227 return -ENOMEM; 4228 } 4229 4230 /* Explicitly disable IRQ since the NIC can be in any state. */ 4231 igc_irq_disable(adapter); 4232 4233 set_bit(__IGC_DOWN, &adapter->state); 4234 4235 return 0; 4236 } 4237 4238 /** 4239 * igc_up - Open the interface and prepare it to handle traffic 4240 * @adapter: board private structure 4241 */ 4242 void igc_up(struct igc_adapter *adapter) 4243 { 4244 struct igc_hw *hw = &adapter->hw; 4245 int i = 0; 4246 4247 /* hardware has been reset, we need to reload some things */ 4248 igc_configure(adapter); 4249 4250 clear_bit(__IGC_DOWN, &adapter->state); 4251 4252 for (i = 0; i < adapter->num_q_vectors; i++) 4253 napi_enable(&adapter->q_vector[i]->napi); 4254 4255 if (adapter->msix_entries) 4256 igc_configure_msix(adapter); 4257 else 4258 igc_assign_vector(adapter->q_vector[0], 0); 4259 4260 /* Clear any pending interrupts. */ 4261 rd32(IGC_ICR); 4262 igc_irq_enable(adapter); 4263 4264 netif_tx_start_all_queues(adapter->netdev); 4265 4266 /* start the watchdog. */ 4267 hw->mac.get_link_status = true; 4268 schedule_work(&adapter->watchdog_task); 4269 } 4270 4271 /** 4272 * igc_update_stats - Update the board statistics counters 4273 * @adapter: board private structure 4274 */ 4275 void igc_update_stats(struct igc_adapter *adapter) 4276 { 4277 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 4278 struct pci_dev *pdev = adapter->pdev; 4279 struct igc_hw *hw = &adapter->hw; 4280 u64 _bytes, _packets; 4281 u64 bytes, packets; 4282 unsigned int start; 4283 u32 mpc; 4284 int i; 4285 4286 /* Prevent stats update while adapter is being reset, or if the pci 4287 * connection is down. 
4288 */ 4289 if (adapter->link_speed == 0) 4290 return; 4291 if (pci_channel_offline(pdev)) 4292 return; 4293 4294 packets = 0; 4295 bytes = 0; 4296 4297 rcu_read_lock(); 4298 for (i = 0; i < adapter->num_rx_queues; i++) { 4299 struct igc_ring *ring = adapter->rx_ring[i]; 4300 u32 rqdpc = rd32(IGC_RQDPC(i)); 4301 4302 if (hw->mac.type >= igc_i225) 4303 wr32(IGC_RQDPC(i), 0); 4304 4305 if (rqdpc) { 4306 ring->rx_stats.drops += rqdpc; 4307 net_stats->rx_fifo_errors += rqdpc; 4308 } 4309 4310 do { 4311 start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 4312 _bytes = ring->rx_stats.bytes; 4313 _packets = ring->rx_stats.packets; 4314 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 4315 bytes += _bytes; 4316 packets += _packets; 4317 } 4318 4319 net_stats->rx_bytes = bytes; 4320 net_stats->rx_packets = packets; 4321 4322 packets = 0; 4323 bytes = 0; 4324 for (i = 0; i < adapter->num_tx_queues; i++) { 4325 struct igc_ring *ring = adapter->tx_ring[i]; 4326 4327 do { 4328 start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 4329 _bytes = ring->tx_stats.bytes; 4330 _packets = ring->tx_stats.packets; 4331 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 4332 bytes += _bytes; 4333 packets += _packets; 4334 } 4335 net_stats->tx_bytes = bytes; 4336 net_stats->tx_packets = packets; 4337 rcu_read_unlock(); 4338 4339 /* read stats registers */ 4340 adapter->stats.crcerrs += rd32(IGC_CRCERRS); 4341 adapter->stats.gprc += rd32(IGC_GPRC); 4342 adapter->stats.gorc += rd32(IGC_GORCL); 4343 rd32(IGC_GORCH); /* clear GORCL */ 4344 adapter->stats.bprc += rd32(IGC_BPRC); 4345 adapter->stats.mprc += rd32(IGC_MPRC); 4346 adapter->stats.roc += rd32(IGC_ROC); 4347 4348 adapter->stats.prc64 += rd32(IGC_PRC64); 4349 adapter->stats.prc127 += rd32(IGC_PRC127); 4350 adapter->stats.prc255 += rd32(IGC_PRC255); 4351 adapter->stats.prc511 += rd32(IGC_PRC511); 4352 adapter->stats.prc1023 += rd32(IGC_PRC1023); 4353 adapter->stats.prc1522 += rd32(IGC_PRC1522); 4354 adapter->stats.tlpic += rd32(IGC_TLPIC); 4355 adapter->stats.rlpic += rd32(IGC_RLPIC); 4356 adapter->stats.hgptc += rd32(IGC_HGPTC); 4357 4358 mpc = rd32(IGC_MPC); 4359 adapter->stats.mpc += mpc; 4360 net_stats->rx_fifo_errors += mpc; 4361 adapter->stats.scc += rd32(IGC_SCC); 4362 adapter->stats.ecol += rd32(IGC_ECOL); 4363 adapter->stats.mcc += rd32(IGC_MCC); 4364 adapter->stats.latecol += rd32(IGC_LATECOL); 4365 adapter->stats.dc += rd32(IGC_DC); 4366 adapter->stats.rlec += rd32(IGC_RLEC); 4367 adapter->stats.xonrxc += rd32(IGC_XONRXC); 4368 adapter->stats.xontxc += rd32(IGC_XONTXC); 4369 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 4370 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 4371 adapter->stats.fcruc += rd32(IGC_FCRUC); 4372 adapter->stats.gptc += rd32(IGC_GPTC); 4373 adapter->stats.gotc += rd32(IGC_GOTCL); 4374 rd32(IGC_GOTCH); /* clear GOTCL */ 4375 adapter->stats.rnbc += rd32(IGC_RNBC); 4376 adapter->stats.ruc += rd32(IGC_RUC); 4377 adapter->stats.rfc += rd32(IGC_RFC); 4378 adapter->stats.rjc += rd32(IGC_RJC); 4379 adapter->stats.tor += rd32(IGC_TORH); 4380 adapter->stats.tot += rd32(IGC_TOTH); 4381 adapter->stats.tpr += rd32(IGC_TPR); 4382 4383 adapter->stats.ptc64 += rd32(IGC_PTC64); 4384 adapter->stats.ptc127 += rd32(IGC_PTC127); 4385 adapter->stats.ptc255 += rd32(IGC_PTC255); 4386 adapter->stats.ptc511 += rd32(IGC_PTC511); 4387 adapter->stats.ptc1023 += rd32(IGC_PTC1023); 4388 adapter->stats.ptc1522 += rd32(IGC_PTC1522); 4389 4390 adapter->stats.mptc += rd32(IGC_MPTC); 4391 adapter->stats.bptc += rd32(IGC_BPTC); 4392 4393 
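/* COLC and RERC both accumulate into adapter->stats.colc, which is reported to the stack as net_stats->collisions below */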
adapter->stats.tpt += rd32(IGC_TPT); 4394 adapter->stats.colc += rd32(IGC_COLC); 4395 adapter->stats.colc += rd32(IGC_RERC); 4396 4397 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 4398 4399 adapter->stats.tsctc += rd32(IGC_TSCTC); 4400 4401 adapter->stats.iac += rd32(IGC_IAC); 4402 4403 /* Fill out the OS statistics structure */ 4404 net_stats->multicast = adapter->stats.mprc; 4405 net_stats->collisions = adapter->stats.colc; 4406 4407 /* Rx Errors */ 4408 4409 /* RLEC on some newer hardware can be incorrect so build 4410 * our own version based on RUC and ROC 4411 */ 4412 net_stats->rx_errors = adapter->stats.rxerrc + 4413 adapter->stats.crcerrs + adapter->stats.algnerrc + 4414 adapter->stats.ruc + adapter->stats.roc + 4415 adapter->stats.cexterr; 4416 net_stats->rx_length_errors = adapter->stats.ruc + 4417 adapter->stats.roc; 4418 net_stats->rx_crc_errors = adapter->stats.crcerrs; 4419 net_stats->rx_frame_errors = adapter->stats.algnerrc; 4420 net_stats->rx_missed_errors = adapter->stats.mpc; 4421 4422 /* Tx Errors */ 4423 net_stats->tx_errors = adapter->stats.ecol + 4424 adapter->stats.latecol; 4425 net_stats->tx_aborted_errors = adapter->stats.ecol; 4426 net_stats->tx_window_errors = adapter->stats.latecol; 4427 net_stats->tx_carrier_errors = adapter->stats.tncrs; 4428 4429 /* Tx Dropped needs to be maintained elsewhere */ 4430 4431 /* Management Stats */ 4432 adapter->stats.mgptc += rd32(IGC_MGTPTC); 4433 adapter->stats.mgprc += rd32(IGC_MGTPRC); 4434 adapter->stats.mgpdc += rd32(IGC_MGTPDC); 4435 } 4436 4437 /** 4438 * igc_down - Close the interface 4439 * @adapter: board private structure 4440 */ 4441 void igc_down(struct igc_adapter *adapter) 4442 { 4443 struct net_device *netdev = adapter->netdev; 4444 struct igc_hw *hw = &adapter->hw; 4445 u32 tctl, rctl; 4446 int i = 0; 4447 4448 set_bit(__IGC_DOWN, &adapter->state); 4449 4450 igc_ptp_suspend(adapter); 4451 4452 /* disable receives in the hardware */ 4453 rctl = rd32(IGC_RCTL); 4454 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); 4455 /* flush and sleep below */ 4456 4457 /* set trans_start so we don't get spurious watchdogs during reset */ 4458 netif_trans_update(netdev); 4459 4460 netif_carrier_off(netdev); 4461 netif_tx_stop_all_queues(netdev); 4462 4463 /* disable transmits in the hardware */ 4464 tctl = rd32(IGC_TCTL); 4465 tctl &= ~IGC_TCTL_EN; 4466 wr32(IGC_TCTL, tctl); 4467 /* flush both disables and wait for them to finish */ 4468 wrfl(); 4469 usleep_range(10000, 20000); 4470 4471 igc_irq_disable(adapter); 4472 4473 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4474 4475 for (i = 0; i < adapter->num_q_vectors; i++) { 4476 if (adapter->q_vector[i]) { 4477 napi_synchronize(&adapter->q_vector[i]->napi); 4478 napi_disable(&adapter->q_vector[i]->napi); 4479 } 4480 } 4481 4482 del_timer_sync(&adapter->watchdog_timer); 4483 del_timer_sync(&adapter->phy_info_timer); 4484 4485 /* record the stats before reset*/ 4486 spin_lock(&adapter->stats64_lock); 4487 igc_update_stats(adapter); 4488 spin_unlock(&adapter->stats64_lock); 4489 4490 adapter->link_speed = 0; 4491 adapter->link_duplex = 0; 4492 4493 if (!pci_channel_offline(adapter->pdev)) 4494 igc_reset(adapter); 4495 4496 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 4497 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 4498 4499 igc_clean_all_tx_rings(adapter); 4500 igc_clean_all_rx_rings(adapter); 4501 } 4502 4503 void igc_reinit_locked(struct igc_adapter *adapter) 4504 { 4505 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4506 usleep_range(1000, 2000); 4507 
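/* __IGC_RESETTING is now held exclusively; a full down/up cycle reloads rings, interrupts and hardware state */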
igc_down(adapter); 4508 igc_up(adapter); 4509 clear_bit(__IGC_RESETTING, &adapter->state); 4510 } 4511 4512 static void igc_reset_task(struct work_struct *work) 4513 { 4514 struct igc_adapter *adapter; 4515 4516 adapter = container_of(work, struct igc_adapter, reset_task); 4517 4518 rtnl_lock(); 4519 /* If we're already down or resetting, just bail */ 4520 if (test_bit(__IGC_DOWN, &adapter->state) || 4521 test_bit(__IGC_RESETTING, &adapter->state)) { 4522 rtnl_unlock(); 4523 return; 4524 } 4525 4526 igc_rings_dump(adapter); 4527 igc_regs_dump(adapter); 4528 netdev_err(adapter->netdev, "Reset adapter\n"); 4529 igc_reinit_locked(adapter); 4530 rtnl_unlock(); 4531 } 4532 4533 /** 4534 * igc_change_mtu - Change the Maximum Transfer Unit 4535 * @netdev: network interface device structure 4536 * @new_mtu: new value for maximum frame size 4537 * 4538 * Returns 0 on success, negative on failure 4539 */ 4540 static int igc_change_mtu(struct net_device *netdev, int new_mtu) 4541 { 4542 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4543 struct igc_adapter *adapter = netdev_priv(netdev); 4544 4545 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { 4546 netdev_dbg(netdev, "Jumbo frames not supported with XDP"); 4547 return -EINVAL; 4548 } 4549 4550 /* adjust max frame to be at least the size of a standard frame */ 4551 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 4552 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 4553 4554 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4555 usleep_range(1000, 2000); 4556 4557 /* igc_down has a dependency on max_frame_size */ 4558 adapter->max_frame_size = max_frame; 4559 4560 if (netif_running(netdev)) 4561 igc_down(adapter); 4562 4563 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 4564 netdev->mtu = new_mtu; 4565 4566 if (netif_running(netdev)) 4567 igc_up(adapter); 4568 else 4569 igc_reset(adapter); 4570 4571 clear_bit(__IGC_RESETTING, &adapter->state); 4572 4573 return 0; 4574 } 4575 4576 /** 4577 * igc_get_stats64 - Get System Network Statistics 4578 * @netdev: network interface device structure 4579 * @stats: rtnl_link_stats64 pointer 4580 * 4581 * Returns the address of the device statistics structure. 4582 * The statistics are updated here and also from the timer callback. 4583 */ 4584 static void igc_get_stats64(struct net_device *netdev, 4585 struct rtnl_link_stats64 *stats) 4586 { 4587 struct igc_adapter *adapter = netdev_priv(netdev); 4588 4589 spin_lock(&adapter->stats64_lock); 4590 if (!test_bit(__IGC_RESETTING, &adapter->state)) 4591 igc_update_stats(adapter); 4592 memcpy(stats, &adapter->stats64, sizeof(*stats)); 4593 spin_unlock(&adapter->stats64_lock); 4594 } 4595 4596 static netdev_features_t igc_fix_features(struct net_device *netdev, 4597 netdev_features_t features) 4598 { 4599 /* Since there is no support for separate Rx/Tx vlan accel 4600 * enable/disable make sure Tx flag is always in same state as Rx. 
4601 */ 4602 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4603 features |= NETIF_F_HW_VLAN_CTAG_TX; 4604 else 4605 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 4606 4607 return features; 4608 } 4609 4610 static int igc_set_features(struct net_device *netdev, 4611 netdev_features_t features) 4612 { 4613 netdev_features_t changed = netdev->features ^ features; 4614 struct igc_adapter *adapter = netdev_priv(netdev); 4615 4616 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 4617 igc_vlan_mode(netdev, features); 4618 4619 /* Add VLAN support */ 4620 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 4621 return 0; 4622 4623 if (!(features & NETIF_F_NTUPLE)) 4624 igc_flush_nfc_rules(adapter); 4625 4626 netdev->features = features; 4627 4628 if (netif_running(netdev)) 4629 igc_reinit_locked(adapter); 4630 else 4631 igc_reset(adapter); 4632 4633 return 1; 4634 } 4635 4636 static netdev_features_t 4637 igc_features_check(struct sk_buff *skb, struct net_device *dev, 4638 netdev_features_t features) 4639 { 4640 unsigned int network_hdr_len, mac_hdr_len; 4641 4642 /* Make certain the headers can be described by a context descriptor */ 4643 mac_hdr_len = skb_network_header(skb) - skb->data; 4644 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 4645 return features & ~(NETIF_F_HW_CSUM | 4646 NETIF_F_SCTP_CRC | 4647 NETIF_F_HW_VLAN_CTAG_TX | 4648 NETIF_F_TSO | 4649 NETIF_F_TSO6); 4650 4651 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 4652 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 4653 return features & ~(NETIF_F_HW_CSUM | 4654 NETIF_F_SCTP_CRC | 4655 NETIF_F_TSO | 4656 NETIF_F_TSO6); 4657 4658 /* We can only support IPv4 TSO in tunnels if we can mangle the 4659 * inner IP ID field, so strip TSO if MANGLEID is not supported. 4660 */ 4661 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 4662 features &= ~NETIF_F_TSO; 4663 4664 return features; 4665 } 4666 4667 static void igc_tsync_interrupt(struct igc_adapter *adapter) 4668 { 4669 u32 ack, tsauxc, sec, nsec, tsicr; 4670 struct igc_hw *hw = &adapter->hw; 4671 struct ptp_clock_event event; 4672 struct timespec64 ts; 4673 4674 tsicr = rd32(IGC_TSICR); 4675 ack = 0; 4676 4677 if (tsicr & IGC_TSICR_SYS_WRAP) { 4678 event.type = PTP_CLOCK_PPS; 4679 if (adapter->ptp_caps.pps) 4680 ptp_clock_event(adapter->ptp_clock, &event); 4681 ack |= IGC_TSICR_SYS_WRAP; 4682 } 4683 4684 if (tsicr & IGC_TSICR_TXTS) { 4685 /* retrieve hardware timestamp */ 4686 schedule_work(&adapter->ptp_tx_work); 4687 ack |= IGC_TSICR_TXTS; 4688 } 4689 4690 if (tsicr & IGC_TSICR_TT0) { 4691 spin_lock(&adapter->tmreg_lock); 4692 ts = timespec64_add(adapter->perout[0].start, 4693 adapter->perout[0].period); 4694 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 4695 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); 4696 tsauxc = rd32(IGC_TSAUXC); 4697 tsauxc |= IGC_TSAUXC_EN_TT0; 4698 wr32(IGC_TSAUXC, tsauxc); 4699 adapter->perout[0].start = ts; 4700 spin_unlock(&adapter->tmreg_lock); 4701 ack |= IGC_TSICR_TT0; 4702 } 4703 4704 if (tsicr & IGC_TSICR_TT1) { 4705 spin_lock(&adapter->tmreg_lock); 4706 ts = timespec64_add(adapter->perout[1].start, 4707 adapter->perout[1].period); 4708 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 4709 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); 4710 tsauxc = rd32(IGC_TSAUXC); 4711 tsauxc |= IGC_TSAUXC_EN_TT1; 4712 wr32(IGC_TSAUXC, tsauxc); 4713 adapter->perout[1].start = ts; 4714 spin_unlock(&adapter->tmreg_lock); 4715 ack |= IGC_TSICR_TT1; 4716 } 4717 4718 if (tsicr & IGC_TSICR_AUTT0) { 4719 nsec = rd32(IGC_AUXSTMPL0); 4720 
sec = rd32(IGC_AUXSTMPH0); 4721 event.type = PTP_CLOCK_EXTTS; 4722 event.index = 0; 4723 event.timestamp = sec * NSEC_PER_SEC + nsec; 4724 ptp_clock_event(adapter->ptp_clock, &event); 4725 ack |= IGC_TSICR_AUTT0; 4726 } 4727 4728 if (tsicr & IGC_TSICR_AUTT1) { 4729 nsec = rd32(IGC_AUXSTMPL1); 4730 sec = rd32(IGC_AUXSTMPH1); 4731 event.type = PTP_CLOCK_EXTTS; 4732 event.index = 1; 4733 event.timestamp = sec * NSEC_PER_SEC + nsec; 4734 ptp_clock_event(adapter->ptp_clock, &event); 4735 ack |= IGC_TSICR_AUTT1; 4736 } 4737 4738 /* acknowledge the interrupts */ 4739 wr32(IGC_TSICR, ack); 4740 } 4741 4742 /** 4743 * igc_msix_other - msix other interrupt handler 4744 * @irq: interrupt number 4745 * @data: pointer to a q_vector 4746 */ 4747 static irqreturn_t igc_msix_other(int irq, void *data) 4748 { 4749 struct igc_adapter *adapter = data; 4750 struct igc_hw *hw = &adapter->hw; 4751 u32 icr = rd32(IGC_ICR); 4752 4753 /* reading ICR causes bit 31 of EICR to be cleared */ 4754 if (icr & IGC_ICR_DRSTA) 4755 schedule_work(&adapter->reset_task); 4756 4757 if (icr & IGC_ICR_DOUTSYNC) { 4758 /* HW is reporting DMA is out of sync */ 4759 adapter->stats.doosync++; 4760 } 4761 4762 if (icr & IGC_ICR_LSC) { 4763 hw->mac.get_link_status = true; 4764 /* guard against interrupt when we're going down */ 4765 if (!test_bit(__IGC_DOWN, &adapter->state)) 4766 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4767 } 4768 4769 if (icr & IGC_ICR_TS) 4770 igc_tsync_interrupt(adapter); 4771 4772 wr32(IGC_EIMS, adapter->eims_other); 4773 4774 return IRQ_HANDLED; 4775 } 4776 4777 static void igc_write_itr(struct igc_q_vector *q_vector) 4778 { 4779 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 4780 4781 if (!q_vector->set_itr) 4782 return; 4783 4784 if (!itr_val) 4785 itr_val = IGC_ITR_VAL_MASK; 4786 4787 itr_val |= IGC_EITR_CNT_IGNR; 4788 4789 writel(itr_val, q_vector->itr_register); 4790 q_vector->set_itr = 0; 4791 } 4792 4793 static irqreturn_t igc_msix_ring(int irq, void *data) 4794 { 4795 struct igc_q_vector *q_vector = data; 4796 4797 /* Write the ITR value calculated from the previous interrupt. */ 4798 igc_write_itr(q_vector); 4799 4800 napi_schedule(&q_vector->napi); 4801 4802 return IRQ_HANDLED; 4803 } 4804 4805 /** 4806 * igc_request_msix - Initialize MSI-X interrupts 4807 * @adapter: Pointer to adapter structure 4808 * 4809 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 4810 * kernel. 
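 * The first MSI-X vector is requested for the 'other' causes (link status, reset, timesync); each remaining vector is bound to one queue q_vector.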
4811 */ 4812 static int igc_request_msix(struct igc_adapter *adapter) 4813 { 4814 int i = 0, err = 0, vector = 0, free_vector = 0; 4815 struct net_device *netdev = adapter->netdev; 4816 4817 err = request_irq(adapter->msix_entries[vector].vector, 4818 &igc_msix_other, 0, netdev->name, adapter); 4819 if (err) 4820 goto err_out; 4821 4822 for (i = 0; i < adapter->num_q_vectors; i++) { 4823 struct igc_q_vector *q_vector = adapter->q_vector[i]; 4824 4825 vector++; 4826 4827 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 4828 4829 if (q_vector->rx.ring && q_vector->tx.ring) 4830 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 4831 q_vector->rx.ring->queue_index); 4832 else if (q_vector->tx.ring) 4833 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 4834 q_vector->tx.ring->queue_index); 4835 else if (q_vector->rx.ring) 4836 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 4837 q_vector->rx.ring->queue_index); 4838 else 4839 sprintf(q_vector->name, "%s-unused", netdev->name); 4840 4841 err = request_irq(adapter->msix_entries[vector].vector, 4842 igc_msix_ring, 0, q_vector->name, 4843 q_vector); 4844 if (err) 4845 goto err_free; 4846 } 4847 4848 igc_configure_msix(adapter); 4849 return 0; 4850 4851 err_free: 4852 /* free already assigned IRQs */ 4853 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 4854 4855 vector--; 4856 for (i = 0; i < vector; i++) { 4857 free_irq(adapter->msix_entries[free_vector++].vector, 4858 adapter->q_vector[i]); 4859 } 4860 err_out: 4861 return err; 4862 } 4863 4864 /** 4865 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 4866 * @adapter: Pointer to adapter structure 4867 * 4868 * This function resets the device so that it has 0 rx queues, tx queues, and 4869 * MSI-X interrupts allocated. 4870 */ 4871 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 4872 { 4873 igc_free_q_vectors(adapter); 4874 igc_reset_interrupt_capability(adapter); 4875 } 4876 4877 /* Need to wait a few seconds after link up to get diagnostic information from 4878 * the phy 4879 */ 4880 static void igc_update_phy_info(struct timer_list *t) 4881 { 4882 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 4883 4884 igc_get_phy_info(&adapter->hw); 4885 } 4886 4887 /** 4888 * igc_has_link - check shared code for link and determine up/down 4889 * @adapter: pointer to driver private info 4890 */ 4891 bool igc_has_link(struct igc_adapter *adapter) 4892 { 4893 struct igc_hw *hw = &adapter->hw; 4894 bool link_active = false; 4895 4896 /* get_link_status is set on LSC (link status) interrupt or 4897 * rx sequence error interrupt. 
get_link_status will stay 4898 * false until the igc_check_for_link establishes link 4899 * for copper adapters ONLY 4900 */ 4901 switch (hw->phy.media_type) { 4902 case igc_media_type_copper: 4903 if (!hw->mac.get_link_status) 4904 return true; 4905 hw->mac.ops.check_for_link(hw); 4906 link_active = !hw->mac.get_link_status; 4907 break; 4908 default: 4909 case igc_media_type_unknown: 4910 break; 4911 } 4912 4913 if (hw->mac.type == igc_i225 && 4914 hw->phy.id == I225_I_PHY_ID) { 4915 if (!netif_carrier_ok(adapter->netdev)) { 4916 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4917 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { 4918 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; 4919 adapter->link_check_timeout = jiffies; 4920 } 4921 } 4922 4923 return link_active; 4924 } 4925 4926 /** 4927 * igc_watchdog - Timer Call-back 4928 * @t: timer for the watchdog 4929 */ 4930 static void igc_watchdog(struct timer_list *t) 4931 { 4932 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); 4933 /* Do the rest outside of interrupt context */ 4934 schedule_work(&adapter->watchdog_task); 4935 } 4936 4937 static void igc_watchdog_task(struct work_struct *work) 4938 { 4939 struct igc_adapter *adapter = container_of(work, 4940 struct igc_adapter, 4941 watchdog_task); 4942 struct net_device *netdev = adapter->netdev; 4943 struct igc_hw *hw = &adapter->hw; 4944 struct igc_phy_info *phy = &hw->phy; 4945 u16 phy_data, retry_count = 20; 4946 u32 link; 4947 int i; 4948 4949 link = igc_has_link(adapter); 4950 4951 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 4952 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 4953 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4954 else 4955 link = false; 4956 } 4957 4958 if (link) { 4959 /* Cancel scheduled suspend requests. */ 4960 pm_runtime_resume(netdev->dev.parent); 4961 4962 if (!netif_carrier_ok(netdev)) { 4963 u32 ctrl; 4964 4965 hw->mac.ops.get_speed_and_duplex(hw, 4966 &adapter->link_speed, 4967 &adapter->link_duplex); 4968 4969 ctrl = rd32(IGC_CTRL); 4970 /* Link status message must follow this format */ 4971 netdev_info(netdev, 4972 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 4973 adapter->link_speed, 4974 adapter->link_duplex == FULL_DUPLEX ? 4975 "Full" : "Half", 4976 (ctrl & IGC_CTRL_TFCE) && 4977 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 4978 (ctrl & IGC_CTRL_RFCE) ? "RX" : 4979 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 4980 4981 /* disable EEE if enabled */ 4982 if ((adapter->flags & IGC_FLAG_EEE) && 4983 adapter->link_duplex == HALF_DUPLEX) { 4984 netdev_info(netdev, 4985 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); 4986 adapter->hw.dev_spec._base.eee_enable = false; 4987 adapter->flags &= ~IGC_FLAG_EEE; 4988 } 4989 4990 /* check if SmartSpeed worked */ 4991 igc_check_downshift(hw); 4992 if (phy->speed_downgraded) 4993 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 4994 4995 /* adjust timeout factor according to speed/duplex */ 4996 adapter->tx_timeout_factor = 1; 4997 switch (adapter->link_speed) { 4998 case SPEED_10: 4999 adapter->tx_timeout_factor = 14; 5000 break; 5001 case SPEED_100: 5002 /* maybe add some timeout factor ? 
*/ 5003 break; 5004 } 5005 5006 if (adapter->link_speed != SPEED_1000) 5007 goto no_wait; 5008 5009 /* wait for Remote receiver status OK */ 5010 retry_read_status: 5011 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 5012 &phy_data)) { 5013 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5014 retry_count) { 5015 msleep(100); 5016 retry_count--; 5017 goto retry_read_status; 5018 } else if (!retry_count) { 5019 netdev_err(netdev, "exceeded max 2 second wait for Remote receiver status\n"); 5020 } 5021 } else { 5022 netdev_err(netdev, "Failed to read 1000Base-T Status Reg\n"); 5023 } 5024 no_wait: 5025 netif_carrier_on(netdev); 5026 5027 /* link state has changed, schedule phy info update */ 5028 if (!test_bit(__IGC_DOWN, &adapter->state)) 5029 mod_timer(&adapter->phy_info_timer, 5030 round_jiffies(jiffies + 2 * HZ)); 5031 } 5032 } else { 5033 if (netif_carrier_ok(netdev)) { 5034 adapter->link_speed = 0; 5035 adapter->link_duplex = 0; 5036 5037 /* Link status message must follow this format */ 5038 netdev_info(netdev, "NIC Link is Down\n"); 5039 netif_carrier_off(netdev); 5040 5041 /* link state has changed, schedule phy info update */ 5042 if (!test_bit(__IGC_DOWN, &adapter->state)) 5043 mod_timer(&adapter->phy_info_timer, 5044 round_jiffies(jiffies + 2 * HZ)); 5045 5046 /* link is down, time to check for alternate media */ 5047 if (adapter->flags & IGC_FLAG_MAS_ENABLE) { 5048 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5049 schedule_work(&adapter->reset_task); 5050 /* return immediately */ 5051 return; 5052 } 5053 } 5054 pm_schedule_suspend(netdev->dev.parent, 5055 MSEC_PER_SEC * 5); 5056 5057 /* also check for alternate media here */ 5058 } else if (!netif_carrier_ok(netdev) && 5059 (adapter->flags & IGC_FLAG_MAS_ENABLE)) { 5060 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5061 schedule_work(&adapter->reset_task); 5062 /* return immediately */ 5063 return; 5064 } 5065 } 5066 } 5067 5068 spin_lock(&adapter->stats64_lock); 5069 igc_update_stats(adapter); 5070 spin_unlock(&adapter->stats64_lock); 5071 5072 for (i = 0; i < adapter->num_tx_queues; i++) { 5073 struct igc_ring *tx_ring = adapter->tx_ring[i]; 5074 5075 if (!netif_carrier_ok(netdev)) { 5076 /* We've lost link, so the controller stops DMA, 5077 * but we've got queued Tx work that's never going 5078 * to get done, so reset controller to flush Tx. 5079 * (Do the reset outside of interrupt context).
5080 */ 5081 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 5082 adapter->tx_timeout_count++; 5083 schedule_work(&adapter->reset_task); 5084 /* return immediately since reset is imminent */ 5085 return; 5086 } 5087 } 5088 5089 /* Force detection of hung controller every watchdog period */ 5090 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5091 } 5092 5093 /* Cause software interrupt to ensure Rx ring is cleaned */ 5094 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5095 u32 eics = 0; 5096 5097 for (i = 0; i < adapter->num_q_vectors; i++) 5098 eics |= adapter->q_vector[i]->eims_value; 5099 wr32(IGC_EICS, eics); 5100 } else { 5101 wr32(IGC_ICS, IGC_ICS_RXDMT0); 5102 } 5103 5104 igc_ptp_tx_hang(adapter); 5105 5106 /* Reset the timer */ 5107 if (!test_bit(__IGC_DOWN, &adapter->state)) { 5108 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 5109 mod_timer(&adapter->watchdog_timer, 5110 round_jiffies(jiffies + HZ)); 5111 else 5112 mod_timer(&adapter->watchdog_timer, 5113 round_jiffies(jiffies + 2 * HZ)); 5114 } 5115 } 5116 5117 /** 5118 * igc_intr_msi - Interrupt Handler 5119 * @irq: interrupt number 5120 * @data: pointer to a network interface device structure 5121 */ 5122 static irqreturn_t igc_intr_msi(int irq, void *data) 5123 { 5124 struct igc_adapter *adapter = data; 5125 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5126 struct igc_hw *hw = &adapter->hw; 5127 /* read ICR disables interrupts using IAM */ 5128 u32 icr = rd32(IGC_ICR); 5129 5130 igc_write_itr(q_vector); 5131 5132 if (icr & IGC_ICR_DRSTA) 5133 schedule_work(&adapter->reset_task); 5134 5135 if (icr & IGC_ICR_DOUTSYNC) { 5136 /* HW is reporting DMA is out of sync */ 5137 adapter->stats.doosync++; 5138 } 5139 5140 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5141 hw->mac.get_link_status = true; 5142 if (!test_bit(__IGC_DOWN, &adapter->state)) 5143 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5144 } 5145 5146 napi_schedule(&q_vector->napi); 5147 5148 return IRQ_HANDLED; 5149 } 5150 5151 /** 5152 * igc_intr - Legacy Interrupt Handler 5153 * @irq: interrupt number 5154 * @data: pointer to a network interface device structure 5155 */ 5156 static irqreturn_t igc_intr(int irq, void *data) 5157 { 5158 struct igc_adapter *adapter = data; 5159 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5160 struct igc_hw *hw = &adapter->hw; 5161 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No 5162 * need for the IMC write 5163 */ 5164 u32 icr = rd32(IGC_ICR); 5165 5166 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5167 * not set, then the adapter didn't send an interrupt 5168 */ 5169 if (!(icr & IGC_ICR_INT_ASSERTED)) 5170 return IRQ_NONE; 5171 5172 igc_write_itr(q_vector); 5173 5174 if (icr & IGC_ICR_DRSTA) 5175 schedule_work(&adapter->reset_task); 5176 5177 if (icr & IGC_ICR_DOUTSYNC) { 5178 /* HW is reporting DMA is out of sync */ 5179 adapter->stats.doosync++; 5180 } 5181 5182 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5183 hw->mac.get_link_status = true; 5184 /* guard against interrupt when we're going down */ 5185 if (!test_bit(__IGC_DOWN, &adapter->state)) 5186 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5187 } 5188 5189 napi_schedule(&q_vector->napi); 5190 5191 return IRQ_HANDLED; 5192 } 5193 5194 static void igc_free_irq(struct igc_adapter *adapter) 5195 { 5196 if (adapter->msix_entries) { 5197 int vector = 0, i; 5198 5199 free_irq(adapter->msix_entries[vector++].vector, adapter); 5200 5201 for (i = 0; i < adapter->num_q_vectors; i++) 5202 free_irq(adapter->msix_entries[vector++].vector, 5203 adapter->q_vector[i]); 5204 } else { 5205 free_irq(adapter->pdev->irq, adapter); 5206 } 5207 } 5208 5209 /** 5210 * igc_request_irq - initialize interrupts 5211 * @adapter: Pointer to adapter structure 5212 * 5213 * Attempts to configure interrupts using the best available 5214 * capabilities of the hardware and kernel. 5215 */ 5216 static int igc_request_irq(struct igc_adapter *adapter) 5217 { 5218 struct net_device *netdev = adapter->netdev; 5219 struct pci_dev *pdev = adapter->pdev; 5220 int err = 0; 5221 5222 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5223 err = igc_request_msix(adapter); 5224 if (!err) 5225 goto request_done; 5226 /* fall back to MSI */ 5227 igc_free_all_tx_resources(adapter); 5228 igc_free_all_rx_resources(adapter); 5229 5230 igc_clear_interrupt_scheme(adapter); 5231 err = igc_init_interrupt_scheme(adapter, false); 5232 if (err) 5233 goto request_done; 5234 igc_setup_all_tx_resources(adapter); 5235 igc_setup_all_rx_resources(adapter); 5236 igc_configure(adapter); 5237 } 5238 5239 igc_assign_vector(adapter->q_vector[0], 0); 5240 5241 if (adapter->flags & IGC_FLAG_HAS_MSI) { 5242 err = request_irq(pdev->irq, &igc_intr_msi, 0, 5243 netdev->name, adapter); 5244 if (!err) 5245 goto request_done; 5246 5247 /* fall back to legacy interrupts */ 5248 igc_reset_interrupt_capability(adapter); 5249 adapter->flags &= ~IGC_FLAG_HAS_MSI; 5250 } 5251 5252 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 5253 netdev->name, adapter); 5254 5255 if (err) 5256 netdev_err(netdev, "Error %d getting interrupt\n", err); 5257 5258 request_done: 5259 return err; 5260 } 5261 5262 /** 5263 * __igc_open - Called when a network interface is made active 5264 * @netdev: network interface device structure 5265 * @resuming: boolean indicating if the device is resuming 5266 * 5267 * Returns 0 on success, negative value on failure 5268 * 5269 * The open entry point is called when a network interface is made 5270 * active by the system (IFF_UP). At this point all resources needed 5271 * for transmit and receive operations are allocated, the interrupt 5272 * handler is registered with the OS, the watchdog timer is started, 5273 * and the stack is notified that the interface is ready. 
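 * For a normal open (not a runtime resume) a runtime PM reference is held while the interface is brought up.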
5274 */ 5275 static int __igc_open(struct net_device *netdev, bool resuming) 5276 { 5277 struct igc_adapter *adapter = netdev_priv(netdev); 5278 struct pci_dev *pdev = adapter->pdev; 5279 struct igc_hw *hw = &adapter->hw; 5280 int err = 0; 5281 int i = 0; 5282 5283 /* disallow open during test */ 5284 5285 if (test_bit(__IGC_TESTING, &adapter->state)) { 5286 WARN_ON(resuming); 5287 return -EBUSY; 5288 } 5289 5290 if (!resuming) 5291 pm_runtime_get_sync(&pdev->dev); 5292 5293 netif_carrier_off(netdev); 5294 5295 /* allocate transmit descriptors */ 5296 err = igc_setup_all_tx_resources(adapter); 5297 if (err) 5298 goto err_setup_tx; 5299 5300 /* allocate receive descriptors */ 5301 err = igc_setup_all_rx_resources(adapter); 5302 if (err) 5303 goto err_setup_rx; 5304 5305 igc_power_up_link(adapter); 5306 5307 igc_configure(adapter); 5308 5309 err = igc_request_irq(adapter); 5310 if (err) 5311 goto err_req_irq; 5312 5313 /* Notify the stack of the actual queue counts. */ 5314 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 5315 if (err) 5316 goto err_set_queues; 5317 5318 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 5319 if (err) 5320 goto err_set_queues; 5321 5322 clear_bit(__IGC_DOWN, &adapter->state); 5323 5324 for (i = 0; i < adapter->num_q_vectors; i++) 5325 napi_enable(&adapter->q_vector[i]->napi); 5326 5327 /* Clear any pending interrupts. */ 5328 rd32(IGC_ICR); 5329 igc_irq_enable(adapter); 5330 5331 if (!resuming) 5332 pm_runtime_put(&pdev->dev); 5333 5334 netif_tx_start_all_queues(netdev); 5335 5336 /* start the watchdog. */ 5337 hw->mac.get_link_status = true; 5338 schedule_work(&adapter->watchdog_task); 5339 5340 return IGC_SUCCESS; 5341 5342 err_set_queues: 5343 igc_free_irq(adapter); 5344 err_req_irq: 5345 igc_release_hw_control(adapter); 5346 igc_power_down_phy_copper_base(&adapter->hw); 5347 igc_free_all_rx_resources(adapter); 5348 err_setup_rx: 5349 igc_free_all_tx_resources(adapter); 5350 err_setup_tx: 5351 igc_reset(adapter); 5352 if (!resuming) 5353 pm_runtime_put(&pdev->dev); 5354 5355 return err; 5356 } 5357 5358 int igc_open(struct net_device *netdev) 5359 { 5360 return __igc_open(netdev, false); 5361 } 5362 5363 /** 5364 * __igc_close - Disables a network interface 5365 * @netdev: network interface device structure 5366 * @suspending: boolean indicating the device is suspending 5367 * 5368 * Returns 0, this is not allowed to fail 5369 * 5370 * The close entry point is called when an interface is de-activated 5371 * by the OS. The hardware is still under the driver's control, but 5372 * needs to be disabled. A global MAC reset is issued to stop the 5373 * hardware, and all transmit and receive resources are freed. 
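 * Control of the hardware is handed back to the firmware before the IRQs and descriptor ring resources are released.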
5374 */ 5375 static int __igc_close(struct net_device *netdev, bool suspending) 5376 { 5377 struct igc_adapter *adapter = netdev_priv(netdev); 5378 struct pci_dev *pdev = adapter->pdev; 5379 5380 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 5381 5382 if (!suspending) 5383 pm_runtime_get_sync(&pdev->dev); 5384 5385 igc_down(adapter); 5386 5387 igc_release_hw_control(adapter); 5388 5389 igc_free_irq(adapter); 5390 5391 igc_free_all_tx_resources(adapter); 5392 igc_free_all_rx_resources(adapter); 5393 5394 if (!suspending) 5395 pm_runtime_put_sync(&pdev->dev); 5396 5397 return 0; 5398 } 5399 5400 int igc_close(struct net_device *netdev) 5401 { 5402 if (netif_device_present(netdev) || netdev->dismantle) 5403 return __igc_close(netdev, false); 5404 return 0; 5405 } 5406 5407 /** 5408 * igc_ioctl - Access the hwtstamp interface 5409 * @netdev: network interface device structure 5410 * @ifr: interface request data 5411 * @cmd: ioctl command 5412 **/ 5413 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5414 { 5415 switch (cmd) { 5416 case SIOCGHWTSTAMP: 5417 return igc_ptp_get_ts_config(netdev, ifr); 5418 case SIOCSHWTSTAMP: 5419 return igc_ptp_set_ts_config(netdev, ifr); 5420 default: 5421 return -EOPNOTSUPP; 5422 } 5423 } 5424 5425 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, 5426 bool enable) 5427 { 5428 struct igc_ring *ring; 5429 int i; 5430 5431 if (queue < 0 || queue >= adapter->num_tx_queues) 5432 return -EINVAL; 5433 5434 ring = adapter->tx_ring[queue]; 5435 ring->launchtime_enable = enable; 5436 5437 if (adapter->base_time) 5438 return 0; 5439 5440 adapter->cycle_time = NSEC_PER_SEC; 5441 5442 for (i = 0; i < adapter->num_tx_queues; i++) { 5443 ring = adapter->tx_ring[i]; 5444 ring->start_time = 0; 5445 ring->end_time = NSEC_PER_SEC; 5446 } 5447 5448 return 0; 5449 } 5450 5451 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 5452 { 5453 struct timespec64 b; 5454 5455 b = ktime_to_timespec64(base_time); 5456 5457 return timespec64_compare(now, &b) > 0; 5458 } 5459 5460 static bool validate_schedule(struct igc_adapter *adapter, 5461 const struct tc_taprio_qopt_offload *qopt) 5462 { 5463 int queue_uses[IGC_MAX_TX_QUEUES] = { }; 5464 struct timespec64 now; 5465 size_t n; 5466 5467 if (qopt->cycle_time_extension) 5468 return false; 5469 5470 igc_ptp_read(adapter, &now); 5471 5472 /* If we program the controller's BASET registers with a time 5473 * in the future, it will hold all the packets until that 5474 * time, causing a lot of TX Hangs, so to avoid that, we 5475 * reject schedules that would start in the future. 5476 */ 5477 if (!is_base_time_past(qopt->base_time, &now)) 5478 return false; 5479 5480 for (n = 0; n < qopt->num_entries; n++) { 5481 const struct tc_taprio_sched_entry *e; 5482 int i; 5483 5484 e = &qopt->entries[n]; 5485 5486 /* i225 only supports "global" frame preemption 5487 * settings. 
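 * Reject anything other than a plain set-gates entry, and reject schedules that open the gate for the same queue in more than one entry.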
5488 */ 5489 if (e->command != TC_TAPRIO_CMD_SET_GATES) 5490 return false; 5491 5492 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { 5493 if (e->gate_mask & BIT(i)) 5494 queue_uses[i]++; 5495 5496 if (queue_uses[i] > 1) 5497 return false; 5498 } 5499 } 5500 5501 return true; 5502 } 5503 5504 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, 5505 struct tc_etf_qopt_offload *qopt) 5506 { 5507 struct igc_hw *hw = &adapter->hw; 5508 int err; 5509 5510 if (hw->mac.type != igc_i225) 5511 return -EOPNOTSUPP; 5512 5513 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); 5514 if (err) 5515 return err; 5516 5517 return igc_tsn_offload_apply(adapter); 5518 } 5519 5520 static int igc_save_qbv_schedule(struct igc_adapter *adapter, 5521 struct tc_taprio_qopt_offload *qopt) 5522 { 5523 u32 start_time = 0, end_time = 0; 5524 size_t n; 5525 5526 if (!qopt->enable) { 5527 adapter->base_time = 0; 5528 return 0; 5529 } 5530 5531 if (adapter->base_time) 5532 return -EALREADY; 5533 5534 if (!validate_schedule(adapter, qopt)) 5535 return -EINVAL; 5536 5537 adapter->cycle_time = qopt->cycle_time; 5538 adapter->base_time = qopt->base_time; 5539 5540 /* FIXME: be a little smarter about cases when the gate for a 5541 * queue stays open for more than one entry. 5542 */ 5543 for (n = 0; n < qopt->num_entries; n++) { 5544 struct tc_taprio_sched_entry *e = &qopt->entries[n]; 5545 int i; 5546 5547 end_time += e->interval; 5548 5549 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { 5550 struct igc_ring *ring = adapter->tx_ring[i]; 5551 5552 if (!(e->gate_mask & BIT(i))) 5553 continue; 5554 5555 ring->start_time = start_time; 5556 ring->end_time = end_time; 5557 } 5558 5559 start_time += e->interval; 5560 } 5561 5562 return 0; 5563 } 5564 5565 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, 5566 struct tc_taprio_qopt_offload *qopt) 5567 { 5568 struct igc_hw *hw = &adapter->hw; 5569 int err; 5570 5571 if (hw->mac.type != igc_i225) 5572 return -EOPNOTSUPP; 5573 5574 err = igc_save_qbv_schedule(adapter, qopt); 5575 if (err) 5576 return err; 5577 5578 return igc_tsn_offload_apply(adapter); 5579 } 5580 5581 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, 5582 void *type_data) 5583 { 5584 struct igc_adapter *adapter = netdev_priv(dev); 5585 5586 switch (type) { 5587 case TC_SETUP_QDISC_TAPRIO: 5588 return igc_tsn_enable_qbv_scheduling(adapter, type_data); 5589 5590 case TC_SETUP_QDISC_ETF: 5591 return igc_tsn_enable_launchtime(adapter, type_data); 5592 5593 default: 5594 return -EOPNOTSUPP; 5595 } 5596 } 5597 5598 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) 5599 { 5600 struct igc_adapter *adapter = netdev_priv(dev); 5601 5602 switch (bpf->command) { 5603 case XDP_SETUP_PROG: 5604 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); 5605 case XDP_SETUP_XSK_POOL: 5606 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, 5607 bpf->xsk.queue_id); 5608 default: 5609 return -EOPNOTSUPP; 5610 } 5611 } 5612 5613 static int igc_xdp_xmit(struct net_device *dev, int num_frames, 5614 struct xdp_frame **frames, u32 flags) 5615 { 5616 struct igc_adapter *adapter = netdev_priv(dev); 5617 int cpu = smp_processor_id(); 5618 struct netdev_queue *nq; 5619 struct igc_ring *ring; 5620 int i, drops; 5621 5622 if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) 5623 return -ENETDOWN; 5624 5625 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 5626 return -EINVAL; 5627 5628 ring = igc_xdp_get_tx_ring(adapter, cpu); 5629 nq = txring_txq(ring); 5630 5631 
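/* serialize against the stack transmitting on the same ring */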
__netif_tx_lock(nq, cpu); 5632 5633 drops = 0; 5634 for (i = 0; i < num_frames; i++) { 5635 int err; 5636 struct xdp_frame *xdpf = frames[i]; 5637 5638 err = igc_xdp_init_tx_descriptor(ring, xdpf); 5639 if (err) { 5640 xdp_return_frame_rx_napi(xdpf); 5641 drops++; 5642 } 5643 } 5644 5645 if (flags & XDP_XMIT_FLUSH) 5646 igc_flush_tx_descriptors(ring); 5647 5648 __netif_tx_unlock(nq); 5649 5650 return num_frames - drops; 5651 } 5652 5653 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, 5654 struct igc_q_vector *q_vector) 5655 { 5656 struct igc_hw *hw = &adapter->hw; 5657 u32 eics = 0; 5658 5659 eics |= q_vector->eims_value; 5660 wr32(IGC_EICS, eics); 5661 } 5662 5663 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 5664 { 5665 struct igc_adapter *adapter = netdev_priv(dev); 5666 struct igc_q_vector *q_vector; 5667 struct igc_ring *ring; 5668 5669 if (test_bit(__IGC_DOWN, &adapter->state)) 5670 return -ENETDOWN; 5671 5672 if (!igc_xdp_is_enabled(adapter)) 5673 return -ENXIO; 5674 5675 if (queue_id >= adapter->num_rx_queues) 5676 return -EINVAL; 5677 5678 ring = adapter->rx_ring[queue_id]; 5679 5680 if (!ring->xsk_pool) 5681 return -ENXIO; 5682 5683 q_vector = adapter->q_vector[queue_id]; 5684 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 5685 igc_trigger_rxtxq_interrupt(adapter, q_vector); 5686 5687 return 0; 5688 } 5689 5690 static const struct net_device_ops igc_netdev_ops = { 5691 .ndo_open = igc_open, 5692 .ndo_stop = igc_close, 5693 .ndo_start_xmit = igc_xmit_frame, 5694 .ndo_set_rx_mode = igc_set_rx_mode, 5695 .ndo_set_mac_address = igc_set_mac, 5696 .ndo_change_mtu = igc_change_mtu, 5697 .ndo_get_stats64 = igc_get_stats64, 5698 .ndo_fix_features = igc_fix_features, 5699 .ndo_set_features = igc_set_features, 5700 .ndo_features_check = igc_features_check, 5701 .ndo_do_ioctl = igc_ioctl, 5702 .ndo_setup_tc = igc_setup_tc, 5703 .ndo_bpf = igc_bpf, 5704 .ndo_xdp_xmit = igc_xdp_xmit, 5705 .ndo_xsk_wakeup = igc_xsk_wakeup, 5706 }; 5707 5708 /* PCIe configuration access */ 5709 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 5710 { 5711 struct igc_adapter *adapter = hw->back; 5712 5713 pci_read_config_word(adapter->pdev, reg, value); 5714 } 5715 5716 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 5717 { 5718 struct igc_adapter *adapter = hw->back; 5719 5720 pci_write_config_word(adapter->pdev, reg, *value); 5721 } 5722 5723 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 5724 { 5725 struct igc_adapter *adapter = hw->back; 5726 5727 if (!pci_is_pcie(adapter->pdev)) 5728 return -IGC_ERR_CONFIG; 5729 5730 pcie_capability_read_word(adapter->pdev, reg, value); 5731 5732 return IGC_SUCCESS; 5733 } 5734 5735 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 5736 { 5737 struct igc_adapter *adapter = hw->back; 5738 5739 if (!pci_is_pcie(adapter->pdev)) 5740 return -IGC_ERR_CONFIG; 5741 5742 pcie_capability_write_word(adapter->pdev, reg, *value); 5743 5744 return IGC_SUCCESS; 5745 } 5746 5747 u32 igc_rd32(struct igc_hw *hw, u32 reg) 5748 { 5749 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 5750 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 5751 u32 value = 0; 5752 5753 value = readl(&hw_addr[reg]); 5754 5755 /* reads should not return all F's */ 5756 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 5757 struct net_device *netdev = igc->netdev; 5758 5759 hw->hw_addr = NULL; 5760 netif_device_detach(netdev); 5761 netdev_err(netdev, "PCIe link lost, device now detached\n"); 
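/* only warn if the device is still present; an all-ones read is expected after a surprise removal */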
5762 WARN(pci_device_is_present(igc->pdev), 5763 "igc: Failed to read reg 0x%x!\n", reg); 5764 } 5765 5766 return value; 5767 } 5768 5769 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) 5770 { 5771 struct igc_mac_info *mac = &adapter->hw.mac; 5772 5773 mac->autoneg = false; 5774 5775 /* Make sure dplx is at most 1 bit and lsb of speed is not set 5776 * for the switch() below to work 5777 */ 5778 if ((spd & 1) || (dplx & ~1)) 5779 goto err_inval; 5780 5781 switch (spd + dplx) { 5782 case SPEED_10 + DUPLEX_HALF: 5783 mac->forced_speed_duplex = ADVERTISE_10_HALF; 5784 break; 5785 case SPEED_10 + DUPLEX_FULL: 5786 mac->forced_speed_duplex = ADVERTISE_10_FULL; 5787 break; 5788 case SPEED_100 + DUPLEX_HALF: 5789 mac->forced_speed_duplex = ADVERTISE_100_HALF; 5790 break; 5791 case SPEED_100 + DUPLEX_FULL: 5792 mac->forced_speed_duplex = ADVERTISE_100_FULL; 5793 break; 5794 case SPEED_1000 + DUPLEX_FULL: 5795 mac->autoneg = true; 5796 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 5797 break; 5798 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5799 goto err_inval; 5800 case SPEED_2500 + DUPLEX_FULL: 5801 mac->autoneg = true; 5802 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 5803 break; 5804 case SPEED_2500 + DUPLEX_HALF: /* not supported */ 5805 default: 5806 goto err_inval; 5807 } 5808 5809 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 5810 adapter->hw.phy.mdix = AUTO_ALL_MODES; 5811 5812 return 0; 5813 5814 err_inval: 5815 netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); 5816 return -EINVAL; 5817 } 5818 5819 /** 5820 * igc_probe - Device Initialization Routine 5821 * @pdev: PCI device information struct 5822 * @ent: entry in igc_pci_tbl 5823 * 5824 * Returns 0 on success, negative on failure 5825 * 5826 * igc_probe initializes an adapter identified by a pci_dev structure. 5827 * The OS initialization, configuring the adapter private structure, 5828 * and a hardware reset occur. 
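 * On success the netdev has been registered and the firmware has been told that the driver has taken control of the hardware.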
5829 */ 5830 static int igc_probe(struct pci_dev *pdev, 5831 const struct pci_device_id *ent) 5832 { 5833 struct igc_adapter *adapter; 5834 struct net_device *netdev; 5835 struct igc_hw *hw; 5836 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 5837 int err, pci_using_dac; 5838 5839 err = pci_enable_device_mem(pdev); 5840 if (err) 5841 return err; 5842 5843 pci_using_dac = 0; 5844 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5845 if (!err) { 5846 pci_using_dac = 1; 5847 } else { 5848 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 5849 if (err) { 5850 dev_err(&pdev->dev, 5851 "No usable DMA configuration, aborting\n"); 5852 goto err_dma; 5853 } 5854 } 5855 5856 err = pci_request_mem_regions(pdev, igc_driver_name); 5857 if (err) 5858 goto err_pci_reg; 5859 5860 pci_enable_pcie_error_reporting(pdev); 5861 5862 pci_set_master(pdev); 5863 5864 err = -ENOMEM; 5865 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 5866 IGC_MAX_TX_QUEUES); 5867 5868 if (!netdev) 5869 goto err_alloc_etherdev; 5870 5871 SET_NETDEV_DEV(netdev, &pdev->dev); 5872 5873 pci_set_drvdata(pdev, netdev); 5874 adapter = netdev_priv(netdev); 5875 adapter->netdev = netdev; 5876 adapter->pdev = pdev; 5877 hw = &adapter->hw; 5878 hw->back = adapter; 5879 adapter->port_num = hw->bus.func; 5880 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 5881 5882 err = pci_save_state(pdev); 5883 if (err) 5884 goto err_ioremap; 5885 5886 err = -EIO; 5887 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 5888 pci_resource_len(pdev, 0)); 5889 if (!adapter->io_addr) 5890 goto err_ioremap; 5891 5892 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 5893 hw->hw_addr = adapter->io_addr; 5894 5895 netdev->netdev_ops = &igc_netdev_ops; 5896 igc_ethtool_set_ops(netdev); 5897 netdev->watchdog_timeo = 5 * HZ; 5898 5899 netdev->mem_start = pci_resource_start(pdev, 0); 5900 netdev->mem_end = pci_resource_end(pdev, 0); 5901 5902 /* PCI config space info */ 5903 hw->vendor_id = pdev->vendor; 5904 hw->device_id = pdev->device; 5905 hw->revision_id = pdev->revision; 5906 hw->subsystem_vendor_id = pdev->subsystem_vendor; 5907 hw->subsystem_device_id = pdev->subsystem_device; 5908 5909 /* Copy the default MAC and PHY function pointers */ 5910 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 5911 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 5912 5913 /* Initialize skew-specific constants */ 5914 err = ei->get_invariants(hw); 5915 if (err) 5916 goto err_sw_init; 5917 5918 /* Add supported features to the features list*/ 5919 netdev->features |= NETIF_F_SG; 5920 netdev->features |= NETIF_F_TSO; 5921 netdev->features |= NETIF_F_TSO6; 5922 netdev->features |= NETIF_F_TSO_ECN; 5923 netdev->features |= NETIF_F_RXCSUM; 5924 netdev->features |= NETIF_F_HW_CSUM; 5925 netdev->features |= NETIF_F_SCTP_CRC; 5926 netdev->features |= NETIF_F_HW_TC; 5927 5928 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 5929 NETIF_F_GSO_GRE_CSUM | \ 5930 NETIF_F_GSO_IPXIP4 | \ 5931 NETIF_F_GSO_IPXIP6 | \ 5932 NETIF_F_GSO_UDP_TUNNEL | \ 5933 NETIF_F_GSO_UDP_TUNNEL_CSUM) 5934 5935 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; 5936 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; 5937 5938 /* setup the private structure */ 5939 err = igc_sw_init(adapter); 5940 if (err) 5941 goto err_sw_init; 5942 5943 /* copy netdev features into list of user selectable features */ 5944 netdev->hw_features |= NETIF_F_NTUPLE; 5945 netdev->hw_features |= 
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	if (igc_get_flash_presence_i225(hw)) {
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);

	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	/* 0xaf == advertise all supported speed/duplex combinations */
	hw->phy.autoneg_advertised = 0xaf;

	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
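
	/* The wake filter bits collected in adapter->wol (such as
	 * IGC_WUFC_MAG above) are written to the WUFC register later, on
	 * the suspend/shutdown path in __igc_shutdown().
	 */
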
	igc_ptp_init(adapter);

	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", IFNAMSIZ);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* keep a copy of the board-specific info for this adapter */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
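
/* __igc_shutdown() is the common power-down path shared by system suspend,
 * runtime suspend and shutdown.  When "runtime" is true, only link-change
 * wake-ups (IGC_WUFC_LNKC) are armed instead of the user's WoL settings.
 */
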
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}

static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}
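
/* igc_resume() doubles as the runtime-resume handler (see
 * igc_runtime_resume() below).  If the wake-up status register reports that
 * a packet triggered the wake, the stored packet is handed to the stack via
 * igc_deliver_wake_packet() before WUS is cleared.
 */
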
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}

static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}

static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}

static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* If there is no link, schedule a delayed runtime suspend; returning
	 * -EBUSY keeps the PM core from suspending the device immediately.
	 */
	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */

static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first half of the igc_resume routine.
 **/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
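
/* The AER core drives these handlers in sequence: igc_io_error_detected()
 * first, then igc_io_slot_reset() once the link has been reset, and finally
 * igc_io_resume() when traffic may flow again (see igc_err_handler below).
 */
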
/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second half of the igc_resume routine.
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif

static struct pci_driver igc_driver = {
	.name     = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe    = igc_probe,
	.remove   = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};

/**
 * igc_reinit_queues - reinitialize the device queues
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative on failure
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}

/**
 * igc_get_hw_dev - return the net_device backing a hardware structure
 * @hw: pointer to hardware structure
 *
 * Used by the hardware layer to print debugging information.
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}
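
/* The ring enable/disable helpers below quiesce and restart individual
 * queues at run time, for example when an AF_XDP buffer pool is attached or
 * detached, without resetting the whole adapter.  The SWFLUSH bit requests a
 * software flush of the queue's pending descriptors while it is disabled.
 */
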
static void igc_disable_rx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 rxdctl;

	rxdctl = rd32(IGC_RXDCTL(idx));
	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGC_RXDCTL_SWFLUSH;
	wr32(IGC_RXDCTL(idx), rxdctl);
}

void igc_disable_rx_ring(struct igc_ring *ring)
{
	igc_disable_rx_ring_hw(ring);
	igc_clean_rx_ring(ring);
}

void igc_enable_rx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_rx_ring(adapter, ring);

	if (ring->xsk_pool)
		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
	else
		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}

static void igc_disable_tx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 txdctl;

	txdctl = rd32(IGC_TXDCTL(idx));
	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
	txdctl |= IGC_TXDCTL_SWFLUSH;
	wr32(IGC_TXDCTL(idx), txdctl);
}

void igc_disable_tx_ring(struct igc_ring *ring)
{
	igc_disable_tx_ring_hw(ring);
	igc_clean_tx_ring(ring);
}

void igc_enable_tx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_tx_ring(adapter, ring);
}

/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);
	return ret;
}

module_init(igc_init_module);

/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);
/* igc_main.c */