1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018 Intel Corporation */ 3 4 #include <linux/module.h> 5 #include <linux/types.h> 6 #include <linux/if_vlan.h> 7 #include <linux/aer.h> 8 #include <linux/tcp.h> 9 #include <linux/udp.h> 10 #include <linux/ip.h> 11 #include <linux/pm_runtime.h> 12 #include <net/pkt_sched.h> 13 #include <linux/bpf_trace.h> 14 #include <net/xdp_sock_drv.h> 15 #include <linux/pci.h> 16 17 #include <net/ipv6.h> 18 19 #include "igc.h" 20 #include "igc_hw.h" 21 #include "igc_tsn.h" 22 #include "igc_xdp.h" 23 24 #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" 25 26 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 27 28 #define IGC_XDP_PASS 0 29 #define IGC_XDP_CONSUMED BIT(0) 30 #define IGC_XDP_TX BIT(1) 31 #define IGC_XDP_REDIRECT BIT(2) 32 33 static int debug = -1; 34 35 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 36 MODULE_DESCRIPTION(DRV_SUMMARY); 37 MODULE_LICENSE("GPL v2"); 38 module_param(debug, int, 0); 39 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 40 41 char igc_driver_name[] = "igc"; 42 static const char igc_driver_string[] = DRV_SUMMARY; 43 static const char igc_copyright[] = 44 "Copyright(c) 2018 Intel Corporation."; 45 46 static const struct igc_info *igc_info_tbl[] = { 47 [board_base] = &igc_base_info, 48 }; 49 50 static const struct pci_device_id igc_pci_tbl[] = { 51 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, 52 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, 53 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, 54 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, 55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, 56 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, 57 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, 58 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, 59 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, 60 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, 61 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, 62 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, 63 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, 64 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, 65 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, 66 /* required last entry */ 67 {0, } 68 }; 69 70 MODULE_DEVICE_TABLE(pci, igc_pci_tbl); 71 72 enum latency_range { 73 lowest_latency = 0, 74 low_latency = 1, 75 bulk_latency = 2, 76 latency_invalid = 255 77 }; 78 79 void igc_reset(struct igc_adapter *adapter) 80 { 81 struct net_device *dev = adapter->netdev; 82 struct igc_hw *hw = &adapter->hw; 83 struct igc_fc_info *fc = &hw->fc; 84 u32 pba, hwm; 85 86 /* Repartition PBA for greater than 9k MTU if required */ 87 pba = IGC_PBA_34K; 88 89 /* flow control settings 90 * The high water mark must be low enough to fit one full frame 91 * after transmitting the pause frame. As such we must have enough 92 * space to allow for us to complete our current transmit and then 93 * receive the frame that is in progress from the link partner. 94 * Set it to: 95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame 96 */ 97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); 98 99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ 100 fc->low_water = fc->high_water - 16; 101 fc->pause_time = 0xFFFF; 102 fc->send_xon = 1; 103 fc->current_mode = fc->requested_mode; 104 105 hw->mac.ops.reset_hw(hw); 106 107 if (hw->mac.ops.init_hw(hw)) 108 netdev_err(dev, "Error on hardware initialization\n"); 109 110 /* Re-establish EEE setting */ 111 igc_set_eee_i225(hw, true, true, true); 112 113 if (!netif_running(adapter->netdev)) 114 igc_power_down_phy_copper_base(&adapter->hw); 115 116 /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ 117 wr32(IGC_VET, ETH_P_8021Q); 118 119 /* Re-enable PTP, where applicable. */ 120 igc_ptp_reset(adapter); 121 122 /* Re-enable TSN offloading, where applicable. */ 123 igc_tsn_reset(adapter); 124 125 igc_get_phy_info(hw); 126 } 127 128 /** 129 * igc_power_up_link - Power up the phy link 130 * @adapter: address of board private structure 131 */ 132 static void igc_power_up_link(struct igc_adapter *adapter) 133 { 134 igc_reset_phy(&adapter->hw); 135 136 igc_power_up_phy_copper(&adapter->hw); 137 138 igc_setup_link(&adapter->hw); 139 } 140 141 /** 142 * igc_release_hw_control - release control of the h/w to f/w 143 * @adapter: address of board private structure 144 * 145 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. 146 * For ASF and Pass Through versions of f/w this means that the 147 * driver is no longer loaded. 148 */ 149 static void igc_release_hw_control(struct igc_adapter *adapter) 150 { 151 struct igc_hw *hw = &adapter->hw; 152 u32 ctrl_ext; 153 154 if (!pci_device_is_present(adapter->pdev)) 155 return; 156 157 /* Let firmware take over control of h/w */ 158 ctrl_ext = rd32(IGC_CTRL_EXT); 159 wr32(IGC_CTRL_EXT, 160 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 161 } 162 163 /** 164 * igc_get_hw_control - get control of the h/w from f/w 165 * @adapter: address of board private structure 166 * 167 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. 168 * For ASF and Pass Through versions of f/w this means that 169 * the driver is loaded. 170 */ 171 static void igc_get_hw_control(struct igc_adapter *adapter) 172 { 173 struct igc_hw *hw = &adapter->hw; 174 u32 ctrl_ext; 175 176 /* Let firmware know the driver has taken over */ 177 ctrl_ext = rd32(IGC_CTRL_EXT); 178 wr32(IGC_CTRL_EXT, 179 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 180 } 181 182 static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) 183 { 184 dma_unmap_single(dev, dma_unmap_addr(buf, dma), 185 dma_unmap_len(buf, len), DMA_TO_DEVICE); 186 187 dma_unmap_len_set(buf, len, 0); 188 } 189 190 /** 191 * igc_clean_tx_ring - Free Tx Buffers 192 * @tx_ring: ring to be cleaned 193 */ 194 static void igc_clean_tx_ring(struct igc_ring *tx_ring) 195 { 196 u16 i = tx_ring->next_to_clean; 197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; 198 u32 xsk_frames = 0; 199 200 while (i != tx_ring->next_to_use) { 201 union igc_adv_tx_desc *eop_desc, *tx_desc; 202 203 switch (tx_buffer->type) { 204 case IGC_TX_BUFFER_TYPE_XSK: 205 xsk_frames++; 206 break; 207 case IGC_TX_BUFFER_TYPE_XDP: 208 xdp_return_frame(tx_buffer->xdpf); 209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 210 break; 211 case IGC_TX_BUFFER_TYPE_SKB: 212 dev_kfree_skb_any(tx_buffer->skb); 213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 214 break; 215 default: 216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 217 break; 218 } 219 220 /* check for eop_desc to determine the end of the packet */ 221 eop_desc = tx_buffer->next_to_watch; 222 tx_desc = IGC_TX_DESC(tx_ring, i); 223 224 /* unmap remaining buffers */ 225 while (tx_desc != eop_desc) { 226 tx_buffer++; 227 tx_desc++; 228 i++; 229 if (unlikely(i == tx_ring->count)) { 230 i = 0; 231 tx_buffer = tx_ring->tx_buffer_info; 232 tx_desc = IGC_TX_DESC(tx_ring, 0); 233 } 234 235 /* unmap any remaining paged data */ 236 if (dma_unmap_len(tx_buffer, len)) 237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 238 } 239 240 tx_buffer->next_to_watch = NULL; 241 242 /* move us one more past the eop_desc for start of next pkt */ 243 tx_buffer++; 244 i++; 245 if (unlikely(i == tx_ring->count)) { 246 i = 0; 247 tx_buffer = tx_ring->tx_buffer_info; 248 } 249 } 250 251 if (tx_ring->xsk_pool && xsk_frames) 252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 253 254 /* reset BQL for queue */ 255 netdev_tx_reset_queue(txring_txq(tx_ring)); 256 257 /* reset next_to_use and next_to_clean */ 258 tx_ring->next_to_use = 0; 259 tx_ring->next_to_clean = 0; 260 } 261 262 /** 263 * igc_free_tx_resources - Free Tx Resources per Queue 264 * @tx_ring: Tx descriptor ring for a specific queue 265 * 266 * Free all transmit software resources 267 */ 268 void igc_free_tx_resources(struct igc_ring *tx_ring) 269 { 270 igc_clean_tx_ring(tx_ring); 271 272 vfree(tx_ring->tx_buffer_info); 273 tx_ring->tx_buffer_info = NULL; 274 275 /* if not set, then don't free */ 276 if (!tx_ring->desc) 277 return; 278 279 dma_free_coherent(tx_ring->dev, tx_ring->size, 280 tx_ring->desc, tx_ring->dma); 281 282 tx_ring->desc = NULL; 283 } 284 285 /** 286 * igc_free_all_tx_resources - Free Tx Resources for All Queues 287 * @adapter: board private structure 288 * 289 * Free all transmit software resources 290 */ 291 static void igc_free_all_tx_resources(struct igc_adapter *adapter) 292 { 293 int i; 294 295 for (i = 0; i < adapter->num_tx_queues; i++) 296 igc_free_tx_resources(adapter->tx_ring[i]); 297 } 298 299 /** 300 * igc_clean_all_tx_rings - Free Tx Buffers for all queues 301 * @adapter: board private structure 302 */ 303 static void igc_clean_all_tx_rings(struct igc_adapter *adapter) 304 { 305 int i; 306 307 for (i = 0; i < adapter->num_tx_queues; i++) 308 if (adapter->tx_ring[i]) 309 igc_clean_tx_ring(adapter->tx_ring[i]); 310 } 311 312 /** 313 * igc_setup_tx_resources - allocate Tx resources (Descriptors) 314 * @tx_ring: tx descriptor ring (for a specific queue) to setup 315 * 316 * Return 0 on success, negative on failure 317 */ 318 int igc_setup_tx_resources(struct igc_ring *tx_ring) 319 { 320 struct net_device *ndev = tx_ring->netdev; 321 struct device *dev = tx_ring->dev; 322 int size = 0; 323 324 size = sizeof(struct igc_tx_buffer) * tx_ring->count; 325 tx_ring->tx_buffer_info = vzalloc(size); 326 if (!tx_ring->tx_buffer_info) 327 goto err; 328 329 /* round up to nearest 4K */ 330 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); 331 tx_ring->size = ALIGN(tx_ring->size, 4096); 332 333 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 334 &tx_ring->dma, GFP_KERNEL); 335 336 if (!tx_ring->desc) 337 goto err; 338 339 tx_ring->next_to_use = 0; 340 tx_ring->next_to_clean = 0; 341 342 return 0; 343 344 err: 345 vfree(tx_ring->tx_buffer_info); 346 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); 347 return -ENOMEM; 348 } 349 350 /** 351 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues 352 * @adapter: board private structure 353 * 354 * Return 0 on success, negative on failure 355 */ 356 static int igc_setup_all_tx_resources(struct igc_adapter *adapter) 357 { 358 struct net_device *dev = adapter->netdev; 359 int i, err = 0; 360 361 for (i = 0; i < adapter->num_tx_queues; i++) { 362 err = igc_setup_tx_resources(adapter->tx_ring[i]); 363 if (err) { 364 netdev_err(dev, "Error on Tx queue %u setup\n", i); 365 for (i--; i >= 0; i--) 366 igc_free_tx_resources(adapter->tx_ring[i]); 367 break; 368 } 369 } 370 371 return err; 372 } 373 374 static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) 375 { 376 u16 i = rx_ring->next_to_clean; 377 378 dev_kfree_skb(rx_ring->skb); 379 rx_ring->skb = NULL; 380 381 /* Free all the Rx ring sk_buffs */ 382 while (i != rx_ring->next_to_alloc) { 383 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 384 385 /* Invalidate cache lines that may have been written to by 386 * device so that we avoid corrupting memory. 387 */ 388 dma_sync_single_range_for_cpu(rx_ring->dev, 389 buffer_info->dma, 390 buffer_info->page_offset, 391 igc_rx_bufsz(rx_ring), 392 DMA_FROM_DEVICE); 393 394 /* free resources associated with mapping */ 395 dma_unmap_page_attrs(rx_ring->dev, 396 buffer_info->dma, 397 igc_rx_pg_size(rx_ring), 398 DMA_FROM_DEVICE, 399 IGC_RX_DMA_ATTR); 400 __page_frag_cache_drain(buffer_info->page, 401 buffer_info->pagecnt_bias); 402 403 i++; 404 if (i == rx_ring->count) 405 i = 0; 406 } 407 } 408 409 static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) 410 { 411 struct igc_rx_buffer *bi; 412 u16 i; 413 414 for (i = 0; i < ring->count; i++) { 415 bi = &ring->rx_buffer_info[i]; 416 if (!bi->xdp) 417 continue; 418 419 xsk_buff_free(bi->xdp); 420 bi->xdp = NULL; 421 } 422 } 423 424 /** 425 * igc_clean_rx_ring - Free Rx Buffers per Queue 426 * @ring: ring to free buffers from 427 */ 428 static void igc_clean_rx_ring(struct igc_ring *ring) 429 { 430 if (ring->xsk_pool) 431 igc_clean_rx_ring_xsk_pool(ring); 432 else 433 igc_clean_rx_ring_page_shared(ring); 434 435 clear_ring_uses_large_buffer(ring); 436 437 ring->next_to_alloc = 0; 438 ring->next_to_clean = 0; 439 ring->next_to_use = 0; 440 } 441 442 /** 443 * igc_clean_all_rx_rings - Free Rx Buffers for all queues 444 * @adapter: board private structure 445 */ 446 static void igc_clean_all_rx_rings(struct igc_adapter *adapter) 447 { 448 int i; 449 450 for (i = 0; i < adapter->num_rx_queues; i++) 451 if (adapter->rx_ring[i]) 452 igc_clean_rx_ring(adapter->rx_ring[i]); 453 } 454 455 /** 456 * igc_free_rx_resources - Free Rx Resources 457 * @rx_ring: ring to clean the resources from 458 * 459 * Free all receive software resources 460 */ 461 void igc_free_rx_resources(struct igc_ring *rx_ring) 462 { 463 igc_clean_rx_ring(rx_ring); 464 465 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 466 467 vfree(rx_ring->rx_buffer_info); 468 rx_ring->rx_buffer_info = NULL; 469 470 /* if not set, then don't free */ 471 if (!rx_ring->desc) 472 return; 473 474 dma_free_coherent(rx_ring->dev, rx_ring->size, 475 rx_ring->desc, rx_ring->dma); 476 477 rx_ring->desc = NULL; 478 } 479 480 /** 481 * igc_free_all_rx_resources - Free Rx Resources for All Queues 482 * @adapter: board private structure 483 * 484 * Free all receive software resources 485 */ 486 static void igc_free_all_rx_resources(struct igc_adapter *adapter) 487 { 488 int i; 489 490 for (i = 0; i < adapter->num_rx_queues; i++) 491 igc_free_rx_resources(adapter->rx_ring[i]); 492 } 493 494 /** 495 * igc_setup_rx_resources - allocate Rx resources (Descriptors) 496 * @rx_ring: rx descriptor ring (for a specific queue) to setup 497 * 498 * Returns 0 on success, negative on failure 499 */ 500 int igc_setup_rx_resources(struct igc_ring *rx_ring) 501 { 502 struct net_device *ndev = rx_ring->netdev; 503 struct device *dev = rx_ring->dev; 504 u8 index = rx_ring->queue_index; 505 int size, desc_len, res; 506 507 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, 508 rx_ring->q_vector->napi.napi_id); 509 if (res < 0) { 510 netdev_err(ndev, "Failed to register xdp_rxq index %u\n", 511 index); 512 return res; 513 } 514 515 size = sizeof(struct igc_rx_buffer) * rx_ring->count; 516 rx_ring->rx_buffer_info = vzalloc(size); 517 if (!rx_ring->rx_buffer_info) 518 goto err; 519 520 desc_len = sizeof(union igc_adv_rx_desc); 521 522 /* Round up to nearest 4K */ 523 rx_ring->size = rx_ring->count * desc_len; 524 rx_ring->size = ALIGN(rx_ring->size, 4096); 525 526 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 527 &rx_ring->dma, GFP_KERNEL); 528 529 if (!rx_ring->desc) 530 goto err; 531 532 rx_ring->next_to_alloc = 0; 533 rx_ring->next_to_clean = 0; 534 rx_ring->next_to_use = 0; 535 536 return 0; 537 538 err: 539 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 540 vfree(rx_ring->rx_buffer_info); 541 rx_ring->rx_buffer_info = NULL; 542 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); 543 return -ENOMEM; 544 } 545 546 /** 547 * igc_setup_all_rx_resources - wrapper to allocate Rx resources 548 * (Descriptors) for all queues 549 * @adapter: board private structure 550 * 551 * Return 0 on success, negative on failure 552 */ 553 static int igc_setup_all_rx_resources(struct igc_adapter *adapter) 554 { 555 struct net_device *dev = adapter->netdev; 556 int i, err = 0; 557 558 for (i = 0; i < adapter->num_rx_queues; i++) { 559 err = igc_setup_rx_resources(adapter->rx_ring[i]); 560 if (err) { 561 netdev_err(dev, "Error on Rx queue %u setup\n", i); 562 for (i--; i >= 0; i--) 563 igc_free_rx_resources(adapter->rx_ring[i]); 564 break; 565 } 566 } 567 568 return err; 569 } 570 571 static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, 572 struct igc_ring *ring) 573 { 574 if (!igc_xdp_is_enabled(adapter) || 575 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) 576 return NULL; 577 578 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); 579 } 580 581 /** 582 * igc_configure_rx_ring - Configure a receive ring after Reset 583 * @adapter: board private structure 584 * @ring: receive ring to be configured 585 * 586 * Configure the Rx unit of the MAC after a reset. 587 */ 588 static void igc_configure_rx_ring(struct igc_adapter *adapter, 589 struct igc_ring *ring) 590 { 591 struct igc_hw *hw = &adapter->hw; 592 union igc_adv_rx_desc *rx_desc; 593 int reg_idx = ring->reg_idx; 594 u32 srrctl = 0, rxdctl = 0; 595 u64 rdba = ring->dma; 596 u32 buf_size; 597 598 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 599 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 600 if (ring->xsk_pool) { 601 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 602 MEM_TYPE_XSK_BUFF_POOL, 603 NULL)); 604 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); 605 } else { 606 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 607 MEM_TYPE_PAGE_SHARED, 608 NULL)); 609 } 610 611 if (igc_xdp_is_enabled(adapter)) 612 set_ring_uses_large_buffer(ring); 613 614 /* disable the queue */ 615 wr32(IGC_RXDCTL(reg_idx), 0); 616 617 /* Set DMA base address registers */ 618 wr32(IGC_RDBAL(reg_idx), 619 rdba & 0x00000000ffffffffULL); 620 wr32(IGC_RDBAH(reg_idx), rdba >> 32); 621 wr32(IGC_RDLEN(reg_idx), 622 ring->count * sizeof(union igc_adv_rx_desc)); 623 624 /* initialize head and tail */ 625 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); 626 wr32(IGC_RDH(reg_idx), 0); 627 writel(0, ring->tail); 628 629 /* reset next-to- use/clean to place SW in sync with hardware */ 630 ring->next_to_clean = 0; 631 ring->next_to_use = 0; 632 633 if (ring->xsk_pool) 634 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); 635 else if (ring_uses_large_buffer(ring)) 636 buf_size = IGC_RXBUFFER_3072; 637 else 638 buf_size = IGC_RXBUFFER_2048; 639 640 srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; 641 srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; 642 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 643 644 wr32(IGC_SRRCTL(reg_idx), srrctl); 645 646 rxdctl |= IGC_RX_PTHRESH; 647 rxdctl |= IGC_RX_HTHRESH << 8; 648 rxdctl |= IGC_RX_WTHRESH << 16; 649 650 /* initialize rx_buffer_info */ 651 memset(ring->rx_buffer_info, 0, 652 sizeof(struct igc_rx_buffer) * ring->count); 653 654 /* initialize Rx descriptor 0 */ 655 rx_desc = IGC_RX_DESC(ring, 0); 656 rx_desc->wb.upper.length = 0; 657 658 /* enable receive descriptor fetching */ 659 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 660 661 wr32(IGC_RXDCTL(reg_idx), rxdctl); 662 } 663 664 /** 665 * igc_configure_rx - Configure receive Unit after Reset 666 * @adapter: board private structure 667 * 668 * Configure the Rx unit of the MAC after a reset. 669 */ 670 static void igc_configure_rx(struct igc_adapter *adapter) 671 { 672 int i; 673 674 /* Setup the HW Rx Head and Tail Descriptor Pointers and 675 * the Base and Length of the Rx Descriptor Ring 676 */ 677 for (i = 0; i < adapter->num_rx_queues; i++) 678 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); 679 } 680 681 /** 682 * igc_configure_tx_ring - Configure transmit ring after Reset 683 * @adapter: board private structure 684 * @ring: tx ring to configure 685 * 686 * Configure a transmit ring after a reset. 687 */ 688 static void igc_configure_tx_ring(struct igc_adapter *adapter, 689 struct igc_ring *ring) 690 { 691 struct igc_hw *hw = &adapter->hw; 692 int reg_idx = ring->reg_idx; 693 u64 tdba = ring->dma; 694 u32 txdctl = 0; 695 696 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); 697 698 /* disable the queue */ 699 wr32(IGC_TXDCTL(reg_idx), 0); 700 wrfl(); 701 mdelay(10); 702 703 wr32(IGC_TDLEN(reg_idx), 704 ring->count * sizeof(union igc_adv_tx_desc)); 705 wr32(IGC_TDBAL(reg_idx), 706 tdba & 0x00000000ffffffffULL); 707 wr32(IGC_TDBAH(reg_idx), tdba >> 32); 708 709 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); 710 wr32(IGC_TDH(reg_idx), 0); 711 writel(0, ring->tail); 712 713 txdctl |= IGC_TX_PTHRESH; 714 txdctl |= IGC_TX_HTHRESH << 8; 715 txdctl |= IGC_TX_WTHRESH << 16; 716 717 txdctl |= IGC_TXDCTL_QUEUE_ENABLE; 718 wr32(IGC_TXDCTL(reg_idx), txdctl); 719 } 720 721 /** 722 * igc_configure_tx - Configure transmit Unit after Reset 723 * @adapter: board private structure 724 * 725 * Configure the Tx unit of the MAC after a reset. 726 */ 727 static void igc_configure_tx(struct igc_adapter *adapter) 728 { 729 int i; 730 731 for (i = 0; i < adapter->num_tx_queues; i++) 732 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); 733 } 734 735 /** 736 * igc_setup_mrqc - configure the multiple receive queue control registers 737 * @adapter: Board private structure 738 */ 739 static void igc_setup_mrqc(struct igc_adapter *adapter) 740 { 741 struct igc_hw *hw = &adapter->hw; 742 u32 j, num_rx_queues; 743 u32 mrqc, rxcsum; 744 u32 rss_key[10]; 745 746 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 747 for (j = 0; j < 10; j++) 748 wr32(IGC_RSSRK(j), rss_key[j]); 749 750 num_rx_queues = adapter->rss_queues; 751 752 if (adapter->rss_indir_tbl_init != num_rx_queues) { 753 for (j = 0; j < IGC_RETA_SIZE; j++) 754 adapter->rss_indir_tbl[j] = 755 (j * num_rx_queues) / IGC_RETA_SIZE; 756 adapter->rss_indir_tbl_init = num_rx_queues; 757 } 758 igc_write_rss_indir_tbl(adapter); 759 760 /* Disable raw packet checksumming so that RSS hash is placed in 761 * descriptor on writeback. No need to enable TCP/UDP/IP checksum 762 * offloads as they are enabled by default 763 */ 764 rxcsum = rd32(IGC_RXCSUM); 765 rxcsum |= IGC_RXCSUM_PCSD; 766 767 /* Enable Receive Checksum Offload for SCTP */ 768 rxcsum |= IGC_RXCSUM_CRCOFL; 769 770 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 771 wr32(IGC_RXCSUM, rxcsum); 772 773 /* Generate RSS hash based on packet types, TCP/UDP 774 * port numbers and/or IPv4/v6 src and dst addresses 775 */ 776 mrqc = IGC_MRQC_RSS_FIELD_IPV4 | 777 IGC_MRQC_RSS_FIELD_IPV4_TCP | 778 IGC_MRQC_RSS_FIELD_IPV6 | 779 IGC_MRQC_RSS_FIELD_IPV6_TCP | 780 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; 781 782 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) 783 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; 784 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) 785 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; 786 787 mrqc |= IGC_MRQC_ENABLE_RSS_MQ; 788 789 wr32(IGC_MRQC, mrqc); 790 } 791 792 /** 793 * igc_setup_rctl - configure the receive control registers 794 * @adapter: Board private structure 795 */ 796 static void igc_setup_rctl(struct igc_adapter *adapter) 797 { 798 struct igc_hw *hw = &adapter->hw; 799 u32 rctl; 800 801 rctl = rd32(IGC_RCTL); 802 803 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 804 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); 805 806 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | 807 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 808 809 /* enable stripping of CRC. Newer features require 810 * that the HW strips the CRC. 811 */ 812 rctl |= IGC_RCTL_SECRC; 813 814 /* disable store bad packets and clear size bits. */ 815 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); 816 817 /* enable LPE to allow for reception of jumbo frames */ 818 rctl |= IGC_RCTL_LPE; 819 820 /* disable queue 0 to prevent tail write w/o re-config */ 821 wr32(IGC_RXDCTL(0), 0); 822 823 /* This is useful for sniffing bad packets. */ 824 if (adapter->netdev->features & NETIF_F_RXALL) { 825 /* UPE and MPE will be handled by normal PROMISC logic 826 * in set_rx_mode 827 */ 828 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ 829 IGC_RCTL_BAM | /* RX All Bcast Pkts */ 830 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 831 832 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ 833 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ 834 } 835 836 wr32(IGC_RCTL, rctl); 837 } 838 839 /** 840 * igc_setup_tctl - configure the transmit control registers 841 * @adapter: Board private structure 842 */ 843 static void igc_setup_tctl(struct igc_adapter *adapter) 844 { 845 struct igc_hw *hw = &adapter->hw; 846 u32 tctl; 847 848 /* disable queue 0 which icould be enabled by default */ 849 wr32(IGC_TXDCTL(0), 0); 850 851 /* Program the Transmit Control Register */ 852 tctl = rd32(IGC_TCTL); 853 tctl &= ~IGC_TCTL_CT; 854 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | 855 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); 856 857 /* Enable transmits */ 858 tctl |= IGC_TCTL_EN; 859 860 wr32(IGC_TCTL, tctl); 861 } 862 863 /** 864 * igc_set_mac_filter_hw() - Set MAC address filter in hardware 865 * @adapter: Pointer to adapter where the filter should be set 866 * @index: Filter index 867 * @type: MAC address filter type (source or destination) 868 * @addr: MAC address 869 * @queue: If non-negative, queue assignment feature is enabled and frames 870 * matching the filter are enqueued onto 'queue'. Otherwise, queue 871 * assignment is disabled. 872 */ 873 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, 874 enum igc_mac_filter_type type, 875 const u8 *addr, int queue) 876 { 877 struct net_device *dev = adapter->netdev; 878 struct igc_hw *hw = &adapter->hw; 879 u32 ral, rah; 880 881 if (WARN_ON(index >= hw->mac.rar_entry_count)) 882 return; 883 884 ral = le32_to_cpup((__le32 *)(addr)); 885 rah = le16_to_cpup((__le16 *)(addr + 4)); 886 887 if (type == IGC_MAC_FILTER_TYPE_SRC) { 888 rah &= ~IGC_RAH_ASEL_MASK; 889 rah |= IGC_RAH_ASEL_SRC_ADDR; 890 } 891 892 if (queue >= 0) { 893 rah &= ~IGC_RAH_QSEL_MASK; 894 rah |= (queue << IGC_RAH_QSEL_SHIFT); 895 rah |= IGC_RAH_QSEL_ENABLE; 896 } 897 898 rah |= IGC_RAH_AV; 899 900 wr32(IGC_RAL(index), ral); 901 wr32(IGC_RAH(index), rah); 902 903 netdev_dbg(dev, "MAC address filter set in HW: index %d", index); 904 } 905 906 /** 907 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware 908 * @adapter: Pointer to adapter where the filter should be cleared 909 * @index: Filter index 910 */ 911 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) 912 { 913 struct net_device *dev = adapter->netdev; 914 struct igc_hw *hw = &adapter->hw; 915 916 if (WARN_ON(index >= hw->mac.rar_entry_count)) 917 return; 918 919 wr32(IGC_RAL(index), 0); 920 wr32(IGC_RAH(index), 0); 921 922 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); 923 } 924 925 /* Set default MAC address for the PF in the first RAR entry */ 926 static void igc_set_default_mac_filter(struct igc_adapter *adapter) 927 { 928 struct net_device *dev = adapter->netdev; 929 u8 *addr = adapter->hw.mac.addr; 930 931 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); 932 933 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); 934 } 935 936 /** 937 * igc_set_mac - Change the Ethernet Address of the NIC 938 * @netdev: network interface device structure 939 * @p: pointer to an address structure 940 * 941 * Returns 0 on success, negative on failure 942 */ 943 static int igc_set_mac(struct net_device *netdev, void *p) 944 { 945 struct igc_adapter *adapter = netdev_priv(netdev); 946 struct igc_hw *hw = &adapter->hw; 947 struct sockaddr *addr = p; 948 949 if (!is_valid_ether_addr(addr->sa_data)) 950 return -EADDRNOTAVAIL; 951 952 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 953 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 954 955 /* set the correct pool for the new PF MAC address in entry 0 */ 956 igc_set_default_mac_filter(adapter); 957 958 return 0; 959 } 960 961 /** 962 * igc_write_mc_addr_list - write multicast addresses to MTA 963 * @netdev: network interface device structure 964 * 965 * Writes multicast address list to the MTA hash table. 966 * Returns: -ENOMEM on failure 967 * 0 on no addresses written 968 * X on writing X addresses to MTA 969 **/ 970 static int igc_write_mc_addr_list(struct net_device *netdev) 971 { 972 struct igc_adapter *adapter = netdev_priv(netdev); 973 struct igc_hw *hw = &adapter->hw; 974 struct netdev_hw_addr *ha; 975 u8 *mta_list; 976 int i; 977 978 if (netdev_mc_empty(netdev)) { 979 /* nothing to program, so clear mc list */ 980 igc_update_mc_addr_list(hw, NULL, 0); 981 return 0; 982 } 983 984 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); 985 if (!mta_list) 986 return -ENOMEM; 987 988 /* The shared function expects a packed array of only addresses. */ 989 i = 0; 990 netdev_for_each_mc_addr(ha, netdev) 991 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 992 993 igc_update_mc_addr_list(hw, mta_list, i); 994 kfree(mta_list); 995 996 return netdev_mc_count(netdev); 997 } 998 999 static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) 1000 { 1001 ktime_t cycle_time = adapter->cycle_time; 1002 ktime_t base_time = adapter->base_time; 1003 u32 launchtime; 1004 1005 /* FIXME: when using ETF together with taprio, we may have a 1006 * case where 'delta' is larger than the cycle_time, this may 1007 * cause problems if we don't read the current value of 1008 * IGC_BASET, as the value writen into the launchtime 1009 * descriptor field may be misinterpreted. 1010 */ 1011 div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); 1012 1013 return cpu_to_le32(launchtime); 1014 } 1015 1016 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, 1017 struct igc_tx_buffer *first, 1018 u32 vlan_macip_lens, u32 type_tucmd, 1019 u32 mss_l4len_idx) 1020 { 1021 struct igc_adv_tx_context_desc *context_desc; 1022 u16 i = tx_ring->next_to_use; 1023 1024 context_desc = IGC_TX_CTXTDESC(tx_ring, i); 1025 1026 i++; 1027 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 1028 1029 /* set bits to identify this as an advanced context descriptor */ 1030 type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; 1031 1032 /* For i225, context index must be unique per ring. */ 1033 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) 1034 mss_l4len_idx |= tx_ring->reg_idx << 4; 1035 1036 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 1037 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 1038 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1039 1040 /* We assume there is always a valid Tx time available. Invalid times 1041 * should have been handled by the upper layers. 1042 */ 1043 if (tx_ring->launchtime_enable) { 1044 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 1045 ktime_t txtime = first->skb->tstamp; 1046 1047 skb_txtime_consumed(first->skb); 1048 context_desc->launch_time = igc_tx_launchtime(adapter, 1049 txtime); 1050 } else { 1051 context_desc->launch_time = 0; 1052 } 1053 } 1054 1055 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) 1056 { 1057 struct sk_buff *skb = first->skb; 1058 u32 vlan_macip_lens = 0; 1059 u32 type_tucmd = 0; 1060 1061 if (skb->ip_summed != CHECKSUM_PARTIAL) { 1062 csum_failed: 1063 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && 1064 !tx_ring->launchtime_enable) 1065 return; 1066 goto no_csum; 1067 } 1068 1069 switch (skb->csum_offset) { 1070 case offsetof(struct tcphdr, check): 1071 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 1072 fallthrough; 1073 case offsetof(struct udphdr, check): 1074 break; 1075 case offsetof(struct sctphdr, checksum): 1076 /* validate that this is actually an SCTP request */ 1077 if (skb_csum_is_sctp(skb)) { 1078 type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; 1079 break; 1080 } 1081 fallthrough; 1082 default: 1083 skb_checksum_help(skb); 1084 goto csum_failed; 1085 } 1086 1087 /* update TX checksum flag */ 1088 first->tx_flags |= IGC_TX_FLAGS_CSUM; 1089 vlan_macip_lens = skb_checksum_start_offset(skb) - 1090 skb_network_offset(skb); 1091 no_csum: 1092 vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; 1093 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1094 1095 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); 1096 } 1097 1098 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 1099 { 1100 struct net_device *netdev = tx_ring->netdev; 1101 1102 netif_stop_subqueue(netdev, tx_ring->queue_index); 1103 1104 /* memory barriier comment */ 1105 smp_mb(); 1106 1107 /* We need to check again in a case another CPU has just 1108 * made room available. 1109 */ 1110 if (igc_desc_unused(tx_ring) < size) 1111 return -EBUSY; 1112 1113 /* A reprieve! */ 1114 netif_wake_subqueue(netdev, tx_ring->queue_index); 1115 1116 u64_stats_update_begin(&tx_ring->tx_syncp2); 1117 tx_ring->tx_stats.restart_queue2++; 1118 u64_stats_update_end(&tx_ring->tx_syncp2); 1119 1120 return 0; 1121 } 1122 1123 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 1124 { 1125 if (igc_desc_unused(tx_ring) >= size) 1126 return 0; 1127 return __igc_maybe_stop_tx(tx_ring, size); 1128 } 1129 1130 #define IGC_SET_FLAG(_input, _flag, _result) \ 1131 (((_flag) <= (_result)) ? \ 1132 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ 1133 ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) 1134 1135 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 1136 { 1137 /* set type for advanced descriptor with frame checksum insertion */ 1138 u32 cmd_type = IGC_ADVTXD_DTYP_DATA | 1139 IGC_ADVTXD_DCMD_DEXT | 1140 IGC_ADVTXD_DCMD_IFCS; 1141 1142 /* set HW vlan bit if vlan is present */ 1143 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, 1144 IGC_ADVTXD_DCMD_VLE); 1145 1146 /* set segmentation bits for TSO */ 1147 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, 1148 (IGC_ADVTXD_DCMD_TSE)); 1149 1150 /* set timestamp bit if present */ 1151 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, 1152 (IGC_ADVTXD_MAC_TSTAMP)); 1153 1154 /* insert frame checksum */ 1155 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); 1156 1157 return cmd_type; 1158 } 1159 1160 static void igc_tx_olinfo_status(struct igc_ring *tx_ring, 1161 union igc_adv_tx_desc *tx_desc, 1162 u32 tx_flags, unsigned int paylen) 1163 { 1164 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; 1165 1166 /* insert L4 checksum */ 1167 olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * 1168 ((IGC_TXD_POPTS_TXSM << 8) / 1169 IGC_TX_FLAGS_CSUM); 1170 1171 /* insert IPv4 checksum */ 1172 olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * 1173 (((IGC_TXD_POPTS_IXSM << 8)) / 1174 IGC_TX_FLAGS_IPV4); 1175 1176 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 1177 } 1178 1179 static int igc_tx_map(struct igc_ring *tx_ring, 1180 struct igc_tx_buffer *first, 1181 const u8 hdr_len) 1182 { 1183 struct sk_buff *skb = first->skb; 1184 struct igc_tx_buffer *tx_buffer; 1185 union igc_adv_tx_desc *tx_desc; 1186 u32 tx_flags = first->tx_flags; 1187 skb_frag_t *frag; 1188 u16 i = tx_ring->next_to_use; 1189 unsigned int data_len, size; 1190 dma_addr_t dma; 1191 u32 cmd_type; 1192 1193 cmd_type = igc_tx_cmd_type(skb, tx_flags); 1194 tx_desc = IGC_TX_DESC(tx_ring, i); 1195 1196 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); 1197 1198 size = skb_headlen(skb); 1199 data_len = skb->data_len; 1200 1201 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 1202 1203 tx_buffer = first; 1204 1205 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1206 if (dma_mapping_error(tx_ring->dev, dma)) 1207 goto dma_error; 1208 1209 /* record length, and DMA address */ 1210 dma_unmap_len_set(tx_buffer, len, size); 1211 dma_unmap_addr_set(tx_buffer, dma, dma); 1212 1213 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1214 1215 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { 1216 tx_desc->read.cmd_type_len = 1217 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); 1218 1219 i++; 1220 tx_desc++; 1221 if (i == tx_ring->count) { 1222 tx_desc = IGC_TX_DESC(tx_ring, 0); 1223 i = 0; 1224 } 1225 tx_desc->read.olinfo_status = 0; 1226 1227 dma += IGC_MAX_DATA_PER_TXD; 1228 size -= IGC_MAX_DATA_PER_TXD; 1229 1230 tx_desc->read.buffer_addr = cpu_to_le64(dma); 1231 } 1232 1233 if (likely(!data_len)) 1234 break; 1235 1236 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); 1237 1238 i++; 1239 tx_desc++; 1240 if (i == tx_ring->count) { 1241 tx_desc = IGC_TX_DESC(tx_ring, 0); 1242 i = 0; 1243 } 1244 tx_desc->read.olinfo_status = 0; 1245 1246 size = skb_frag_size(frag); 1247 data_len -= size; 1248 1249 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, 1250 size, DMA_TO_DEVICE); 1251 1252 tx_buffer = &tx_ring->tx_buffer_info[i]; 1253 } 1254 1255 /* write last descriptor with RS and EOP bits */ 1256 cmd_type |= size | IGC_TXD_DCMD; 1257 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 1258 1259 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 1260 1261 /* set the timestamp */ 1262 first->time_stamp = jiffies; 1263 1264 skb_tx_timestamp(skb); 1265 1266 /* Force memory writes to complete before letting h/w know there 1267 * are new descriptors to fetch. (Only applicable for weak-ordered 1268 * memory model archs, such as IA-64). 1269 * 1270 * We also need this memory barrier to make certain all of the 1271 * status bits have been updated before next_to_watch is written. 1272 */ 1273 wmb(); 1274 1275 /* set next_to_watch value indicating a packet is present */ 1276 first->next_to_watch = tx_desc; 1277 1278 i++; 1279 if (i == tx_ring->count) 1280 i = 0; 1281 1282 tx_ring->next_to_use = i; 1283 1284 /* Make sure there is space in the ring for the next send. */ 1285 igc_maybe_stop_tx(tx_ring, DESC_NEEDED); 1286 1287 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 1288 writel(i, tx_ring->tail); 1289 } 1290 1291 return 0; 1292 dma_error: 1293 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); 1294 tx_buffer = &tx_ring->tx_buffer_info[i]; 1295 1296 /* clear dma mappings for failed tx_buffer_info map */ 1297 while (tx_buffer != first) { 1298 if (dma_unmap_len(tx_buffer, len)) 1299 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1300 1301 if (i-- == 0) 1302 i += tx_ring->count; 1303 tx_buffer = &tx_ring->tx_buffer_info[i]; 1304 } 1305 1306 if (dma_unmap_len(tx_buffer, len)) 1307 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 1308 1309 dev_kfree_skb_any(tx_buffer->skb); 1310 tx_buffer->skb = NULL; 1311 1312 tx_ring->next_to_use = i; 1313 1314 return -1; 1315 } 1316 1317 static int igc_tso(struct igc_ring *tx_ring, 1318 struct igc_tx_buffer *first, 1319 u8 *hdr_len) 1320 { 1321 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; 1322 struct sk_buff *skb = first->skb; 1323 union { 1324 struct iphdr *v4; 1325 struct ipv6hdr *v6; 1326 unsigned char *hdr; 1327 } ip; 1328 union { 1329 struct tcphdr *tcp; 1330 struct udphdr *udp; 1331 unsigned char *hdr; 1332 } l4; 1333 u32 paylen, l4_offset; 1334 int err; 1335 1336 if (skb->ip_summed != CHECKSUM_PARTIAL) 1337 return 0; 1338 1339 if (!skb_is_gso(skb)) 1340 return 0; 1341 1342 err = skb_cow_head(skb, 0); 1343 if (err < 0) 1344 return err; 1345 1346 ip.hdr = skb_network_header(skb); 1347 l4.hdr = skb_checksum_start(skb); 1348 1349 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1350 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 1351 1352 /* initialize outer IP header fields */ 1353 if (ip.v4->version == 4) { 1354 unsigned char *csum_start = skb_checksum_start(skb); 1355 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); 1356 1357 /* IP header will have to cancel out any data that 1358 * is not a part of the outer IP header 1359 */ 1360 ip.v4->check = csum_fold(csum_partial(trans_start, 1361 csum_start - trans_start, 1362 0)); 1363 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; 1364 1365 ip.v4->tot_len = 0; 1366 first->tx_flags |= IGC_TX_FLAGS_TSO | 1367 IGC_TX_FLAGS_CSUM | 1368 IGC_TX_FLAGS_IPV4; 1369 } else { 1370 ip.v6->payload_len = 0; 1371 first->tx_flags |= IGC_TX_FLAGS_TSO | 1372 IGC_TX_FLAGS_CSUM; 1373 } 1374 1375 /* determine offset of inner transport header */ 1376 l4_offset = l4.hdr - skb->data; 1377 1378 /* remove payload length from inner checksum */ 1379 paylen = skb->len - l4_offset; 1380 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { 1381 /* compute length of segmentation header */ 1382 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 1383 csum_replace_by_diff(&l4.tcp->check, 1384 (__force __wsum)htonl(paylen)); 1385 } else { 1386 /* compute length of segmentation header */ 1387 *hdr_len = sizeof(*l4.udp) + l4_offset; 1388 csum_replace_by_diff(&l4.udp->check, 1389 (__force __wsum)htonl(paylen)); 1390 } 1391 1392 /* update gso size and bytecount with header size */ 1393 first->gso_segs = skb_shinfo(skb)->gso_segs; 1394 first->bytecount += (first->gso_segs - 1) * *hdr_len; 1395 1396 /* MSS L4LEN IDX */ 1397 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; 1398 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; 1399 1400 /* VLAN MACLEN IPLEN */ 1401 vlan_macip_lens = l4.hdr - ip.hdr; 1402 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; 1403 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1404 1405 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, 1406 type_tucmd, mss_l4len_idx); 1407 1408 return 1; 1409 } 1410 1411 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, 1412 struct igc_ring *tx_ring) 1413 { 1414 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 1415 __be16 protocol = vlan_get_protocol(skb); 1416 struct igc_tx_buffer *first; 1417 u32 tx_flags = 0; 1418 unsigned short f; 1419 u8 hdr_len = 0; 1420 int tso = 0; 1421 1422 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, 1423 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, 1424 * + 2 desc gap to keep tail from touching head, 1425 * + 1 desc for context descriptor, 1426 * otherwise try next time 1427 */ 1428 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 1429 count += TXD_USE_COUNT(skb_frag_size( 1430 &skb_shinfo(skb)->frags[f])); 1431 1432 if (igc_maybe_stop_tx(tx_ring, count + 3)) { 1433 /* this is a hard error */ 1434 return NETDEV_TX_BUSY; 1435 } 1436 1437 /* record the location of the first descriptor for this packet */ 1438 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 1439 first->type = IGC_TX_BUFFER_TYPE_SKB; 1440 first->skb = skb; 1441 first->bytecount = skb->len; 1442 first->gso_segs = 1; 1443 1444 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 1445 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 1446 1447 /* FIXME: add support for retrieving timestamps from 1448 * the other timer registers before skipping the 1449 * timestamping request. 1450 */ 1451 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && 1452 !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, 1453 &adapter->state)) { 1454 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1455 tx_flags |= IGC_TX_FLAGS_TSTAMP; 1456 1457 adapter->ptp_tx_skb = skb_get(skb); 1458 adapter->ptp_tx_start = jiffies; 1459 } else { 1460 adapter->tx_hwtstamp_skipped++; 1461 } 1462 } 1463 1464 if (skb_vlan_tag_present(skb)) { 1465 tx_flags |= IGC_TX_FLAGS_VLAN; 1466 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); 1467 } 1468 1469 /* record initial flags and protocol */ 1470 first->tx_flags = tx_flags; 1471 first->protocol = protocol; 1472 1473 tso = igc_tso(tx_ring, first, &hdr_len); 1474 if (tso < 0) 1475 goto out_drop; 1476 else if (!tso) 1477 igc_tx_csum(tx_ring, first); 1478 1479 igc_tx_map(tx_ring, first, hdr_len); 1480 1481 return NETDEV_TX_OK; 1482 1483 out_drop: 1484 dev_kfree_skb_any(first->skb); 1485 first->skb = NULL; 1486 1487 return NETDEV_TX_OK; 1488 } 1489 1490 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, 1491 struct sk_buff *skb) 1492 { 1493 unsigned int r_idx = skb->queue_mapping; 1494 1495 if (r_idx >= adapter->num_tx_queues) 1496 r_idx = r_idx % adapter->num_tx_queues; 1497 1498 return adapter->tx_ring[r_idx]; 1499 } 1500 1501 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, 1502 struct net_device *netdev) 1503 { 1504 struct igc_adapter *adapter = netdev_priv(netdev); 1505 1506 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 1507 * in order to meet this minimum size requirement. 1508 */ 1509 if (skb->len < 17) { 1510 if (skb_padto(skb, 17)) 1511 return NETDEV_TX_OK; 1512 skb->len = 17; 1513 } 1514 1515 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); 1516 } 1517 1518 static void igc_rx_checksum(struct igc_ring *ring, 1519 union igc_adv_rx_desc *rx_desc, 1520 struct sk_buff *skb) 1521 { 1522 skb_checksum_none_assert(skb); 1523 1524 /* Ignore Checksum bit is set */ 1525 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) 1526 return; 1527 1528 /* Rx checksum disabled via ethtool */ 1529 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 1530 return; 1531 1532 /* TCP/UDP checksum error bit is set */ 1533 if (igc_test_staterr(rx_desc, 1534 IGC_RXDEXT_STATERR_L4E | 1535 IGC_RXDEXT_STATERR_IPE)) { 1536 /* work around errata with sctp packets where the TCPE aka 1537 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 1538 * packets (aka let the stack check the crc32c) 1539 */ 1540 if (!(skb->len == 60 && 1541 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { 1542 u64_stats_update_begin(&ring->rx_syncp); 1543 ring->rx_stats.csum_err++; 1544 u64_stats_update_end(&ring->rx_syncp); 1545 } 1546 /* let the stack verify checksum errors */ 1547 return; 1548 } 1549 /* It must be a TCP or UDP packet with a valid checksum */ 1550 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | 1551 IGC_RXD_STAT_UDPCS)) 1552 skb->ip_summed = CHECKSUM_UNNECESSARY; 1553 1554 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", 1555 le32_to_cpu(rx_desc->wb.upper.status_error)); 1556 } 1557 1558 static inline void igc_rx_hash(struct igc_ring *ring, 1559 union igc_adv_rx_desc *rx_desc, 1560 struct sk_buff *skb) 1561 { 1562 if (ring->netdev->features & NETIF_F_RXHASH) 1563 skb_set_hash(skb, 1564 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), 1565 PKT_HASH_TYPE_L3); 1566 } 1567 1568 static void igc_rx_vlan(struct igc_ring *rx_ring, 1569 union igc_adv_rx_desc *rx_desc, 1570 struct sk_buff *skb) 1571 { 1572 struct net_device *dev = rx_ring->netdev; 1573 u16 vid; 1574 1575 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1576 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { 1577 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && 1578 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) 1579 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); 1580 else 1581 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1582 1583 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1584 } 1585 } 1586 1587 /** 1588 * igc_process_skb_fields - Populate skb header fields from Rx descriptor 1589 * @rx_ring: rx descriptor ring packet is being transacted on 1590 * @rx_desc: pointer to the EOP Rx descriptor 1591 * @skb: pointer to current skb being populated 1592 * 1593 * This function checks the ring, descriptor, and packet information in order 1594 * to populate the hash, checksum, VLAN, protocol, and other fields within the 1595 * skb. 1596 */ 1597 static void igc_process_skb_fields(struct igc_ring *rx_ring, 1598 union igc_adv_rx_desc *rx_desc, 1599 struct sk_buff *skb) 1600 { 1601 igc_rx_hash(rx_ring, rx_desc, skb); 1602 1603 igc_rx_checksum(rx_ring, rx_desc, skb); 1604 1605 igc_rx_vlan(rx_ring, rx_desc, skb); 1606 1607 skb_record_rx_queue(skb, rx_ring->queue_index); 1608 1609 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1610 } 1611 1612 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) 1613 { 1614 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); 1615 struct igc_adapter *adapter = netdev_priv(netdev); 1616 struct igc_hw *hw = &adapter->hw; 1617 u32 ctrl; 1618 1619 ctrl = rd32(IGC_CTRL); 1620 1621 if (enable) { 1622 /* enable VLAN tag insert/strip */ 1623 ctrl |= IGC_CTRL_VME; 1624 } else { 1625 /* disable VLAN tag insert/strip */ 1626 ctrl &= ~IGC_CTRL_VME; 1627 } 1628 wr32(IGC_CTRL, ctrl); 1629 } 1630 1631 static void igc_restore_vlan(struct igc_adapter *adapter) 1632 { 1633 igc_vlan_mode(adapter->netdev, adapter->netdev->features); 1634 } 1635 1636 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, 1637 const unsigned int size, 1638 int *rx_buffer_pgcnt) 1639 { 1640 struct igc_rx_buffer *rx_buffer; 1641 1642 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 1643 *rx_buffer_pgcnt = 1644 #if (PAGE_SIZE < 8192) 1645 page_count(rx_buffer->page); 1646 #else 1647 0; 1648 #endif 1649 prefetchw(rx_buffer->page); 1650 1651 /* we are reusing so sync this buffer for CPU use */ 1652 dma_sync_single_range_for_cpu(rx_ring->dev, 1653 rx_buffer->dma, 1654 rx_buffer->page_offset, 1655 size, 1656 DMA_FROM_DEVICE); 1657 1658 rx_buffer->pagecnt_bias--; 1659 1660 return rx_buffer; 1661 } 1662 1663 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, 1664 unsigned int truesize) 1665 { 1666 #if (PAGE_SIZE < 8192) 1667 buffer->page_offset ^= truesize; 1668 #else 1669 buffer->page_offset += truesize; 1670 #endif 1671 } 1672 1673 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, 1674 unsigned int size) 1675 { 1676 unsigned int truesize; 1677 1678 #if (PAGE_SIZE < 8192) 1679 truesize = igc_rx_pg_size(ring) / 2; 1680 #else 1681 truesize = ring_uses_build_skb(ring) ? 1682 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1683 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1684 SKB_DATA_ALIGN(size); 1685 #endif 1686 return truesize; 1687 } 1688 1689 /** 1690 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff 1691 * @rx_ring: rx descriptor ring to transact packets on 1692 * @rx_buffer: buffer containing page to add 1693 * @skb: sk_buff to place the data into 1694 * @size: size of buffer to be added 1695 * 1696 * This function will add the data contained in rx_buffer->page to the skb. 1697 */ 1698 static void igc_add_rx_frag(struct igc_ring *rx_ring, 1699 struct igc_rx_buffer *rx_buffer, 1700 struct sk_buff *skb, 1701 unsigned int size) 1702 { 1703 unsigned int truesize; 1704 1705 #if (PAGE_SIZE < 8192) 1706 truesize = igc_rx_pg_size(rx_ring) / 2; 1707 #else 1708 truesize = ring_uses_build_skb(rx_ring) ? 1709 SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1710 SKB_DATA_ALIGN(size); 1711 #endif 1712 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1713 rx_buffer->page_offset, size, truesize); 1714 1715 igc_rx_buffer_flip(rx_buffer, truesize); 1716 } 1717 1718 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, 1719 struct igc_rx_buffer *rx_buffer, 1720 union igc_adv_rx_desc *rx_desc, 1721 unsigned int size) 1722 { 1723 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; 1724 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1725 struct sk_buff *skb; 1726 1727 /* prefetch first cache line of first page */ 1728 net_prefetch(va); 1729 1730 /* build an skb around the page buffer */ 1731 skb = build_skb(va - IGC_SKB_PAD, truesize); 1732 if (unlikely(!skb)) 1733 return NULL; 1734 1735 /* update pointers within the skb to store the data */ 1736 skb_reserve(skb, IGC_SKB_PAD); 1737 __skb_put(skb, size); 1738 1739 igc_rx_buffer_flip(rx_buffer, truesize); 1740 return skb; 1741 } 1742 1743 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, 1744 struct igc_rx_buffer *rx_buffer, 1745 struct xdp_buff *xdp, 1746 ktime_t timestamp) 1747 { 1748 unsigned int size = xdp->data_end - xdp->data; 1749 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1750 void *va = xdp->data; 1751 unsigned int headlen; 1752 struct sk_buff *skb; 1753 1754 /* prefetch first cache line of first page */ 1755 net_prefetch(va); 1756 1757 /* allocate a skb to store the frags */ 1758 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); 1759 if (unlikely(!skb)) 1760 return NULL; 1761 1762 if (timestamp) 1763 skb_hwtstamps(skb)->hwtstamp = timestamp; 1764 1765 /* Determine available headroom for copy */ 1766 headlen = size; 1767 if (headlen > IGC_RX_HDR_LEN) 1768 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); 1769 1770 /* align pull length to size of long to optimize memcpy performance */ 1771 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 1772 1773 /* update all of the pointers */ 1774 size -= headlen; 1775 if (size) { 1776 skb_add_rx_frag(skb, 0, rx_buffer->page, 1777 (va + headlen) - page_address(rx_buffer->page), 1778 size, truesize); 1779 igc_rx_buffer_flip(rx_buffer, truesize); 1780 } else { 1781 rx_buffer->pagecnt_bias++; 1782 } 1783 1784 return skb; 1785 } 1786 1787 /** 1788 * igc_reuse_rx_page - page flip buffer and store it back on the ring 1789 * @rx_ring: rx descriptor ring to store buffers on 1790 * @old_buff: donor buffer to have page reused 1791 * 1792 * Synchronizes page for reuse by the adapter 1793 */ 1794 static void igc_reuse_rx_page(struct igc_ring *rx_ring, 1795 struct igc_rx_buffer *old_buff) 1796 { 1797 u16 nta = rx_ring->next_to_alloc; 1798 struct igc_rx_buffer *new_buff; 1799 1800 new_buff = &rx_ring->rx_buffer_info[nta]; 1801 1802 /* update, and store next to alloc */ 1803 nta++; 1804 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 1805 1806 /* Transfer page from old buffer to new buffer. 1807 * Move each member individually to avoid possible store 1808 * forwarding stalls. 1809 */ 1810 new_buff->dma = old_buff->dma; 1811 new_buff->page = old_buff->page; 1812 new_buff->page_offset = old_buff->page_offset; 1813 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1814 } 1815 1816 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, 1817 int rx_buffer_pgcnt) 1818 { 1819 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1820 struct page *page = rx_buffer->page; 1821 1822 /* avoid re-using remote and pfmemalloc pages */ 1823 if (!dev_page_is_reusable(page)) 1824 return false; 1825 1826 #if (PAGE_SIZE < 8192) 1827 /* if we are only owner of page we can reuse it */ 1828 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 1829 return false; 1830 #else 1831 #define IGC_LAST_OFFSET \ 1832 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) 1833 1834 if (rx_buffer->page_offset > IGC_LAST_OFFSET) 1835 return false; 1836 #endif 1837 1838 /* If we have drained the page fragment pool we need to update 1839 * the pagecnt_bias and page count so that we fully restock the 1840 * number of references the driver holds. 1841 */ 1842 if (unlikely(pagecnt_bias == 1)) { 1843 page_ref_add(page, USHRT_MAX - 1); 1844 rx_buffer->pagecnt_bias = USHRT_MAX; 1845 } 1846 1847 return true; 1848 } 1849 1850 /** 1851 * igc_is_non_eop - process handling of non-EOP buffers 1852 * @rx_ring: Rx ring being processed 1853 * @rx_desc: Rx descriptor for current buffer 1854 * 1855 * This function updates next to clean. If the buffer is an EOP buffer 1856 * this function exits returning false, otherwise it will place the 1857 * sk_buff in the next buffer to be chained and return true indicating 1858 * that this is in fact a non-EOP buffer. 1859 */ 1860 static bool igc_is_non_eop(struct igc_ring *rx_ring, 1861 union igc_adv_rx_desc *rx_desc) 1862 { 1863 u32 ntc = rx_ring->next_to_clean + 1; 1864 1865 /* fetch, update, and store next to clean */ 1866 ntc = (ntc < rx_ring->count) ? ntc : 0; 1867 rx_ring->next_to_clean = ntc; 1868 1869 prefetch(IGC_RX_DESC(rx_ring, ntc)); 1870 1871 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) 1872 return false; 1873 1874 return true; 1875 } 1876 1877 /** 1878 * igc_cleanup_headers - Correct corrupted or empty headers 1879 * @rx_ring: rx descriptor ring packet is being transacted on 1880 * @rx_desc: pointer to the EOP Rx descriptor 1881 * @skb: pointer to current skb being fixed 1882 * 1883 * Address the case where we are pulling data in on pages only 1884 * and as such no data is present in the skb header. 1885 * 1886 * In addition if skb is not at least 60 bytes we need to pad it so that 1887 * it is large enough to qualify as a valid Ethernet frame. 1888 * 1889 * Returns true if an error was encountered and skb was freed. 1890 */ 1891 static bool igc_cleanup_headers(struct igc_ring *rx_ring, 1892 union igc_adv_rx_desc *rx_desc, 1893 struct sk_buff *skb) 1894 { 1895 /* XDP packets use error pointer so abort at this point */ 1896 if (IS_ERR(skb)) 1897 return true; 1898 1899 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { 1900 struct net_device *netdev = rx_ring->netdev; 1901 1902 if (!(netdev->features & NETIF_F_RXALL)) { 1903 dev_kfree_skb_any(skb); 1904 return true; 1905 } 1906 } 1907 1908 /* if eth_skb_pad returns an error the skb was freed */ 1909 if (eth_skb_pad(skb)) 1910 return true; 1911 1912 return false; 1913 } 1914 1915 static void igc_put_rx_buffer(struct igc_ring *rx_ring, 1916 struct igc_rx_buffer *rx_buffer, 1917 int rx_buffer_pgcnt) 1918 { 1919 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 1920 /* hand second half of page back to the ring */ 1921 igc_reuse_rx_page(rx_ring, rx_buffer); 1922 } else { 1923 /* We are not reusing the buffer so unmap it and free 1924 * any references we are holding to it 1925 */ 1926 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 1927 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 1928 IGC_RX_DMA_ATTR); 1929 __page_frag_cache_drain(rx_buffer->page, 1930 rx_buffer->pagecnt_bias); 1931 } 1932 1933 /* clear contents of rx_buffer */ 1934 rx_buffer->page = NULL; 1935 } 1936 1937 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) 1938 { 1939 struct igc_adapter *adapter = rx_ring->q_vector->adapter; 1940 1941 if (ring_uses_build_skb(rx_ring)) 1942 return IGC_SKB_PAD; 1943 if (igc_xdp_is_enabled(adapter)) 1944 return XDP_PACKET_HEADROOM; 1945 1946 return 0; 1947 } 1948 1949 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, 1950 struct igc_rx_buffer *bi) 1951 { 1952 struct page *page = bi->page; 1953 dma_addr_t dma; 1954 1955 /* since we are recycling buffers we should seldom need to alloc */ 1956 if (likely(page)) 1957 return true; 1958 1959 /* alloc new page for storage */ 1960 page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); 1961 if (unlikely(!page)) { 1962 rx_ring->rx_stats.alloc_failed++; 1963 return false; 1964 } 1965 1966 /* map page for use */ 1967 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 1968 igc_rx_pg_size(rx_ring), 1969 DMA_FROM_DEVICE, 1970 IGC_RX_DMA_ATTR); 1971 1972 /* if mapping failed free memory back to system since 1973 * there isn't much point in holding memory we can't use 1974 */ 1975 if (dma_mapping_error(rx_ring->dev, dma)) { 1976 __free_page(page); 1977 1978 rx_ring->rx_stats.alloc_failed++; 1979 return false; 1980 } 1981 1982 bi->dma = dma; 1983 bi->page = page; 1984 bi->page_offset = igc_rx_offset(rx_ring); 1985 page_ref_add(page, USHRT_MAX - 1); 1986 bi->pagecnt_bias = USHRT_MAX; 1987 1988 return true; 1989 } 1990 1991 /** 1992 * igc_alloc_rx_buffers - Replace used receive buffers; packet split 1993 * @rx_ring: rx descriptor ring 1994 * @cleaned_count: number of buffers to clean 1995 */ 1996 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) 1997 { 1998 union igc_adv_rx_desc *rx_desc; 1999 u16 i = rx_ring->next_to_use; 2000 struct igc_rx_buffer *bi; 2001 u16 bufsz; 2002 2003 /* nothing to do */ 2004 if (!cleaned_count) 2005 return; 2006 2007 rx_desc = IGC_RX_DESC(rx_ring, i); 2008 bi = &rx_ring->rx_buffer_info[i]; 2009 i -= rx_ring->count; 2010 2011 bufsz = igc_rx_bufsz(rx_ring); 2012 2013 do { 2014 if (!igc_alloc_mapped_page(rx_ring, bi)) 2015 break; 2016 2017 /* sync the buffer for use by the device */ 2018 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 2019 bi->page_offset, bufsz, 2020 DMA_FROM_DEVICE); 2021 2022 /* Refresh the desc even if buffer_addrs didn't change 2023 * because each write-back erases this info. 2024 */ 2025 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 2026 2027 rx_desc++; 2028 bi++; 2029 i++; 2030 if (unlikely(!i)) { 2031 rx_desc = IGC_RX_DESC(rx_ring, 0); 2032 bi = rx_ring->rx_buffer_info; 2033 i -= rx_ring->count; 2034 } 2035 2036 /* clear the length for the next_to_use descriptor */ 2037 rx_desc->wb.upper.length = 0; 2038 2039 cleaned_count--; 2040 } while (cleaned_count); 2041 2042 i += rx_ring->count; 2043 2044 if (rx_ring->next_to_use != i) { 2045 /* record the next descriptor to use */ 2046 rx_ring->next_to_use = i; 2047 2048 /* update next to alloc since we have filled the ring */ 2049 rx_ring->next_to_alloc = i; 2050 2051 /* Force memory writes to complete before letting h/w 2052 * know there are new descriptors to fetch. (Only 2053 * applicable for weak-ordered memory model archs, 2054 * such as IA-64). 2055 */ 2056 wmb(); 2057 writel(i, rx_ring->tail); 2058 } 2059 } 2060 2061 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) 2062 { 2063 union igc_adv_rx_desc *desc; 2064 u16 i = ring->next_to_use; 2065 struct igc_rx_buffer *bi; 2066 dma_addr_t dma; 2067 bool ok = true; 2068 2069 if (!count) 2070 return ok; 2071 2072 desc = IGC_RX_DESC(ring, i); 2073 bi = &ring->rx_buffer_info[i]; 2074 i -= ring->count; 2075 2076 do { 2077 bi->xdp = xsk_buff_alloc(ring->xsk_pool); 2078 if (!bi->xdp) { 2079 ok = false; 2080 break; 2081 } 2082 2083 dma = xsk_buff_xdp_get_dma(bi->xdp); 2084 desc->read.pkt_addr = cpu_to_le64(dma); 2085 2086 desc++; 2087 bi++; 2088 i++; 2089 if (unlikely(!i)) { 2090 desc = IGC_RX_DESC(ring, 0); 2091 bi = ring->rx_buffer_info; 2092 i -= ring->count; 2093 } 2094 2095 /* Clear the length for the next_to_use descriptor. */ 2096 desc->wb.upper.length = 0; 2097 2098 count--; 2099 } while (count); 2100 2101 i += ring->count; 2102 2103 if (ring->next_to_use != i) { 2104 ring->next_to_use = i; 2105 2106 /* Force memory writes to complete before letting h/w 2107 * know there are new descriptors to fetch. (Only 2108 * applicable for weak-ordered memory model archs, 2109 * such as IA-64). 2110 */ 2111 wmb(); 2112 writel(i, ring->tail); 2113 } 2114 2115 return ok; 2116 } 2117 2118 static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, 2119 struct xdp_frame *xdpf, 2120 struct igc_ring *ring) 2121 { 2122 dma_addr_t dma; 2123 2124 dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); 2125 if (dma_mapping_error(ring->dev, dma)) { 2126 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); 2127 return -ENOMEM; 2128 } 2129 2130 buffer->type = IGC_TX_BUFFER_TYPE_XDP; 2131 buffer->xdpf = xdpf; 2132 buffer->protocol = 0; 2133 buffer->bytecount = xdpf->len; 2134 buffer->gso_segs = 1; 2135 buffer->time_stamp = jiffies; 2136 dma_unmap_len_set(buffer, len, xdpf->len); 2137 dma_unmap_addr_set(buffer, dma, dma); 2138 return 0; 2139 } 2140 2141 /* This function requires __netif_tx_lock is held by the caller. */ 2142 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, 2143 struct xdp_frame *xdpf) 2144 { 2145 struct igc_tx_buffer *buffer; 2146 union igc_adv_tx_desc *desc; 2147 u32 cmd_type, olinfo_status; 2148 int err; 2149 2150 if (!igc_desc_unused(ring)) 2151 return -EBUSY; 2152 2153 buffer = &ring->tx_buffer_info[ring->next_to_use]; 2154 err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); 2155 if (err) 2156 return err; 2157 2158 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2159 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2160 buffer->bytecount; 2161 olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; 2162 2163 desc = IGC_TX_DESC(ring, ring->next_to_use); 2164 desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2165 desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2166 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); 2167 2168 netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); 2169 2170 buffer->next_to_watch = desc; 2171 2172 ring->next_to_use++; 2173 if (ring->next_to_use == ring->count) 2174 ring->next_to_use = 0; 2175 2176 return 0; 2177 } 2178 2179 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, 2180 int cpu) 2181 { 2182 int index = cpu; 2183 2184 if (unlikely(index < 0)) 2185 index = 0; 2186 2187 while (index >= adapter->num_tx_queues) 2188 index -= adapter->num_tx_queues; 2189 2190 return adapter->tx_ring[index]; 2191 } 2192 2193 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) 2194 { 2195 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2196 int cpu = smp_processor_id(); 2197 struct netdev_queue *nq; 2198 struct igc_ring *ring; 2199 int res; 2200 2201 if (unlikely(!xdpf)) 2202 return -EFAULT; 2203 2204 ring = igc_xdp_get_tx_ring(adapter, cpu); 2205 nq = txring_txq(ring); 2206 2207 __netif_tx_lock(nq, cpu); 2208 res = igc_xdp_init_tx_descriptor(ring, xdpf); 2209 __netif_tx_unlock(nq); 2210 return res; 2211 } 2212 2213 /* This function assumes rcu_read_lock() is held by the caller. */ 2214 static int __igc_xdp_run_prog(struct igc_adapter *adapter, 2215 struct bpf_prog *prog, 2216 struct xdp_buff *xdp) 2217 { 2218 u32 act = bpf_prog_run_xdp(prog, xdp); 2219 2220 switch (act) { 2221 case XDP_PASS: 2222 return IGC_XDP_PASS; 2223 case XDP_TX: 2224 if (igc_xdp_xmit_back(adapter, xdp) < 0) 2225 goto out_failure; 2226 return IGC_XDP_TX; 2227 case XDP_REDIRECT: 2228 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) 2229 goto out_failure; 2230 return IGC_XDP_REDIRECT; 2231 break; 2232 default: 2233 bpf_warn_invalid_xdp_action(act); 2234 fallthrough; 2235 case XDP_ABORTED: 2236 out_failure: 2237 trace_xdp_exception(adapter->netdev, prog, act); 2238 fallthrough; 2239 case XDP_DROP: 2240 return IGC_XDP_CONSUMED; 2241 } 2242 } 2243 2244 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, 2245 struct xdp_buff *xdp) 2246 { 2247 struct bpf_prog *prog; 2248 int res; 2249 2250 prog = READ_ONCE(adapter->xdp_prog); 2251 if (!prog) { 2252 res = IGC_XDP_PASS; 2253 goto out; 2254 } 2255 2256 res = __igc_xdp_run_prog(adapter, prog, xdp); 2257 2258 out: 2259 return ERR_PTR(-res); 2260 } 2261 2262 /* This function assumes __netif_tx_lock is held by the caller. */ 2263 static void igc_flush_tx_descriptors(struct igc_ring *ring) 2264 { 2265 /* Once tail pointer is updated, hardware can fetch the descriptors 2266 * any time so we issue a write membar here to ensure all memory 2267 * writes are complete before the tail pointer is updated. 2268 */ 2269 wmb(); 2270 writel(ring->next_to_use, ring->tail); 2271 } 2272 2273 static void igc_finalize_xdp(struct igc_adapter *adapter, int status) 2274 { 2275 int cpu = smp_processor_id(); 2276 struct netdev_queue *nq; 2277 struct igc_ring *ring; 2278 2279 if (status & IGC_XDP_TX) { 2280 ring = igc_xdp_get_tx_ring(adapter, cpu); 2281 nq = txring_txq(ring); 2282 2283 __netif_tx_lock(nq, cpu); 2284 igc_flush_tx_descriptors(ring); 2285 __netif_tx_unlock(nq); 2286 } 2287 2288 if (status & IGC_XDP_REDIRECT) 2289 xdp_do_flush(); 2290 } 2291 2292 static void igc_update_rx_stats(struct igc_q_vector *q_vector, 2293 unsigned int packets, unsigned int bytes) 2294 { 2295 struct igc_ring *ring = q_vector->rx.ring; 2296 2297 u64_stats_update_begin(&ring->rx_syncp); 2298 ring->rx_stats.packets += packets; 2299 ring->rx_stats.bytes += bytes; 2300 u64_stats_update_end(&ring->rx_syncp); 2301 2302 q_vector->rx.total_packets += packets; 2303 q_vector->rx.total_bytes += bytes; 2304 } 2305 2306 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) 2307 { 2308 unsigned int total_bytes = 0, total_packets = 0; 2309 struct igc_adapter *adapter = q_vector->adapter; 2310 struct igc_ring *rx_ring = q_vector->rx.ring; 2311 struct sk_buff *skb = rx_ring->skb; 2312 u16 cleaned_count = igc_desc_unused(rx_ring); 2313 int xdp_status = 0, rx_buffer_pgcnt; 2314 2315 while (likely(total_packets < budget)) { 2316 union igc_adv_rx_desc *rx_desc; 2317 struct igc_rx_buffer *rx_buffer; 2318 unsigned int size, truesize; 2319 ktime_t timestamp = 0; 2320 struct xdp_buff xdp; 2321 int pkt_offset = 0; 2322 void *pktbuf; 2323 2324 /* return some buffers to hardware, one at a time is too slow */ 2325 if (cleaned_count >= IGC_RX_BUFFER_WRITE) { 2326 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2327 cleaned_count = 0; 2328 } 2329 2330 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); 2331 size = le16_to_cpu(rx_desc->wb.upper.length); 2332 if (!size) 2333 break; 2334 2335 /* This memory barrier is needed to keep us from reading 2336 * any other fields out of the rx_desc until we know the 2337 * descriptor has been written back 2338 */ 2339 dma_rmb(); 2340 2341 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2342 truesize = igc_get_rx_frame_truesize(rx_ring, size); 2343 2344 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; 2345 2346 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { 2347 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2348 pktbuf); 2349 pkt_offset = IGC_TS_HDR_LEN; 2350 size -= IGC_TS_HDR_LEN; 2351 } 2352 2353 if (!skb) { 2354 xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); 2355 xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), 2356 igc_rx_offset(rx_ring) + pkt_offset, size, false); 2357 2358 skb = igc_xdp_run_prog(adapter, &xdp); 2359 } 2360 2361 if (IS_ERR(skb)) { 2362 unsigned int xdp_res = -PTR_ERR(skb); 2363 2364 switch (xdp_res) { 2365 case IGC_XDP_CONSUMED: 2366 rx_buffer->pagecnt_bias++; 2367 break; 2368 case IGC_XDP_TX: 2369 case IGC_XDP_REDIRECT: 2370 igc_rx_buffer_flip(rx_buffer, truesize); 2371 xdp_status |= xdp_res; 2372 break; 2373 } 2374 2375 total_packets++; 2376 total_bytes += size; 2377 } else if (skb) 2378 igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 2379 else if (ring_uses_build_skb(rx_ring)) 2380 skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); 2381 else 2382 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, 2383 timestamp); 2384 2385 /* exit if we failed to retrieve a buffer */ 2386 if (!skb) { 2387 rx_ring->rx_stats.alloc_failed++; 2388 rx_buffer->pagecnt_bias++; 2389 break; 2390 } 2391 2392 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2393 cleaned_count++; 2394 2395 /* fetch next buffer in frame if non-eop */ 2396 if (igc_is_non_eop(rx_ring, rx_desc)) 2397 continue; 2398 2399 /* verify the packet layout is correct */ 2400 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 2401 skb = NULL; 2402 continue; 2403 } 2404 2405 /* probably a little skewed due to removing CRC */ 2406 total_bytes += skb->len; 2407 2408 /* populate checksum, VLAN, and protocol */ 2409 igc_process_skb_fields(rx_ring, rx_desc, skb); 2410 2411 napi_gro_receive(&q_vector->napi, skb); 2412 2413 /* reset skb pointer */ 2414 skb = NULL; 2415 2416 /* update budget accounting */ 2417 total_packets++; 2418 } 2419 2420 if (xdp_status) 2421 igc_finalize_xdp(adapter, xdp_status); 2422 2423 /* place incomplete frames back on ring for completion */ 2424 rx_ring->skb = skb; 2425 2426 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2427 2428 if (cleaned_count) 2429 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2430 2431 return total_packets; 2432 } 2433 2434 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, 2435 struct xdp_buff *xdp) 2436 { 2437 unsigned int metasize = xdp->data - xdp->data_meta; 2438 unsigned int datasize = xdp->data_end - xdp->data; 2439 unsigned int totalsize = metasize + datasize; 2440 struct sk_buff *skb; 2441 2442 skb = __napi_alloc_skb(&ring->q_vector->napi, 2443 xdp->data_end - xdp->data_hard_start, 2444 GFP_ATOMIC | __GFP_NOWARN); 2445 if (unlikely(!skb)) 2446 return NULL; 2447 2448 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); 2449 memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); 2450 if (metasize) 2451 skb_metadata_set(skb, metasize); 2452 2453 return skb; 2454 } 2455 2456 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, 2457 union igc_adv_rx_desc *desc, 2458 struct xdp_buff *xdp, 2459 ktime_t timestamp) 2460 { 2461 struct igc_ring *ring = q_vector->rx.ring; 2462 struct sk_buff *skb; 2463 2464 skb = igc_construct_skb_zc(ring, xdp); 2465 if (!skb) { 2466 ring->rx_stats.alloc_failed++; 2467 return; 2468 } 2469 2470 if (timestamp) 2471 skb_hwtstamps(skb)->hwtstamp = timestamp; 2472 2473 if (igc_cleanup_headers(ring, desc, skb)) 2474 return; 2475 2476 igc_process_skb_fields(ring, desc, skb); 2477 napi_gro_receive(&q_vector->napi, skb); 2478 } 2479 2480 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) 2481 { 2482 struct igc_adapter *adapter = q_vector->adapter; 2483 struct igc_ring *ring = q_vector->rx.ring; 2484 u16 cleaned_count = igc_desc_unused(ring); 2485 int total_bytes = 0, total_packets = 0; 2486 u16 ntc = ring->next_to_clean; 2487 struct bpf_prog *prog; 2488 bool failure = false; 2489 int xdp_status = 0; 2490 2491 rcu_read_lock(); 2492 2493 prog = READ_ONCE(adapter->xdp_prog); 2494 2495 while (likely(total_packets < budget)) { 2496 union igc_adv_rx_desc *desc; 2497 struct igc_rx_buffer *bi; 2498 ktime_t timestamp = 0; 2499 unsigned int size; 2500 int res; 2501 2502 desc = IGC_RX_DESC(ring, ntc); 2503 size = le16_to_cpu(desc->wb.upper.length); 2504 if (!size) 2505 break; 2506 2507 /* This memory barrier is needed to keep us from reading 2508 * any other fields out of the rx_desc until we know the 2509 * descriptor has been written back 2510 */ 2511 dma_rmb(); 2512 2513 bi = &ring->rx_buffer_info[ntc]; 2514 2515 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { 2516 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2517 bi->xdp->data); 2518 2519 bi->xdp->data += IGC_TS_HDR_LEN; 2520 2521 /* HW timestamp has been copied into local variable. Metadata 2522 * length when XDP program is called should be 0. 2523 */ 2524 bi->xdp->data_meta += IGC_TS_HDR_LEN; 2525 size -= IGC_TS_HDR_LEN; 2526 } 2527 2528 bi->xdp->data_end = bi->xdp->data + size; 2529 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); 2530 2531 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); 2532 switch (res) { 2533 case IGC_XDP_PASS: 2534 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); 2535 fallthrough; 2536 case IGC_XDP_CONSUMED: 2537 xsk_buff_free(bi->xdp); 2538 break; 2539 case IGC_XDP_TX: 2540 case IGC_XDP_REDIRECT: 2541 xdp_status |= res; 2542 break; 2543 } 2544 2545 bi->xdp = NULL; 2546 total_bytes += size; 2547 total_packets++; 2548 cleaned_count++; 2549 ntc++; 2550 if (ntc == ring->count) 2551 ntc = 0; 2552 } 2553 2554 ring->next_to_clean = ntc; 2555 rcu_read_unlock(); 2556 2557 if (cleaned_count >= IGC_RX_BUFFER_WRITE) 2558 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); 2559 2560 if (xdp_status) 2561 igc_finalize_xdp(adapter, xdp_status); 2562 2563 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2564 2565 if (xsk_uses_need_wakeup(ring->xsk_pool)) { 2566 if (failure || ring->next_to_clean == ring->next_to_use) 2567 xsk_set_rx_need_wakeup(ring->xsk_pool); 2568 else 2569 xsk_clear_rx_need_wakeup(ring->xsk_pool); 2570 return total_packets; 2571 } 2572 2573 return failure ? budget : total_packets; 2574 } 2575 2576 static void igc_update_tx_stats(struct igc_q_vector *q_vector, 2577 unsigned int packets, unsigned int bytes) 2578 { 2579 struct igc_ring *ring = q_vector->tx.ring; 2580 2581 u64_stats_update_begin(&ring->tx_syncp); 2582 ring->tx_stats.bytes += bytes; 2583 ring->tx_stats.packets += packets; 2584 u64_stats_update_end(&ring->tx_syncp); 2585 2586 q_vector->tx.total_bytes += bytes; 2587 q_vector->tx.total_packets += packets; 2588 } 2589 2590 static void igc_xdp_xmit_zc(struct igc_ring *ring) 2591 { 2592 struct xsk_buff_pool *pool = ring->xsk_pool; 2593 struct netdev_queue *nq = txring_txq(ring); 2594 union igc_adv_tx_desc *tx_desc = NULL; 2595 int cpu = smp_processor_id(); 2596 u16 ntu = ring->next_to_use; 2597 struct xdp_desc xdp_desc; 2598 u16 budget; 2599 2600 if (!netif_carrier_ok(ring->netdev)) 2601 return; 2602 2603 __netif_tx_lock(nq, cpu); 2604 2605 budget = igc_desc_unused(ring); 2606 2607 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { 2608 u32 cmd_type, olinfo_status; 2609 struct igc_tx_buffer *bi; 2610 dma_addr_t dma; 2611 2612 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2613 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2614 xdp_desc.len; 2615 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; 2616 2617 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2618 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); 2619 2620 tx_desc = IGC_TX_DESC(ring, ntu); 2621 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2622 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2623 tx_desc->read.buffer_addr = cpu_to_le64(dma); 2624 2625 bi = &ring->tx_buffer_info[ntu]; 2626 bi->type = IGC_TX_BUFFER_TYPE_XSK; 2627 bi->protocol = 0; 2628 bi->bytecount = xdp_desc.len; 2629 bi->gso_segs = 1; 2630 bi->time_stamp = jiffies; 2631 bi->next_to_watch = tx_desc; 2632 2633 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); 2634 2635 ntu++; 2636 if (ntu == ring->count) 2637 ntu = 0; 2638 } 2639 2640 ring->next_to_use = ntu; 2641 if (tx_desc) { 2642 igc_flush_tx_descriptors(ring); 2643 xsk_tx_release(pool); 2644 } 2645 2646 __netif_tx_unlock(nq); 2647 } 2648 2649 /** 2650 * igc_clean_tx_irq - Reclaim resources after transmit completes 2651 * @q_vector: pointer to q_vector containing needed info 2652 * @napi_budget: Used to determine if we are in netpoll 2653 * 2654 * returns true if ring is completely cleaned 2655 */ 2656 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 2657 { 2658 struct igc_adapter *adapter = q_vector->adapter; 2659 unsigned int total_bytes = 0, total_packets = 0; 2660 unsigned int budget = q_vector->tx.work_limit; 2661 struct igc_ring *tx_ring = q_vector->tx.ring; 2662 unsigned int i = tx_ring->next_to_clean; 2663 struct igc_tx_buffer *tx_buffer; 2664 union igc_adv_tx_desc *tx_desc; 2665 u32 xsk_frames = 0; 2666 2667 if (test_bit(__IGC_DOWN, &adapter->state)) 2668 return true; 2669 2670 tx_buffer = &tx_ring->tx_buffer_info[i]; 2671 tx_desc = IGC_TX_DESC(tx_ring, i); 2672 i -= tx_ring->count; 2673 2674 do { 2675 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 2676 2677 /* if next_to_watch is not set then there is no work pending */ 2678 if (!eop_desc) 2679 break; 2680 2681 /* prevent any other reads prior to eop_desc */ 2682 smp_rmb(); 2683 2684 /* if DD is not set pending work has not been completed */ 2685 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 2686 break; 2687 2688 /* clear next_to_watch to prevent false hangs */ 2689 tx_buffer->next_to_watch = NULL; 2690 2691 /* update the statistics for this packet */ 2692 total_bytes += tx_buffer->bytecount; 2693 total_packets += tx_buffer->gso_segs; 2694 2695 switch (tx_buffer->type) { 2696 case IGC_TX_BUFFER_TYPE_XSK: 2697 xsk_frames++; 2698 break; 2699 case IGC_TX_BUFFER_TYPE_XDP: 2700 xdp_return_frame(tx_buffer->xdpf); 2701 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2702 break; 2703 case IGC_TX_BUFFER_TYPE_SKB: 2704 napi_consume_skb(tx_buffer->skb, napi_budget); 2705 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2706 break; 2707 default: 2708 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 2709 break; 2710 } 2711 2712 /* clear last DMA location and unmap remaining buffers */ 2713 while (tx_desc != eop_desc) { 2714 tx_buffer++; 2715 tx_desc++; 2716 i++; 2717 if (unlikely(!i)) { 2718 i -= tx_ring->count; 2719 tx_buffer = tx_ring->tx_buffer_info; 2720 tx_desc = IGC_TX_DESC(tx_ring, 0); 2721 } 2722 2723 /* unmap any remaining paged data */ 2724 if (dma_unmap_len(tx_buffer, len)) 2725 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2726 } 2727 2728 /* move us one more past the eop_desc for start of next pkt */ 2729 tx_buffer++; 2730 tx_desc++; 2731 i++; 2732 if (unlikely(!i)) { 2733 i -= tx_ring->count; 2734 tx_buffer = tx_ring->tx_buffer_info; 2735 tx_desc = IGC_TX_DESC(tx_ring, 0); 2736 } 2737 2738 /* issue prefetch for next Tx descriptor */ 2739 prefetch(tx_desc); 2740 2741 /* update budget accounting */ 2742 budget--; 2743 } while (likely(budget)); 2744 2745 netdev_tx_completed_queue(txring_txq(tx_ring), 2746 total_packets, total_bytes); 2747 2748 i += tx_ring->count; 2749 tx_ring->next_to_clean = i; 2750 2751 igc_update_tx_stats(q_vector, total_packets, total_bytes); 2752 2753 if (tx_ring->xsk_pool) { 2754 if (xsk_frames) 2755 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 2756 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) 2757 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); 2758 igc_xdp_xmit_zc(tx_ring); 2759 } 2760 2761 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 2762 struct igc_hw *hw = &adapter->hw; 2763 2764 /* Detect a transmit hang in hardware, this serializes the 2765 * check with the clearing of time_stamp and movement of i 2766 */ 2767 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 2768 if (tx_buffer->next_to_watch && 2769 time_after(jiffies, tx_buffer->time_stamp + 2770 (adapter->tx_timeout_factor * HZ)) && 2771 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { 2772 /* detected Tx unit hang */ 2773 netdev_err(tx_ring->netdev, 2774 "Detected Tx Unit Hang\n" 2775 " Tx Queue <%d>\n" 2776 " TDH <%x>\n" 2777 " TDT <%x>\n" 2778 " next_to_use <%x>\n" 2779 " next_to_clean <%x>\n" 2780 "buffer_info[next_to_clean]\n" 2781 " time_stamp <%lx>\n" 2782 " next_to_watch <%p>\n" 2783 " jiffies <%lx>\n" 2784 " desc.status <%x>\n", 2785 tx_ring->queue_index, 2786 rd32(IGC_TDH(tx_ring->reg_idx)), 2787 readl(tx_ring->tail), 2788 tx_ring->next_to_use, 2789 tx_ring->next_to_clean, 2790 tx_buffer->time_stamp, 2791 tx_buffer->next_to_watch, 2792 jiffies, 2793 tx_buffer->next_to_watch->wb.status); 2794 netif_stop_subqueue(tx_ring->netdev, 2795 tx_ring->queue_index); 2796 2797 /* we are about to reset, no point in enabling stuff */ 2798 return true; 2799 } 2800 } 2801 2802 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 2803 if (unlikely(total_packets && 2804 netif_carrier_ok(tx_ring->netdev) && 2805 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 2806 /* Make sure that anybody stopping the queue after this 2807 * sees the new next_to_clean. 2808 */ 2809 smp_mb(); 2810 if (__netif_subqueue_stopped(tx_ring->netdev, 2811 tx_ring->queue_index) && 2812 !(test_bit(__IGC_DOWN, &adapter->state))) { 2813 netif_wake_subqueue(tx_ring->netdev, 2814 tx_ring->queue_index); 2815 2816 u64_stats_update_begin(&tx_ring->tx_syncp); 2817 tx_ring->tx_stats.restart_queue++; 2818 u64_stats_update_end(&tx_ring->tx_syncp); 2819 } 2820 } 2821 2822 return !!budget; 2823 } 2824 2825 static int igc_find_mac_filter(struct igc_adapter *adapter, 2826 enum igc_mac_filter_type type, const u8 *addr) 2827 { 2828 struct igc_hw *hw = &adapter->hw; 2829 int max_entries = hw->mac.rar_entry_count; 2830 u32 ral, rah; 2831 int i; 2832 2833 for (i = 0; i < max_entries; i++) { 2834 ral = rd32(IGC_RAL(i)); 2835 rah = rd32(IGC_RAH(i)); 2836 2837 if (!(rah & IGC_RAH_AV)) 2838 continue; 2839 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) 2840 continue; 2841 if ((rah & IGC_RAH_RAH_MASK) != 2842 le16_to_cpup((__le16 *)(addr + 4))) 2843 continue; 2844 if (ral != le32_to_cpup((__le32 *)(addr))) 2845 continue; 2846 2847 return i; 2848 } 2849 2850 return -1; 2851 } 2852 2853 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) 2854 { 2855 struct igc_hw *hw = &adapter->hw; 2856 int max_entries = hw->mac.rar_entry_count; 2857 u32 rah; 2858 int i; 2859 2860 for (i = 0; i < max_entries; i++) { 2861 rah = rd32(IGC_RAH(i)); 2862 2863 if (!(rah & IGC_RAH_AV)) 2864 return i; 2865 } 2866 2867 return -1; 2868 } 2869 2870 /** 2871 * igc_add_mac_filter() - Add MAC address filter 2872 * @adapter: Pointer to adapter where the filter should be added 2873 * @type: MAC address filter type (source or destination) 2874 * @addr: MAC address 2875 * @queue: If non-negative, queue assignment feature is enabled and frames 2876 * matching the filter are enqueued onto 'queue'. Otherwise, queue 2877 * assignment is disabled. 2878 * 2879 * Return: 0 in case of success, negative errno code otherwise. 2880 */ 2881 static int igc_add_mac_filter(struct igc_adapter *adapter, 2882 enum igc_mac_filter_type type, const u8 *addr, 2883 int queue) 2884 { 2885 struct net_device *dev = adapter->netdev; 2886 int index; 2887 2888 index = igc_find_mac_filter(adapter, type, addr); 2889 if (index >= 0) 2890 goto update_filter; 2891 2892 index = igc_get_avail_mac_filter_slot(adapter); 2893 if (index < 0) 2894 return -ENOSPC; 2895 2896 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", 2897 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2898 addr, queue); 2899 2900 update_filter: 2901 igc_set_mac_filter_hw(adapter, index, type, addr, queue); 2902 return 0; 2903 } 2904 2905 /** 2906 * igc_del_mac_filter() - Delete MAC address filter 2907 * @adapter: Pointer to adapter where the filter should be deleted from 2908 * @type: MAC address filter type (source or destination) 2909 * @addr: MAC address 2910 */ 2911 static void igc_del_mac_filter(struct igc_adapter *adapter, 2912 enum igc_mac_filter_type type, const u8 *addr) 2913 { 2914 struct net_device *dev = adapter->netdev; 2915 int index; 2916 2917 index = igc_find_mac_filter(adapter, type, addr); 2918 if (index < 0) 2919 return; 2920 2921 if (index == 0) { 2922 /* If this is the default filter, we don't actually delete it. 2923 * We just reset to its default value i.e. disable queue 2924 * assignment. 2925 */ 2926 netdev_dbg(dev, "Disable default MAC filter queue assignment"); 2927 2928 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); 2929 } else { 2930 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", 2931 index, 2932 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 2933 addr); 2934 2935 igc_clear_mac_filter_hw(adapter, index); 2936 } 2937 } 2938 2939 /** 2940 * igc_add_vlan_prio_filter() - Add VLAN priority filter 2941 * @adapter: Pointer to adapter where the filter should be added 2942 * @prio: VLAN priority value 2943 * @queue: Queue number which matching frames are assigned to 2944 * 2945 * Return: 0 in case of success, negative errno code otherwise. 2946 */ 2947 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, 2948 int queue) 2949 { 2950 struct net_device *dev = adapter->netdev; 2951 struct igc_hw *hw = &adapter->hw; 2952 u32 vlanpqf; 2953 2954 vlanpqf = rd32(IGC_VLANPQF); 2955 2956 if (vlanpqf & IGC_VLANPQF_VALID(prio)) { 2957 netdev_dbg(dev, "VLAN priority filter already in use\n"); 2958 return -EEXIST; 2959 } 2960 2961 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); 2962 vlanpqf |= IGC_VLANPQF_VALID(prio); 2963 2964 wr32(IGC_VLANPQF, vlanpqf); 2965 2966 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", 2967 prio, queue); 2968 return 0; 2969 } 2970 2971 /** 2972 * igc_del_vlan_prio_filter() - Delete VLAN priority filter 2973 * @adapter: Pointer to adapter where the filter should be deleted from 2974 * @prio: VLAN priority value 2975 */ 2976 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) 2977 { 2978 struct igc_hw *hw = &adapter->hw; 2979 u32 vlanpqf; 2980 2981 vlanpqf = rd32(IGC_VLANPQF); 2982 2983 vlanpqf &= ~IGC_VLANPQF_VALID(prio); 2984 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); 2985 2986 wr32(IGC_VLANPQF, vlanpqf); 2987 2988 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", 2989 prio); 2990 } 2991 2992 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) 2993 { 2994 struct igc_hw *hw = &adapter->hw; 2995 int i; 2996 2997 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 2998 u32 etqf = rd32(IGC_ETQF(i)); 2999 3000 if (!(etqf & IGC_ETQF_FILTER_ENABLE)) 3001 return i; 3002 } 3003 3004 return -1; 3005 } 3006 3007 /** 3008 * igc_add_etype_filter() - Add ethertype filter 3009 * @adapter: Pointer to adapter where the filter should be added 3010 * @etype: Ethertype value 3011 * @queue: If non-negative, queue assignment feature is enabled and frames 3012 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3013 * assignment is disabled. 3014 * 3015 * Return: 0 in case of success, negative errno code otherwise. 3016 */ 3017 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, 3018 int queue) 3019 { 3020 struct igc_hw *hw = &adapter->hw; 3021 int index; 3022 u32 etqf; 3023 3024 index = igc_get_avail_etype_filter_slot(adapter); 3025 if (index < 0) 3026 return -ENOSPC; 3027 3028 etqf = rd32(IGC_ETQF(index)); 3029 3030 etqf &= ~IGC_ETQF_ETYPE_MASK; 3031 etqf |= etype; 3032 3033 if (queue >= 0) { 3034 etqf &= ~IGC_ETQF_QUEUE_MASK; 3035 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); 3036 etqf |= IGC_ETQF_QUEUE_ENABLE; 3037 } 3038 3039 etqf |= IGC_ETQF_FILTER_ENABLE; 3040 3041 wr32(IGC_ETQF(index), etqf); 3042 3043 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", 3044 etype, queue); 3045 return 0; 3046 } 3047 3048 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) 3049 { 3050 struct igc_hw *hw = &adapter->hw; 3051 int i; 3052 3053 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3054 u32 etqf = rd32(IGC_ETQF(i)); 3055 3056 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) 3057 return i; 3058 } 3059 3060 return -1; 3061 } 3062 3063 /** 3064 * igc_del_etype_filter() - Delete ethertype filter 3065 * @adapter: Pointer to adapter where the filter should be deleted from 3066 * @etype: Ethertype value 3067 */ 3068 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) 3069 { 3070 struct igc_hw *hw = &adapter->hw; 3071 int index; 3072 3073 index = igc_find_etype_filter(adapter, etype); 3074 if (index < 0) 3075 return; 3076 3077 wr32(IGC_ETQF(index), 0); 3078 3079 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", 3080 etype); 3081 } 3082 3083 static int igc_flex_filter_select(struct igc_adapter *adapter, 3084 struct igc_flex_filter *input, 3085 u32 *fhft) 3086 { 3087 struct igc_hw *hw = &adapter->hw; 3088 u8 fhft_index; 3089 u32 fhftsl; 3090 3091 if (input->index >= MAX_FLEX_FILTER) { 3092 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); 3093 return -EINVAL; 3094 } 3095 3096 /* Indirect table select register */ 3097 fhftsl = rd32(IGC_FHFTSL); 3098 fhftsl &= ~IGC_FHFTSL_FTSL_MASK; 3099 switch (input->index) { 3100 case 0 ... 7: 3101 fhftsl |= 0x00; 3102 break; 3103 case 8 ... 15: 3104 fhftsl |= 0x01; 3105 break; 3106 case 16 ... 23: 3107 fhftsl |= 0x02; 3108 break; 3109 case 24 ... 31: 3110 fhftsl |= 0x03; 3111 break; 3112 } 3113 wr32(IGC_FHFTSL, fhftsl); 3114 3115 /* Normalize index down to host table register */ 3116 fhft_index = input->index % 8; 3117 3118 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : 3119 IGC_FHFT_EXT(fhft_index - 4); 3120 3121 return 0; 3122 } 3123 3124 static int igc_write_flex_filter_ll(struct igc_adapter *adapter, 3125 struct igc_flex_filter *input) 3126 { 3127 struct device *dev = &adapter->pdev->dev; 3128 struct igc_hw *hw = &adapter->hw; 3129 u8 *data = input->data; 3130 u8 *mask = input->mask; 3131 u32 queuing; 3132 u32 fhft; 3133 u32 wufc; 3134 int ret; 3135 int i; 3136 3137 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail 3138 * out early to avoid surprises later. 3139 */ 3140 if (input->length % 8 != 0) { 3141 dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); 3142 return -EINVAL; 3143 } 3144 3145 /* Select corresponding flex filter register and get base for host table. */ 3146 ret = igc_flex_filter_select(adapter, input, &fhft); 3147 if (ret) 3148 return ret; 3149 3150 /* When adding a filter globally disable flex filter feature. That is 3151 * recommended within the datasheet. 3152 */ 3153 wufc = rd32(IGC_WUFC); 3154 wufc &= ~IGC_WUFC_FLEX_HQ; 3155 wr32(IGC_WUFC, wufc); 3156 3157 /* Configure filter */ 3158 queuing = input->length & IGC_FHFT_LENGTH_MASK; 3159 queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; 3160 queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; 3161 3162 if (input->immediate_irq) 3163 queuing |= IGC_FHFT_IMM_INT; 3164 3165 if (input->drop) 3166 queuing |= IGC_FHFT_DROP; 3167 3168 wr32(fhft + 0xFC, queuing); 3169 3170 /* Write data (128 byte) and mask (128 bit) */ 3171 for (i = 0; i < 16; ++i) { 3172 const size_t data_idx = i * 8; 3173 const size_t row_idx = i * 16; 3174 u32 dw0 = 3175 (data[data_idx + 0] << 0) | 3176 (data[data_idx + 1] << 8) | 3177 (data[data_idx + 2] << 16) | 3178 (data[data_idx + 3] << 24); 3179 u32 dw1 = 3180 (data[data_idx + 4] << 0) | 3181 (data[data_idx + 5] << 8) | 3182 (data[data_idx + 6] << 16) | 3183 (data[data_idx + 7] << 24); 3184 u32 tmp; 3185 3186 /* Write row: dw0, dw1 and mask */ 3187 wr32(fhft + row_idx, dw0); 3188 wr32(fhft + row_idx + 4, dw1); 3189 3190 /* mask is only valid for MASK(7, 0) */ 3191 tmp = rd32(fhft + row_idx + 8); 3192 tmp &= ~GENMASK(7, 0); 3193 tmp |= mask[i]; 3194 wr32(fhft + row_idx + 8, tmp); 3195 } 3196 3197 /* Enable filter. */ 3198 wufc |= IGC_WUFC_FLEX_HQ; 3199 if (input->index > 8) { 3200 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ 3201 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3202 3203 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); 3204 3205 wr32(IGC_WUFC_EXT, wufc_ext); 3206 } else { 3207 wufc |= (IGC_WUFC_FLX0 << input->index); 3208 } 3209 wr32(IGC_WUFC, wufc); 3210 3211 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", 3212 input->index); 3213 3214 return 0; 3215 } 3216 3217 static void igc_flex_filter_add_field(struct igc_flex_filter *flex, 3218 const void *src, unsigned int offset, 3219 size_t len, const void *mask) 3220 { 3221 int i; 3222 3223 /* data */ 3224 memcpy(&flex->data[offset], src, len); 3225 3226 /* mask */ 3227 for (i = 0; i < len; ++i) { 3228 const unsigned int idx = i + offset; 3229 const u8 *ptr = mask; 3230 3231 if (mask) { 3232 if (ptr[i] & 0xff) 3233 flex->mask[idx / 8] |= BIT(idx % 8); 3234 3235 continue; 3236 } 3237 3238 flex->mask[idx / 8] |= BIT(idx % 8); 3239 } 3240 } 3241 3242 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) 3243 { 3244 struct igc_hw *hw = &adapter->hw; 3245 u32 wufc, wufc_ext; 3246 int i; 3247 3248 wufc = rd32(IGC_WUFC); 3249 wufc_ext = rd32(IGC_WUFC_EXT); 3250 3251 for (i = 0; i < MAX_FLEX_FILTER; i++) { 3252 if (i < 8) { 3253 if (!(wufc & (IGC_WUFC_FLX0 << i))) 3254 return i; 3255 } else { 3256 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) 3257 return i; 3258 } 3259 } 3260 3261 return -ENOSPC; 3262 } 3263 3264 static bool igc_flex_filter_in_use(struct igc_adapter *adapter) 3265 { 3266 struct igc_hw *hw = &adapter->hw; 3267 u32 wufc, wufc_ext; 3268 3269 wufc = rd32(IGC_WUFC); 3270 wufc_ext = rd32(IGC_WUFC_EXT); 3271 3272 if (wufc & IGC_WUFC_FILTER_MASK) 3273 return true; 3274 3275 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) 3276 return true; 3277 3278 return false; 3279 } 3280 3281 static int igc_add_flex_filter(struct igc_adapter *adapter, 3282 struct igc_nfc_rule *rule) 3283 { 3284 struct igc_flex_filter flex = { }; 3285 struct igc_nfc_filter *filter = &rule->filter; 3286 unsigned int eth_offset, user_offset; 3287 int ret, index; 3288 bool vlan; 3289 3290 index = igc_find_avail_flex_filter_slot(adapter); 3291 if (index < 0) 3292 return -ENOSPC; 3293 3294 /* Construct the flex filter: 3295 * -> dest_mac [6] 3296 * -> src_mac [6] 3297 * -> tpid [2] 3298 * -> vlan tci [2] 3299 * -> ether type [2] 3300 * -> user data [8] 3301 * -> = 26 bytes => 32 length 3302 */ 3303 flex.index = index; 3304 flex.length = 32; 3305 flex.rx_queue = rule->action; 3306 3307 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; 3308 eth_offset = vlan ? 16 : 12; 3309 user_offset = vlan ? 18 : 14; 3310 3311 /* Add destination MAC */ 3312 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3313 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, 3314 ETH_ALEN, NULL); 3315 3316 /* Add source MAC */ 3317 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3318 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, 3319 ETH_ALEN, NULL); 3320 3321 /* Add VLAN etype */ 3322 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) 3323 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, 3324 sizeof(filter->vlan_etype), 3325 NULL); 3326 3327 /* Add VLAN TCI */ 3328 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) 3329 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, 3330 sizeof(filter->vlan_tci), NULL); 3331 3332 /* Add Ether type */ 3333 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3334 __be16 etype = cpu_to_be16(filter->etype); 3335 3336 igc_flex_filter_add_field(&flex, &etype, eth_offset, 3337 sizeof(etype), NULL); 3338 } 3339 3340 /* Add user data */ 3341 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) 3342 igc_flex_filter_add_field(&flex, &filter->user_data, 3343 user_offset, 3344 sizeof(filter->user_data), 3345 filter->user_mask); 3346 3347 /* Add it down to the hardware and enable it. */ 3348 ret = igc_write_flex_filter_ll(adapter, &flex); 3349 if (ret) 3350 return ret; 3351 3352 filter->flex_index = index; 3353 3354 return 0; 3355 } 3356 3357 static void igc_del_flex_filter(struct igc_adapter *adapter, 3358 u16 reg_index) 3359 { 3360 struct igc_hw *hw = &adapter->hw; 3361 u32 wufc; 3362 3363 /* Just disable the filter. The filter table itself is kept 3364 * intact. Another flex_filter_add() should override the "old" data 3365 * then. 3366 */ 3367 if (reg_index > 8) { 3368 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3369 3370 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); 3371 wr32(IGC_WUFC_EXT, wufc_ext); 3372 } else { 3373 wufc = rd32(IGC_WUFC); 3374 3375 wufc &= ~(IGC_WUFC_FLX0 << reg_index); 3376 wr32(IGC_WUFC, wufc); 3377 } 3378 3379 if (igc_flex_filter_in_use(adapter)) 3380 return; 3381 3382 /* No filters are in use, we may disable flex filters */ 3383 wufc = rd32(IGC_WUFC); 3384 wufc &= ~IGC_WUFC_FLEX_HQ; 3385 wr32(IGC_WUFC, wufc); 3386 } 3387 3388 static int igc_enable_nfc_rule(struct igc_adapter *adapter, 3389 struct igc_nfc_rule *rule) 3390 { 3391 int err; 3392 3393 if (rule->flex) { 3394 return igc_add_flex_filter(adapter, rule); 3395 } 3396 3397 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3398 err = igc_add_etype_filter(adapter, rule->filter.etype, 3399 rule->action); 3400 if (err) 3401 return err; 3402 } 3403 3404 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { 3405 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3406 rule->filter.src_addr, rule->action); 3407 if (err) 3408 return err; 3409 } 3410 3411 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { 3412 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3413 rule->filter.dst_addr, rule->action); 3414 if (err) 3415 return err; 3416 } 3417 3418 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3419 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3420 VLAN_PRIO_SHIFT; 3421 3422 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); 3423 if (err) 3424 return err; 3425 } 3426 3427 return 0; 3428 } 3429 3430 static void igc_disable_nfc_rule(struct igc_adapter *adapter, 3431 const struct igc_nfc_rule *rule) 3432 { 3433 if (rule->flex) { 3434 igc_del_flex_filter(adapter, rule->filter.flex_index); 3435 return; 3436 } 3437 3438 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) 3439 igc_del_etype_filter(adapter, rule->filter.etype); 3440 3441 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3442 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3443 VLAN_PRIO_SHIFT; 3444 3445 igc_del_vlan_prio_filter(adapter, prio); 3446 } 3447 3448 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3449 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3450 rule->filter.src_addr); 3451 3452 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3453 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3454 rule->filter.dst_addr); 3455 } 3456 3457 /** 3458 * igc_get_nfc_rule() - Get NFC rule 3459 * @adapter: Pointer to adapter 3460 * @location: Rule location 3461 * 3462 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3463 * 3464 * Return: Pointer to NFC rule at @location. If not found, NULL. 3465 */ 3466 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, 3467 u32 location) 3468 { 3469 struct igc_nfc_rule *rule; 3470 3471 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { 3472 if (rule->location == location) 3473 return rule; 3474 if (rule->location > location) 3475 break; 3476 } 3477 3478 return NULL; 3479 } 3480 3481 /** 3482 * igc_del_nfc_rule() - Delete NFC rule 3483 * @adapter: Pointer to adapter 3484 * @rule: Pointer to rule to be deleted 3485 * 3486 * Disable NFC rule in hardware and delete it from adapter. 3487 * 3488 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3489 */ 3490 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3491 { 3492 igc_disable_nfc_rule(adapter, rule); 3493 3494 list_del(&rule->list); 3495 adapter->nfc_rule_count--; 3496 3497 kfree(rule); 3498 } 3499 3500 static void igc_flush_nfc_rules(struct igc_adapter *adapter) 3501 { 3502 struct igc_nfc_rule *rule, *tmp; 3503 3504 mutex_lock(&adapter->nfc_rule_lock); 3505 3506 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) 3507 igc_del_nfc_rule(adapter, rule); 3508 3509 mutex_unlock(&adapter->nfc_rule_lock); 3510 } 3511 3512 /** 3513 * igc_add_nfc_rule() - Add NFC rule 3514 * @adapter: Pointer to adapter 3515 * @rule: Pointer to rule to be added 3516 * 3517 * Enable NFC rule in hardware and add it to adapter. 3518 * 3519 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3520 * 3521 * Return: 0 on success, negative errno on failure. 3522 */ 3523 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3524 { 3525 struct igc_nfc_rule *pred, *cur; 3526 int err; 3527 3528 err = igc_enable_nfc_rule(adapter, rule); 3529 if (err) 3530 return err; 3531 3532 pred = NULL; 3533 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { 3534 if (cur->location >= rule->location) 3535 break; 3536 pred = cur; 3537 } 3538 3539 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); 3540 adapter->nfc_rule_count++; 3541 return 0; 3542 } 3543 3544 static void igc_restore_nfc_rules(struct igc_adapter *adapter) 3545 { 3546 struct igc_nfc_rule *rule; 3547 3548 mutex_lock(&adapter->nfc_rule_lock); 3549 3550 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) 3551 igc_enable_nfc_rule(adapter, rule); 3552 3553 mutex_unlock(&adapter->nfc_rule_lock); 3554 } 3555 3556 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 3557 { 3558 struct igc_adapter *adapter = netdev_priv(netdev); 3559 3560 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); 3561 } 3562 3563 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) 3564 { 3565 struct igc_adapter *adapter = netdev_priv(netdev); 3566 3567 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); 3568 return 0; 3569 } 3570 3571 /** 3572 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3573 * @netdev: network interface device structure 3574 * 3575 * The set_rx_mode entry point is called whenever the unicast or multicast 3576 * address lists or the network interface flags are updated. This routine is 3577 * responsible for configuring the hardware for proper unicast, multicast, 3578 * promiscuous mode, and all-multi behavior. 3579 */ 3580 static void igc_set_rx_mode(struct net_device *netdev) 3581 { 3582 struct igc_adapter *adapter = netdev_priv(netdev); 3583 struct igc_hw *hw = &adapter->hw; 3584 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; 3585 int count; 3586 3587 /* Check for Promiscuous and All Multicast modes */ 3588 if (netdev->flags & IFF_PROMISC) { 3589 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; 3590 } else { 3591 if (netdev->flags & IFF_ALLMULTI) { 3592 rctl |= IGC_RCTL_MPE; 3593 } else { 3594 /* Write addresses to the MTA, if the attempt fails 3595 * then we should just turn on promiscuous mode so 3596 * that we can at least receive multicast traffic 3597 */ 3598 count = igc_write_mc_addr_list(netdev); 3599 if (count < 0) 3600 rctl |= IGC_RCTL_MPE; 3601 } 3602 } 3603 3604 /* Write addresses to available RAR registers, if there is not 3605 * sufficient space to store all the addresses then enable 3606 * unicast promiscuous mode 3607 */ 3608 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) 3609 rctl |= IGC_RCTL_UPE; 3610 3611 /* update state of unicast and multicast */ 3612 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 3613 wr32(IGC_RCTL, rctl); 3614 3615 #if (PAGE_SIZE < 8192) 3616 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) 3617 rlpml = IGC_MAX_FRAME_BUILD_SKB; 3618 #endif 3619 wr32(IGC_RLPML, rlpml); 3620 } 3621 3622 /** 3623 * igc_configure - configure the hardware for RX and TX 3624 * @adapter: private board structure 3625 */ 3626 static void igc_configure(struct igc_adapter *adapter) 3627 { 3628 struct net_device *netdev = adapter->netdev; 3629 int i = 0; 3630 3631 igc_get_hw_control(adapter); 3632 igc_set_rx_mode(netdev); 3633 3634 igc_restore_vlan(adapter); 3635 3636 igc_setup_tctl(adapter); 3637 igc_setup_mrqc(adapter); 3638 igc_setup_rctl(adapter); 3639 3640 igc_set_default_mac_filter(adapter); 3641 igc_restore_nfc_rules(adapter); 3642 3643 igc_configure_tx(adapter); 3644 igc_configure_rx(adapter); 3645 3646 igc_rx_fifo_flush_base(&adapter->hw); 3647 3648 /* call igc_desc_unused which always leaves 3649 * at least 1 descriptor unused to make sure 3650 * next_to_use != next_to_clean 3651 */ 3652 for (i = 0; i < adapter->num_rx_queues; i++) { 3653 struct igc_ring *ring = adapter->rx_ring[i]; 3654 3655 if (ring->xsk_pool) 3656 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 3657 else 3658 igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 3659 } 3660 } 3661 3662 /** 3663 * igc_write_ivar - configure ivar for given MSI-X vector 3664 * @hw: pointer to the HW structure 3665 * @msix_vector: vector number we are allocating to a given ring 3666 * @index: row index of IVAR register to write within IVAR table 3667 * @offset: column offset of in IVAR, should be multiple of 8 3668 * 3669 * The IVAR table consists of 2 columns, 3670 * each containing an cause allocation for an Rx and Tx ring, and a 3671 * variable number of rows depending on the number of queues supported. 3672 */ 3673 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 3674 int index, int offset) 3675 { 3676 u32 ivar = array_rd32(IGC_IVAR0, index); 3677 3678 /* clear any bits that are currently set */ 3679 ivar &= ~((u32)0xFF << offset); 3680 3681 /* write vector and valid bit */ 3682 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 3683 3684 array_wr32(IGC_IVAR0, index, ivar); 3685 } 3686 3687 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 3688 { 3689 struct igc_adapter *adapter = q_vector->adapter; 3690 struct igc_hw *hw = &adapter->hw; 3691 int rx_queue = IGC_N0_QUEUE; 3692 int tx_queue = IGC_N0_QUEUE; 3693 3694 if (q_vector->rx.ring) 3695 rx_queue = q_vector->rx.ring->reg_idx; 3696 if (q_vector->tx.ring) 3697 tx_queue = q_vector->tx.ring->reg_idx; 3698 3699 switch (hw->mac.type) { 3700 case igc_i225: 3701 if (rx_queue > IGC_N0_QUEUE) 3702 igc_write_ivar(hw, msix_vector, 3703 rx_queue >> 1, 3704 (rx_queue & 0x1) << 4); 3705 if (tx_queue > IGC_N0_QUEUE) 3706 igc_write_ivar(hw, msix_vector, 3707 tx_queue >> 1, 3708 ((tx_queue & 0x1) << 4) + 8); 3709 q_vector->eims_value = BIT(msix_vector); 3710 break; 3711 default: 3712 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 3713 break; 3714 } 3715 3716 /* add q_vector eims value to global eims_enable_mask */ 3717 adapter->eims_enable_mask |= q_vector->eims_value; 3718 3719 /* configure q_vector to set itr on first interrupt */ 3720 q_vector->set_itr = 1; 3721 } 3722 3723 /** 3724 * igc_configure_msix - Configure MSI-X hardware 3725 * @adapter: Pointer to adapter structure 3726 * 3727 * igc_configure_msix sets up the hardware to properly 3728 * generate MSI-X interrupts. 3729 */ 3730 static void igc_configure_msix(struct igc_adapter *adapter) 3731 { 3732 struct igc_hw *hw = &adapter->hw; 3733 int i, vector = 0; 3734 u32 tmp; 3735 3736 adapter->eims_enable_mask = 0; 3737 3738 /* set vector for other causes, i.e. link changes */ 3739 switch (hw->mac.type) { 3740 case igc_i225: 3741 /* Turn on MSI-X capability first, or our settings 3742 * won't stick. And it will take days to debug. 3743 */ 3744 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 3745 IGC_GPIE_PBA | IGC_GPIE_EIAME | 3746 IGC_GPIE_NSICR); 3747 3748 /* enable msix_other interrupt */ 3749 adapter->eims_other = BIT(vector); 3750 tmp = (vector++ | IGC_IVAR_VALID) << 8; 3751 3752 wr32(IGC_IVAR_MISC, tmp); 3753 break; 3754 default: 3755 /* do nothing, since nothing else supports MSI-X */ 3756 break; 3757 } /* switch (hw->mac.type) */ 3758 3759 adapter->eims_enable_mask |= adapter->eims_other; 3760 3761 for (i = 0; i < adapter->num_q_vectors; i++) 3762 igc_assign_vector(adapter->q_vector[i], vector++); 3763 3764 wrfl(); 3765 } 3766 3767 /** 3768 * igc_irq_enable - Enable default interrupt generation settings 3769 * @adapter: board private structure 3770 */ 3771 static void igc_irq_enable(struct igc_adapter *adapter) 3772 { 3773 struct igc_hw *hw = &adapter->hw; 3774 3775 if (adapter->msix_entries) { 3776 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 3777 u32 regval = rd32(IGC_EIAC); 3778 3779 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 3780 regval = rd32(IGC_EIAM); 3781 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 3782 wr32(IGC_EIMS, adapter->eims_enable_mask); 3783 wr32(IGC_IMS, ims); 3784 } else { 3785 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3786 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 3787 } 3788 } 3789 3790 /** 3791 * igc_irq_disable - Mask off interrupt generation on the NIC 3792 * @adapter: board private structure 3793 */ 3794 static void igc_irq_disable(struct igc_adapter *adapter) 3795 { 3796 struct igc_hw *hw = &adapter->hw; 3797 3798 if (adapter->msix_entries) { 3799 u32 regval = rd32(IGC_EIAM); 3800 3801 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 3802 wr32(IGC_EIMC, adapter->eims_enable_mask); 3803 regval = rd32(IGC_EIAC); 3804 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 3805 } 3806 3807 wr32(IGC_IAM, 0); 3808 wr32(IGC_IMC, ~0); 3809 wrfl(); 3810 3811 if (adapter->msix_entries) { 3812 int vector = 0, i; 3813 3814 synchronize_irq(adapter->msix_entries[vector++].vector); 3815 3816 for (i = 0; i < adapter->num_q_vectors; i++) 3817 synchronize_irq(adapter->msix_entries[vector++].vector); 3818 } else { 3819 synchronize_irq(adapter->pdev->irq); 3820 } 3821 } 3822 3823 void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 3824 const u32 max_rss_queues) 3825 { 3826 /* Determine if we need to pair queues. */ 3827 /* If rss_queues > half of max_rss_queues, pair the queues in 3828 * order to conserve interrupts due to limited supply. 3829 */ 3830 if (adapter->rss_queues > (max_rss_queues / 2)) 3831 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 3832 else 3833 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 3834 } 3835 3836 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 3837 { 3838 return IGC_MAX_RX_QUEUES; 3839 } 3840 3841 static void igc_init_queue_configuration(struct igc_adapter *adapter) 3842 { 3843 u32 max_rss_queues; 3844 3845 max_rss_queues = igc_get_max_rss_queues(adapter); 3846 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 3847 3848 igc_set_flag_queue_pairs(adapter, max_rss_queues); 3849 } 3850 3851 /** 3852 * igc_reset_q_vector - Reset config for interrupt vector 3853 * @adapter: board private structure to initialize 3854 * @v_idx: Index of vector to be reset 3855 * 3856 * If NAPI is enabled it will delete any references to the 3857 * NAPI struct. This is preparation for igc_free_q_vector. 3858 */ 3859 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 3860 { 3861 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3862 3863 /* if we're coming from igc_set_interrupt_capability, the vectors are 3864 * not yet allocated 3865 */ 3866 if (!q_vector) 3867 return; 3868 3869 if (q_vector->tx.ring) 3870 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 3871 3872 if (q_vector->rx.ring) 3873 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 3874 3875 netif_napi_del(&q_vector->napi); 3876 } 3877 3878 /** 3879 * igc_free_q_vector - Free memory allocated for specific interrupt vector 3880 * @adapter: board private structure to initialize 3881 * @v_idx: Index of vector to be freed 3882 * 3883 * This function frees the memory allocated to the q_vector. 3884 */ 3885 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 3886 { 3887 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 3888 3889 adapter->q_vector[v_idx] = NULL; 3890 3891 /* igc_get_stats64() might access the rings on this vector, 3892 * we must wait a grace period before freeing it. 3893 */ 3894 if (q_vector) 3895 kfree_rcu(q_vector, rcu); 3896 } 3897 3898 /** 3899 * igc_free_q_vectors - Free memory allocated for interrupt vectors 3900 * @adapter: board private structure to initialize 3901 * 3902 * This function frees the memory allocated to the q_vectors. In addition if 3903 * NAPI is enabled it will delete any references to the NAPI struct prior 3904 * to freeing the q_vector. 3905 */ 3906 static void igc_free_q_vectors(struct igc_adapter *adapter) 3907 { 3908 int v_idx = adapter->num_q_vectors; 3909 3910 adapter->num_tx_queues = 0; 3911 adapter->num_rx_queues = 0; 3912 adapter->num_q_vectors = 0; 3913 3914 while (v_idx--) { 3915 igc_reset_q_vector(adapter, v_idx); 3916 igc_free_q_vector(adapter, v_idx); 3917 } 3918 } 3919 3920 /** 3921 * igc_update_itr - update the dynamic ITR value based on statistics 3922 * @q_vector: pointer to q_vector 3923 * @ring_container: ring info to update the itr for 3924 * 3925 * Stores a new ITR value based on packets and byte 3926 * counts during the last interrupt. The advantage of per interrupt 3927 * computation is faster updates and more accurate ITR for the current 3928 * traffic pattern. Constants in this function were computed 3929 * based on theoretical maximum wire speed and thresholds were set based 3930 * on testing data as well as attempting to minimize response time 3931 * while increasing bulk throughput. 3932 * NOTE: These calculations are only valid when operating in a single- 3933 * queue environment. 3934 */ 3935 static void igc_update_itr(struct igc_q_vector *q_vector, 3936 struct igc_ring_container *ring_container) 3937 { 3938 unsigned int packets = ring_container->total_packets; 3939 unsigned int bytes = ring_container->total_bytes; 3940 u8 itrval = ring_container->itr; 3941 3942 /* no packets, exit with status unchanged */ 3943 if (packets == 0) 3944 return; 3945 3946 switch (itrval) { 3947 case lowest_latency: 3948 /* handle TSO and jumbo frames */ 3949 if (bytes / packets > 8000) 3950 itrval = bulk_latency; 3951 else if ((packets < 5) && (bytes > 512)) 3952 itrval = low_latency; 3953 break; 3954 case low_latency: /* 50 usec aka 20000 ints/s */ 3955 if (bytes > 10000) { 3956 /* this if handles the TSO accounting */ 3957 if (bytes / packets > 8000) 3958 itrval = bulk_latency; 3959 else if ((packets < 10) || ((bytes / packets) > 1200)) 3960 itrval = bulk_latency; 3961 else if ((packets > 35)) 3962 itrval = lowest_latency; 3963 } else if (bytes / packets > 2000) { 3964 itrval = bulk_latency; 3965 } else if (packets <= 2 && bytes < 512) { 3966 itrval = lowest_latency; 3967 } 3968 break; 3969 case bulk_latency: /* 250 usec aka 4000 ints/s */ 3970 if (bytes > 25000) { 3971 if (packets > 35) 3972 itrval = low_latency; 3973 } else if (bytes < 1500) { 3974 itrval = low_latency; 3975 } 3976 break; 3977 } 3978 3979 /* clear work counters since we have the values we need */ 3980 ring_container->total_bytes = 0; 3981 ring_container->total_packets = 0; 3982 3983 /* write updated itr to ring container */ 3984 ring_container->itr = itrval; 3985 } 3986 3987 static void igc_set_itr(struct igc_q_vector *q_vector) 3988 { 3989 struct igc_adapter *adapter = q_vector->adapter; 3990 u32 new_itr = q_vector->itr_val; 3991 u8 current_itr = 0; 3992 3993 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3994 switch (adapter->link_speed) { 3995 case SPEED_10: 3996 case SPEED_100: 3997 current_itr = 0; 3998 new_itr = IGC_4K_ITR; 3999 goto set_itr_now; 4000 default: 4001 break; 4002 } 4003 4004 igc_update_itr(q_vector, &q_vector->tx); 4005 igc_update_itr(q_vector, &q_vector->rx); 4006 4007 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 4008 4009 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4010 if (current_itr == lowest_latency && 4011 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4012 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4013 current_itr = low_latency; 4014 4015 switch (current_itr) { 4016 /* counts and packets in update_itr are dependent on these numbers */ 4017 case lowest_latency: 4018 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 4019 break; 4020 case low_latency: 4021 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 4022 break; 4023 case bulk_latency: 4024 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 4025 break; 4026 default: 4027 break; 4028 } 4029 4030 set_itr_now: 4031 if (new_itr != q_vector->itr_val) { 4032 /* this attempts to bias the interrupt rate towards Bulk 4033 * by adding intermediate steps when interrupt rate is 4034 * increasing 4035 */ 4036 new_itr = new_itr > q_vector->itr_val ? 4037 max((new_itr * q_vector->itr_val) / 4038 (new_itr + (q_vector->itr_val >> 2)), 4039 new_itr) : new_itr; 4040 /* Don't write the value here; it resets the adapter's 4041 * internal timer, and causes us to delay far longer than 4042 * we should between interrupts. Instead, we write the ITR 4043 * value at the beginning of the next interrupt so the timing 4044 * ends up being correct. 4045 */ 4046 q_vector->itr_val = new_itr; 4047 q_vector->set_itr = 1; 4048 } 4049 } 4050 4051 static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 4052 { 4053 int v_idx = adapter->num_q_vectors; 4054 4055 if (adapter->msix_entries) { 4056 pci_disable_msix(adapter->pdev); 4057 kfree(adapter->msix_entries); 4058 adapter->msix_entries = NULL; 4059 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 4060 pci_disable_msi(adapter->pdev); 4061 } 4062 4063 while (v_idx--) 4064 igc_reset_q_vector(adapter, v_idx); 4065 } 4066 4067 /** 4068 * igc_set_interrupt_capability - set MSI or MSI-X if supported 4069 * @adapter: Pointer to adapter structure 4070 * @msix: boolean value for MSI-X capability 4071 * 4072 * Attempt to configure interrupts using the best available 4073 * capabilities of the hardware and kernel. 4074 */ 4075 static void igc_set_interrupt_capability(struct igc_adapter *adapter, 4076 bool msix) 4077 { 4078 int numvecs, i; 4079 int err; 4080 4081 if (!msix) 4082 goto msi_only; 4083 adapter->flags |= IGC_FLAG_HAS_MSIX; 4084 4085 /* Number of supported queues. */ 4086 adapter->num_rx_queues = adapter->rss_queues; 4087 4088 adapter->num_tx_queues = adapter->rss_queues; 4089 4090 /* start with one vector for every Rx queue */ 4091 numvecs = adapter->num_rx_queues; 4092 4093 /* if Tx handler is separate add 1 for every Tx queue */ 4094 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 4095 numvecs += adapter->num_tx_queues; 4096 4097 /* store the number of vectors reserved for queues */ 4098 adapter->num_q_vectors = numvecs; 4099 4100 /* add 1 vector for link status interrupts */ 4101 numvecs++; 4102 4103 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 4104 GFP_KERNEL); 4105 4106 if (!adapter->msix_entries) 4107 return; 4108 4109 /* populate entry values */ 4110 for (i = 0; i < numvecs; i++) 4111 adapter->msix_entries[i].entry = i; 4112 4113 err = pci_enable_msix_range(adapter->pdev, 4114 adapter->msix_entries, 4115 numvecs, 4116 numvecs); 4117 if (err > 0) 4118 return; 4119 4120 kfree(adapter->msix_entries); 4121 adapter->msix_entries = NULL; 4122 4123 igc_reset_interrupt_capability(adapter); 4124 4125 msi_only: 4126 adapter->flags &= ~IGC_FLAG_HAS_MSIX; 4127 4128 adapter->rss_queues = 1; 4129 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 4130 adapter->num_rx_queues = 1; 4131 adapter->num_tx_queues = 1; 4132 adapter->num_q_vectors = 1; 4133 if (!pci_enable_msi(adapter->pdev)) 4134 adapter->flags |= IGC_FLAG_HAS_MSI; 4135 } 4136 4137 /** 4138 * igc_update_ring_itr - update the dynamic ITR value based on packet size 4139 * @q_vector: pointer to q_vector 4140 * 4141 * Stores a new ITR value based on strictly on packet size. This 4142 * algorithm is less sophisticated than that used in igc_update_itr, 4143 * due to the difficulty of synchronizing statistics across multiple 4144 * receive rings. The divisors and thresholds used by this function 4145 * were determined based on theoretical maximum wire speed and testing 4146 * data, in order to minimize response time while increasing bulk 4147 * throughput. 4148 * NOTE: This function is called only when operating in a multiqueue 4149 * receive environment. 4150 */ 4151 static void igc_update_ring_itr(struct igc_q_vector *q_vector) 4152 { 4153 struct igc_adapter *adapter = q_vector->adapter; 4154 int new_val = q_vector->itr_val; 4155 int avg_wire_size = 0; 4156 unsigned int packets; 4157 4158 /* For non-gigabit speeds, just fix the interrupt rate at 4000 4159 * ints/sec - ITR timer value of 120 ticks. 4160 */ 4161 switch (adapter->link_speed) { 4162 case SPEED_10: 4163 case SPEED_100: 4164 new_val = IGC_4K_ITR; 4165 goto set_itr_val; 4166 default: 4167 break; 4168 } 4169 4170 packets = q_vector->rx.total_packets; 4171 if (packets) 4172 avg_wire_size = q_vector->rx.total_bytes / packets; 4173 4174 packets = q_vector->tx.total_packets; 4175 if (packets) 4176 avg_wire_size = max_t(u32, avg_wire_size, 4177 q_vector->tx.total_bytes / packets); 4178 4179 /* if avg_wire_size isn't set no work was done */ 4180 if (!avg_wire_size) 4181 goto clear_counts; 4182 4183 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 4184 avg_wire_size += 24; 4185 4186 /* Don't starve jumbo frames */ 4187 avg_wire_size = min(avg_wire_size, 3000); 4188 4189 /* Give a little boost to mid-size frames */ 4190 if (avg_wire_size > 300 && avg_wire_size < 1200) 4191 new_val = avg_wire_size / 3; 4192 else 4193 new_val = avg_wire_size / 2; 4194 4195 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4196 if (new_val < IGC_20K_ITR && 4197 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4198 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4199 new_val = IGC_20K_ITR; 4200 4201 set_itr_val: 4202 if (new_val != q_vector->itr_val) { 4203 q_vector->itr_val = new_val; 4204 q_vector->set_itr = 1; 4205 } 4206 clear_counts: 4207 q_vector->rx.total_bytes = 0; 4208 q_vector->rx.total_packets = 0; 4209 q_vector->tx.total_bytes = 0; 4210 q_vector->tx.total_packets = 0; 4211 } 4212 4213 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 4214 { 4215 struct igc_adapter *adapter = q_vector->adapter; 4216 struct igc_hw *hw = &adapter->hw; 4217 4218 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 4219 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 4220 if (adapter->num_q_vectors == 1) 4221 igc_set_itr(q_vector); 4222 else 4223 igc_update_ring_itr(q_vector); 4224 } 4225 4226 if (!test_bit(__IGC_DOWN, &adapter->state)) { 4227 if (adapter->msix_entries) 4228 wr32(IGC_EIMS, q_vector->eims_value); 4229 else 4230 igc_irq_enable(adapter); 4231 } 4232 } 4233 4234 static void igc_add_ring(struct igc_ring *ring, 4235 struct igc_ring_container *head) 4236 { 4237 head->ring = ring; 4238 head->count++; 4239 } 4240 4241 /** 4242 * igc_cache_ring_register - Descriptor ring to register mapping 4243 * @adapter: board private structure to initialize 4244 * 4245 * Once we know the feature-set enabled for the device, we'll cache 4246 * the register offset the descriptor ring is assigned to. 4247 */ 4248 static void igc_cache_ring_register(struct igc_adapter *adapter) 4249 { 4250 int i = 0, j = 0; 4251 4252 switch (adapter->hw.mac.type) { 4253 case igc_i225: 4254 default: 4255 for (; i < adapter->num_rx_queues; i++) 4256 adapter->rx_ring[i]->reg_idx = i; 4257 for (; j < adapter->num_tx_queues; j++) 4258 adapter->tx_ring[j]->reg_idx = j; 4259 break; 4260 } 4261 } 4262 4263 /** 4264 * igc_poll - NAPI Rx polling callback 4265 * @napi: napi polling structure 4266 * @budget: count of how many packets we should handle 4267 */ 4268 static int igc_poll(struct napi_struct *napi, int budget) 4269 { 4270 struct igc_q_vector *q_vector = container_of(napi, 4271 struct igc_q_vector, 4272 napi); 4273 struct igc_ring *rx_ring = q_vector->rx.ring; 4274 bool clean_complete = true; 4275 int work_done = 0; 4276 4277 if (q_vector->tx.ring) 4278 clean_complete = igc_clean_tx_irq(q_vector, budget); 4279 4280 if (rx_ring) { 4281 int cleaned = rx_ring->xsk_pool ? 4282 igc_clean_rx_irq_zc(q_vector, budget) : 4283 igc_clean_rx_irq(q_vector, budget); 4284 4285 work_done += cleaned; 4286 if (cleaned >= budget) 4287 clean_complete = false; 4288 } 4289 4290 /* If all work not completed, return budget and keep polling */ 4291 if (!clean_complete) 4292 return budget; 4293 4294 /* Exit the polling mode, but don't re-enable interrupts if stack might 4295 * poll us due to busy-polling 4296 */ 4297 if (likely(napi_complete_done(napi, work_done))) 4298 igc_ring_irq_enable(q_vector); 4299 4300 return min(work_done, budget - 1); 4301 } 4302 4303 /** 4304 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 4305 * @adapter: board private structure to initialize 4306 * @v_count: q_vectors allocated on adapter, used for ring interleaving 4307 * @v_idx: index of vector in adapter struct 4308 * @txr_count: total number of Tx rings to allocate 4309 * @txr_idx: index of first Tx ring to allocate 4310 * @rxr_count: total number of Rx rings to allocate 4311 * @rxr_idx: index of first Rx ring to allocate 4312 * 4313 * We allocate one q_vector. If allocation fails we return -ENOMEM. 4314 */ 4315 static int igc_alloc_q_vector(struct igc_adapter *adapter, 4316 unsigned int v_count, unsigned int v_idx, 4317 unsigned int txr_count, unsigned int txr_idx, 4318 unsigned int rxr_count, unsigned int rxr_idx) 4319 { 4320 struct igc_q_vector *q_vector; 4321 struct igc_ring *ring; 4322 int ring_count; 4323 4324 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 4325 if (txr_count > 1 || rxr_count > 1) 4326 return -ENOMEM; 4327 4328 ring_count = txr_count + rxr_count; 4329 4330 /* allocate q_vector and rings */ 4331 q_vector = adapter->q_vector[v_idx]; 4332 if (!q_vector) 4333 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 4334 GFP_KERNEL); 4335 else 4336 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 4337 if (!q_vector) 4338 return -ENOMEM; 4339 4340 /* initialize NAPI */ 4341 netif_napi_add(adapter->netdev, &q_vector->napi, 4342 igc_poll, 64); 4343 4344 /* tie q_vector and adapter together */ 4345 adapter->q_vector[v_idx] = q_vector; 4346 q_vector->adapter = adapter; 4347 4348 /* initialize work limits */ 4349 q_vector->tx.work_limit = adapter->tx_work_limit; 4350 4351 /* initialize ITR configuration */ 4352 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 4353 q_vector->itr_val = IGC_START_ITR; 4354 4355 /* initialize pointer to rings */ 4356 ring = q_vector->ring; 4357 4358 /* initialize ITR */ 4359 if (rxr_count) { 4360 /* rx or rx/tx vector */ 4361 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 4362 q_vector->itr_val = adapter->rx_itr_setting; 4363 } else { 4364 /* tx only vector */ 4365 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 4366 q_vector->itr_val = adapter->tx_itr_setting; 4367 } 4368 4369 if (txr_count) { 4370 /* assign generic ring traits */ 4371 ring->dev = &adapter->pdev->dev; 4372 ring->netdev = adapter->netdev; 4373 4374 /* configure backlink on ring */ 4375 ring->q_vector = q_vector; 4376 4377 /* update q_vector Tx values */ 4378 igc_add_ring(ring, &q_vector->tx); 4379 4380 /* apply Tx specific ring traits */ 4381 ring->count = adapter->tx_ring_count; 4382 ring->queue_index = txr_idx; 4383 4384 /* assign ring to adapter */ 4385 adapter->tx_ring[txr_idx] = ring; 4386 4387 /* push pointer to next ring */ 4388 ring++; 4389 } 4390 4391 if (rxr_count) { 4392 /* assign generic ring traits */ 4393 ring->dev = &adapter->pdev->dev; 4394 ring->netdev = adapter->netdev; 4395 4396 /* configure backlink on ring */ 4397 ring->q_vector = q_vector; 4398 4399 /* update q_vector Rx values */ 4400 igc_add_ring(ring, &q_vector->rx); 4401 4402 /* apply Rx specific ring traits */ 4403 ring->count = adapter->rx_ring_count; 4404 ring->queue_index = rxr_idx; 4405 4406 /* assign ring to adapter */ 4407 adapter->rx_ring[rxr_idx] = ring; 4408 } 4409 4410 return 0; 4411 } 4412 4413 /** 4414 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 4415 * @adapter: board private structure to initialize 4416 * 4417 * We allocate one q_vector per queue interrupt. If allocation fails we 4418 * return -ENOMEM. 4419 */ 4420 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 4421 { 4422 int rxr_remaining = adapter->num_rx_queues; 4423 int txr_remaining = adapter->num_tx_queues; 4424 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 4425 int q_vectors = adapter->num_q_vectors; 4426 int err; 4427 4428 if (q_vectors >= (rxr_remaining + txr_remaining)) { 4429 for (; rxr_remaining; v_idx++) { 4430 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4431 0, 0, 1, rxr_idx); 4432 4433 if (err) 4434 goto err_out; 4435 4436 /* update counts and index */ 4437 rxr_remaining--; 4438 rxr_idx++; 4439 } 4440 } 4441 4442 for (; v_idx < q_vectors; v_idx++) { 4443 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 4444 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 4445 4446 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4447 tqpv, txr_idx, rqpv, rxr_idx); 4448 4449 if (err) 4450 goto err_out; 4451 4452 /* update counts and index */ 4453 rxr_remaining -= rqpv; 4454 txr_remaining -= tqpv; 4455 rxr_idx++; 4456 txr_idx++; 4457 } 4458 4459 return 0; 4460 4461 err_out: 4462 adapter->num_tx_queues = 0; 4463 adapter->num_rx_queues = 0; 4464 adapter->num_q_vectors = 0; 4465 4466 while (v_idx--) 4467 igc_free_q_vector(adapter, v_idx); 4468 4469 return -ENOMEM; 4470 } 4471 4472 /** 4473 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 4474 * @adapter: Pointer to adapter structure 4475 * @msix: boolean for MSI-X capability 4476 * 4477 * This function initializes the interrupts and allocates all of the queues. 4478 */ 4479 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 4480 { 4481 struct net_device *dev = adapter->netdev; 4482 int err = 0; 4483 4484 igc_set_interrupt_capability(adapter, msix); 4485 4486 err = igc_alloc_q_vectors(adapter); 4487 if (err) { 4488 netdev_err(dev, "Unable to allocate memory for vectors\n"); 4489 goto err_alloc_q_vectors; 4490 } 4491 4492 igc_cache_ring_register(adapter); 4493 4494 return 0; 4495 4496 err_alloc_q_vectors: 4497 igc_reset_interrupt_capability(adapter); 4498 return err; 4499 } 4500 4501 /** 4502 * igc_sw_init - Initialize general software structures (struct igc_adapter) 4503 * @adapter: board private structure to initialize 4504 * 4505 * igc_sw_init initializes the Adapter private data structure. 4506 * Fields are initialized based on PCI device information and 4507 * OS network device settings (MTU size). 4508 */ 4509 static int igc_sw_init(struct igc_adapter *adapter) 4510 { 4511 struct net_device *netdev = adapter->netdev; 4512 struct pci_dev *pdev = adapter->pdev; 4513 struct igc_hw *hw = &adapter->hw; 4514 4515 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 4516 4517 /* set default ring sizes */ 4518 adapter->tx_ring_count = IGC_DEFAULT_TXD; 4519 adapter->rx_ring_count = IGC_DEFAULT_RXD; 4520 4521 /* set default ITR values */ 4522 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 4523 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 4524 4525 /* set default work limits */ 4526 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 4527 4528 /* adjust max frame to be at least the size of a standard frame */ 4529 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 4530 VLAN_HLEN; 4531 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4532 4533 mutex_init(&adapter->nfc_rule_lock); 4534 INIT_LIST_HEAD(&adapter->nfc_rule_list); 4535 adapter->nfc_rule_count = 0; 4536 4537 spin_lock_init(&adapter->stats64_lock); 4538 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 4539 adapter->flags |= IGC_FLAG_HAS_MSIX; 4540 4541 igc_init_queue_configuration(adapter); 4542 4543 /* This call may decrease the number of queues */ 4544 if (igc_init_interrupt_scheme(adapter, true)) { 4545 netdev_err(netdev, "Unable to allocate memory for queues\n"); 4546 return -ENOMEM; 4547 } 4548 4549 /* Explicitly disable IRQ since the NIC can be in any state. */ 4550 igc_irq_disable(adapter); 4551 4552 set_bit(__IGC_DOWN, &adapter->state); 4553 4554 return 0; 4555 } 4556 4557 /** 4558 * igc_up - Open the interface and prepare it to handle traffic 4559 * @adapter: board private structure 4560 */ 4561 void igc_up(struct igc_adapter *adapter) 4562 { 4563 struct igc_hw *hw = &adapter->hw; 4564 int i = 0; 4565 4566 /* hardware has been reset, we need to reload some things */ 4567 igc_configure(adapter); 4568 4569 clear_bit(__IGC_DOWN, &adapter->state); 4570 4571 for (i = 0; i < adapter->num_q_vectors; i++) 4572 napi_enable(&adapter->q_vector[i]->napi); 4573 4574 if (adapter->msix_entries) 4575 igc_configure_msix(adapter); 4576 else 4577 igc_assign_vector(adapter->q_vector[0], 0); 4578 4579 /* Clear any pending interrupts. */ 4580 rd32(IGC_ICR); 4581 igc_irq_enable(adapter); 4582 4583 netif_tx_start_all_queues(adapter->netdev); 4584 4585 /* start the watchdog. */ 4586 hw->mac.get_link_status = true; 4587 schedule_work(&adapter->watchdog_task); 4588 } 4589 4590 /** 4591 * igc_update_stats - Update the board statistics counters 4592 * @adapter: board private structure 4593 */ 4594 void igc_update_stats(struct igc_adapter *adapter) 4595 { 4596 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 4597 struct pci_dev *pdev = adapter->pdev; 4598 struct igc_hw *hw = &adapter->hw; 4599 u64 _bytes, _packets; 4600 u64 bytes, packets; 4601 unsigned int start; 4602 u32 mpc; 4603 int i; 4604 4605 /* Prevent stats update while adapter is being reset, or if the pci 4606 * connection is down. 4607 */ 4608 if (adapter->link_speed == 0) 4609 return; 4610 if (pci_channel_offline(pdev)) 4611 return; 4612 4613 packets = 0; 4614 bytes = 0; 4615 4616 rcu_read_lock(); 4617 for (i = 0; i < adapter->num_rx_queues; i++) { 4618 struct igc_ring *ring = adapter->rx_ring[i]; 4619 u32 rqdpc = rd32(IGC_RQDPC(i)); 4620 4621 if (hw->mac.type >= igc_i225) 4622 wr32(IGC_RQDPC(i), 0); 4623 4624 if (rqdpc) { 4625 ring->rx_stats.drops += rqdpc; 4626 net_stats->rx_fifo_errors += rqdpc; 4627 } 4628 4629 do { 4630 start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 4631 _bytes = ring->rx_stats.bytes; 4632 _packets = ring->rx_stats.packets; 4633 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 4634 bytes += _bytes; 4635 packets += _packets; 4636 } 4637 4638 net_stats->rx_bytes = bytes; 4639 net_stats->rx_packets = packets; 4640 4641 packets = 0; 4642 bytes = 0; 4643 for (i = 0; i < adapter->num_tx_queues; i++) { 4644 struct igc_ring *ring = adapter->tx_ring[i]; 4645 4646 do { 4647 start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 4648 _bytes = ring->tx_stats.bytes; 4649 _packets = ring->tx_stats.packets; 4650 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 4651 bytes += _bytes; 4652 packets += _packets; 4653 } 4654 net_stats->tx_bytes = bytes; 4655 net_stats->tx_packets = packets; 4656 rcu_read_unlock(); 4657 4658 /* read stats registers */ 4659 adapter->stats.crcerrs += rd32(IGC_CRCERRS); 4660 adapter->stats.gprc += rd32(IGC_GPRC); 4661 adapter->stats.gorc += rd32(IGC_GORCL); 4662 rd32(IGC_GORCH); /* clear GORCL */ 4663 adapter->stats.bprc += rd32(IGC_BPRC); 4664 adapter->stats.mprc += rd32(IGC_MPRC); 4665 adapter->stats.roc += rd32(IGC_ROC); 4666 4667 adapter->stats.prc64 += rd32(IGC_PRC64); 4668 adapter->stats.prc127 += rd32(IGC_PRC127); 4669 adapter->stats.prc255 += rd32(IGC_PRC255); 4670 adapter->stats.prc511 += rd32(IGC_PRC511); 4671 adapter->stats.prc1023 += rd32(IGC_PRC1023); 4672 adapter->stats.prc1522 += rd32(IGC_PRC1522); 4673 adapter->stats.tlpic += rd32(IGC_TLPIC); 4674 adapter->stats.rlpic += rd32(IGC_RLPIC); 4675 adapter->stats.hgptc += rd32(IGC_HGPTC); 4676 4677 mpc = rd32(IGC_MPC); 4678 adapter->stats.mpc += mpc; 4679 net_stats->rx_fifo_errors += mpc; 4680 adapter->stats.scc += rd32(IGC_SCC); 4681 adapter->stats.ecol += rd32(IGC_ECOL); 4682 adapter->stats.mcc += rd32(IGC_MCC); 4683 adapter->stats.latecol += rd32(IGC_LATECOL); 4684 adapter->stats.dc += rd32(IGC_DC); 4685 adapter->stats.rlec += rd32(IGC_RLEC); 4686 adapter->stats.xonrxc += rd32(IGC_XONRXC); 4687 adapter->stats.xontxc += rd32(IGC_XONTXC); 4688 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 4689 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 4690 adapter->stats.fcruc += rd32(IGC_FCRUC); 4691 adapter->stats.gptc += rd32(IGC_GPTC); 4692 adapter->stats.gotc += rd32(IGC_GOTCL); 4693 rd32(IGC_GOTCH); /* clear GOTCL */ 4694 adapter->stats.rnbc += rd32(IGC_RNBC); 4695 adapter->stats.ruc += rd32(IGC_RUC); 4696 adapter->stats.rfc += rd32(IGC_RFC); 4697 adapter->stats.rjc += rd32(IGC_RJC); 4698 adapter->stats.tor += rd32(IGC_TORH); 4699 adapter->stats.tot += rd32(IGC_TOTH); 4700 adapter->stats.tpr += rd32(IGC_TPR); 4701 4702 adapter->stats.ptc64 += rd32(IGC_PTC64); 4703 adapter->stats.ptc127 += rd32(IGC_PTC127); 4704 adapter->stats.ptc255 += rd32(IGC_PTC255); 4705 adapter->stats.ptc511 += rd32(IGC_PTC511); 4706 adapter->stats.ptc1023 += rd32(IGC_PTC1023); 4707 adapter->stats.ptc1522 += rd32(IGC_PTC1522); 4708 4709 adapter->stats.mptc += rd32(IGC_MPTC); 4710 adapter->stats.bptc += rd32(IGC_BPTC); 4711 4712 adapter->stats.tpt += rd32(IGC_TPT); 4713 adapter->stats.colc += rd32(IGC_COLC); 4714 adapter->stats.colc += rd32(IGC_RERC); 4715 4716 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 4717 4718 adapter->stats.tsctc += rd32(IGC_TSCTC); 4719 4720 adapter->stats.iac += rd32(IGC_IAC); 4721 4722 /* Fill out the OS statistics structure */ 4723 net_stats->multicast = adapter->stats.mprc; 4724 net_stats->collisions = adapter->stats.colc; 4725 4726 /* Rx Errors */ 4727 4728 /* RLEC on some newer hardware can be incorrect so build 4729 * our own version based on RUC and ROC 4730 */ 4731 net_stats->rx_errors = adapter->stats.rxerrc + 4732 adapter->stats.crcerrs + adapter->stats.algnerrc + 4733 adapter->stats.ruc + adapter->stats.roc + 4734 adapter->stats.cexterr; 4735 net_stats->rx_length_errors = adapter->stats.ruc + 4736 adapter->stats.roc; 4737 net_stats->rx_crc_errors = adapter->stats.crcerrs; 4738 net_stats->rx_frame_errors = adapter->stats.algnerrc; 4739 net_stats->rx_missed_errors = adapter->stats.mpc; 4740 4741 /* Tx Errors */ 4742 net_stats->tx_errors = adapter->stats.ecol + 4743 adapter->stats.latecol; 4744 net_stats->tx_aborted_errors = adapter->stats.ecol; 4745 net_stats->tx_window_errors = adapter->stats.latecol; 4746 net_stats->tx_carrier_errors = adapter->stats.tncrs; 4747 4748 /* Tx Dropped needs to be maintained elsewhere */ 4749 4750 /* Management Stats */ 4751 adapter->stats.mgptc += rd32(IGC_MGTPTC); 4752 adapter->stats.mgprc += rd32(IGC_MGTPRC); 4753 adapter->stats.mgpdc += rd32(IGC_MGTPDC); 4754 } 4755 4756 /** 4757 * igc_down - Close the interface 4758 * @adapter: board private structure 4759 */ 4760 void igc_down(struct igc_adapter *adapter) 4761 { 4762 struct net_device *netdev = adapter->netdev; 4763 struct igc_hw *hw = &adapter->hw; 4764 u32 tctl, rctl; 4765 int i = 0; 4766 4767 set_bit(__IGC_DOWN, &adapter->state); 4768 4769 igc_ptp_suspend(adapter); 4770 4771 if (pci_device_is_present(adapter->pdev)) { 4772 /* disable receives in the hardware */ 4773 rctl = rd32(IGC_RCTL); 4774 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); 4775 /* flush and sleep below */ 4776 } 4777 /* set trans_start so we don't get spurious watchdogs during reset */ 4778 netif_trans_update(netdev); 4779 4780 netif_carrier_off(netdev); 4781 netif_tx_stop_all_queues(netdev); 4782 4783 if (pci_device_is_present(adapter->pdev)) { 4784 /* disable transmits in the hardware */ 4785 tctl = rd32(IGC_TCTL); 4786 tctl &= ~IGC_TCTL_EN; 4787 wr32(IGC_TCTL, tctl); 4788 /* flush both disables and wait for them to finish */ 4789 wrfl(); 4790 usleep_range(10000, 20000); 4791 4792 igc_irq_disable(adapter); 4793 } 4794 4795 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4796 4797 for (i = 0; i < adapter->num_q_vectors; i++) { 4798 if (adapter->q_vector[i]) { 4799 napi_synchronize(&adapter->q_vector[i]->napi); 4800 napi_disable(&adapter->q_vector[i]->napi); 4801 } 4802 } 4803 4804 del_timer_sync(&adapter->watchdog_timer); 4805 del_timer_sync(&adapter->phy_info_timer); 4806 4807 /* record the stats before reset*/ 4808 spin_lock(&adapter->stats64_lock); 4809 igc_update_stats(adapter); 4810 spin_unlock(&adapter->stats64_lock); 4811 4812 adapter->link_speed = 0; 4813 adapter->link_duplex = 0; 4814 4815 if (!pci_channel_offline(adapter->pdev)) 4816 igc_reset(adapter); 4817 4818 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 4819 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 4820 4821 igc_clean_all_tx_rings(adapter); 4822 igc_clean_all_rx_rings(adapter); 4823 } 4824 4825 void igc_reinit_locked(struct igc_adapter *adapter) 4826 { 4827 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4828 usleep_range(1000, 2000); 4829 igc_down(adapter); 4830 igc_up(adapter); 4831 clear_bit(__IGC_RESETTING, &adapter->state); 4832 } 4833 4834 static void igc_reset_task(struct work_struct *work) 4835 { 4836 struct igc_adapter *adapter; 4837 4838 adapter = container_of(work, struct igc_adapter, reset_task); 4839 4840 rtnl_lock(); 4841 /* If we're already down or resetting, just bail */ 4842 if (test_bit(__IGC_DOWN, &adapter->state) || 4843 test_bit(__IGC_RESETTING, &adapter->state)) { 4844 rtnl_unlock(); 4845 return; 4846 } 4847 4848 igc_rings_dump(adapter); 4849 igc_regs_dump(adapter); 4850 netdev_err(adapter->netdev, "Reset adapter\n"); 4851 igc_reinit_locked(adapter); 4852 rtnl_unlock(); 4853 } 4854 4855 /** 4856 * igc_change_mtu - Change the Maximum Transfer Unit 4857 * @netdev: network interface device structure 4858 * @new_mtu: new value for maximum frame size 4859 * 4860 * Returns 0 on success, negative on failure 4861 */ 4862 static int igc_change_mtu(struct net_device *netdev, int new_mtu) 4863 { 4864 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4865 struct igc_adapter *adapter = netdev_priv(netdev); 4866 4867 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { 4868 netdev_dbg(netdev, "Jumbo frames not supported with XDP"); 4869 return -EINVAL; 4870 } 4871 4872 /* adjust max frame to be at least the size of a standard frame */ 4873 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 4874 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 4875 4876 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 4877 usleep_range(1000, 2000); 4878 4879 /* igc_down has a dependency on max_frame_size */ 4880 adapter->max_frame_size = max_frame; 4881 4882 if (netif_running(netdev)) 4883 igc_down(adapter); 4884 4885 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 4886 netdev->mtu = new_mtu; 4887 4888 if (netif_running(netdev)) 4889 igc_up(adapter); 4890 else 4891 igc_reset(adapter); 4892 4893 clear_bit(__IGC_RESETTING, &adapter->state); 4894 4895 return 0; 4896 } 4897 4898 /** 4899 * igc_get_stats64 - Get System Network Statistics 4900 * @netdev: network interface device structure 4901 * @stats: rtnl_link_stats64 pointer 4902 * 4903 * Returns the address of the device statistics structure. 4904 * The statistics are updated here and also from the timer callback. 4905 */ 4906 static void igc_get_stats64(struct net_device *netdev, 4907 struct rtnl_link_stats64 *stats) 4908 { 4909 struct igc_adapter *adapter = netdev_priv(netdev); 4910 4911 spin_lock(&adapter->stats64_lock); 4912 if (!test_bit(__IGC_RESETTING, &adapter->state)) 4913 igc_update_stats(adapter); 4914 memcpy(stats, &adapter->stats64, sizeof(*stats)); 4915 spin_unlock(&adapter->stats64_lock); 4916 } 4917 4918 static netdev_features_t igc_fix_features(struct net_device *netdev, 4919 netdev_features_t features) 4920 { 4921 /* Since there is no support for separate Rx/Tx vlan accel 4922 * enable/disable make sure Tx flag is always in same state as Rx. 4923 */ 4924 if (features & NETIF_F_HW_VLAN_CTAG_RX) 4925 features |= NETIF_F_HW_VLAN_CTAG_TX; 4926 else 4927 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 4928 4929 return features; 4930 } 4931 4932 static int igc_set_features(struct net_device *netdev, 4933 netdev_features_t features) 4934 { 4935 netdev_features_t changed = netdev->features ^ features; 4936 struct igc_adapter *adapter = netdev_priv(netdev); 4937 4938 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 4939 igc_vlan_mode(netdev, features); 4940 4941 /* Add VLAN support */ 4942 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 4943 return 0; 4944 4945 if (!(features & NETIF_F_NTUPLE)) 4946 igc_flush_nfc_rules(adapter); 4947 4948 netdev->features = features; 4949 4950 if (netif_running(netdev)) 4951 igc_reinit_locked(adapter); 4952 else 4953 igc_reset(adapter); 4954 4955 return 1; 4956 } 4957 4958 static netdev_features_t 4959 igc_features_check(struct sk_buff *skb, struct net_device *dev, 4960 netdev_features_t features) 4961 { 4962 unsigned int network_hdr_len, mac_hdr_len; 4963 4964 /* Make certain the headers can be described by a context descriptor */ 4965 mac_hdr_len = skb_network_header(skb) - skb->data; 4966 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 4967 return features & ~(NETIF_F_HW_CSUM | 4968 NETIF_F_SCTP_CRC | 4969 NETIF_F_HW_VLAN_CTAG_TX | 4970 NETIF_F_TSO | 4971 NETIF_F_TSO6); 4972 4973 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 4974 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 4975 return features & ~(NETIF_F_HW_CSUM | 4976 NETIF_F_SCTP_CRC | 4977 NETIF_F_TSO | 4978 NETIF_F_TSO6); 4979 4980 /* We can only support IPv4 TSO in tunnels if we can mangle the 4981 * inner IP ID field, so strip TSO if MANGLEID is not supported. 4982 */ 4983 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 4984 features &= ~NETIF_F_TSO; 4985 4986 return features; 4987 } 4988 4989 static void igc_tsync_interrupt(struct igc_adapter *adapter) 4990 { 4991 u32 ack, tsauxc, sec, nsec, tsicr; 4992 struct igc_hw *hw = &adapter->hw; 4993 struct ptp_clock_event event; 4994 struct timespec64 ts; 4995 4996 tsicr = rd32(IGC_TSICR); 4997 ack = 0; 4998 4999 if (tsicr & IGC_TSICR_SYS_WRAP) { 5000 event.type = PTP_CLOCK_PPS; 5001 if (adapter->ptp_caps.pps) 5002 ptp_clock_event(adapter->ptp_clock, &event); 5003 ack |= IGC_TSICR_SYS_WRAP; 5004 } 5005 5006 if (tsicr & IGC_TSICR_TXTS) { 5007 /* retrieve hardware timestamp */ 5008 schedule_work(&adapter->ptp_tx_work); 5009 ack |= IGC_TSICR_TXTS; 5010 } 5011 5012 if (tsicr & IGC_TSICR_TT0) { 5013 spin_lock(&adapter->tmreg_lock); 5014 ts = timespec64_add(adapter->perout[0].start, 5015 adapter->perout[0].period); 5016 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5017 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); 5018 tsauxc = rd32(IGC_TSAUXC); 5019 tsauxc |= IGC_TSAUXC_EN_TT0; 5020 wr32(IGC_TSAUXC, tsauxc); 5021 adapter->perout[0].start = ts; 5022 spin_unlock(&adapter->tmreg_lock); 5023 ack |= IGC_TSICR_TT0; 5024 } 5025 5026 if (tsicr & IGC_TSICR_TT1) { 5027 spin_lock(&adapter->tmreg_lock); 5028 ts = timespec64_add(adapter->perout[1].start, 5029 adapter->perout[1].period); 5030 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5031 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); 5032 tsauxc = rd32(IGC_TSAUXC); 5033 tsauxc |= IGC_TSAUXC_EN_TT1; 5034 wr32(IGC_TSAUXC, tsauxc); 5035 adapter->perout[1].start = ts; 5036 spin_unlock(&adapter->tmreg_lock); 5037 ack |= IGC_TSICR_TT1; 5038 } 5039 5040 if (tsicr & IGC_TSICR_AUTT0) { 5041 nsec = rd32(IGC_AUXSTMPL0); 5042 sec = rd32(IGC_AUXSTMPH0); 5043 event.type = PTP_CLOCK_EXTTS; 5044 event.index = 0; 5045 event.timestamp = sec * NSEC_PER_SEC + nsec; 5046 ptp_clock_event(adapter->ptp_clock, &event); 5047 ack |= IGC_TSICR_AUTT0; 5048 } 5049 5050 if (tsicr & IGC_TSICR_AUTT1) { 5051 nsec = rd32(IGC_AUXSTMPL1); 5052 sec = rd32(IGC_AUXSTMPH1); 5053 event.type = PTP_CLOCK_EXTTS; 5054 event.index = 1; 5055 event.timestamp = sec * NSEC_PER_SEC + nsec; 5056 ptp_clock_event(adapter->ptp_clock, &event); 5057 ack |= IGC_TSICR_AUTT1; 5058 } 5059 5060 /* acknowledge the interrupts */ 5061 wr32(IGC_TSICR, ack); 5062 } 5063 5064 /** 5065 * igc_msix_other - msix other interrupt handler 5066 * @irq: interrupt number 5067 * @data: pointer to a q_vector 5068 */ 5069 static irqreturn_t igc_msix_other(int irq, void *data) 5070 { 5071 struct igc_adapter *adapter = data; 5072 struct igc_hw *hw = &adapter->hw; 5073 u32 icr = rd32(IGC_ICR); 5074 5075 /* reading ICR causes bit 31 of EICR to be cleared */ 5076 if (icr & IGC_ICR_DRSTA) 5077 schedule_work(&adapter->reset_task); 5078 5079 if (icr & IGC_ICR_DOUTSYNC) { 5080 /* HW is reporting DMA is out of sync */ 5081 adapter->stats.doosync++; 5082 } 5083 5084 if (icr & IGC_ICR_LSC) { 5085 hw->mac.get_link_status = true; 5086 /* guard against interrupt when we're going down */ 5087 if (!test_bit(__IGC_DOWN, &adapter->state)) 5088 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5089 } 5090 5091 if (icr & IGC_ICR_TS) 5092 igc_tsync_interrupt(adapter); 5093 5094 wr32(IGC_EIMS, adapter->eims_other); 5095 5096 return IRQ_HANDLED; 5097 } 5098 5099 static void igc_write_itr(struct igc_q_vector *q_vector) 5100 { 5101 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 5102 5103 if (!q_vector->set_itr) 5104 return; 5105 5106 if (!itr_val) 5107 itr_val = IGC_ITR_VAL_MASK; 5108 5109 itr_val |= IGC_EITR_CNT_IGNR; 5110 5111 writel(itr_val, q_vector->itr_register); 5112 q_vector->set_itr = 0; 5113 } 5114 5115 static irqreturn_t igc_msix_ring(int irq, void *data) 5116 { 5117 struct igc_q_vector *q_vector = data; 5118 5119 /* Write the ITR value calculated from the previous interrupt. */ 5120 igc_write_itr(q_vector); 5121 5122 napi_schedule(&q_vector->napi); 5123 5124 return IRQ_HANDLED; 5125 } 5126 5127 /** 5128 * igc_request_msix - Initialize MSI-X interrupts 5129 * @adapter: Pointer to adapter structure 5130 * 5131 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 5132 * kernel. 5133 */ 5134 static int igc_request_msix(struct igc_adapter *adapter) 5135 { 5136 unsigned int num_q_vectors = adapter->num_q_vectors; 5137 int i = 0, err = 0, vector = 0, free_vector = 0; 5138 struct net_device *netdev = adapter->netdev; 5139 5140 err = request_irq(adapter->msix_entries[vector].vector, 5141 &igc_msix_other, 0, netdev->name, adapter); 5142 if (err) 5143 goto err_out; 5144 5145 if (num_q_vectors > MAX_Q_VECTORS) { 5146 num_q_vectors = MAX_Q_VECTORS; 5147 dev_warn(&adapter->pdev->dev, 5148 "The number of queue vectors (%d) is higher than max allowed (%d)\n", 5149 adapter->num_q_vectors, MAX_Q_VECTORS); 5150 } 5151 for (i = 0; i < num_q_vectors; i++) { 5152 struct igc_q_vector *q_vector = adapter->q_vector[i]; 5153 5154 vector++; 5155 5156 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 5157 5158 if (q_vector->rx.ring && q_vector->tx.ring) 5159 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 5160 q_vector->rx.ring->queue_index); 5161 else if (q_vector->tx.ring) 5162 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 5163 q_vector->tx.ring->queue_index); 5164 else if (q_vector->rx.ring) 5165 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 5166 q_vector->rx.ring->queue_index); 5167 else 5168 sprintf(q_vector->name, "%s-unused", netdev->name); 5169 5170 err = request_irq(adapter->msix_entries[vector].vector, 5171 igc_msix_ring, 0, q_vector->name, 5172 q_vector); 5173 if (err) 5174 goto err_free; 5175 } 5176 5177 igc_configure_msix(adapter); 5178 return 0; 5179 5180 err_free: 5181 /* free already assigned IRQs */ 5182 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 5183 5184 vector--; 5185 for (i = 0; i < vector; i++) { 5186 free_irq(adapter->msix_entries[free_vector++].vector, 5187 adapter->q_vector[i]); 5188 } 5189 err_out: 5190 return err; 5191 } 5192 5193 /** 5194 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 5195 * @adapter: Pointer to adapter structure 5196 * 5197 * This function resets the device so that it has 0 rx queues, tx queues, and 5198 * MSI-X interrupts allocated. 5199 */ 5200 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 5201 { 5202 igc_free_q_vectors(adapter); 5203 igc_reset_interrupt_capability(adapter); 5204 } 5205 5206 /* Need to wait a few seconds after link up to get diagnostic information from 5207 * the phy 5208 */ 5209 static void igc_update_phy_info(struct timer_list *t) 5210 { 5211 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 5212 5213 igc_get_phy_info(&adapter->hw); 5214 } 5215 5216 /** 5217 * igc_has_link - check shared code for link and determine up/down 5218 * @adapter: pointer to driver private info 5219 */ 5220 bool igc_has_link(struct igc_adapter *adapter) 5221 { 5222 struct igc_hw *hw = &adapter->hw; 5223 bool link_active = false; 5224 5225 /* get_link_status is set on LSC (link status) interrupt or 5226 * rx sequence error interrupt. get_link_status will stay 5227 * false until the igc_check_for_link establishes link 5228 * for copper adapters ONLY 5229 */ 5230 if (!hw->mac.get_link_status) 5231 return true; 5232 hw->mac.ops.check_for_link(hw); 5233 link_active = !hw->mac.get_link_status; 5234 5235 if (hw->mac.type == igc_i225) { 5236 if (!netif_carrier_ok(adapter->netdev)) { 5237 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5238 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { 5239 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; 5240 adapter->link_check_timeout = jiffies; 5241 } 5242 } 5243 5244 return link_active; 5245 } 5246 5247 /** 5248 * igc_watchdog - Timer Call-back 5249 * @t: timer for the watchdog 5250 */ 5251 static void igc_watchdog(struct timer_list *t) 5252 { 5253 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); 5254 /* Do the rest outside of interrupt context */ 5255 schedule_work(&adapter->watchdog_task); 5256 } 5257 5258 static void igc_watchdog_task(struct work_struct *work) 5259 { 5260 struct igc_adapter *adapter = container_of(work, 5261 struct igc_adapter, 5262 watchdog_task); 5263 struct net_device *netdev = adapter->netdev; 5264 struct igc_hw *hw = &adapter->hw; 5265 struct igc_phy_info *phy = &hw->phy; 5266 u16 phy_data, retry_count = 20; 5267 u32 link; 5268 int i; 5269 5270 link = igc_has_link(adapter); 5271 5272 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 5273 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 5274 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5275 else 5276 link = false; 5277 } 5278 5279 if (link) { 5280 /* Cancel scheduled suspend requests. */ 5281 pm_runtime_resume(netdev->dev.parent); 5282 5283 if (!netif_carrier_ok(netdev)) { 5284 u32 ctrl; 5285 5286 hw->mac.ops.get_speed_and_duplex(hw, 5287 &adapter->link_speed, 5288 &adapter->link_duplex); 5289 5290 ctrl = rd32(IGC_CTRL); 5291 /* Link status message must follow this format */ 5292 netdev_info(netdev, 5293 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5294 adapter->link_speed, 5295 adapter->link_duplex == FULL_DUPLEX ? 5296 "Full" : "Half", 5297 (ctrl & IGC_CTRL_TFCE) && 5298 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 5299 (ctrl & IGC_CTRL_RFCE) ? "RX" : 5300 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 5301 5302 /* disable EEE if enabled */ 5303 if ((adapter->flags & IGC_FLAG_EEE) && 5304 adapter->link_duplex == HALF_DUPLEX) { 5305 netdev_info(netdev, 5306 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); 5307 adapter->hw.dev_spec._base.eee_enable = false; 5308 adapter->flags &= ~IGC_FLAG_EEE; 5309 } 5310 5311 /* check if SmartSpeed worked */ 5312 igc_check_downshift(hw); 5313 if (phy->speed_downgraded) 5314 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 5315 5316 /* adjust timeout factor according to speed/duplex */ 5317 adapter->tx_timeout_factor = 1; 5318 switch (adapter->link_speed) { 5319 case SPEED_10: 5320 adapter->tx_timeout_factor = 14; 5321 break; 5322 case SPEED_100: 5323 case SPEED_1000: 5324 case SPEED_2500: 5325 adapter->tx_timeout_factor = 7; 5326 break; 5327 } 5328 5329 if (adapter->link_speed != SPEED_1000) 5330 goto no_wait; 5331 5332 /* wait for Remote receiver status OK */ 5333 retry_read_status: 5334 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 5335 &phy_data)) { 5336 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5337 retry_count) { 5338 msleep(100); 5339 retry_count--; 5340 goto retry_read_status; 5341 } else if (!retry_count) { 5342 netdev_err(netdev, "exceed max 2 second\n"); 5343 } 5344 } else { 5345 netdev_err(netdev, "read 1000Base-T Status Reg\n"); 5346 } 5347 no_wait: 5348 netif_carrier_on(netdev); 5349 5350 /* link state has changed, schedule phy info update */ 5351 if (!test_bit(__IGC_DOWN, &adapter->state)) 5352 mod_timer(&adapter->phy_info_timer, 5353 round_jiffies(jiffies + 2 * HZ)); 5354 } 5355 } else { 5356 if (netif_carrier_ok(netdev)) { 5357 adapter->link_speed = 0; 5358 adapter->link_duplex = 0; 5359 5360 /* Links status message must follow this format */ 5361 netdev_info(netdev, "NIC Link is Down\n"); 5362 netif_carrier_off(netdev); 5363 5364 /* link state has changed, schedule phy info update */ 5365 if (!test_bit(__IGC_DOWN, &adapter->state)) 5366 mod_timer(&adapter->phy_info_timer, 5367 round_jiffies(jiffies + 2 * HZ)); 5368 5369 /* link is down, time to check for alternate media */ 5370 if (adapter->flags & IGC_FLAG_MAS_ENABLE) { 5371 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5372 schedule_work(&adapter->reset_task); 5373 /* return immediately */ 5374 return; 5375 } 5376 } 5377 pm_schedule_suspend(netdev->dev.parent, 5378 MSEC_PER_SEC * 5); 5379 5380 /* also check for alternate media here */ 5381 } else if (!netif_carrier_ok(netdev) && 5382 (adapter->flags & IGC_FLAG_MAS_ENABLE)) { 5383 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 5384 schedule_work(&adapter->reset_task); 5385 /* return immediately */ 5386 return; 5387 } 5388 } 5389 } 5390 5391 spin_lock(&adapter->stats64_lock); 5392 igc_update_stats(adapter); 5393 spin_unlock(&adapter->stats64_lock); 5394 5395 for (i = 0; i < adapter->num_tx_queues; i++) { 5396 struct igc_ring *tx_ring = adapter->tx_ring[i]; 5397 5398 if (!netif_carrier_ok(netdev)) { 5399 /* We've lost link, so the controller stops DMA, 5400 * but we've got queued Tx work that's never going 5401 * to get done, so reset controller to flush Tx. 5402 * (Do the reset outside of interrupt context). 5403 */ 5404 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 5405 adapter->tx_timeout_count++; 5406 schedule_work(&adapter->reset_task); 5407 /* return immediately since reset is imminent */ 5408 return; 5409 } 5410 } 5411 5412 /* Force detection of hung controller every watchdog period */ 5413 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5414 } 5415 5416 /* Cause software interrupt to ensure Rx ring is cleaned */ 5417 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5418 u32 eics = 0; 5419 5420 for (i = 0; i < adapter->num_q_vectors; i++) 5421 eics |= adapter->q_vector[i]->eims_value; 5422 wr32(IGC_EICS, eics); 5423 } else { 5424 wr32(IGC_ICS, IGC_ICS_RXDMT0); 5425 } 5426 5427 igc_ptp_tx_hang(adapter); 5428 5429 /* Reset the timer */ 5430 if (!test_bit(__IGC_DOWN, &adapter->state)) { 5431 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 5432 mod_timer(&adapter->watchdog_timer, 5433 round_jiffies(jiffies + HZ)); 5434 else 5435 mod_timer(&adapter->watchdog_timer, 5436 round_jiffies(jiffies + 2 * HZ)); 5437 } 5438 } 5439 5440 /** 5441 * igc_intr_msi - Interrupt Handler 5442 * @irq: interrupt number 5443 * @data: pointer to a network interface device structure 5444 */ 5445 static irqreturn_t igc_intr_msi(int irq, void *data) 5446 { 5447 struct igc_adapter *adapter = data; 5448 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5449 struct igc_hw *hw = &adapter->hw; 5450 /* read ICR disables interrupts using IAM */ 5451 u32 icr = rd32(IGC_ICR); 5452 5453 igc_write_itr(q_vector); 5454 5455 if (icr & IGC_ICR_DRSTA) 5456 schedule_work(&adapter->reset_task); 5457 5458 if (icr & IGC_ICR_DOUTSYNC) { 5459 /* HW is reporting DMA is out of sync */ 5460 adapter->stats.doosync++; 5461 } 5462 5463 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5464 hw->mac.get_link_status = true; 5465 if (!test_bit(__IGC_DOWN, &adapter->state)) 5466 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5467 } 5468 5469 napi_schedule(&q_vector->napi); 5470 5471 return IRQ_HANDLED; 5472 } 5473 5474 /** 5475 * igc_intr - Legacy Interrupt Handler 5476 * @irq: interrupt number 5477 * @data: pointer to a network interface device structure 5478 */ 5479 static irqreturn_t igc_intr(int irq, void *data) 5480 { 5481 struct igc_adapter *adapter = data; 5482 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5483 struct igc_hw *hw = &adapter->hw; 5484 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 5485 * need for the IMC write 5486 */ 5487 u32 icr = rd32(IGC_ICR); 5488 5489 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5490 * not set, then the adapter didn't send an interrupt 5491 */ 5492 if (!(icr & IGC_ICR_INT_ASSERTED)) 5493 return IRQ_NONE; 5494 5495 igc_write_itr(q_vector); 5496 5497 if (icr & IGC_ICR_DRSTA) 5498 schedule_work(&adapter->reset_task); 5499 5500 if (icr & IGC_ICR_DOUTSYNC) { 5501 /* HW is reporting DMA is out of sync */ 5502 adapter->stats.doosync++; 5503 } 5504 5505 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5506 hw->mac.get_link_status = true; 5507 /* guard against interrupt when we're going down */ 5508 if (!test_bit(__IGC_DOWN, &adapter->state)) 5509 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5510 } 5511 5512 napi_schedule(&q_vector->napi); 5513 5514 return IRQ_HANDLED; 5515 } 5516 5517 static void igc_free_irq(struct igc_adapter *adapter) 5518 { 5519 if (adapter->msix_entries) { 5520 int vector = 0, i; 5521 5522 free_irq(adapter->msix_entries[vector++].vector, adapter); 5523 5524 for (i = 0; i < adapter->num_q_vectors; i++) 5525 free_irq(adapter->msix_entries[vector++].vector, 5526 adapter->q_vector[i]); 5527 } else { 5528 free_irq(adapter->pdev->irq, adapter); 5529 } 5530 } 5531 5532 /** 5533 * igc_request_irq - initialize interrupts 5534 * @adapter: Pointer to adapter structure 5535 * 5536 * Attempts to configure interrupts using the best available 5537 * capabilities of the hardware and kernel. 5538 */ 5539 static int igc_request_irq(struct igc_adapter *adapter) 5540 { 5541 struct net_device *netdev = adapter->netdev; 5542 struct pci_dev *pdev = adapter->pdev; 5543 int err = 0; 5544 5545 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5546 err = igc_request_msix(adapter); 5547 if (!err) 5548 goto request_done; 5549 /* fall back to MSI */ 5550 igc_free_all_tx_resources(adapter); 5551 igc_free_all_rx_resources(adapter); 5552 5553 igc_clear_interrupt_scheme(adapter); 5554 err = igc_init_interrupt_scheme(adapter, false); 5555 if (err) 5556 goto request_done; 5557 igc_setup_all_tx_resources(adapter); 5558 igc_setup_all_rx_resources(adapter); 5559 igc_configure(adapter); 5560 } 5561 5562 igc_assign_vector(adapter->q_vector[0], 0); 5563 5564 if (adapter->flags & IGC_FLAG_HAS_MSI) { 5565 err = request_irq(pdev->irq, &igc_intr_msi, 0, 5566 netdev->name, adapter); 5567 if (!err) 5568 goto request_done; 5569 5570 /* fall back to legacy interrupts */ 5571 igc_reset_interrupt_capability(adapter); 5572 adapter->flags &= ~IGC_FLAG_HAS_MSI; 5573 } 5574 5575 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 5576 netdev->name, adapter); 5577 5578 if (err) 5579 netdev_err(netdev, "Error %d getting interrupt\n", err); 5580 5581 request_done: 5582 return err; 5583 } 5584 5585 /** 5586 * __igc_open - Called when a network interface is made active 5587 * @netdev: network interface device structure 5588 * @resuming: boolean indicating if the device is resuming 5589 * 5590 * Returns 0 on success, negative value on failure 5591 * 5592 * The open entry point is called when a network interface is made 5593 * active by the system (IFF_UP). At this point all resources needed 5594 * for transmit and receive operations are allocated, the interrupt 5595 * handler is registered with the OS, the watchdog timer is started, 5596 * and the stack is notified that the interface is ready. 5597 */ 5598 static int __igc_open(struct net_device *netdev, bool resuming) 5599 { 5600 struct igc_adapter *adapter = netdev_priv(netdev); 5601 struct pci_dev *pdev = adapter->pdev; 5602 struct igc_hw *hw = &adapter->hw; 5603 int err = 0; 5604 int i = 0; 5605 5606 /* disallow open during test */ 5607 5608 if (test_bit(__IGC_TESTING, &adapter->state)) { 5609 WARN_ON(resuming); 5610 return -EBUSY; 5611 } 5612 5613 if (!resuming) 5614 pm_runtime_get_sync(&pdev->dev); 5615 5616 netif_carrier_off(netdev); 5617 5618 /* allocate transmit descriptors */ 5619 err = igc_setup_all_tx_resources(adapter); 5620 if (err) 5621 goto err_setup_tx; 5622 5623 /* allocate receive descriptors */ 5624 err = igc_setup_all_rx_resources(adapter); 5625 if (err) 5626 goto err_setup_rx; 5627 5628 igc_power_up_link(adapter); 5629 5630 igc_configure(adapter); 5631 5632 err = igc_request_irq(adapter); 5633 if (err) 5634 goto err_req_irq; 5635 5636 /* Notify the stack of the actual queue counts. */ 5637 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 5638 if (err) 5639 goto err_set_queues; 5640 5641 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 5642 if (err) 5643 goto err_set_queues; 5644 5645 clear_bit(__IGC_DOWN, &adapter->state); 5646 5647 for (i = 0; i < adapter->num_q_vectors; i++) 5648 napi_enable(&adapter->q_vector[i]->napi); 5649 5650 /* Clear any pending interrupts. */ 5651 rd32(IGC_ICR); 5652 igc_irq_enable(adapter); 5653 5654 if (!resuming) 5655 pm_runtime_put(&pdev->dev); 5656 5657 netif_tx_start_all_queues(netdev); 5658 5659 /* start the watchdog. */ 5660 hw->mac.get_link_status = true; 5661 schedule_work(&adapter->watchdog_task); 5662 5663 return IGC_SUCCESS; 5664 5665 err_set_queues: 5666 igc_free_irq(adapter); 5667 err_req_irq: 5668 igc_release_hw_control(adapter); 5669 igc_power_down_phy_copper_base(&adapter->hw); 5670 igc_free_all_rx_resources(adapter); 5671 err_setup_rx: 5672 igc_free_all_tx_resources(adapter); 5673 err_setup_tx: 5674 igc_reset(adapter); 5675 if (!resuming) 5676 pm_runtime_put(&pdev->dev); 5677 5678 return err; 5679 } 5680 5681 int igc_open(struct net_device *netdev) 5682 { 5683 return __igc_open(netdev, false); 5684 } 5685 5686 /** 5687 * __igc_close - Disables a network interface 5688 * @netdev: network interface device structure 5689 * @suspending: boolean indicating the device is suspending 5690 * 5691 * Returns 0, this is not allowed to fail 5692 * 5693 * The close entry point is called when an interface is de-activated 5694 * by the OS. The hardware is still under the driver's control, but 5695 * needs to be disabled. A global MAC reset is issued to stop the 5696 * hardware, and all transmit and receive resources are freed. 5697 */ 5698 static int __igc_close(struct net_device *netdev, bool suspending) 5699 { 5700 struct igc_adapter *adapter = netdev_priv(netdev); 5701 struct pci_dev *pdev = adapter->pdev; 5702 5703 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 5704 5705 if (!suspending) 5706 pm_runtime_get_sync(&pdev->dev); 5707 5708 igc_down(adapter); 5709 5710 igc_release_hw_control(adapter); 5711 5712 igc_free_irq(adapter); 5713 5714 igc_free_all_tx_resources(adapter); 5715 igc_free_all_rx_resources(adapter); 5716 5717 if (!suspending) 5718 pm_runtime_put_sync(&pdev->dev); 5719 5720 return 0; 5721 } 5722 5723 int igc_close(struct net_device *netdev) 5724 { 5725 if (netif_device_present(netdev) || netdev->dismantle) 5726 return __igc_close(netdev, false); 5727 return 0; 5728 } 5729 5730 /** 5731 * igc_ioctl - Access the hwtstamp interface 5732 * @netdev: network interface device structure 5733 * @ifr: interface request data 5734 * @cmd: ioctl command 5735 **/ 5736 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5737 { 5738 switch (cmd) { 5739 case SIOCGHWTSTAMP: 5740 return igc_ptp_get_ts_config(netdev, ifr); 5741 case SIOCSHWTSTAMP: 5742 return igc_ptp_set_ts_config(netdev, ifr); 5743 default: 5744 return -EOPNOTSUPP; 5745 } 5746 } 5747 5748 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, 5749 bool enable) 5750 { 5751 struct igc_ring *ring; 5752 5753 if (queue < 0 || queue >= adapter->num_tx_queues) 5754 return -EINVAL; 5755 5756 ring = adapter->tx_ring[queue]; 5757 ring->launchtime_enable = enable; 5758 5759 return 0; 5760 } 5761 5762 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 5763 { 5764 struct timespec64 b; 5765 5766 b = ktime_to_timespec64(base_time); 5767 5768 return timespec64_compare(now, &b) > 0; 5769 } 5770 5771 static bool validate_schedule(struct igc_adapter *adapter, 5772 const struct tc_taprio_qopt_offload *qopt) 5773 { 5774 int queue_uses[IGC_MAX_TX_QUEUES] = { }; 5775 struct timespec64 now; 5776 size_t n; 5777 5778 if (qopt->cycle_time_extension) 5779 return false; 5780 5781 igc_ptp_read(adapter, &now); 5782 5783 /* If we program the controller's BASET registers with a time 5784 * in the future, it will hold all the packets until that 5785 * time, causing a lot of TX Hangs, so to avoid that, we 5786 * reject schedules that would start in the future. 5787 */ 5788 if (!is_base_time_past(qopt->base_time, &now)) 5789 return false; 5790 5791 for (n = 0; n < qopt->num_entries; n++) { 5792 const struct tc_taprio_sched_entry *e; 5793 int i; 5794 5795 e = &qopt->entries[n]; 5796 5797 /* i225 only supports "global" frame preemption 5798 * settings. 5799 */ 5800 if (e->command != TC_TAPRIO_CMD_SET_GATES) 5801 return false; 5802 5803 for (i = 0; i < adapter->num_tx_queues; i++) { 5804 if (e->gate_mask & BIT(i)) 5805 queue_uses[i]++; 5806 5807 if (queue_uses[i] > 1) 5808 return false; 5809 } 5810 } 5811 5812 return true; 5813 } 5814 5815 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, 5816 struct tc_etf_qopt_offload *qopt) 5817 { 5818 struct igc_hw *hw = &adapter->hw; 5819 int err; 5820 5821 if (hw->mac.type != igc_i225) 5822 return -EOPNOTSUPP; 5823 5824 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); 5825 if (err) 5826 return err; 5827 5828 return igc_tsn_offload_apply(adapter); 5829 } 5830 5831 static int igc_tsn_clear_schedule(struct igc_adapter *adapter) 5832 { 5833 int i; 5834 5835 adapter->base_time = 0; 5836 adapter->cycle_time = NSEC_PER_SEC; 5837 5838 for (i = 0; i < adapter->num_tx_queues; i++) { 5839 struct igc_ring *ring = adapter->tx_ring[i]; 5840 5841 ring->start_time = 0; 5842 ring->end_time = NSEC_PER_SEC; 5843 } 5844 5845 return 0; 5846 } 5847 5848 static int igc_save_qbv_schedule(struct igc_adapter *adapter, 5849 struct tc_taprio_qopt_offload *qopt) 5850 { 5851 u32 start_time = 0, end_time = 0; 5852 size_t n; 5853 5854 if (!qopt->enable) 5855 return igc_tsn_clear_schedule(adapter); 5856 5857 if (adapter->base_time) 5858 return -EALREADY; 5859 5860 if (!validate_schedule(adapter, qopt)) 5861 return -EINVAL; 5862 5863 adapter->cycle_time = qopt->cycle_time; 5864 adapter->base_time = qopt->base_time; 5865 5866 /* FIXME: be a little smarter about cases when the gate for a 5867 * queue stays open for more than one entry. 5868 */ 5869 for (n = 0; n < qopt->num_entries; n++) { 5870 struct tc_taprio_sched_entry *e = &qopt->entries[n]; 5871 int i; 5872 5873 end_time += e->interval; 5874 5875 for (i = 0; i < adapter->num_tx_queues; i++) { 5876 struct igc_ring *ring = adapter->tx_ring[i]; 5877 5878 if (!(e->gate_mask & BIT(i))) 5879 continue; 5880 5881 ring->start_time = start_time; 5882 ring->end_time = end_time; 5883 } 5884 5885 start_time += e->interval; 5886 } 5887 5888 return 0; 5889 } 5890 5891 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, 5892 struct tc_taprio_qopt_offload *qopt) 5893 { 5894 struct igc_hw *hw = &adapter->hw; 5895 int err; 5896 5897 if (hw->mac.type != igc_i225) 5898 return -EOPNOTSUPP; 5899 5900 err = igc_save_qbv_schedule(adapter, qopt); 5901 if (err) 5902 return err; 5903 5904 return igc_tsn_offload_apply(adapter); 5905 } 5906 5907 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, 5908 bool enable, int idleslope, int sendslope, 5909 int hicredit, int locredit) 5910 { 5911 bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; 5912 struct net_device *netdev = adapter->netdev; 5913 struct igc_ring *ring; 5914 int i; 5915 5916 /* i225 has two sets of credit-based shaper logic. 5917 * Supporting it only on the top two priority queues 5918 */ 5919 if (queue < 0 || queue > 1) 5920 return -EINVAL; 5921 5922 ring = adapter->tx_ring[queue]; 5923 5924 for (i = 0; i < IGC_MAX_SR_QUEUES; i++) 5925 if (adapter->tx_ring[i]) 5926 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; 5927 5928 /* CBS should be enabled on the highest priority queue first in order 5929 * for the CBS algorithm to operate as intended. 5930 */ 5931 if (enable) { 5932 if (queue == 1 && !cbs_status[0]) { 5933 netdev_err(netdev, 5934 "Enabling CBS on queue1 before queue0\n"); 5935 return -EINVAL; 5936 } 5937 } else { 5938 if (queue == 0 && cbs_status[1]) { 5939 netdev_err(netdev, 5940 "Disabling CBS on queue0 before queue1\n"); 5941 return -EINVAL; 5942 } 5943 } 5944 5945 ring->cbs_enable = enable; 5946 ring->idleslope = idleslope; 5947 ring->sendslope = sendslope; 5948 ring->hicredit = hicredit; 5949 ring->locredit = locredit; 5950 5951 return 0; 5952 } 5953 5954 static int igc_tsn_enable_cbs(struct igc_adapter *adapter, 5955 struct tc_cbs_qopt_offload *qopt) 5956 { 5957 struct igc_hw *hw = &adapter->hw; 5958 int err; 5959 5960 if (hw->mac.type != igc_i225) 5961 return -EOPNOTSUPP; 5962 5963 if (qopt->queue < 0 || qopt->queue > 1) 5964 return -EINVAL; 5965 5966 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, 5967 qopt->idleslope, qopt->sendslope, 5968 qopt->hicredit, qopt->locredit); 5969 if (err) 5970 return err; 5971 5972 return igc_tsn_offload_apply(adapter); 5973 } 5974 5975 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, 5976 void *type_data) 5977 { 5978 struct igc_adapter *adapter = netdev_priv(dev); 5979 5980 switch (type) { 5981 case TC_SETUP_QDISC_TAPRIO: 5982 return igc_tsn_enable_qbv_scheduling(adapter, type_data); 5983 5984 case TC_SETUP_QDISC_ETF: 5985 return igc_tsn_enable_launchtime(adapter, type_data); 5986 5987 case TC_SETUP_QDISC_CBS: 5988 return igc_tsn_enable_cbs(adapter, type_data); 5989 5990 default: 5991 return -EOPNOTSUPP; 5992 } 5993 } 5994 5995 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) 5996 { 5997 struct igc_adapter *adapter = netdev_priv(dev); 5998 5999 switch (bpf->command) { 6000 case XDP_SETUP_PROG: 6001 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); 6002 case XDP_SETUP_XSK_POOL: 6003 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, 6004 bpf->xsk.queue_id); 6005 default: 6006 return -EOPNOTSUPP; 6007 } 6008 } 6009 6010 static int igc_xdp_xmit(struct net_device *dev, int num_frames, 6011 struct xdp_frame **frames, u32 flags) 6012 { 6013 struct igc_adapter *adapter = netdev_priv(dev); 6014 int cpu = smp_processor_id(); 6015 struct netdev_queue *nq; 6016 struct igc_ring *ring; 6017 int i, drops; 6018 6019 if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) 6020 return -ENETDOWN; 6021 6022 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6023 return -EINVAL; 6024 6025 ring = igc_xdp_get_tx_ring(adapter, cpu); 6026 nq = txring_txq(ring); 6027 6028 __netif_tx_lock(nq, cpu); 6029 6030 drops = 0; 6031 for (i = 0; i < num_frames; i++) { 6032 int err; 6033 struct xdp_frame *xdpf = frames[i]; 6034 6035 err = igc_xdp_init_tx_descriptor(ring, xdpf); 6036 if (err) { 6037 xdp_return_frame_rx_napi(xdpf); 6038 drops++; 6039 } 6040 } 6041 6042 if (flags & XDP_XMIT_FLUSH) 6043 igc_flush_tx_descriptors(ring); 6044 6045 __netif_tx_unlock(nq); 6046 6047 return num_frames - drops; 6048 } 6049 6050 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, 6051 struct igc_q_vector *q_vector) 6052 { 6053 struct igc_hw *hw = &adapter->hw; 6054 u32 eics = 0; 6055 6056 eics |= q_vector->eims_value; 6057 wr32(IGC_EICS, eics); 6058 } 6059 6060 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 6061 { 6062 struct igc_adapter *adapter = netdev_priv(dev); 6063 struct igc_q_vector *q_vector; 6064 struct igc_ring *ring; 6065 6066 if (test_bit(__IGC_DOWN, &adapter->state)) 6067 return -ENETDOWN; 6068 6069 if (!igc_xdp_is_enabled(adapter)) 6070 return -ENXIO; 6071 6072 if (queue_id >= adapter->num_rx_queues) 6073 return -EINVAL; 6074 6075 ring = adapter->rx_ring[queue_id]; 6076 6077 if (!ring->xsk_pool) 6078 return -ENXIO; 6079 6080 q_vector = adapter->q_vector[queue_id]; 6081 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 6082 igc_trigger_rxtxq_interrupt(adapter, q_vector); 6083 6084 return 0; 6085 } 6086 6087 static const struct net_device_ops igc_netdev_ops = { 6088 .ndo_open = igc_open, 6089 .ndo_stop = igc_close, 6090 .ndo_start_xmit = igc_xmit_frame, 6091 .ndo_set_rx_mode = igc_set_rx_mode, 6092 .ndo_set_mac_address = igc_set_mac, 6093 .ndo_change_mtu = igc_change_mtu, 6094 .ndo_get_stats64 = igc_get_stats64, 6095 .ndo_fix_features = igc_fix_features, 6096 .ndo_set_features = igc_set_features, 6097 .ndo_features_check = igc_features_check, 6098 .ndo_eth_ioctl = igc_ioctl, 6099 .ndo_setup_tc = igc_setup_tc, 6100 .ndo_bpf = igc_bpf, 6101 .ndo_xdp_xmit = igc_xdp_xmit, 6102 .ndo_xsk_wakeup = igc_xsk_wakeup, 6103 }; 6104 6105 /* PCIe configuration access */ 6106 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6107 { 6108 struct igc_adapter *adapter = hw->back; 6109 6110 pci_read_config_word(adapter->pdev, reg, value); 6111 } 6112 6113 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6114 { 6115 struct igc_adapter *adapter = hw->back; 6116 6117 pci_write_config_word(adapter->pdev, reg, *value); 6118 } 6119 6120 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6121 { 6122 struct igc_adapter *adapter = hw->back; 6123 6124 if (!pci_is_pcie(adapter->pdev)) 6125 return -IGC_ERR_CONFIG; 6126 6127 pcie_capability_read_word(adapter->pdev, reg, value); 6128 6129 return IGC_SUCCESS; 6130 } 6131 6132 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6133 { 6134 struct igc_adapter *adapter = hw->back; 6135 6136 if (!pci_is_pcie(adapter->pdev)) 6137 return -IGC_ERR_CONFIG; 6138 6139 pcie_capability_write_word(adapter->pdev, reg, *value); 6140 6141 return IGC_SUCCESS; 6142 } 6143 6144 u32 igc_rd32(struct igc_hw *hw, u32 reg) 6145 { 6146 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 6147 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 6148 u32 value = 0; 6149 6150 value = readl(&hw_addr[reg]); 6151 6152 /* reads should not return all F's */ 6153 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 6154 struct net_device *netdev = igc->netdev; 6155 6156 hw->hw_addr = NULL; 6157 netif_device_detach(netdev); 6158 netdev_err(netdev, "PCIe link lost, device now detached\n"); 6159 WARN(pci_device_is_present(igc->pdev), 6160 "igc: Failed to read reg 0x%x!\n", reg); 6161 } 6162 6163 return value; 6164 } 6165 6166 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) 6167 { 6168 struct igc_mac_info *mac = &adapter->hw.mac; 6169 6170 mac->autoneg = false; 6171 6172 /* Make sure dplx is at most 1 bit and lsb of speed is not set 6173 * for the switch() below to work 6174 */ 6175 if ((spd & 1) || (dplx & ~1)) 6176 goto err_inval; 6177 6178 switch (spd + dplx) { 6179 case SPEED_10 + DUPLEX_HALF: 6180 mac->forced_speed_duplex = ADVERTISE_10_HALF; 6181 break; 6182 case SPEED_10 + DUPLEX_FULL: 6183 mac->forced_speed_duplex = ADVERTISE_10_FULL; 6184 break; 6185 case SPEED_100 + DUPLEX_HALF: 6186 mac->forced_speed_duplex = ADVERTISE_100_HALF; 6187 break; 6188 case SPEED_100 + DUPLEX_FULL: 6189 mac->forced_speed_duplex = ADVERTISE_100_FULL; 6190 break; 6191 case SPEED_1000 + DUPLEX_FULL: 6192 mac->autoneg = true; 6193 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 6194 break; 6195 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 6196 goto err_inval; 6197 case SPEED_2500 + DUPLEX_FULL: 6198 mac->autoneg = true; 6199 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 6200 break; 6201 case SPEED_2500 + DUPLEX_HALF: /* not supported */ 6202 default: 6203 goto err_inval; 6204 } 6205 6206 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 6207 adapter->hw.phy.mdix = AUTO_ALL_MODES; 6208 6209 return 0; 6210 6211 err_inval: 6212 netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); 6213 return -EINVAL; 6214 } 6215 6216 /** 6217 * igc_probe - Device Initialization Routine 6218 * @pdev: PCI device information struct 6219 * @ent: entry in igc_pci_tbl 6220 * 6221 * Returns 0 on success, negative on failure 6222 * 6223 * igc_probe initializes an adapter identified by a pci_dev structure. 6224 * The OS initialization, configuring the adapter private structure, 6225 * and a hardware reset occur. 6226 */ 6227 static int igc_probe(struct pci_dev *pdev, 6228 const struct pci_device_id *ent) 6229 { 6230 struct igc_adapter *adapter; 6231 struct net_device *netdev; 6232 struct igc_hw *hw; 6233 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 6234 int err, pci_using_dac; 6235 6236 err = pci_enable_device_mem(pdev); 6237 if (err) 6238 return err; 6239 6240 pci_using_dac = 0; 6241 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 6242 if (!err) { 6243 pci_using_dac = 1; 6244 } else { 6245 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 6246 if (err) { 6247 dev_err(&pdev->dev, 6248 "No usable DMA configuration, aborting\n"); 6249 goto err_dma; 6250 } 6251 } 6252 6253 err = pci_request_mem_regions(pdev, igc_driver_name); 6254 if (err) 6255 goto err_pci_reg; 6256 6257 pci_enable_pcie_error_reporting(pdev); 6258 6259 err = pci_enable_ptm(pdev, NULL); 6260 if (err < 0) 6261 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); 6262 6263 pci_set_master(pdev); 6264 6265 err = -ENOMEM; 6266 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 6267 IGC_MAX_TX_QUEUES); 6268 6269 if (!netdev) 6270 goto err_alloc_etherdev; 6271 6272 SET_NETDEV_DEV(netdev, &pdev->dev); 6273 6274 pci_set_drvdata(pdev, netdev); 6275 adapter = netdev_priv(netdev); 6276 adapter->netdev = netdev; 6277 adapter->pdev = pdev; 6278 hw = &adapter->hw; 6279 hw->back = adapter; 6280 adapter->port_num = hw->bus.func; 6281 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 6282 6283 err = pci_save_state(pdev); 6284 if (err) 6285 goto err_ioremap; 6286 6287 err = -EIO; 6288 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 6289 pci_resource_len(pdev, 0)); 6290 if (!adapter->io_addr) 6291 goto err_ioremap; 6292 6293 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 6294 hw->hw_addr = adapter->io_addr; 6295 6296 netdev->netdev_ops = &igc_netdev_ops; 6297 igc_ethtool_set_ops(netdev); 6298 netdev->watchdog_timeo = 5 * HZ; 6299 6300 netdev->mem_start = pci_resource_start(pdev, 0); 6301 netdev->mem_end = pci_resource_end(pdev, 0); 6302 6303 /* PCI config space info */ 6304 hw->vendor_id = pdev->vendor; 6305 hw->device_id = pdev->device; 6306 hw->revision_id = pdev->revision; 6307 hw->subsystem_vendor_id = pdev->subsystem_vendor; 6308 hw->subsystem_device_id = pdev->subsystem_device; 6309 6310 /* Copy the default MAC and PHY function pointers */ 6311 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 6312 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 6313 6314 /* Initialize skew-specific constants */ 6315 err = ei->get_invariants(hw); 6316 if (err) 6317 goto err_sw_init; 6318 6319 /* Add supported features to the features list*/ 6320 netdev->features |= NETIF_F_SG; 6321 netdev->features |= NETIF_F_TSO; 6322 netdev->features |= NETIF_F_TSO6; 6323 netdev->features |= NETIF_F_TSO_ECN; 6324 netdev->features |= NETIF_F_RXCSUM; 6325 netdev->features |= NETIF_F_HW_CSUM; 6326 netdev->features |= NETIF_F_SCTP_CRC; 6327 netdev->features |= NETIF_F_HW_TC; 6328 6329 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 6330 NETIF_F_GSO_GRE_CSUM | \ 6331 NETIF_F_GSO_IPXIP4 | \ 6332 NETIF_F_GSO_IPXIP6 | \ 6333 NETIF_F_GSO_UDP_TUNNEL | \ 6334 NETIF_F_GSO_UDP_TUNNEL_CSUM) 6335 6336 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; 6337 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; 6338 6339 /* setup the private structure */ 6340 err = igc_sw_init(adapter); 6341 if (err) 6342 goto err_sw_init; 6343 6344 /* copy netdev features into list of user selectable features */ 6345 netdev->hw_features |= NETIF_F_NTUPLE; 6346 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 6347 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 6348 netdev->hw_features |= netdev->features; 6349 6350 if (pci_using_dac) 6351 netdev->features |= NETIF_F_HIGHDMA; 6352 6353 netdev->vlan_features |= netdev->features; 6354 6355 /* MTU range: 68 - 9216 */ 6356 netdev->min_mtu = ETH_MIN_MTU; 6357 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; 6358 6359 /* before reading the NVM, reset the controller to put the device in a 6360 * known good starting state 6361 */ 6362 hw->mac.ops.reset_hw(hw); 6363 6364 if (igc_get_flash_presence_i225(hw)) { 6365 if (hw->nvm.ops.validate(hw) < 0) { 6366 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 6367 err = -EIO; 6368 goto err_eeprom; 6369 } 6370 } 6371 6372 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { 6373 /* copy the MAC address out of the NVM */ 6374 if (hw->mac.ops.read_mac_addr(hw)) 6375 dev_err(&pdev->dev, "NVM Read Error\n"); 6376 } 6377 6378 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 6379 6380 if (!is_valid_ether_addr(netdev->dev_addr)) { 6381 dev_err(&pdev->dev, "Invalid MAC Address\n"); 6382 err = -EIO; 6383 goto err_eeprom; 6384 } 6385 6386 /* configure RXPBSIZE and TXPBSIZE */ 6387 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); 6388 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); 6389 6390 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); 6391 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); 6392 6393 INIT_WORK(&adapter->reset_task, igc_reset_task); 6394 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); 6395 6396 /* Initialize link properties that are user-changeable */ 6397 adapter->fc_autoneg = true; 6398 hw->mac.autoneg = true; 6399 hw->phy.autoneg_advertised = 0xaf; 6400 6401 hw->fc.requested_mode = igc_fc_default; 6402 hw->fc.current_mode = igc_fc_default; 6403 6404 /* By default, support wake on port A */ 6405 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; 6406 6407 /* initialize the wol settings based on the eeprom settings */ 6408 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) 6409 adapter->wol |= IGC_WUFC_MAG; 6410 6411 device_set_wakeup_enable(&adapter->pdev->dev, 6412 adapter->flags & IGC_FLAG_WOL_SUPPORTED); 6413 6414 igc_ptp_init(adapter); 6415 6416 igc_tsn_clear_schedule(adapter); 6417 6418 /* reset the hardware with the new settings */ 6419 igc_reset(adapter); 6420 6421 /* let the f/w know that the h/w is now under the control of the 6422 * driver. 6423 */ 6424 igc_get_hw_control(adapter); 6425 6426 strncpy(netdev->name, "eth%d", IFNAMSIZ); 6427 err = register_netdev(netdev); 6428 if (err) 6429 goto err_register; 6430 6431 /* carrier off reporting is important to ethtool even BEFORE open */ 6432 netif_carrier_off(netdev); 6433 6434 /* Check if Media Autosense is enabled */ 6435 adapter->ei = *ei; 6436 6437 /* print pcie link status and MAC address */ 6438 pcie_print_link_status(pdev); 6439 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); 6440 6441 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); 6442 /* Disable EEE for internal PHY devices */ 6443 hw->dev_spec._base.eee_enable = false; 6444 adapter->flags &= ~IGC_FLAG_EEE; 6445 igc_set_eee_i225(hw, false, false, false); 6446 6447 pm_runtime_put_noidle(&pdev->dev); 6448 6449 return 0; 6450 6451 err_register: 6452 igc_release_hw_control(adapter); 6453 err_eeprom: 6454 if (!igc_check_reset_block(hw)) 6455 igc_reset_phy(hw); 6456 err_sw_init: 6457 igc_clear_interrupt_scheme(adapter); 6458 iounmap(adapter->io_addr); 6459 err_ioremap: 6460 free_netdev(netdev); 6461 err_alloc_etherdev: 6462 pci_disable_pcie_error_reporting(pdev); 6463 pci_release_mem_regions(pdev); 6464 err_pci_reg: 6465 err_dma: 6466 pci_disable_device(pdev); 6467 return err; 6468 } 6469 6470 /** 6471 * igc_remove - Device Removal Routine 6472 * @pdev: PCI device information struct 6473 * 6474 * igc_remove is called by the PCI subsystem to alert the driver 6475 * that it should release a PCI device. This could be caused by a 6476 * Hot-Plug event, or because the driver is going to be removed from 6477 * memory. 6478 */ 6479 static void igc_remove(struct pci_dev *pdev) 6480 { 6481 struct net_device *netdev = pci_get_drvdata(pdev); 6482 struct igc_adapter *adapter = netdev_priv(netdev); 6483 6484 pm_runtime_get_noresume(&pdev->dev); 6485 6486 igc_flush_nfc_rules(adapter); 6487 6488 igc_ptp_stop(adapter); 6489 6490 set_bit(__IGC_DOWN, &adapter->state); 6491 6492 del_timer_sync(&adapter->watchdog_timer); 6493 del_timer_sync(&adapter->phy_info_timer); 6494 6495 cancel_work_sync(&adapter->reset_task); 6496 cancel_work_sync(&adapter->watchdog_task); 6497 6498 /* Release control of h/w to f/w. If f/w is AMT enabled, this 6499 * would have already happened in close and is redundant. 6500 */ 6501 igc_release_hw_control(adapter); 6502 unregister_netdev(netdev); 6503 6504 igc_clear_interrupt_scheme(adapter); 6505 pci_iounmap(pdev, adapter->io_addr); 6506 pci_release_mem_regions(pdev); 6507 6508 free_netdev(netdev); 6509 6510 pci_disable_pcie_error_reporting(pdev); 6511 6512 pci_disable_device(pdev); 6513 } 6514 6515 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, 6516 bool runtime) 6517 { 6518 struct net_device *netdev = pci_get_drvdata(pdev); 6519 struct igc_adapter *adapter = netdev_priv(netdev); 6520 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; 6521 struct igc_hw *hw = &adapter->hw; 6522 u32 ctrl, rctl, status; 6523 bool wake; 6524 6525 rtnl_lock(); 6526 netif_device_detach(netdev); 6527 6528 if (netif_running(netdev)) 6529 __igc_close(netdev, true); 6530 6531 igc_ptp_suspend(adapter); 6532 6533 igc_clear_interrupt_scheme(adapter); 6534 rtnl_unlock(); 6535 6536 status = rd32(IGC_STATUS); 6537 if (status & IGC_STATUS_LU) 6538 wufc &= ~IGC_WUFC_LNKC; 6539 6540 if (wufc) { 6541 igc_setup_rctl(adapter); 6542 igc_set_rx_mode(netdev); 6543 6544 /* turn on all-multi mode if wake on multicast is enabled */ 6545 if (wufc & IGC_WUFC_MC) { 6546 rctl = rd32(IGC_RCTL); 6547 rctl |= IGC_RCTL_MPE; 6548 wr32(IGC_RCTL, rctl); 6549 } 6550 6551 ctrl = rd32(IGC_CTRL); 6552 ctrl |= IGC_CTRL_ADVD3WUC; 6553 wr32(IGC_CTRL, ctrl); 6554 6555 /* Allow time for pending master requests to run */ 6556 igc_disable_pcie_master(hw); 6557 6558 wr32(IGC_WUC, IGC_WUC_PME_EN); 6559 wr32(IGC_WUFC, wufc); 6560 } else { 6561 wr32(IGC_WUC, 0); 6562 wr32(IGC_WUFC, 0); 6563 } 6564 6565 wake = wufc || adapter->en_mng_pt; 6566 if (!wake) 6567 igc_power_down_phy_copper_base(&adapter->hw); 6568 else 6569 igc_power_up_link(adapter); 6570 6571 if (enable_wake) 6572 *enable_wake = wake; 6573 6574 /* Release control of h/w to f/w. If f/w is AMT enabled, this 6575 * would have already happened in close and is redundant. 6576 */ 6577 igc_release_hw_control(adapter); 6578 6579 pci_disable_device(pdev); 6580 6581 return 0; 6582 } 6583 6584 #ifdef CONFIG_PM 6585 static int __maybe_unused igc_runtime_suspend(struct device *dev) 6586 { 6587 return __igc_shutdown(to_pci_dev(dev), NULL, 1); 6588 } 6589 6590 static void igc_deliver_wake_packet(struct net_device *netdev) 6591 { 6592 struct igc_adapter *adapter = netdev_priv(netdev); 6593 struct igc_hw *hw = &adapter->hw; 6594 struct sk_buff *skb; 6595 u32 wupl; 6596 6597 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; 6598 6599 /* WUPM stores only the first 128 bytes of the wake packet. 6600 * Read the packet only if we have the whole thing. 6601 */ 6602 if (wupl == 0 || wupl > IGC_WUPM_BYTES) 6603 return; 6604 6605 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); 6606 if (!skb) 6607 return; 6608 6609 skb_put(skb, wupl); 6610 6611 /* Ensure reads are 32-bit aligned */ 6612 wupl = roundup(wupl, 4); 6613 6614 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); 6615 6616 skb->protocol = eth_type_trans(skb, netdev); 6617 netif_rx(skb); 6618 } 6619 6620 static int __maybe_unused igc_resume(struct device *dev) 6621 { 6622 struct pci_dev *pdev = to_pci_dev(dev); 6623 struct net_device *netdev = pci_get_drvdata(pdev); 6624 struct igc_adapter *adapter = netdev_priv(netdev); 6625 struct igc_hw *hw = &adapter->hw; 6626 u32 err, val; 6627 6628 pci_set_power_state(pdev, PCI_D0); 6629 pci_restore_state(pdev); 6630 pci_save_state(pdev); 6631 6632 if (!pci_device_is_present(pdev)) 6633 return -ENODEV; 6634 err = pci_enable_device_mem(pdev); 6635 if (err) { 6636 netdev_err(netdev, "Cannot enable PCI device from suspend\n"); 6637 return err; 6638 } 6639 pci_set_master(pdev); 6640 6641 pci_enable_wake(pdev, PCI_D3hot, 0); 6642 pci_enable_wake(pdev, PCI_D3cold, 0); 6643 6644 if (igc_init_interrupt_scheme(adapter, true)) { 6645 netdev_err(netdev, "Unable to allocate memory for queues\n"); 6646 return -ENOMEM; 6647 } 6648 6649 igc_reset(adapter); 6650 6651 /* let the f/w know that the h/w is now under the control of the 6652 * driver. 6653 */ 6654 igc_get_hw_control(adapter); 6655 6656 val = rd32(IGC_WUS); 6657 if (val & WAKE_PKT_WUS) 6658 igc_deliver_wake_packet(netdev); 6659 6660 wr32(IGC_WUS, ~0); 6661 6662 rtnl_lock(); 6663 if (!err && netif_running(netdev)) 6664 err = __igc_open(netdev, true); 6665 6666 if (!err) 6667 netif_device_attach(netdev); 6668 rtnl_unlock(); 6669 6670 return err; 6671 } 6672 6673 static int __maybe_unused igc_runtime_resume(struct device *dev) 6674 { 6675 return igc_resume(dev); 6676 } 6677 6678 static int __maybe_unused igc_suspend(struct device *dev) 6679 { 6680 return __igc_shutdown(to_pci_dev(dev), NULL, 0); 6681 } 6682 6683 static int __maybe_unused igc_runtime_idle(struct device *dev) 6684 { 6685 struct net_device *netdev = dev_get_drvdata(dev); 6686 struct igc_adapter *adapter = netdev_priv(netdev); 6687 6688 if (!igc_has_link(adapter)) 6689 pm_schedule_suspend(dev, MSEC_PER_SEC * 5); 6690 6691 return -EBUSY; 6692 } 6693 #endif /* CONFIG_PM */ 6694 6695 static void igc_shutdown(struct pci_dev *pdev) 6696 { 6697 bool wake; 6698 6699 __igc_shutdown(pdev, &wake, 0); 6700 6701 if (system_state == SYSTEM_POWER_OFF) { 6702 pci_wake_from_d3(pdev, wake); 6703 pci_set_power_state(pdev, PCI_D3hot); 6704 } 6705 } 6706 6707 /** 6708 * igc_io_error_detected - called when PCI error is detected 6709 * @pdev: Pointer to PCI device 6710 * @state: The current PCI connection state 6711 * 6712 * This function is called after a PCI bus error affecting 6713 * this device has been detected. 6714 **/ 6715 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, 6716 pci_channel_state_t state) 6717 { 6718 struct net_device *netdev = pci_get_drvdata(pdev); 6719 struct igc_adapter *adapter = netdev_priv(netdev); 6720 6721 netif_device_detach(netdev); 6722 6723 if (state == pci_channel_io_perm_failure) 6724 return PCI_ERS_RESULT_DISCONNECT; 6725 6726 if (netif_running(netdev)) 6727 igc_down(adapter); 6728 pci_disable_device(pdev); 6729 6730 /* Request a slot reset. */ 6731 return PCI_ERS_RESULT_NEED_RESET; 6732 } 6733 6734 /** 6735 * igc_io_slot_reset - called after the PCI bus has been reset. 6736 * @pdev: Pointer to PCI device 6737 * 6738 * Restart the card from scratch, as if from a cold-boot. Implementation 6739 * resembles the first-half of the igc_resume routine. 6740 **/ 6741 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) 6742 { 6743 struct net_device *netdev = pci_get_drvdata(pdev); 6744 struct igc_adapter *adapter = netdev_priv(netdev); 6745 struct igc_hw *hw = &adapter->hw; 6746 pci_ers_result_t result; 6747 6748 if (pci_enable_device_mem(pdev)) { 6749 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); 6750 result = PCI_ERS_RESULT_DISCONNECT; 6751 } else { 6752 pci_set_master(pdev); 6753 pci_restore_state(pdev); 6754 pci_save_state(pdev); 6755 6756 pci_enable_wake(pdev, PCI_D3hot, 0); 6757 pci_enable_wake(pdev, PCI_D3cold, 0); 6758 6759 /* In case of PCI error, adapter loses its HW address 6760 * so we should re-assign it here. 6761 */ 6762 hw->hw_addr = adapter->io_addr; 6763 6764 igc_reset(adapter); 6765 wr32(IGC_WUS, ~0); 6766 result = PCI_ERS_RESULT_RECOVERED; 6767 } 6768 6769 return result; 6770 } 6771 6772 /** 6773 * igc_io_resume - called when traffic can start to flow again. 6774 * @pdev: Pointer to PCI device 6775 * 6776 * This callback is called when the error recovery driver tells us that 6777 * its OK to resume normal operation. Implementation resembles the 6778 * second-half of the igc_resume routine. 6779 */ 6780 static void igc_io_resume(struct pci_dev *pdev) 6781 { 6782 struct net_device *netdev = pci_get_drvdata(pdev); 6783 struct igc_adapter *adapter = netdev_priv(netdev); 6784 6785 rtnl_lock(); 6786 if (netif_running(netdev)) { 6787 if (igc_open(netdev)) { 6788 netdev_err(netdev, "igc_open failed after reset\n"); 6789 return; 6790 } 6791 } 6792 6793 netif_device_attach(netdev); 6794 6795 /* let the f/w know that the h/w is now under the control of the 6796 * driver. 6797 */ 6798 igc_get_hw_control(adapter); 6799 rtnl_unlock(); 6800 } 6801 6802 static const struct pci_error_handlers igc_err_handler = { 6803 .error_detected = igc_io_error_detected, 6804 .slot_reset = igc_io_slot_reset, 6805 .resume = igc_io_resume, 6806 }; 6807 6808 #ifdef CONFIG_PM 6809 static const struct dev_pm_ops igc_pm_ops = { 6810 SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) 6811 SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, 6812 igc_runtime_idle) 6813 }; 6814 #endif 6815 6816 static struct pci_driver igc_driver = { 6817 .name = igc_driver_name, 6818 .id_table = igc_pci_tbl, 6819 .probe = igc_probe, 6820 .remove = igc_remove, 6821 #ifdef CONFIG_PM 6822 .driver.pm = &igc_pm_ops, 6823 #endif 6824 .shutdown = igc_shutdown, 6825 .err_handler = &igc_err_handler, 6826 }; 6827 6828 /** 6829 * igc_reinit_queues - return error 6830 * @adapter: pointer to adapter structure 6831 */ 6832 int igc_reinit_queues(struct igc_adapter *adapter) 6833 { 6834 struct net_device *netdev = adapter->netdev; 6835 int err = 0; 6836 6837 if (netif_running(netdev)) 6838 igc_close(netdev); 6839 6840 igc_reset_interrupt_capability(adapter); 6841 6842 if (igc_init_interrupt_scheme(adapter, true)) { 6843 netdev_err(netdev, "Unable to allocate memory for queues\n"); 6844 return -ENOMEM; 6845 } 6846 6847 if (netif_running(netdev)) 6848 err = igc_open(netdev); 6849 6850 return err; 6851 } 6852 6853 /** 6854 * igc_get_hw_dev - return device 6855 * @hw: pointer to hardware structure 6856 * 6857 * used by hardware layer to print debugging information 6858 */ 6859 struct net_device *igc_get_hw_dev(struct igc_hw *hw) 6860 { 6861 struct igc_adapter *adapter = hw->back; 6862 6863 return adapter->netdev; 6864 } 6865 6866 static void igc_disable_rx_ring_hw(struct igc_ring *ring) 6867 { 6868 struct igc_hw *hw = &ring->q_vector->adapter->hw; 6869 u8 idx = ring->reg_idx; 6870 u32 rxdctl; 6871 6872 rxdctl = rd32(IGC_RXDCTL(idx)); 6873 rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; 6874 rxdctl |= IGC_RXDCTL_SWFLUSH; 6875 wr32(IGC_RXDCTL(idx), rxdctl); 6876 } 6877 6878 void igc_disable_rx_ring(struct igc_ring *ring) 6879 { 6880 igc_disable_rx_ring_hw(ring); 6881 igc_clean_rx_ring(ring); 6882 } 6883 6884 void igc_enable_rx_ring(struct igc_ring *ring) 6885 { 6886 struct igc_adapter *adapter = ring->q_vector->adapter; 6887 6888 igc_configure_rx_ring(adapter, ring); 6889 6890 if (ring->xsk_pool) 6891 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 6892 else 6893 igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 6894 } 6895 6896 static void igc_disable_tx_ring_hw(struct igc_ring *ring) 6897 { 6898 struct igc_hw *hw = &ring->q_vector->adapter->hw; 6899 u8 idx = ring->reg_idx; 6900 u32 txdctl; 6901 6902 txdctl = rd32(IGC_TXDCTL(idx)); 6903 txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; 6904 txdctl |= IGC_TXDCTL_SWFLUSH; 6905 wr32(IGC_TXDCTL(idx), txdctl); 6906 } 6907 6908 void igc_disable_tx_ring(struct igc_ring *ring) 6909 { 6910 igc_disable_tx_ring_hw(ring); 6911 igc_clean_tx_ring(ring); 6912 } 6913 6914 void igc_enable_tx_ring(struct igc_ring *ring) 6915 { 6916 struct igc_adapter *adapter = ring->q_vector->adapter; 6917 6918 igc_configure_tx_ring(adapter, ring); 6919 } 6920 6921 /** 6922 * igc_init_module - Driver Registration Routine 6923 * 6924 * igc_init_module is the first routine called when the driver is 6925 * loaded. All it does is register with the PCI subsystem. 6926 */ 6927 static int __init igc_init_module(void) 6928 { 6929 int ret; 6930 6931 pr_info("%s\n", igc_driver_string); 6932 pr_info("%s\n", igc_copyright); 6933 6934 ret = pci_register_driver(&igc_driver); 6935 return ret; 6936 } 6937 6938 module_init(igc_init_module); 6939 6940 /** 6941 * igc_exit_module - Driver Exit Cleanup Routine 6942 * 6943 * igc_exit_module is called just before the driver is removed 6944 * from memory. 6945 */ 6946 static void __exit igc_exit_module(void) 6947 { 6948 pci_unregister_driver(&igc_driver); 6949 } 6950 6951 module_exit(igc_exit_module); 6952 /* igc_main.c */ 6953