// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;
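
	/* As a rough worked example of the sizing above (assuming
	 * IGC_PBA_34K selects a 34 KB Rx packet buffer): pba << 10 gives
	 * 34816 bytes, and one max-sized Tx frame plus one jumbo Rx frame
	 * are reserved so a pause frame sent at the high water mark still
	 * leaves room for the frame currently arriving from the link
	 * partner. The 0xFFFFFFF0 mask rounds the threshold down to the
	 * register's 16-byte granularity, and the low water mark trails
	 * it by one granule.
	 */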

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_reset(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	if (!pci_device_is_present(adapter->pdev))
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* Zero out the buffer ring */
	memset(tx_ring->tx_buffer_info, 0,
	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_disable_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
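
/* Overview of the page-shared Rx scheme used from here on: each Rx buffer
 * is backed by (roughly) half a page, tracked by page_offset, and a
 * pagecnt_bias records how many references the driver still holds so the
 * same page can be handed to the stack and later recycled. Buffers are
 * synced for CPU before being unmapped so stale CPU cache lines do not
 * overwrite data the device has already written.
 */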
static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}
}

static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
{
	struct igc_rx_buffer *bi;
	u16 i;

	for (i = 0; i < ring->count; i++) {
		bi = &ring->rx_buffer_info[i];
		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *ring)
{
	if (ring->xsk_pool)
		igc_clean_rx_ring_xsk_pool(ring);
	else
		igc_clean_rx_ring_page_shared(ring);

	clear_ring_uses_large_buffer(ring);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	/* XDP RX-queue info */
	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	srrctl = rd32(IGC_SRRCTL(reg_idx));
	srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
		    IGC_SRRCTL_DESCTYPE_MASK);
	srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
	srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
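
	/* Reflect the buffer size chosen above to the hardware: an XSK pool
	 * supplies its own Rx frame size, XDP without zero-copy uses the
	 * larger 3072-byte buffer so XDP headroom and tailroom still fit,
	 * and the default path uses 2048 bytes. BSIZEPKT and BSIZEHDR are
	 * written in the register's own granularity, which the conversion
	 * macros take care of.
	 */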
	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);
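
	/* The default fill above spreads RETA entries evenly over the
	 * active RSS queues: entry j maps to (j * rss_queues) / IGC_RETA_SIZE.
	 * As an illustration (assuming a 128-entry table), four RSS queues
	 * give entries 0-31 to queue 0, 32-63 to queue 1, and so on.
	 */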
	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}
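
/* Filter entry layout used by the helpers below (exact bit positions come
 * from igc_defines.h): RAL holds the low 32 bits of the MAC address and RAH
 * the upper 16 bits plus control bits - AV marks the entry valid,
 * QSEL/QSEL_ENABLE steer matching frames to a specific queue, and ASEL
 * selects whether the compare is done on the destination or the source
 * address.
 */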
/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *	    0 on no addresses written
 *	    X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
				bool *first_flag, bool *insert_empty)
{
	struct igc_adapter *adapter = netdev_priv(ring->netdev);
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	ktime_t now = ktime_get_clocktai();
	ktime_t baset_est, end_of_cycle;
	s32 launchtime;
	s64 n;

	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);

	baset_est = ktime_add_ns(base_time, cycle_time * (n));
	end_of_cycle = ktime_add_ns(baset_est, cycle_time);

	if (ktime_compare(txtime, end_of_cycle) >= 0) {
		if (baset_est != ring->last_ff_cycle) {
			*first_flag = true;
			ring->last_ff_cycle = baset_est;

			if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
				*insert_empty = true;
		}
	}

	/* Introduce a window at the end of the cycle in which packets may
	 * not honor their launchtime. The 5 usec window accounts for the
	 * time software needs to update the tail pointer and for the
	 * packet to be DMA'd into the packet buffer.
	 */
	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
			    txtime);

	ring->last_tx_cycle = end_of_cycle;

	launchtime = ktime_sub_ns(txtime, baset_est);
	if (launchtime > 0)
		div_s64_rem(launchtime, cycle_time, &launchtime);
	else
		launchtime = 0;

	return cpu_to_le32(launchtime);
}

static int igc_init_empty_frame(struct igc_ring *ring,
				struct igc_tx_buffer *buffer,
				struct sk_buff *skb)
{
	unsigned int size;
	dma_addr_t dma;

	size = skb_headlen(skb);

	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->protocol = 0;
	buffer->bytecount = skb->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, skb->len);
	dma_unmap_addr_set(buffer, dma, dma);

	return 0;
}

static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
					struct sk_buff *skb,
					struct igc_tx_buffer *first)
{
	union igc_adv_tx_desc *desc;
	u32 cmd_type, olinfo_status;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	err = igc_init_empty_frame(ring, first, skb);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   first->bytecount;
	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));

	netdev_tx_sent_queue(txring_txq(ring), skb->len);

	first->next_to_watch = desc;

	ring->next_to_use++;
	if (ring->next_to_use == ring->count)
		ring->next_to_use = 0;

	return 0;
}

#define IGC_EMPTY_FRAME_SIZE 60
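
/* The context descriptor written below occupies a regular slot on the Tx
 * ring but carries metadata instead of a buffer address: header lengths
 * and the VLAN tag (vlan_macip_lens), checksum/segmentation commands
 * (type_tucmd), MSS and L4 length for TSO (mss_l4len_idx) and the TSN
 * launch time. On i225 the context index folded into mss_l4len_idx must
 * stay unique per ring, hence the reg_idx shift guarded by the ring flag.
 */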
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    __le32 launch_time, bool first_flag,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	if (first_flag)
		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->launch_time = launch_time;
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
			__le32 launch_time, bool first_flag)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: make the stopped state visible before we
	 * re-check how many descriptors are free.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}
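
/* IGC_SET_FLAG below moves a flag bit in _input to the bit position of
 * _result by scaling with a compile-time constant: it multiplies when the
 * source bit is the lower one and divides otherwise. For example (with
 * made-up values), _flag = BIT(1) and _result = BIT(6) multiply a set bit
 * by BIT(6) / BIT(1) = 32, i.e. shift it left by five positions.
 */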
#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ?				\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))

static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
				 IGC_ADVTXD_DCMD_VLE);

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = igc_tx_cmd_type(skb, tx_flags);
	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}
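
	/* Note on the mapping loop above: any buffer larger than
	 * IGC_MAX_DATA_PER_TXD is split over several descriptors, and
	 * "cmd_type ^ length" behaves like an OR because the command bits
	 * and the length field occupy disjoint bits. Only the final
	 * descriptor, written below, adds the RS and EOP bits so hardware
	 * reports one completion per packet.
	 */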
	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   __le32 launch_time, bool first_flag,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
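	/* The branch below seeds the transport checksum for segmentation:
	 * the payload length is folded out of the TCP or UDP checksum so
	 * hardware can account for each segment's own payload when it
	 * recomputes the checksum, and hdr_len ends up covering everything
	 * up to and including the transport header.
	 */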
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
	bool first_flag = false, insert_empty = false;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	__le32 launch_time = 0;
	u32 tx_flags = 0;
	unsigned short f;
	ktime_t txtime;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (!tx_ring->launchtime_enable)
		goto done;

	txtime = skb->tstamp;
	skb->tstamp = ktime_set(0, 0);
	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);

	if (insert_empty) {
		struct igc_tx_buffer *empty_info;
		struct sk_buff *empty;
		void *data;

		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
		if (!empty)
			goto done;

		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
		memset(data, 0, IGC_EMPTY_FRAME_SIZE);

		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);

		if (igc_init_tx_empty_descriptor(tx_ring,
						 empty,
						 empty_info) < 0)
			dev_kfree_skb_any(empty);
	}

done:
	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->type = IGC_TX_BUFFER_TYPE_SKB;
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (adapter->qbv_transition || tx_ring->oper_gate_closed)
		goto out_drop;

	if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
		adapter->stats.txdrop++;
		goto out_drop;
	}
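
	/* Only one packet may have an outstanding hardware Tx timestamp in
	 * this path: the skb is parked in adapter->ptp_tx_skb under
	 * ptp_tx_lock until the timestamp is read back, and any request
	 * arriving while one is pending is only counted in
	 * tx_hwtstamp_skipped.
	 */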
	if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		unsigned long flags;

		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
		if (!adapter->ptp_tx_skb) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}

		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGC_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first, launch_time, first_flag);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

/* Mapping HW RSS Type to enum pkt_hash_types */
static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */
	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions      */
	[13] = PKT_HASH_TYPE_NONE,
	[14] = PKT_HASH_TYPE_NONE,
	[15] = PKT_HASH_TYPE_NONE,
};

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH) {
		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		u32 rss_type = igc_rss_type(rx_desc);

		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
	}
}

static void igc_rx_vlan(struct igc_ring *rx_ring,
			union igc_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	igc_rx_vlan(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl |= IGC_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~IGC_CTRL_VME;
	}
	wr32(IGC_CTRL, ctrl);
}

static void igc_restore_vlan(struct igc_adapter *adapter)
{
	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
			       unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
	buffer->page_offset ^= truesize;
#else
	buffer->page_offset += truesize;
#endif
}

static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(ring) / 2;
#else
	truesize = ring_uses_build_skb(ring) ?
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	igc_rx_buffer_flip(rx_buffer, truesize);
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, size);
	if (metasize)
		skb_metadata_set(skb, metasize);

	igc_rx_buffer_flip(rx_buffer, truesize);
	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	void *va = xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
			     IGC_RX_HDR_LEN + metasize);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
	       ALIGN(headlen + metasize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
		igc_rx_buffer_flip(rx_buffer, truesize);
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
1982 */ 1983 new_buff->dma = old_buff->dma; 1984 new_buff->page = old_buff->page; 1985 new_buff->page_offset = old_buff->page_offset; 1986 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1987 } 1988 1989 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, 1990 int rx_buffer_pgcnt) 1991 { 1992 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1993 struct page *page = rx_buffer->page; 1994 1995 /* avoid re-using remote and pfmemalloc pages */ 1996 if (!dev_page_is_reusable(page)) 1997 return false; 1998 1999 #if (PAGE_SIZE < 8192) 2000 /* if we are only owner of page we can reuse it */ 2001 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) 2002 return false; 2003 #else 2004 #define IGC_LAST_OFFSET \ 2005 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) 2006 2007 if (rx_buffer->page_offset > IGC_LAST_OFFSET) 2008 return false; 2009 #endif 2010 2011 /* If we have drained the page fragment pool we need to update 2012 * the pagecnt_bias and page count so that we fully restock the 2013 * number of references the driver holds. 2014 */ 2015 if (unlikely(pagecnt_bias == 1)) { 2016 page_ref_add(page, USHRT_MAX - 1); 2017 rx_buffer->pagecnt_bias = USHRT_MAX; 2018 } 2019 2020 return true; 2021 } 2022 2023 /** 2024 * igc_is_non_eop - process handling of non-EOP buffers 2025 * @rx_ring: Rx ring being processed 2026 * @rx_desc: Rx descriptor for current buffer 2027 * 2028 * This function updates next to clean. If the buffer is an EOP buffer 2029 * this function exits returning false, otherwise it will place the 2030 * sk_buff in the next buffer to be chained and return true indicating 2031 * that this is in fact a non-EOP buffer. 2032 */ 2033 static bool igc_is_non_eop(struct igc_ring *rx_ring, 2034 union igc_adv_rx_desc *rx_desc) 2035 { 2036 u32 ntc = rx_ring->next_to_clean + 1; 2037 2038 /* fetch, update, and store next to clean */ 2039 ntc = (ntc < rx_ring->count) ? ntc : 0; 2040 rx_ring->next_to_clean = ntc; 2041 2042 prefetch(IGC_RX_DESC(rx_ring, ntc)); 2043 2044 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) 2045 return false; 2046 2047 return true; 2048 } 2049 2050 /** 2051 * igc_cleanup_headers - Correct corrupted or empty headers 2052 * @rx_ring: rx descriptor ring packet is being transacted on 2053 * @rx_desc: pointer to the EOP Rx descriptor 2054 * @skb: pointer to current skb being fixed 2055 * 2056 * Address the case where we are pulling data in on pages only 2057 * and as such no data is present in the skb header. 2058 * 2059 * In addition if skb is not at least 60 bytes we need to pad it so that 2060 * it is large enough to qualify as a valid Ethernet frame. 2061 * 2062 * Returns true if an error was encountered and skb was freed. 
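 * Note: an skb carrying an XDP verdict is passed in as an ERR_PTR() and is
 * treated as an already-consumed frame, and frames flagged with the RXE
 * error bit are dropped unless the interface has NETIF_F_RXALL set.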
2063 */ 2064 static bool igc_cleanup_headers(struct igc_ring *rx_ring, 2065 union igc_adv_rx_desc *rx_desc, 2066 struct sk_buff *skb) 2067 { 2068 /* XDP packets use error pointer so abort at this point */ 2069 if (IS_ERR(skb)) 2070 return true; 2071 2072 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { 2073 struct net_device *netdev = rx_ring->netdev; 2074 2075 if (!(netdev->features & NETIF_F_RXALL)) { 2076 dev_kfree_skb_any(skb); 2077 return true; 2078 } 2079 } 2080 2081 /* if eth_skb_pad returns an error the skb was freed */ 2082 if (eth_skb_pad(skb)) 2083 return true; 2084 2085 return false; 2086 } 2087 2088 static void igc_put_rx_buffer(struct igc_ring *rx_ring, 2089 struct igc_rx_buffer *rx_buffer, 2090 int rx_buffer_pgcnt) 2091 { 2092 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { 2093 /* hand second half of page back to the ring */ 2094 igc_reuse_rx_page(rx_ring, rx_buffer); 2095 } else { 2096 /* We are not reusing the buffer so unmap it and free 2097 * any references we are holding to it 2098 */ 2099 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 2100 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 2101 IGC_RX_DMA_ATTR); 2102 __page_frag_cache_drain(rx_buffer->page, 2103 rx_buffer->pagecnt_bias); 2104 } 2105 2106 /* clear contents of rx_buffer */ 2107 rx_buffer->page = NULL; 2108 } 2109 2110 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) 2111 { 2112 struct igc_adapter *adapter = rx_ring->q_vector->adapter; 2113 2114 if (ring_uses_build_skb(rx_ring)) 2115 return IGC_SKB_PAD; 2116 if (igc_xdp_is_enabled(adapter)) 2117 return XDP_PACKET_HEADROOM; 2118 2119 return 0; 2120 } 2121 2122 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, 2123 struct igc_rx_buffer *bi) 2124 { 2125 struct page *page = bi->page; 2126 dma_addr_t dma; 2127 2128 /* since we are recycling buffers we should seldom need to alloc */ 2129 if (likely(page)) 2130 return true; 2131 2132 /* alloc new page for storage */ 2133 page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); 2134 if (unlikely(!page)) { 2135 rx_ring->rx_stats.alloc_failed++; 2136 return false; 2137 } 2138 2139 /* map page for use */ 2140 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 2141 igc_rx_pg_size(rx_ring), 2142 DMA_FROM_DEVICE, 2143 IGC_RX_DMA_ATTR); 2144 2145 /* if mapping failed free memory back to system since 2146 * there isn't much point in holding memory we can't use 2147 */ 2148 if (dma_mapping_error(rx_ring->dev, dma)) { 2149 __free_page(page); 2150 2151 rx_ring->rx_stats.alloc_failed++; 2152 return false; 2153 } 2154 2155 bi->dma = dma; 2156 bi->page = page; 2157 bi->page_offset = igc_rx_offset(rx_ring); 2158 page_ref_add(page, USHRT_MAX - 1); 2159 bi->pagecnt_bias = USHRT_MAX; 2160 2161 return true; 2162 } 2163 2164 /** 2165 * igc_alloc_rx_buffers - Replace used receive buffers; packet split 2166 * @rx_ring: rx descriptor ring 2167 * @cleaned_count: number of buffers to clean 2168 */ 2169 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) 2170 { 2171 union igc_adv_rx_desc *rx_desc; 2172 u16 i = rx_ring->next_to_use; 2173 struct igc_rx_buffer *bi; 2174 u16 bufsz; 2175 2176 /* nothing to do */ 2177 if (!cleaned_count) 2178 return; 2179 2180 rx_desc = IGC_RX_DESC(rx_ring, i); 2181 bi = &rx_ring->rx_buffer_info[i]; 2182 i -= rx_ring->count; 2183 2184 bufsz = igc_rx_bufsz(rx_ring); 2185 2186 do { 2187 if (!igc_alloc_mapped_page(rx_ring, bi)) 2188 break; 2189 2190 /* sync the buffer for use by the device */ 2191 dma_sync_single_range_for_device(rx_ring->dev, 
bi->dma, 2192 bi->page_offset, bufsz, 2193 DMA_FROM_DEVICE); 2194 2195 /* Refresh the desc even if buffer_addrs didn't change 2196 * because each write-back erases this info. 2197 */ 2198 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 2199 2200 rx_desc++; 2201 bi++; 2202 i++; 2203 if (unlikely(!i)) { 2204 rx_desc = IGC_RX_DESC(rx_ring, 0); 2205 bi = rx_ring->rx_buffer_info; 2206 i -= rx_ring->count; 2207 } 2208 2209 /* clear the length for the next_to_use descriptor */ 2210 rx_desc->wb.upper.length = 0; 2211 2212 cleaned_count--; 2213 } while (cleaned_count); 2214 2215 i += rx_ring->count; 2216 2217 if (rx_ring->next_to_use != i) { 2218 /* record the next descriptor to use */ 2219 rx_ring->next_to_use = i; 2220 2221 /* update next to alloc since we have filled the ring */ 2222 rx_ring->next_to_alloc = i; 2223 2224 /* Force memory writes to complete before letting h/w 2225 * know there are new descriptors to fetch. (Only 2226 * applicable for weak-ordered memory model archs, 2227 * such as IA-64). 2228 */ 2229 wmb(); 2230 writel(i, rx_ring->tail); 2231 } 2232 } 2233 2234 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) 2235 { 2236 union igc_adv_rx_desc *desc; 2237 u16 i = ring->next_to_use; 2238 struct igc_rx_buffer *bi; 2239 dma_addr_t dma; 2240 bool ok = true; 2241 2242 if (!count) 2243 return ok; 2244 2245 XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); 2246 2247 desc = IGC_RX_DESC(ring, i); 2248 bi = &ring->rx_buffer_info[i]; 2249 i -= ring->count; 2250 2251 do { 2252 bi->xdp = xsk_buff_alloc(ring->xsk_pool); 2253 if (!bi->xdp) { 2254 ok = false; 2255 break; 2256 } 2257 2258 dma = xsk_buff_xdp_get_dma(bi->xdp); 2259 desc->read.pkt_addr = cpu_to_le64(dma); 2260 2261 desc++; 2262 bi++; 2263 i++; 2264 if (unlikely(!i)) { 2265 desc = IGC_RX_DESC(ring, 0); 2266 bi = ring->rx_buffer_info; 2267 i -= ring->count; 2268 } 2269 2270 /* Clear the length for the next_to_use descriptor. */ 2271 desc->wb.upper.length = 0; 2272 2273 count--; 2274 } while (count); 2275 2276 i += ring->count; 2277 2278 if (ring->next_to_use != i) { 2279 ring->next_to_use = i; 2280 2281 /* Force memory writes to complete before letting h/w 2282 * know there are new descriptors to fetch. (Only 2283 * applicable for weak-ordered memory model archs, 2284 * such as IA-64). 2285 */ 2286 wmb(); 2287 writel(i, ring->tail); 2288 } 2289 2290 return ok; 2291 } 2292 2293 /* This function requires __netif_tx_lock is held by the caller. */ 2294 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, 2295 struct xdp_frame *xdpf) 2296 { 2297 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); 2298 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; 2299 u16 count, index = ring->next_to_use; 2300 struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; 2301 struct igc_tx_buffer *buffer = head; 2302 union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); 2303 u32 olinfo_status, len = xdpf->len, cmd_type; 2304 void *data = xdpf->data; 2305 u16 i; 2306 2307 count = TXD_USE_COUNT(len); 2308 for (i = 0; i < nr_frags; i++) 2309 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); 2310 2311 if (igc_maybe_stop_tx(ring, count + 3)) { 2312 /* this is a hard error */ 2313 return -EBUSY; 2314 } 2315 2316 i = 0; 2317 head->bytecount = xdp_get_frame_len(xdpf); 2318 head->type = IGC_TX_BUFFER_TYPE_XDP; 2319 head->gso_segs = 1; 2320 head->xdpf = xdpf; 2321 2322 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; 2323 desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2324 2325 for (;;) { 2326 dma_addr_t dma; 2327 2328 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); 2329 if (dma_mapping_error(ring->dev, dma)) { 2330 netdev_err_once(ring->netdev, 2331 "Failed to map DMA for TX\n"); 2332 goto unmap; 2333 } 2334 2335 dma_unmap_len_set(buffer, len, len); 2336 dma_unmap_addr_set(buffer, dma, dma); 2337 2338 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2339 IGC_ADVTXD_DCMD_IFCS | len; 2340 2341 desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2342 desc->read.buffer_addr = cpu_to_le64(dma); 2343 2344 buffer->protocol = 0; 2345 2346 if (++index == ring->count) 2347 index = 0; 2348 2349 if (i == nr_frags) 2350 break; 2351 2352 buffer = &ring->tx_buffer_info[index]; 2353 desc = IGC_TX_DESC(ring, index); 2354 desc->read.olinfo_status = 0; 2355 2356 data = skb_frag_address(&sinfo->frags[i]); 2357 len = skb_frag_size(&sinfo->frags[i]); 2358 i++; 2359 } 2360 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); 2361 2362 netdev_tx_sent_queue(txring_txq(ring), head->bytecount); 2363 /* set the timestamp */ 2364 head->time_stamp = jiffies; 2365 /* set next_to_watch value indicating a packet is present */ 2366 head->next_to_watch = desc; 2367 ring->next_to_use = index; 2368 2369 return 0; 2370 2371 unmap: 2372 for (;;) { 2373 buffer = &ring->tx_buffer_info[index]; 2374 if (dma_unmap_len(buffer, len)) 2375 dma_unmap_page(ring->dev, 2376 dma_unmap_addr(buffer, dma), 2377 dma_unmap_len(buffer, len), 2378 DMA_TO_DEVICE); 2379 dma_unmap_len_set(buffer, len, 0); 2380 if (buffer == head) 2381 break; 2382 2383 if (!index) 2384 index += ring->count; 2385 index--; 2386 } 2387 2388 return -ENOMEM; 2389 } 2390 2391 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, 2392 int cpu) 2393 { 2394 int index = cpu; 2395 2396 if (unlikely(index < 0)) 2397 index = 0; 2398 2399 while (index >= adapter->num_tx_queues) 2400 index -= adapter->num_tx_queues; 2401 2402 return adapter->tx_ring[index]; 2403 } 2404 2405 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) 2406 { 2407 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 2408 int cpu = smp_processor_id(); 2409 struct netdev_queue *nq; 2410 struct igc_ring *ring; 2411 int res; 2412 2413 if (unlikely(!xdpf)) 2414 return -EFAULT; 2415 2416 ring = igc_xdp_get_tx_ring(adapter, cpu); 2417 nq = txring_txq(ring); 2418 2419 __netif_tx_lock(nq, cpu); 2420 /* Avoid transmit queue timeout since we share it with the slow path */ 2421 txq_trans_cond_update(nq); 2422 res = igc_xdp_init_tx_descriptor(ring, xdpf); 2423 __netif_tx_unlock(nq); 2424 return res; 2425 } 2426 2427 /* This function assumes rcu_read_lock() is held by the caller. 
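 * The attached program is fetched with READ_ONCE(adapter->xdp_prog); holding
 * the RCU read lock keeps that program from being freed while it runs.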
*/ 2428 static int __igc_xdp_run_prog(struct igc_adapter *adapter, 2429 struct bpf_prog *prog, 2430 struct xdp_buff *xdp) 2431 { 2432 u32 act = bpf_prog_run_xdp(prog, xdp); 2433 2434 switch (act) { 2435 case XDP_PASS: 2436 return IGC_XDP_PASS; 2437 case XDP_TX: 2438 if (igc_xdp_xmit_back(adapter, xdp) < 0) 2439 goto out_failure; 2440 return IGC_XDP_TX; 2441 case XDP_REDIRECT: 2442 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) 2443 goto out_failure; 2444 return IGC_XDP_REDIRECT; 2445 break; 2446 default: 2447 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); 2448 fallthrough; 2449 case XDP_ABORTED: 2450 out_failure: 2451 trace_xdp_exception(adapter->netdev, prog, act); 2452 fallthrough; 2453 case XDP_DROP: 2454 return IGC_XDP_CONSUMED; 2455 } 2456 } 2457 2458 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, 2459 struct xdp_buff *xdp) 2460 { 2461 struct bpf_prog *prog; 2462 int res; 2463 2464 prog = READ_ONCE(adapter->xdp_prog); 2465 if (!prog) { 2466 res = IGC_XDP_PASS; 2467 goto out; 2468 } 2469 2470 res = __igc_xdp_run_prog(adapter, prog, xdp); 2471 2472 out: 2473 return ERR_PTR(-res); 2474 } 2475 2476 /* This function assumes __netif_tx_lock is held by the caller. */ 2477 static void igc_flush_tx_descriptors(struct igc_ring *ring) 2478 { 2479 /* Once tail pointer is updated, hardware can fetch the descriptors 2480 * any time so we issue a write membar here to ensure all memory 2481 * writes are complete before the tail pointer is updated. 2482 */ 2483 wmb(); 2484 writel(ring->next_to_use, ring->tail); 2485 } 2486 2487 static void igc_finalize_xdp(struct igc_adapter *adapter, int status) 2488 { 2489 int cpu = smp_processor_id(); 2490 struct netdev_queue *nq; 2491 struct igc_ring *ring; 2492 2493 if (status & IGC_XDP_TX) { 2494 ring = igc_xdp_get_tx_ring(adapter, cpu); 2495 nq = txring_txq(ring); 2496 2497 __netif_tx_lock(nq, cpu); 2498 igc_flush_tx_descriptors(ring); 2499 __netif_tx_unlock(nq); 2500 } 2501 2502 if (status & IGC_XDP_REDIRECT) 2503 xdp_do_flush(); 2504 } 2505 2506 static void igc_update_rx_stats(struct igc_q_vector *q_vector, 2507 unsigned int packets, unsigned int bytes) 2508 { 2509 struct igc_ring *ring = q_vector->rx.ring; 2510 2511 u64_stats_update_begin(&ring->rx_syncp); 2512 ring->rx_stats.packets += packets; 2513 ring->rx_stats.bytes += bytes; 2514 u64_stats_update_end(&ring->rx_syncp); 2515 2516 q_vector->rx.total_packets += packets; 2517 q_vector->rx.total_bytes += bytes; 2518 } 2519 2520 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) 2521 { 2522 unsigned int total_bytes = 0, total_packets = 0; 2523 struct igc_adapter *adapter = q_vector->adapter; 2524 struct igc_ring *rx_ring = q_vector->rx.ring; 2525 struct sk_buff *skb = rx_ring->skb; 2526 u16 cleaned_count = igc_desc_unused(rx_ring); 2527 int xdp_status = 0, rx_buffer_pgcnt; 2528 2529 while (likely(total_packets < budget)) { 2530 union igc_adv_rx_desc *rx_desc; 2531 struct igc_rx_buffer *rx_buffer; 2532 unsigned int size, truesize; 2533 struct igc_xdp_buff ctx; 2534 ktime_t timestamp = 0; 2535 int pkt_offset = 0; 2536 void *pktbuf; 2537 2538 /* return some buffers to hardware, one at a time is too slow */ 2539 if (cleaned_count >= IGC_RX_BUFFER_WRITE) { 2540 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2541 cleaned_count = 0; 2542 } 2543 2544 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); 2545 size = le16_to_cpu(rx_desc->wb.upper.length); 2546 if (!size) 2547 break; 2548 2549 /* This memory barrier is needed to keep us from reading 2550 * 
any other fields out of the rx_desc until we know the 2551 * descriptor has been written back 2552 */ 2553 dma_rmb(); 2554 2555 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); 2556 truesize = igc_get_rx_frame_truesize(rx_ring, size); 2557 2558 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; 2559 2560 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { 2561 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2562 pktbuf); 2563 ctx.rx_ts = timestamp; 2564 pkt_offset = IGC_TS_HDR_LEN; 2565 size -= IGC_TS_HDR_LEN; 2566 } 2567 2568 if (!skb) { 2569 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); 2570 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), 2571 igc_rx_offset(rx_ring) + pkt_offset, 2572 size, true); 2573 xdp_buff_clear_frags_flag(&ctx.xdp); 2574 ctx.rx_desc = rx_desc; 2575 2576 skb = igc_xdp_run_prog(adapter, &ctx.xdp); 2577 } 2578 2579 if (IS_ERR(skb)) { 2580 unsigned int xdp_res = -PTR_ERR(skb); 2581 2582 switch (xdp_res) { 2583 case IGC_XDP_CONSUMED: 2584 rx_buffer->pagecnt_bias++; 2585 break; 2586 case IGC_XDP_TX: 2587 case IGC_XDP_REDIRECT: 2588 igc_rx_buffer_flip(rx_buffer, truesize); 2589 xdp_status |= xdp_res; 2590 break; 2591 } 2592 2593 total_packets++; 2594 total_bytes += size; 2595 } else if (skb) 2596 igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 2597 else if (ring_uses_build_skb(rx_ring)) 2598 skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); 2599 else 2600 skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp, 2601 timestamp); 2602 2603 /* exit if we failed to retrieve a buffer */ 2604 if (!skb) { 2605 rx_ring->rx_stats.alloc_failed++; 2606 rx_buffer->pagecnt_bias++; 2607 break; 2608 } 2609 2610 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 2611 cleaned_count++; 2612 2613 /* fetch next buffer in frame if non-eop */ 2614 if (igc_is_non_eop(rx_ring, rx_desc)) 2615 continue; 2616 2617 /* verify the packet layout is correct */ 2618 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 2619 skb = NULL; 2620 continue; 2621 } 2622 2623 /* probably a little skewed due to removing CRC */ 2624 total_bytes += skb->len; 2625 2626 /* populate checksum, VLAN, and protocol */ 2627 igc_process_skb_fields(rx_ring, rx_desc, skb); 2628 2629 napi_gro_receive(&q_vector->napi, skb); 2630 2631 /* reset skb pointer */ 2632 skb = NULL; 2633 2634 /* update budget accounting */ 2635 total_packets++; 2636 } 2637 2638 if (xdp_status) 2639 igc_finalize_xdp(adapter, xdp_status); 2640 2641 /* place incomplete frames back on ring for completion */ 2642 rx_ring->skb = skb; 2643 2644 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2645 2646 if (cleaned_count) 2647 igc_alloc_rx_buffers(rx_ring, cleaned_count); 2648 2649 return total_packets; 2650 } 2651 2652 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, 2653 struct xdp_buff *xdp) 2654 { 2655 unsigned int totalsize = xdp->data_end - xdp->data_meta; 2656 unsigned int metasize = xdp->data - xdp->data_meta; 2657 struct sk_buff *skb; 2658 2659 net_prefetch(xdp->data_meta); 2660 2661 skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, 2662 GFP_ATOMIC | __GFP_NOWARN); 2663 if (unlikely(!skb)) 2664 return NULL; 2665 2666 memcpy(__skb_put(skb, totalsize), xdp->data_meta, 2667 ALIGN(totalsize, sizeof(long))); 2668 2669 if (metasize) { 2670 skb_metadata_set(skb, metasize); 2671 __skb_pull(skb, metasize); 2672 } 2673 2674 return skb; 2675 } 2676 2677 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, 2678 union igc_adv_rx_desc *desc, 2679 struct xdp_buff *xdp, 
2680 ktime_t timestamp) 2681 { 2682 struct igc_ring *ring = q_vector->rx.ring; 2683 struct sk_buff *skb; 2684 2685 skb = igc_construct_skb_zc(ring, xdp); 2686 if (!skb) { 2687 ring->rx_stats.alloc_failed++; 2688 return; 2689 } 2690 2691 if (timestamp) 2692 skb_hwtstamps(skb)->hwtstamp = timestamp; 2693 2694 if (igc_cleanup_headers(ring, desc, skb)) 2695 return; 2696 2697 igc_process_skb_fields(ring, desc, skb); 2698 napi_gro_receive(&q_vector->napi, skb); 2699 } 2700 2701 static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) 2702 { 2703 /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The 2704 * igc_xdp_buff shares its layout with xdp_buff_xsk and private 2705 * igc_xdp_buff fields fall into xdp_buff_xsk->cb 2706 */ 2707 return (struct igc_xdp_buff *)xdp; 2708 } 2709 2710 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) 2711 { 2712 struct igc_adapter *adapter = q_vector->adapter; 2713 struct igc_ring *ring = q_vector->rx.ring; 2714 u16 cleaned_count = igc_desc_unused(ring); 2715 int total_bytes = 0, total_packets = 0; 2716 u16 ntc = ring->next_to_clean; 2717 struct bpf_prog *prog; 2718 bool failure = false; 2719 int xdp_status = 0; 2720 2721 rcu_read_lock(); 2722 2723 prog = READ_ONCE(adapter->xdp_prog); 2724 2725 while (likely(total_packets < budget)) { 2726 union igc_adv_rx_desc *desc; 2727 struct igc_rx_buffer *bi; 2728 struct igc_xdp_buff *ctx; 2729 ktime_t timestamp = 0; 2730 unsigned int size; 2731 int res; 2732 2733 desc = IGC_RX_DESC(ring, ntc); 2734 size = le16_to_cpu(desc->wb.upper.length); 2735 if (!size) 2736 break; 2737 2738 /* This memory barrier is needed to keep us from reading 2739 * any other fields out of the rx_desc until we know the 2740 * descriptor has been written back 2741 */ 2742 dma_rmb(); 2743 2744 bi = &ring->rx_buffer_info[ntc]; 2745 2746 ctx = xsk_buff_to_igc_ctx(bi->xdp); 2747 ctx->rx_desc = desc; 2748 2749 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { 2750 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2751 bi->xdp->data); 2752 ctx->rx_ts = timestamp; 2753 2754 bi->xdp->data += IGC_TS_HDR_LEN; 2755 2756 /* HW timestamp has been copied into local variable. Metadata 2757 * length when XDP program is called should be 0. 
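 * data_meta is advanced by the same IGC_TS_HDR_LEN as data, so it stays
 * equal to data and no metadata area is exposed to the program.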
2758 */ 2759 bi->xdp->data_meta += IGC_TS_HDR_LEN; 2760 size -= IGC_TS_HDR_LEN; 2761 } 2762 2763 bi->xdp->data_end = bi->xdp->data + size; 2764 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); 2765 2766 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); 2767 switch (res) { 2768 case IGC_XDP_PASS: 2769 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); 2770 fallthrough; 2771 case IGC_XDP_CONSUMED: 2772 xsk_buff_free(bi->xdp); 2773 break; 2774 case IGC_XDP_TX: 2775 case IGC_XDP_REDIRECT: 2776 xdp_status |= res; 2777 break; 2778 } 2779 2780 bi->xdp = NULL; 2781 total_bytes += size; 2782 total_packets++; 2783 cleaned_count++; 2784 ntc++; 2785 if (ntc == ring->count) 2786 ntc = 0; 2787 } 2788 2789 ring->next_to_clean = ntc; 2790 rcu_read_unlock(); 2791 2792 if (cleaned_count >= IGC_RX_BUFFER_WRITE) 2793 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); 2794 2795 if (xdp_status) 2796 igc_finalize_xdp(adapter, xdp_status); 2797 2798 igc_update_rx_stats(q_vector, total_packets, total_bytes); 2799 2800 if (xsk_uses_need_wakeup(ring->xsk_pool)) { 2801 if (failure || ring->next_to_clean == ring->next_to_use) 2802 xsk_set_rx_need_wakeup(ring->xsk_pool); 2803 else 2804 xsk_clear_rx_need_wakeup(ring->xsk_pool); 2805 return total_packets; 2806 } 2807 2808 return failure ? budget : total_packets; 2809 } 2810 2811 static void igc_update_tx_stats(struct igc_q_vector *q_vector, 2812 unsigned int packets, unsigned int bytes) 2813 { 2814 struct igc_ring *ring = q_vector->tx.ring; 2815 2816 u64_stats_update_begin(&ring->tx_syncp); 2817 ring->tx_stats.bytes += bytes; 2818 ring->tx_stats.packets += packets; 2819 u64_stats_update_end(&ring->tx_syncp); 2820 2821 q_vector->tx.total_bytes += bytes; 2822 q_vector->tx.total_packets += packets; 2823 } 2824 2825 static void igc_xdp_xmit_zc(struct igc_ring *ring) 2826 { 2827 struct xsk_buff_pool *pool = ring->xsk_pool; 2828 struct netdev_queue *nq = txring_txq(ring); 2829 union igc_adv_tx_desc *tx_desc = NULL; 2830 int cpu = smp_processor_id(); 2831 struct xdp_desc xdp_desc; 2832 u16 budget, ntu; 2833 2834 if (!netif_carrier_ok(ring->netdev)) 2835 return; 2836 2837 __netif_tx_lock(nq, cpu); 2838 2839 /* Avoid transmit queue timeout since we share it with the slow path */ 2840 txq_trans_cond_update(nq); 2841 2842 ntu = ring->next_to_use; 2843 budget = igc_desc_unused(ring); 2844 2845 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { 2846 u32 cmd_type, olinfo_status; 2847 struct igc_tx_buffer *bi; 2848 dma_addr_t dma; 2849 2850 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 2851 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 2852 xdp_desc.len; 2853 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; 2854 2855 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2856 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); 2857 2858 tx_desc = IGC_TX_DESC(ring, ntu); 2859 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 2860 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2861 tx_desc->read.buffer_addr = cpu_to_le64(dma); 2862 2863 bi = &ring->tx_buffer_info[ntu]; 2864 bi->type = IGC_TX_BUFFER_TYPE_XSK; 2865 bi->protocol = 0; 2866 bi->bytecount = xdp_desc.len; 2867 bi->gso_segs = 1; 2868 bi->time_stamp = jiffies; 2869 bi->next_to_watch = tx_desc; 2870 2871 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); 2872 2873 ntu++; 2874 if (ntu == ring->count) 2875 ntu = 0; 2876 } 2877 2878 ring->next_to_use = ntu; 2879 if (tx_desc) { 2880 igc_flush_tx_descriptors(ring); 2881 xsk_tx_release(pool); 2882 } 2883 2884 __netif_tx_unlock(nq); 2885 } 
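/* Note on the zero-copy transmit path: igc_xdp_xmit_zc() above never queues
 * more frames than there are unused descriptors, and each xdp_desc is written
 * as a single descriptor with IGC_TXD_DCMD set. The frames are returned to
 * the XSK pool through xsk_tx_completed() once igc_clean_tx_irq() below has
 * reclaimed them.
 */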
2886 2887 /** 2888 * igc_clean_tx_irq - Reclaim resources after transmit completes 2889 * @q_vector: pointer to q_vector containing needed info 2890 * @napi_budget: Used to determine if we are in netpoll 2891 * 2892 * returns true if ring is completely cleaned 2893 */ 2894 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 2895 { 2896 struct igc_adapter *adapter = q_vector->adapter; 2897 unsigned int total_bytes = 0, total_packets = 0; 2898 unsigned int budget = q_vector->tx.work_limit; 2899 struct igc_ring *tx_ring = q_vector->tx.ring; 2900 unsigned int i = tx_ring->next_to_clean; 2901 struct igc_tx_buffer *tx_buffer; 2902 union igc_adv_tx_desc *tx_desc; 2903 u32 xsk_frames = 0; 2904 2905 if (test_bit(__IGC_DOWN, &adapter->state)) 2906 return true; 2907 2908 tx_buffer = &tx_ring->tx_buffer_info[i]; 2909 tx_desc = IGC_TX_DESC(tx_ring, i); 2910 i -= tx_ring->count; 2911 2912 do { 2913 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 2914 2915 /* if next_to_watch is not set then there is no work pending */ 2916 if (!eop_desc) 2917 break; 2918 2919 /* prevent any other reads prior to eop_desc */ 2920 smp_rmb(); 2921 2922 /* if DD is not set pending work has not been completed */ 2923 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 2924 break; 2925 2926 /* clear next_to_watch to prevent false hangs */ 2927 tx_buffer->next_to_watch = NULL; 2928 2929 /* update the statistics for this packet */ 2930 total_bytes += tx_buffer->bytecount; 2931 total_packets += tx_buffer->gso_segs; 2932 2933 switch (tx_buffer->type) { 2934 case IGC_TX_BUFFER_TYPE_XSK: 2935 xsk_frames++; 2936 break; 2937 case IGC_TX_BUFFER_TYPE_XDP: 2938 xdp_return_frame(tx_buffer->xdpf); 2939 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2940 break; 2941 case IGC_TX_BUFFER_TYPE_SKB: 2942 napi_consume_skb(tx_buffer->skb, napi_budget); 2943 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2944 break; 2945 default: 2946 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 2947 break; 2948 } 2949 2950 /* clear last DMA location and unmap remaining buffers */ 2951 while (tx_desc != eop_desc) { 2952 tx_buffer++; 2953 tx_desc++; 2954 i++; 2955 if (unlikely(!i)) { 2956 i -= tx_ring->count; 2957 tx_buffer = tx_ring->tx_buffer_info; 2958 tx_desc = IGC_TX_DESC(tx_ring, 0); 2959 } 2960 2961 /* unmap any remaining paged data */ 2962 if (dma_unmap_len(tx_buffer, len)) 2963 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2964 } 2965 2966 /* move us one more past the eop_desc for start of next pkt */ 2967 tx_buffer++; 2968 tx_desc++; 2969 i++; 2970 if (unlikely(!i)) { 2971 i -= tx_ring->count; 2972 tx_buffer = tx_ring->tx_buffer_info; 2973 tx_desc = IGC_TX_DESC(tx_ring, 0); 2974 } 2975 2976 /* issue prefetch for next Tx descriptor */ 2977 prefetch(tx_desc); 2978 2979 /* update budget accounting */ 2980 budget--; 2981 } while (likely(budget)); 2982 2983 netdev_tx_completed_queue(txring_txq(tx_ring), 2984 total_packets, total_bytes); 2985 2986 i += tx_ring->count; 2987 tx_ring->next_to_clean = i; 2988 2989 igc_update_tx_stats(q_vector, total_packets, total_bytes); 2990 2991 if (tx_ring->xsk_pool) { 2992 if (xsk_frames) 2993 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 2994 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) 2995 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); 2996 igc_xdp_xmit_zc(tx_ring); 2997 } 2998 2999 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 3000 struct igc_hw *hw = &adapter->hw; 3001 3002 /* Detect a transmit hang in hardware, this serializes the 3003 * check 
with the clearing of time_stamp and movement of i 3004 */ 3005 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 3006 if (tx_buffer->next_to_watch && 3007 time_after(jiffies, tx_buffer->time_stamp + 3008 (adapter->tx_timeout_factor * HZ)) && 3009 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && 3010 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && 3011 !tx_ring->oper_gate_closed) { 3012 /* detected Tx unit hang */ 3013 netdev_err(tx_ring->netdev, 3014 "Detected Tx Unit Hang\n" 3015 " Tx Queue <%d>\n" 3016 " TDH <%x>\n" 3017 " TDT <%x>\n" 3018 " next_to_use <%x>\n" 3019 " next_to_clean <%x>\n" 3020 "buffer_info[next_to_clean]\n" 3021 " time_stamp <%lx>\n" 3022 " next_to_watch <%p>\n" 3023 " jiffies <%lx>\n" 3024 " desc.status <%x>\n", 3025 tx_ring->queue_index, 3026 rd32(IGC_TDH(tx_ring->reg_idx)), 3027 readl(tx_ring->tail), 3028 tx_ring->next_to_use, 3029 tx_ring->next_to_clean, 3030 tx_buffer->time_stamp, 3031 tx_buffer->next_to_watch, 3032 jiffies, 3033 tx_buffer->next_to_watch->wb.status); 3034 netif_stop_subqueue(tx_ring->netdev, 3035 tx_ring->queue_index); 3036 3037 /* we are about to reset, no point in enabling stuff */ 3038 return true; 3039 } 3040 } 3041 3042 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 3043 if (unlikely(total_packets && 3044 netif_carrier_ok(tx_ring->netdev) && 3045 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 3046 /* Make sure that anybody stopping the queue after this 3047 * sees the new next_to_clean. 3048 */ 3049 smp_mb(); 3050 if (__netif_subqueue_stopped(tx_ring->netdev, 3051 tx_ring->queue_index) && 3052 !(test_bit(__IGC_DOWN, &adapter->state))) { 3053 netif_wake_subqueue(tx_ring->netdev, 3054 tx_ring->queue_index); 3055 3056 u64_stats_update_begin(&tx_ring->tx_syncp); 3057 tx_ring->tx_stats.restart_queue++; 3058 u64_stats_update_end(&tx_ring->tx_syncp); 3059 } 3060 } 3061 3062 return !!budget; 3063 } 3064 3065 static int igc_find_mac_filter(struct igc_adapter *adapter, 3066 enum igc_mac_filter_type type, const u8 *addr) 3067 { 3068 struct igc_hw *hw = &adapter->hw; 3069 int max_entries = hw->mac.rar_entry_count; 3070 u32 ral, rah; 3071 int i; 3072 3073 for (i = 0; i < max_entries; i++) { 3074 ral = rd32(IGC_RAL(i)); 3075 rah = rd32(IGC_RAH(i)); 3076 3077 if (!(rah & IGC_RAH_AV)) 3078 continue; 3079 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) 3080 continue; 3081 if ((rah & IGC_RAH_RAH_MASK) != 3082 le16_to_cpup((__le16 *)(addr + 4))) 3083 continue; 3084 if (ral != le32_to_cpup((__le32 *)(addr))) 3085 continue; 3086 3087 return i; 3088 } 3089 3090 return -1; 3091 } 3092 3093 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) 3094 { 3095 struct igc_hw *hw = &adapter->hw; 3096 int max_entries = hw->mac.rar_entry_count; 3097 u32 rah; 3098 int i; 3099 3100 for (i = 0; i < max_entries; i++) { 3101 rah = rd32(IGC_RAH(i)); 3102 3103 if (!(rah & IGC_RAH_AV)) 3104 return i; 3105 } 3106 3107 return -1; 3108 } 3109 3110 /** 3111 * igc_add_mac_filter() - Add MAC address filter 3112 * @adapter: Pointer to adapter where the filter should be added 3113 * @type: MAC address filter type (source or destination) 3114 * @addr: MAC address 3115 * @queue: If non-negative, queue assignment feature is enabled and frames 3116 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3117 * assignment is disabled. 3118 * 3119 * Return: 0 in case of success, negative errno code otherwise. 
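 * If a filter with the same address and type already exists, it is updated
 * in place rather than consuming an additional RAR slot.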
3120 */ 3121 static int igc_add_mac_filter(struct igc_adapter *adapter, 3122 enum igc_mac_filter_type type, const u8 *addr, 3123 int queue) 3124 { 3125 struct net_device *dev = adapter->netdev; 3126 int index; 3127 3128 index = igc_find_mac_filter(adapter, type, addr); 3129 if (index >= 0) 3130 goto update_filter; 3131 3132 index = igc_get_avail_mac_filter_slot(adapter); 3133 if (index < 0) 3134 return -ENOSPC; 3135 3136 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", 3137 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 3138 addr, queue); 3139 3140 update_filter: 3141 igc_set_mac_filter_hw(adapter, index, type, addr, queue); 3142 return 0; 3143 } 3144 3145 /** 3146 * igc_del_mac_filter() - Delete MAC address filter 3147 * @adapter: Pointer to adapter where the filter should be deleted from 3148 * @type: MAC address filter type (source or destination) 3149 * @addr: MAC address 3150 */ 3151 static void igc_del_mac_filter(struct igc_adapter *adapter, 3152 enum igc_mac_filter_type type, const u8 *addr) 3153 { 3154 struct net_device *dev = adapter->netdev; 3155 int index; 3156 3157 index = igc_find_mac_filter(adapter, type, addr); 3158 if (index < 0) 3159 return; 3160 3161 if (index == 0) { 3162 /* If this is the default filter, we don't actually delete it. 3163 * We just reset to its default value i.e. disable queue 3164 * assignment. 3165 */ 3166 netdev_dbg(dev, "Disable default MAC filter queue assignment"); 3167 3168 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); 3169 } else { 3170 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", 3171 index, 3172 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 3173 addr); 3174 3175 igc_clear_mac_filter_hw(adapter, index); 3176 } 3177 } 3178 3179 /** 3180 * igc_add_vlan_prio_filter() - Add VLAN priority filter 3181 * @adapter: Pointer to adapter where the filter should be added 3182 * @prio: VLAN priority value 3183 * @queue: Queue number which matching frames are assigned to 3184 * 3185 * Return: 0 in case of success, negative errno code otherwise. 
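 * A VLAN priority can be assigned to only one queue at a time; -EEXIST is
 * returned when the priority is already claimed in VLANPQF.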
3186 */ 3187 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, 3188 int queue) 3189 { 3190 struct net_device *dev = adapter->netdev; 3191 struct igc_hw *hw = &adapter->hw; 3192 u32 vlanpqf; 3193 3194 vlanpqf = rd32(IGC_VLANPQF); 3195 3196 if (vlanpqf & IGC_VLANPQF_VALID(prio)) { 3197 netdev_dbg(dev, "VLAN priority filter already in use\n"); 3198 return -EEXIST; 3199 } 3200 3201 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); 3202 vlanpqf |= IGC_VLANPQF_VALID(prio); 3203 3204 wr32(IGC_VLANPQF, vlanpqf); 3205 3206 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", 3207 prio, queue); 3208 return 0; 3209 } 3210 3211 /** 3212 * igc_del_vlan_prio_filter() - Delete VLAN priority filter 3213 * @adapter: Pointer to adapter where the filter should be deleted from 3214 * @prio: VLAN priority value 3215 */ 3216 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) 3217 { 3218 struct igc_hw *hw = &adapter->hw; 3219 u32 vlanpqf; 3220 3221 vlanpqf = rd32(IGC_VLANPQF); 3222 3223 vlanpqf &= ~IGC_VLANPQF_VALID(prio); 3224 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); 3225 3226 wr32(IGC_VLANPQF, vlanpqf); 3227 3228 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", 3229 prio); 3230 } 3231 3232 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) 3233 { 3234 struct igc_hw *hw = &adapter->hw; 3235 int i; 3236 3237 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3238 u32 etqf = rd32(IGC_ETQF(i)); 3239 3240 if (!(etqf & IGC_ETQF_FILTER_ENABLE)) 3241 return i; 3242 } 3243 3244 return -1; 3245 } 3246 3247 /** 3248 * igc_add_etype_filter() - Add ethertype filter 3249 * @adapter: Pointer to adapter where the filter should be added 3250 * @etype: Ethertype value 3251 * @queue: If non-negative, queue assignment feature is enabled and frames 3252 * matching the filter are enqueued onto 'queue'. Otherwise, queue 3253 * assignment is disabled. 3254 * 3255 * Return: 0 in case of success, negative errno code otherwise. 
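 * The filter claims a free ETQF register; -ENOSPC is returned once all
 * MAX_ETYPE_FILTER slots are enabled.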
3256 */ 3257 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, 3258 int queue) 3259 { 3260 struct igc_hw *hw = &adapter->hw; 3261 int index; 3262 u32 etqf; 3263 3264 index = igc_get_avail_etype_filter_slot(adapter); 3265 if (index < 0) 3266 return -ENOSPC; 3267 3268 etqf = rd32(IGC_ETQF(index)); 3269 3270 etqf &= ~IGC_ETQF_ETYPE_MASK; 3271 etqf |= etype; 3272 3273 if (queue >= 0) { 3274 etqf &= ~IGC_ETQF_QUEUE_MASK; 3275 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); 3276 etqf |= IGC_ETQF_QUEUE_ENABLE; 3277 } 3278 3279 etqf |= IGC_ETQF_FILTER_ENABLE; 3280 3281 wr32(IGC_ETQF(index), etqf); 3282 3283 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", 3284 etype, queue); 3285 return 0; 3286 } 3287 3288 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) 3289 { 3290 struct igc_hw *hw = &adapter->hw; 3291 int i; 3292 3293 for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3294 u32 etqf = rd32(IGC_ETQF(i)); 3295 3296 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) 3297 return i; 3298 } 3299 3300 return -1; 3301 } 3302 3303 /** 3304 * igc_del_etype_filter() - Delete ethertype filter 3305 * @adapter: Pointer to adapter where the filter should be deleted from 3306 * @etype: Ethertype value 3307 */ 3308 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) 3309 { 3310 struct igc_hw *hw = &adapter->hw; 3311 int index; 3312 3313 index = igc_find_etype_filter(adapter, etype); 3314 if (index < 0) 3315 return; 3316 3317 wr32(IGC_ETQF(index), 0); 3318 3319 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", 3320 etype); 3321 } 3322 3323 static int igc_flex_filter_select(struct igc_adapter *adapter, 3324 struct igc_flex_filter *input, 3325 u32 *fhft) 3326 { 3327 struct igc_hw *hw = &adapter->hw; 3328 u8 fhft_index; 3329 u32 fhftsl; 3330 3331 if (input->index >= MAX_FLEX_FILTER) { 3332 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); 3333 return -EINVAL; 3334 } 3335 3336 /* Indirect table select register */ 3337 fhftsl = rd32(IGC_FHFTSL); 3338 fhftsl &= ~IGC_FHFTSL_FTSL_MASK; 3339 switch (input->index) { 3340 case 0 ... 7: 3341 fhftsl |= 0x00; 3342 break; 3343 case 8 ... 15: 3344 fhftsl |= 0x01; 3345 break; 3346 case 16 ... 23: 3347 fhftsl |= 0x02; 3348 break; 3349 case 24 ... 31: 3350 fhftsl |= 0x03; 3351 break; 3352 } 3353 wr32(IGC_FHFTSL, fhftsl); 3354 3355 /* Normalize index down to host table register */ 3356 fhft_index = input->index % 8; 3357 3358 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : 3359 IGC_FHFT_EXT(fhft_index - 4); 3360 3361 return 0; 3362 } 3363 3364 static int igc_write_flex_filter_ll(struct igc_adapter *adapter, 3365 struct igc_flex_filter *input) 3366 { 3367 struct device *dev = &adapter->pdev->dev; 3368 struct igc_hw *hw = &adapter->hw; 3369 u8 *data = input->data; 3370 u8 *mask = input->mask; 3371 u32 queuing; 3372 u32 fhft; 3373 u32 wufc; 3374 int ret; 3375 int i; 3376 3377 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail 3378 * out early to avoid surprises later. 3379 */ 3380 if (input->length % 8 != 0) { 3381 dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); 3382 return -EINVAL; 3383 } 3384 3385 /* Select corresponding flex filter register and get base for host table. */ 3386 ret = igc_flex_filter_select(adapter, input, &fhft); 3387 if (ret) 3388 return ret; 3389 3390 /* When adding a filter globally disable flex filter feature. That is 3391 * recommended within the datasheet. 
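 * IGC_WUFC_FLEX_HQ is set again further down, together with the per-filter
 * enable bit, once the host filter table has been written.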
3392 */ 3393 wufc = rd32(IGC_WUFC); 3394 wufc &= ~IGC_WUFC_FLEX_HQ; 3395 wr32(IGC_WUFC, wufc); 3396 3397 /* Configure filter */ 3398 queuing = input->length & IGC_FHFT_LENGTH_MASK; 3399 queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; 3400 queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; 3401 3402 if (input->immediate_irq) 3403 queuing |= IGC_FHFT_IMM_INT; 3404 3405 if (input->drop) 3406 queuing |= IGC_FHFT_DROP; 3407 3408 wr32(fhft + 0xFC, queuing); 3409 3410 /* Write data (128 byte) and mask (128 bit) */ 3411 for (i = 0; i < 16; ++i) { 3412 const size_t data_idx = i * 8; 3413 const size_t row_idx = i * 16; 3414 u32 dw0 = 3415 (data[data_idx + 0] << 0) | 3416 (data[data_idx + 1] << 8) | 3417 (data[data_idx + 2] << 16) | 3418 (data[data_idx + 3] << 24); 3419 u32 dw1 = 3420 (data[data_idx + 4] << 0) | 3421 (data[data_idx + 5] << 8) | 3422 (data[data_idx + 6] << 16) | 3423 (data[data_idx + 7] << 24); 3424 u32 tmp; 3425 3426 /* Write row: dw0, dw1 and mask */ 3427 wr32(fhft + row_idx, dw0); 3428 wr32(fhft + row_idx + 4, dw1); 3429 3430 /* mask is only valid for MASK(7, 0) */ 3431 tmp = rd32(fhft + row_idx + 8); 3432 tmp &= ~GENMASK(7, 0); 3433 tmp |= mask[i]; 3434 wr32(fhft + row_idx + 8, tmp); 3435 } 3436 3437 /* Enable filter. */ 3438 wufc |= IGC_WUFC_FLEX_HQ; 3439 if (input->index > 8) { 3440 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ 3441 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3442 3443 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); 3444 3445 wr32(IGC_WUFC_EXT, wufc_ext); 3446 } else { 3447 wufc |= (IGC_WUFC_FLX0 << input->index); 3448 } 3449 wr32(IGC_WUFC, wufc); 3450 3451 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", 3452 input->index); 3453 3454 return 0; 3455 } 3456 3457 static void igc_flex_filter_add_field(struct igc_flex_filter *flex, 3458 const void *src, unsigned int offset, 3459 size_t len, const void *mask) 3460 { 3461 int i; 3462 3463 /* data */ 3464 memcpy(&flex->data[offset], src, len); 3465 3466 /* mask */ 3467 for (i = 0; i < len; ++i) { 3468 const unsigned int idx = i + offset; 3469 const u8 *ptr = mask; 3470 3471 if (mask) { 3472 if (ptr[i] & 0xff) 3473 flex->mask[idx / 8] |= BIT(idx % 8); 3474 3475 continue; 3476 } 3477 3478 flex->mask[idx / 8] |= BIT(idx % 8); 3479 } 3480 } 3481 3482 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) 3483 { 3484 struct igc_hw *hw = &adapter->hw; 3485 u32 wufc, wufc_ext; 3486 int i; 3487 3488 wufc = rd32(IGC_WUFC); 3489 wufc_ext = rd32(IGC_WUFC_EXT); 3490 3491 for (i = 0; i < MAX_FLEX_FILTER; i++) { 3492 if (i < 8) { 3493 if (!(wufc & (IGC_WUFC_FLX0 << i))) 3494 return i; 3495 } else { 3496 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) 3497 return i; 3498 } 3499 } 3500 3501 return -ENOSPC; 3502 } 3503 3504 static bool igc_flex_filter_in_use(struct igc_adapter *adapter) 3505 { 3506 struct igc_hw *hw = &adapter->hw; 3507 u32 wufc, wufc_ext; 3508 3509 wufc = rd32(IGC_WUFC); 3510 wufc_ext = rd32(IGC_WUFC_EXT); 3511 3512 if (wufc & IGC_WUFC_FILTER_MASK) 3513 return true; 3514 3515 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) 3516 return true; 3517 3518 return false; 3519 } 3520 3521 static int igc_add_flex_filter(struct igc_adapter *adapter, 3522 struct igc_nfc_rule *rule) 3523 { 3524 struct igc_flex_filter flex = { }; 3525 struct igc_nfc_filter *filter = &rule->filter; 3526 unsigned int eth_offset, user_offset; 3527 int ret, index; 3528 bool vlan; 3529 3530 index = igc_find_avail_flex_filter_slot(adapter); 3531 if (index < 0) 
3532 return -ENOSPC; 3533 3534 /* Construct the flex filter: 3535 * -> dest_mac [6] 3536 * -> src_mac [6] 3537 * -> tpid [2] 3538 * -> vlan tci [2] 3539 * -> ether type [2] 3540 * -> user data [8] 3541 * -> = 26 bytes => 32 length 3542 */ 3543 flex.index = index; 3544 flex.length = 32; 3545 flex.rx_queue = rule->action; 3546 3547 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; 3548 eth_offset = vlan ? 16 : 12; 3549 user_offset = vlan ? 18 : 14; 3550 3551 /* Add destination MAC */ 3552 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3553 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, 3554 ETH_ALEN, NULL); 3555 3556 /* Add source MAC */ 3557 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3558 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, 3559 ETH_ALEN, NULL); 3560 3561 /* Add VLAN etype */ 3562 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) 3563 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, 3564 sizeof(filter->vlan_etype), 3565 NULL); 3566 3567 /* Add VLAN TCI */ 3568 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) 3569 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, 3570 sizeof(filter->vlan_tci), NULL); 3571 3572 /* Add Ether type */ 3573 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3574 __be16 etype = cpu_to_be16(filter->etype); 3575 3576 igc_flex_filter_add_field(&flex, &etype, eth_offset, 3577 sizeof(etype), NULL); 3578 } 3579 3580 /* Add user data */ 3581 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) 3582 igc_flex_filter_add_field(&flex, &filter->user_data, 3583 user_offset, 3584 sizeof(filter->user_data), 3585 filter->user_mask); 3586 3587 /* Add it down to the hardware and enable it. */ 3588 ret = igc_write_flex_filter_ll(adapter, &flex); 3589 if (ret) 3590 return ret; 3591 3592 filter->flex_index = index; 3593 3594 return 0; 3595 } 3596 3597 static void igc_del_flex_filter(struct igc_adapter *adapter, 3598 u16 reg_index) 3599 { 3600 struct igc_hw *hw = &adapter->hw; 3601 u32 wufc; 3602 3603 /* Just disable the filter. The filter table itself is kept 3604 * intact. Another flex_filter_add() should override the "old" data 3605 * then. 
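 * The global flex filter enable (IGC_WUFC_FLEX_HQ) is only cleared below
 * once no flex filter is left in use.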
3606 */ 3607 if (reg_index > 8) { 3608 u32 wufc_ext = rd32(IGC_WUFC_EXT); 3609 3610 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); 3611 wr32(IGC_WUFC_EXT, wufc_ext); 3612 } else { 3613 wufc = rd32(IGC_WUFC); 3614 3615 wufc &= ~(IGC_WUFC_FLX0 << reg_index); 3616 wr32(IGC_WUFC, wufc); 3617 } 3618 3619 if (igc_flex_filter_in_use(adapter)) 3620 return; 3621 3622 /* No filters are in use, we may disable flex filters */ 3623 wufc = rd32(IGC_WUFC); 3624 wufc &= ~IGC_WUFC_FLEX_HQ; 3625 wr32(IGC_WUFC, wufc); 3626 } 3627 3628 static int igc_enable_nfc_rule(struct igc_adapter *adapter, 3629 struct igc_nfc_rule *rule) 3630 { 3631 int err; 3632 3633 if (rule->flex) { 3634 return igc_add_flex_filter(adapter, rule); 3635 } 3636 3637 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 3638 err = igc_add_etype_filter(adapter, rule->filter.etype, 3639 rule->action); 3640 if (err) 3641 return err; 3642 } 3643 3644 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { 3645 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3646 rule->filter.src_addr, rule->action); 3647 if (err) 3648 return err; 3649 } 3650 3651 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { 3652 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3653 rule->filter.dst_addr, rule->action); 3654 if (err) 3655 return err; 3656 } 3657 3658 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3659 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3660 VLAN_PRIO_SHIFT; 3661 3662 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); 3663 if (err) 3664 return err; 3665 } 3666 3667 return 0; 3668 } 3669 3670 static void igc_disable_nfc_rule(struct igc_adapter *adapter, 3671 const struct igc_nfc_rule *rule) 3672 { 3673 if (rule->flex) { 3674 igc_del_flex_filter(adapter, rule->filter.flex_index); 3675 return; 3676 } 3677 3678 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) 3679 igc_del_etype_filter(adapter, rule->filter.etype); 3680 3681 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { 3682 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> 3683 VLAN_PRIO_SHIFT; 3684 3685 igc_del_vlan_prio_filter(adapter, prio); 3686 } 3687 3688 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 3689 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 3690 rule->filter.src_addr); 3691 3692 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 3693 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, 3694 rule->filter.dst_addr); 3695 } 3696 3697 /** 3698 * igc_get_nfc_rule() - Get NFC rule 3699 * @adapter: Pointer to adapter 3700 * @location: Rule location 3701 * 3702 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3703 * 3704 * Return: Pointer to NFC rule at @location. If not found, NULL. 3705 */ 3706 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, 3707 u32 location) 3708 { 3709 struct igc_nfc_rule *rule; 3710 3711 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { 3712 if (rule->location == location) 3713 return rule; 3714 if (rule->location > location) 3715 break; 3716 } 3717 3718 return NULL; 3719 } 3720 3721 /** 3722 * igc_del_nfc_rule() - Delete NFC rule 3723 * @adapter: Pointer to adapter 3724 * @rule: Pointer to rule to be deleted 3725 * 3726 * Disable NFC rule in hardware and delete it from adapter. 3727 * 3728 * Context: Expects adapter->nfc_rule_lock to be held by caller. 
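 * The rule memory is released with kfree(), so @rule must not be
 * dereferenced after this function returns.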
3729 */ 3730 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3731 { 3732 igc_disable_nfc_rule(adapter, rule); 3733 3734 list_del(&rule->list); 3735 adapter->nfc_rule_count--; 3736 3737 kfree(rule); 3738 } 3739 3740 static void igc_flush_nfc_rules(struct igc_adapter *adapter) 3741 { 3742 struct igc_nfc_rule *rule, *tmp; 3743 3744 mutex_lock(&adapter->nfc_rule_lock); 3745 3746 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) 3747 igc_del_nfc_rule(adapter, rule); 3748 3749 mutex_unlock(&adapter->nfc_rule_lock); 3750 } 3751 3752 /** 3753 * igc_add_nfc_rule() - Add NFC rule 3754 * @adapter: Pointer to adapter 3755 * @rule: Pointer to rule to be added 3756 * 3757 * Enable NFC rule in hardware and add it to adapter. 3758 * 3759 * Context: Expects adapter->nfc_rule_lock to be held by caller. 3760 * 3761 * Return: 0 on success, negative errno on failure. 3762 */ 3763 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 3764 { 3765 struct igc_nfc_rule *pred, *cur; 3766 int err; 3767 3768 err = igc_enable_nfc_rule(adapter, rule); 3769 if (err) 3770 return err; 3771 3772 pred = NULL; 3773 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { 3774 if (cur->location >= rule->location) 3775 break; 3776 pred = cur; 3777 } 3778 3779 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); 3780 adapter->nfc_rule_count++; 3781 return 0; 3782 } 3783 3784 static void igc_restore_nfc_rules(struct igc_adapter *adapter) 3785 { 3786 struct igc_nfc_rule *rule; 3787 3788 mutex_lock(&adapter->nfc_rule_lock); 3789 3790 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) 3791 igc_enable_nfc_rule(adapter, rule); 3792 3793 mutex_unlock(&adapter->nfc_rule_lock); 3794 } 3795 3796 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 3797 { 3798 struct igc_adapter *adapter = netdev_priv(netdev); 3799 3800 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); 3801 } 3802 3803 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) 3804 { 3805 struct igc_adapter *adapter = netdev_priv(netdev); 3806 3807 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); 3808 return 0; 3809 } 3810 3811 /** 3812 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3813 * @netdev: network interface device structure 3814 * 3815 * The set_rx_mode entry point is called whenever the unicast or multicast 3816 * address lists or the network interface flags are updated. This routine is 3817 * responsible for configuring the hardware for proper unicast, multicast, 3818 * promiscuous mode, and all-multi behavior. 
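 * It also programs the maximum receive packet length (RLPML); on systems
 * with PAGE_SIZE < 8192 the limit is IGC_MAX_FRAME_BUILD_SKB whenever the
 * adapter's max frame size fits within that bound.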
3819 */ 3820 static void igc_set_rx_mode(struct net_device *netdev) 3821 { 3822 struct igc_adapter *adapter = netdev_priv(netdev); 3823 struct igc_hw *hw = &adapter->hw; 3824 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; 3825 int count; 3826 3827 /* Check for Promiscuous and All Multicast modes */ 3828 if (netdev->flags & IFF_PROMISC) { 3829 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; 3830 } else { 3831 if (netdev->flags & IFF_ALLMULTI) { 3832 rctl |= IGC_RCTL_MPE; 3833 } else { 3834 /* Write addresses to the MTA, if the attempt fails 3835 * then we should just turn on promiscuous mode so 3836 * that we can at least receive multicast traffic 3837 */ 3838 count = igc_write_mc_addr_list(netdev); 3839 if (count < 0) 3840 rctl |= IGC_RCTL_MPE; 3841 } 3842 } 3843 3844 /* Write addresses to available RAR registers, if there is not 3845 * sufficient space to store all the addresses then enable 3846 * unicast promiscuous mode 3847 */ 3848 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) 3849 rctl |= IGC_RCTL_UPE; 3850 3851 /* update state of unicast and multicast */ 3852 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 3853 wr32(IGC_RCTL, rctl); 3854 3855 #if (PAGE_SIZE < 8192) 3856 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) 3857 rlpml = IGC_MAX_FRAME_BUILD_SKB; 3858 #endif 3859 wr32(IGC_RLPML, rlpml); 3860 } 3861 3862 /** 3863 * igc_configure - configure the hardware for RX and TX 3864 * @adapter: private board structure 3865 */ 3866 static void igc_configure(struct igc_adapter *adapter) 3867 { 3868 struct net_device *netdev = adapter->netdev; 3869 int i = 0; 3870 3871 igc_get_hw_control(adapter); 3872 igc_set_rx_mode(netdev); 3873 3874 igc_restore_vlan(adapter); 3875 3876 igc_setup_tctl(adapter); 3877 igc_setup_mrqc(adapter); 3878 igc_setup_rctl(adapter); 3879 3880 igc_set_default_mac_filter(adapter); 3881 igc_restore_nfc_rules(adapter); 3882 3883 igc_configure_tx(adapter); 3884 igc_configure_rx(adapter); 3885 3886 igc_rx_fifo_flush_base(&adapter->hw); 3887 3888 /* call igc_desc_unused which always leaves 3889 * at least 1 descriptor unused to make sure 3890 * next_to_use != next_to_clean 3891 */ 3892 for (i = 0; i < adapter->num_rx_queues; i++) { 3893 struct igc_ring *ring = adapter->rx_ring[i]; 3894 3895 if (ring->xsk_pool) 3896 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 3897 else 3898 igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 3899 } 3900 } 3901 3902 /** 3903 * igc_write_ivar - configure ivar for given MSI-X vector 3904 * @hw: pointer to the HW structure 3905 * @msix_vector: vector number we are allocating to a given ring 3906 * @index: row index of IVAR register to write within IVAR table 3907 * @offset: column offset of in IVAR, should be multiple of 8 3908 * 3909 * The IVAR table consists of 2 columns, 3910 * each containing an cause allocation for an Rx and Tx ring, and a 3911 * variable number of rows depending on the number of queues supported. 
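 * For example, igc_assign_vector() places Rx queue 3 in row 1 (3 >> 1) at
 * byte offset 16 ((3 & 0x1) << 4) and Tx queue 3 in the same row at byte
 * offset 24.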
3912 */ 3913 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 3914 int index, int offset) 3915 { 3916 u32 ivar = array_rd32(IGC_IVAR0, index); 3917 3918 /* clear any bits that are currently set */ 3919 ivar &= ~((u32)0xFF << offset); 3920 3921 /* write vector and valid bit */ 3922 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 3923 3924 array_wr32(IGC_IVAR0, index, ivar); 3925 } 3926 3927 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 3928 { 3929 struct igc_adapter *adapter = q_vector->adapter; 3930 struct igc_hw *hw = &adapter->hw; 3931 int rx_queue = IGC_N0_QUEUE; 3932 int tx_queue = IGC_N0_QUEUE; 3933 3934 if (q_vector->rx.ring) 3935 rx_queue = q_vector->rx.ring->reg_idx; 3936 if (q_vector->tx.ring) 3937 tx_queue = q_vector->tx.ring->reg_idx; 3938 3939 switch (hw->mac.type) { 3940 case igc_i225: 3941 if (rx_queue > IGC_N0_QUEUE) 3942 igc_write_ivar(hw, msix_vector, 3943 rx_queue >> 1, 3944 (rx_queue & 0x1) << 4); 3945 if (tx_queue > IGC_N0_QUEUE) 3946 igc_write_ivar(hw, msix_vector, 3947 tx_queue >> 1, 3948 ((tx_queue & 0x1) << 4) + 8); 3949 q_vector->eims_value = BIT(msix_vector); 3950 break; 3951 default: 3952 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 3953 break; 3954 } 3955 3956 /* add q_vector eims value to global eims_enable_mask */ 3957 adapter->eims_enable_mask |= q_vector->eims_value; 3958 3959 /* configure q_vector to set itr on first interrupt */ 3960 q_vector->set_itr = 1; 3961 } 3962 3963 /** 3964 * igc_configure_msix - Configure MSI-X hardware 3965 * @adapter: Pointer to adapter structure 3966 * 3967 * igc_configure_msix sets up the hardware to properly 3968 * generate MSI-X interrupts. 3969 */ 3970 static void igc_configure_msix(struct igc_adapter *adapter) 3971 { 3972 struct igc_hw *hw = &adapter->hw; 3973 int i, vector = 0; 3974 u32 tmp; 3975 3976 adapter->eims_enable_mask = 0; 3977 3978 /* set vector for other causes, i.e. link changes */ 3979 switch (hw->mac.type) { 3980 case igc_i225: 3981 /* Turn on MSI-X capability first, or our settings 3982 * won't stick. And it will take days to debug. 
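 * The "other" interrupt cause (e.g. link change) is then routed through
 * IVAR_MISC and its EIMS bit folded into eims_enable_mask.
 */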
3983 */ 3984 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 3985 IGC_GPIE_PBA | IGC_GPIE_EIAME | 3986 IGC_GPIE_NSICR); 3987 3988 /* enable msix_other interrupt */ 3989 adapter->eims_other = BIT(vector); 3990 tmp = (vector++ | IGC_IVAR_VALID) << 8; 3991 3992 wr32(IGC_IVAR_MISC, tmp); 3993 break; 3994 default: 3995 /* do nothing, since nothing else supports MSI-X */ 3996 break; 3997 } /* switch (hw->mac.type) */ 3998 3999 adapter->eims_enable_mask |= adapter->eims_other; 4000 4001 for (i = 0; i < adapter->num_q_vectors; i++) 4002 igc_assign_vector(adapter->q_vector[i], vector++); 4003 4004 wrfl(); 4005 } 4006 4007 /** 4008 * igc_irq_enable - Enable default interrupt generation settings 4009 * @adapter: board private structure 4010 */ 4011 static void igc_irq_enable(struct igc_adapter *adapter) 4012 { 4013 struct igc_hw *hw = &adapter->hw; 4014 4015 if (adapter->msix_entries) { 4016 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 4017 u32 regval = rd32(IGC_EIAC); 4018 4019 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 4020 regval = rd32(IGC_EIAM); 4021 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 4022 wr32(IGC_EIMS, adapter->eims_enable_mask); 4023 wr32(IGC_IMS, ims); 4024 } else { 4025 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 4026 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 4027 } 4028 } 4029 4030 /** 4031 * igc_irq_disable - Mask off interrupt generation on the NIC 4032 * @adapter: board private structure 4033 */ 4034 static void igc_irq_disable(struct igc_adapter *adapter) 4035 { 4036 struct igc_hw *hw = &adapter->hw; 4037 4038 if (adapter->msix_entries) { 4039 u32 regval = rd32(IGC_EIAM); 4040 4041 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 4042 wr32(IGC_EIMC, adapter->eims_enable_mask); 4043 regval = rd32(IGC_EIAC); 4044 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 4045 } 4046 4047 wr32(IGC_IAM, 0); 4048 wr32(IGC_IMC, ~0); 4049 wrfl(); 4050 4051 if (adapter->msix_entries) { 4052 int vector = 0, i; 4053 4054 synchronize_irq(adapter->msix_entries[vector++].vector); 4055 4056 for (i = 0; i < adapter->num_q_vectors; i++) 4057 synchronize_irq(adapter->msix_entries[vector++].vector); 4058 } else { 4059 synchronize_irq(adapter->pdev->irq); 4060 } 4061 } 4062 4063 void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 4064 const u32 max_rss_queues) 4065 { 4066 /* Determine if we need to pair queues. */ 4067 /* If rss_queues > half of max_rss_queues, pair the queues in 4068 * order to conserve interrupts due to limited supply. 4069 */ 4070 if (adapter->rss_queues > (max_rss_queues / 2)) 4071 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 4072 else 4073 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 4074 } 4075 4076 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 4077 { 4078 return IGC_MAX_RX_QUEUES; 4079 } 4080 4081 static void igc_init_queue_configuration(struct igc_adapter *adapter) 4082 { 4083 u32 max_rss_queues; 4084 4085 max_rss_queues = igc_get_max_rss_queues(adapter); 4086 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 4087 4088 igc_set_flag_queue_pairs(adapter, max_rss_queues); 4089 } 4090 4091 /** 4092 * igc_reset_q_vector - Reset config for interrupt vector 4093 * @adapter: board private structure to initialize 4094 * @v_idx: Index of vector to be reset 4095 * 4096 * If NAPI is enabled it will delete any references to the 4097 * NAPI struct. This is preparation for igc_free_q_vector. 
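 * The adapter's tx_ring[] and rx_ring[] entries that pointed at this
 * vector's rings are cleared here as well, so no stale references remain
 * once the vector is freed.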
4098 */ 4099 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 4100 { 4101 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 4102 4103 /* if we're coming from igc_set_interrupt_capability, the vectors are 4104 * not yet allocated 4105 */ 4106 if (!q_vector) 4107 return; 4108 4109 if (q_vector->tx.ring) 4110 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 4111 4112 if (q_vector->rx.ring) 4113 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 4114 4115 netif_napi_del(&q_vector->napi); 4116 } 4117 4118 /** 4119 * igc_free_q_vector - Free memory allocated for specific interrupt vector 4120 * @adapter: board private structure to initialize 4121 * @v_idx: Index of vector to be freed 4122 * 4123 * This function frees the memory allocated to the q_vector. 4124 */ 4125 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 4126 { 4127 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 4128 4129 adapter->q_vector[v_idx] = NULL; 4130 4131 /* igc_get_stats64() might access the rings on this vector, 4132 * we must wait a grace period before freeing it. 4133 */ 4134 if (q_vector) 4135 kfree_rcu(q_vector, rcu); 4136 } 4137 4138 /** 4139 * igc_free_q_vectors - Free memory allocated for interrupt vectors 4140 * @adapter: board private structure to initialize 4141 * 4142 * This function frees the memory allocated to the q_vectors. In addition if 4143 * NAPI is enabled it will delete any references to the NAPI struct prior 4144 * to freeing the q_vector. 4145 */ 4146 static void igc_free_q_vectors(struct igc_adapter *adapter) 4147 { 4148 int v_idx = adapter->num_q_vectors; 4149 4150 adapter->num_tx_queues = 0; 4151 adapter->num_rx_queues = 0; 4152 adapter->num_q_vectors = 0; 4153 4154 while (v_idx--) { 4155 igc_reset_q_vector(adapter, v_idx); 4156 igc_free_q_vector(adapter, v_idx); 4157 } 4158 } 4159 4160 /** 4161 * igc_update_itr - update the dynamic ITR value based on statistics 4162 * @q_vector: pointer to q_vector 4163 * @ring_container: ring info to update the itr for 4164 * 4165 * Stores a new ITR value based on packets and byte 4166 * counts during the last interrupt. The advantage of per interrupt 4167 * computation is faster updates and more accurate ITR for the current 4168 * traffic pattern. Constants in this function were computed 4169 * based on theoretical maximum wire speed and thresholds were set based 4170 * on testing data as well as attempting to minimize response time 4171 * while increasing bulk throughput. 4172 * NOTE: These calculations are only valid when operating in a single- 4173 * queue environment. 
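 *
 * For example, an interrupt window in the low_latency state that saw
 * 40 packets totalling 12000 bytes clears the 10000-byte threshold while
 * staying under the 8000 and 1200 bytes-per-packet cut-offs and above
 * 35 packets, so the ring is stepped up to lowest_latency; a window
 * averaging more than 8000 bytes per packet (TSO or jumbo traffic) is
 * stepped down to bulk_latency instead.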
4174 */ 4175 static void igc_update_itr(struct igc_q_vector *q_vector, 4176 struct igc_ring_container *ring_container) 4177 { 4178 unsigned int packets = ring_container->total_packets; 4179 unsigned int bytes = ring_container->total_bytes; 4180 u8 itrval = ring_container->itr; 4181 4182 /* no packets, exit with status unchanged */ 4183 if (packets == 0) 4184 return; 4185 4186 switch (itrval) { 4187 case lowest_latency: 4188 /* handle TSO and jumbo frames */ 4189 if (bytes / packets > 8000) 4190 itrval = bulk_latency; 4191 else if ((packets < 5) && (bytes > 512)) 4192 itrval = low_latency; 4193 break; 4194 case low_latency: /* 50 usec aka 20000 ints/s */ 4195 if (bytes > 10000) { 4196 /* this if handles the TSO accounting */ 4197 if (bytes / packets > 8000) 4198 itrval = bulk_latency; 4199 else if ((packets < 10) || ((bytes / packets) > 1200)) 4200 itrval = bulk_latency; 4201 else if ((packets > 35)) 4202 itrval = lowest_latency; 4203 } else if (bytes / packets > 2000) { 4204 itrval = bulk_latency; 4205 } else if (packets <= 2 && bytes < 512) { 4206 itrval = lowest_latency; 4207 } 4208 break; 4209 case bulk_latency: /* 250 usec aka 4000 ints/s */ 4210 if (bytes > 25000) { 4211 if (packets > 35) 4212 itrval = low_latency; 4213 } else if (bytes < 1500) { 4214 itrval = low_latency; 4215 } 4216 break; 4217 } 4218 4219 /* clear work counters since we have the values we need */ 4220 ring_container->total_bytes = 0; 4221 ring_container->total_packets = 0; 4222 4223 /* write updated itr to ring container */ 4224 ring_container->itr = itrval; 4225 } 4226 4227 static void igc_set_itr(struct igc_q_vector *q_vector) 4228 { 4229 struct igc_adapter *adapter = q_vector->adapter; 4230 u32 new_itr = q_vector->itr_val; 4231 u8 current_itr = 0; 4232 4233 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 4234 switch (adapter->link_speed) { 4235 case SPEED_10: 4236 case SPEED_100: 4237 current_itr = 0; 4238 new_itr = IGC_4K_ITR; 4239 goto set_itr_now; 4240 default: 4241 break; 4242 } 4243 4244 igc_update_itr(q_vector, &q_vector->tx); 4245 igc_update_itr(q_vector, &q_vector->rx); 4246 4247 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 4248 4249 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4250 if (current_itr == lowest_latency && 4251 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4252 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4253 current_itr = low_latency; 4254 4255 switch (current_itr) { 4256 /* counts and packets in update_itr are dependent on these numbers */ 4257 case lowest_latency: 4258 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 4259 break; 4260 case low_latency: 4261 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 4262 break; 4263 case bulk_latency: 4264 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 4265 break; 4266 default: 4267 break; 4268 } 4269 4270 set_itr_now: 4271 if (new_itr != q_vector->itr_val) { 4272 /* this attempts to bias the interrupt rate towards Bulk 4273 * by adding intermediate steps when interrupt rate is 4274 * increasing 4275 */ 4276 new_itr = new_itr > q_vector->itr_val ? 4277 max((new_itr * q_vector->itr_val) / 4278 (new_itr + (q_vector->itr_val >> 2)), 4279 new_itr) : new_itr; 4280 /* Don't write the value here; it resets the adapter's 4281 * internal timer, and causes us to delay far longer than 4282 * we should between interrupts. Instead, we write the ITR 4283 * value at the beginning of the next interrupt so the timing 4284 * ends up being correct. 
4285 */ 4286 q_vector->itr_val = new_itr; 4287 q_vector->set_itr = 1; 4288 } 4289 } 4290 4291 static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 4292 { 4293 int v_idx = adapter->num_q_vectors; 4294 4295 if (adapter->msix_entries) { 4296 pci_disable_msix(adapter->pdev); 4297 kfree(adapter->msix_entries); 4298 adapter->msix_entries = NULL; 4299 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 4300 pci_disable_msi(adapter->pdev); 4301 } 4302 4303 while (v_idx--) 4304 igc_reset_q_vector(adapter, v_idx); 4305 } 4306 4307 /** 4308 * igc_set_interrupt_capability - set MSI or MSI-X if supported 4309 * @adapter: Pointer to adapter structure 4310 * @msix: boolean value for MSI-X capability 4311 * 4312 * Attempt to configure interrupts using the best available 4313 * capabilities of the hardware and kernel. 4314 */ 4315 static void igc_set_interrupt_capability(struct igc_adapter *adapter, 4316 bool msix) 4317 { 4318 int numvecs, i; 4319 int err; 4320 4321 if (!msix) 4322 goto msi_only; 4323 adapter->flags |= IGC_FLAG_HAS_MSIX; 4324 4325 /* Number of supported queues. */ 4326 adapter->num_rx_queues = adapter->rss_queues; 4327 4328 adapter->num_tx_queues = adapter->rss_queues; 4329 4330 /* start with one vector for every Rx queue */ 4331 numvecs = adapter->num_rx_queues; 4332 4333 /* if Tx handler is separate add 1 for every Tx queue */ 4334 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 4335 numvecs += adapter->num_tx_queues; 4336 4337 /* store the number of vectors reserved for queues */ 4338 adapter->num_q_vectors = numvecs; 4339 4340 /* add 1 vector for link status interrupts */ 4341 numvecs++; 4342 4343 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 4344 GFP_KERNEL); 4345 4346 if (!adapter->msix_entries) 4347 return; 4348 4349 /* populate entry values */ 4350 for (i = 0; i < numvecs; i++) 4351 adapter->msix_entries[i].entry = i; 4352 4353 err = pci_enable_msix_range(adapter->pdev, 4354 adapter->msix_entries, 4355 numvecs, 4356 numvecs); 4357 if (err > 0) 4358 return; 4359 4360 kfree(adapter->msix_entries); 4361 adapter->msix_entries = NULL; 4362 4363 igc_reset_interrupt_capability(adapter); 4364 4365 msi_only: 4366 adapter->flags &= ~IGC_FLAG_HAS_MSIX; 4367 4368 adapter->rss_queues = 1; 4369 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 4370 adapter->num_rx_queues = 1; 4371 adapter->num_tx_queues = 1; 4372 adapter->num_q_vectors = 1; 4373 if (!pci_enable_msi(adapter->pdev)) 4374 adapter->flags |= IGC_FLAG_HAS_MSI; 4375 } 4376 4377 /** 4378 * igc_update_ring_itr - update the dynamic ITR value based on packet size 4379 * @q_vector: pointer to q_vector 4380 * 4381 * Stores a new ITR value based strictly on packet size. This 4382 * algorithm is less sophisticated than that used in igc_update_itr, 4383 * due to the difficulty of synchronizing statistics across multiple 4384 * receive rings. The divisors and thresholds used by this function 4385 * were determined based on theoretical maximum wire speed and testing 4386 * data, in order to minimize response time while increasing bulk 4387 * throughput. 4388 * NOTE: This function is called only when operating in a multiqueue 4389 * receive environment. 4390 */ 4391 static void igc_update_ring_itr(struct igc_q_vector *q_vector) 4392 { 4393 struct igc_adapter *adapter = q_vector->adapter; 4394 int new_val = q_vector->itr_val; 4395 int avg_wire_size = 0; 4396 unsigned int packets; 4397 4398 /* For non-gigabit speeds, just fix the interrupt rate at 4000 4399 * ints/sec - ITR timer value of 120 ticks.
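 * At 1000/2500 Mbps the code below instead estimates the average wire
 * size seen since the last interrupt (payload plus 24 bytes for CRC,
 * preamble and inter-frame gap, capped at 3000 bytes) and derives the
 * new ITR value from it: avg_wire_size / 3 for mid-size frames in the
 * 300..1200 byte range, avg_wire_size / 2 otherwise.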
4400 */ 4401 switch (adapter->link_speed) { 4402 case SPEED_10: 4403 case SPEED_100: 4404 new_val = IGC_4K_ITR; 4405 goto set_itr_val; 4406 default: 4407 break; 4408 } 4409 4410 packets = q_vector->rx.total_packets; 4411 if (packets) 4412 avg_wire_size = q_vector->rx.total_bytes / packets; 4413 4414 packets = q_vector->tx.total_packets; 4415 if (packets) 4416 avg_wire_size = max_t(u32, avg_wire_size, 4417 q_vector->tx.total_bytes / packets); 4418 4419 /* if avg_wire_size isn't set no work was done */ 4420 if (!avg_wire_size) 4421 goto clear_counts; 4422 4423 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 4424 avg_wire_size += 24; 4425 4426 /* Don't starve jumbo frames */ 4427 avg_wire_size = min(avg_wire_size, 3000); 4428 4429 /* Give a little boost to mid-size frames */ 4430 if (avg_wire_size > 300 && avg_wire_size < 1200) 4431 new_val = avg_wire_size / 3; 4432 else 4433 new_val = avg_wire_size / 2; 4434 4435 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 4436 if (new_val < IGC_20K_ITR && 4437 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 4438 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 4439 new_val = IGC_20K_ITR; 4440 4441 set_itr_val: 4442 if (new_val != q_vector->itr_val) { 4443 q_vector->itr_val = new_val; 4444 q_vector->set_itr = 1; 4445 } 4446 clear_counts: 4447 q_vector->rx.total_bytes = 0; 4448 q_vector->rx.total_packets = 0; 4449 q_vector->tx.total_bytes = 0; 4450 q_vector->tx.total_packets = 0; 4451 } 4452 4453 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 4454 { 4455 struct igc_adapter *adapter = q_vector->adapter; 4456 struct igc_hw *hw = &adapter->hw; 4457 4458 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 4459 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 4460 if (adapter->num_q_vectors == 1) 4461 igc_set_itr(q_vector); 4462 else 4463 igc_update_ring_itr(q_vector); 4464 } 4465 4466 if (!test_bit(__IGC_DOWN, &adapter->state)) { 4467 if (adapter->msix_entries) 4468 wr32(IGC_EIMS, q_vector->eims_value); 4469 else 4470 igc_irq_enable(adapter); 4471 } 4472 } 4473 4474 static void igc_add_ring(struct igc_ring *ring, 4475 struct igc_ring_container *head) 4476 { 4477 head->ring = ring; 4478 head->count++; 4479 } 4480 4481 /** 4482 * igc_cache_ring_register - Descriptor ring to register mapping 4483 * @adapter: board private structure to initialize 4484 * 4485 * Once we know the feature-set enabled for the device, we'll cache 4486 * the register offset the descriptor ring is assigned to. 4487 */ 4488 static void igc_cache_ring_register(struct igc_adapter *adapter) 4489 { 4490 int i = 0, j = 0; 4491 4492 switch (adapter->hw.mac.type) { 4493 case igc_i225: 4494 default: 4495 for (; i < adapter->num_rx_queues; i++) 4496 adapter->rx_ring[i]->reg_idx = i; 4497 for (; j < adapter->num_tx_queues; j++) 4498 adapter->tx_ring[j]->reg_idx = j; 4499 break; 4500 } 4501 } 4502 4503 /** 4504 * igc_poll - NAPI Rx polling callback 4505 * @napi: napi polling structure 4506 * @budget: count of how many packets we should handle 4507 */ 4508 static int igc_poll(struct napi_struct *napi, int budget) 4509 { 4510 struct igc_q_vector *q_vector = container_of(napi, 4511 struct igc_q_vector, 4512 napi); 4513 struct igc_ring *rx_ring = q_vector->rx.ring; 4514 bool clean_complete = true; 4515 int work_done = 0; 4516 4517 if (q_vector->tx.ring) 4518 clean_complete = igc_clean_tx_irq(q_vector, budget); 4519 4520 if (rx_ring) { 4521 int cleaned = rx_ring->xsk_pool ? 
4522 igc_clean_rx_irq_zc(q_vector, budget) : 4523 igc_clean_rx_irq(q_vector, budget); 4524 4525 work_done += cleaned; 4526 if (cleaned >= budget) 4527 clean_complete = false; 4528 } 4529 4530 /* If all work not completed, return budget and keep polling */ 4531 if (!clean_complete) 4532 return budget; 4533 4534 /* Exit the polling mode, but don't re-enable interrupts if stack might 4535 * poll us due to busy-polling 4536 */ 4537 if (likely(napi_complete_done(napi, work_done))) 4538 igc_ring_irq_enable(q_vector); 4539 4540 return min(work_done, budget - 1); 4541 } 4542 4543 /** 4544 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 4545 * @adapter: board private structure to initialize 4546 * @v_count: q_vectors allocated on adapter, used for ring interleaving 4547 * @v_idx: index of vector in adapter struct 4548 * @txr_count: total number of Tx rings to allocate 4549 * @txr_idx: index of first Tx ring to allocate 4550 * @rxr_count: total number of Rx rings to allocate 4551 * @rxr_idx: index of first Rx ring to allocate 4552 * 4553 * We allocate one q_vector. If allocation fails we return -ENOMEM. 4554 */ 4555 static int igc_alloc_q_vector(struct igc_adapter *adapter, 4556 unsigned int v_count, unsigned int v_idx, 4557 unsigned int txr_count, unsigned int txr_idx, 4558 unsigned int rxr_count, unsigned int rxr_idx) 4559 { 4560 struct igc_q_vector *q_vector; 4561 struct igc_ring *ring; 4562 int ring_count; 4563 4564 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 4565 if (txr_count > 1 || rxr_count > 1) 4566 return -ENOMEM; 4567 4568 ring_count = txr_count + rxr_count; 4569 4570 /* allocate q_vector and rings */ 4571 q_vector = adapter->q_vector[v_idx]; 4572 if (!q_vector) 4573 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 4574 GFP_KERNEL); 4575 else 4576 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 4577 if (!q_vector) 4578 return -ENOMEM; 4579 4580 /* initialize NAPI */ 4581 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); 4582 4583 /* tie q_vector and adapter together */ 4584 adapter->q_vector[v_idx] = q_vector; 4585 q_vector->adapter = adapter; 4586 4587 /* initialize work limits */ 4588 q_vector->tx.work_limit = adapter->tx_work_limit; 4589 4590 /* initialize ITR configuration */ 4591 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 4592 q_vector->itr_val = IGC_START_ITR; 4593 4594 /* initialize pointer to rings */ 4595 ring = q_vector->ring; 4596 4597 /* initialize ITR */ 4598 if (rxr_count) { 4599 /* rx or rx/tx vector */ 4600 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 4601 q_vector->itr_val = adapter->rx_itr_setting; 4602 } else { 4603 /* tx only vector */ 4604 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 4605 q_vector->itr_val = adapter->tx_itr_setting; 4606 } 4607 4608 if (txr_count) { 4609 /* assign generic ring traits */ 4610 ring->dev = &adapter->pdev->dev; 4611 ring->netdev = adapter->netdev; 4612 4613 /* configure backlink on ring */ 4614 ring->q_vector = q_vector; 4615 4616 /* update q_vector Tx values */ 4617 igc_add_ring(ring, &q_vector->tx); 4618 4619 /* apply Tx specific ring traits */ 4620 ring->count = adapter->tx_ring_count; 4621 ring->queue_index = txr_idx; 4622 4623 /* assign ring to adapter */ 4624 adapter->tx_ring[txr_idx] = ring; 4625 4626 /* push pointer to next ring */ 4627 ring++; 4628 } 4629 4630 if (rxr_count) { 4631 /* assign generic ring traits */ 4632 ring->dev = &adapter->pdev->dev; 4633 ring->netdev = adapter->netdev; 4634 4635 /* configure 
backlink on ring */ 4636 ring->q_vector = q_vector; 4637 4638 /* update q_vector Rx values */ 4639 igc_add_ring(ring, &q_vector->rx); 4640 4641 /* apply Rx specific ring traits */ 4642 ring->count = adapter->rx_ring_count; 4643 ring->queue_index = rxr_idx; 4644 4645 /* assign ring to adapter */ 4646 adapter->rx_ring[rxr_idx] = ring; 4647 } 4648 4649 return 0; 4650 } 4651 4652 /** 4653 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 4654 * @adapter: board private structure to initialize 4655 * 4656 * We allocate one q_vector per queue interrupt. If allocation fails we 4657 * return -ENOMEM. 4658 */ 4659 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 4660 { 4661 int rxr_remaining = adapter->num_rx_queues; 4662 int txr_remaining = adapter->num_tx_queues; 4663 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 4664 int q_vectors = adapter->num_q_vectors; 4665 int err; 4666 4667 if (q_vectors >= (rxr_remaining + txr_remaining)) { 4668 for (; rxr_remaining; v_idx++) { 4669 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4670 0, 0, 1, rxr_idx); 4671 4672 if (err) 4673 goto err_out; 4674 4675 /* update counts and index */ 4676 rxr_remaining--; 4677 rxr_idx++; 4678 } 4679 } 4680 4681 for (; v_idx < q_vectors; v_idx++) { 4682 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 4683 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 4684 4685 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 4686 tqpv, txr_idx, rqpv, rxr_idx); 4687 4688 if (err) 4689 goto err_out; 4690 4691 /* update counts and index */ 4692 rxr_remaining -= rqpv; 4693 txr_remaining -= tqpv; 4694 rxr_idx++; 4695 txr_idx++; 4696 } 4697 4698 return 0; 4699 4700 err_out: 4701 adapter->num_tx_queues = 0; 4702 adapter->num_rx_queues = 0; 4703 adapter->num_q_vectors = 0; 4704 4705 while (v_idx--) 4706 igc_free_q_vector(adapter, v_idx); 4707 4708 return -ENOMEM; 4709 } 4710 4711 /** 4712 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 4713 * @adapter: Pointer to adapter structure 4714 * @msix: boolean for MSI-X capability 4715 * 4716 * This function initializes the interrupts and allocates all of the queues. 4717 */ 4718 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 4719 { 4720 struct net_device *dev = adapter->netdev; 4721 int err = 0; 4722 4723 igc_set_interrupt_capability(adapter, msix); 4724 4725 err = igc_alloc_q_vectors(adapter); 4726 if (err) { 4727 netdev_err(dev, "Unable to allocate memory for vectors\n"); 4728 goto err_alloc_q_vectors; 4729 } 4730 4731 igc_cache_ring_register(adapter); 4732 4733 return 0; 4734 4735 err_alloc_q_vectors: 4736 igc_reset_interrupt_capability(adapter); 4737 return err; 4738 } 4739 4740 /** 4741 * igc_sw_init - Initialize general software structures (struct igc_adapter) 4742 * @adapter: board private structure to initialize 4743 * 4744 * igc_sw_init initializes the Adapter private data structure. 4745 * Fields are initialized based on PCI device information and 4746 * OS network device settings (MTU size). 
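 *
 * Return: 0 on success, or -ENOMEM when the interrupt scheme and its
 * queue vectors cannot be allocated.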
4747 */ 4748 static int igc_sw_init(struct igc_adapter *adapter) 4749 { 4750 struct net_device *netdev = adapter->netdev; 4751 struct pci_dev *pdev = adapter->pdev; 4752 struct igc_hw *hw = &adapter->hw; 4753 4754 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 4755 4756 /* set default ring sizes */ 4757 adapter->tx_ring_count = IGC_DEFAULT_TXD; 4758 adapter->rx_ring_count = IGC_DEFAULT_RXD; 4759 4760 /* set default ITR values */ 4761 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 4762 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 4763 4764 /* set default work limits */ 4765 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 4766 4767 /* adjust max frame to be at least the size of a standard frame */ 4768 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 4769 VLAN_HLEN; 4770 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 4771 4772 mutex_init(&adapter->nfc_rule_lock); 4773 INIT_LIST_HEAD(&adapter->nfc_rule_list); 4774 adapter->nfc_rule_count = 0; 4775 4776 spin_lock_init(&adapter->stats64_lock); 4777 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 4778 adapter->flags |= IGC_FLAG_HAS_MSIX; 4779 4780 igc_init_queue_configuration(adapter); 4781 4782 /* This call may decrease the number of queues */ 4783 if (igc_init_interrupt_scheme(adapter, true)) { 4784 netdev_err(netdev, "Unable to allocate memory for queues\n"); 4785 return -ENOMEM; 4786 } 4787 4788 /* Explicitly disable IRQ since the NIC can be in any state. */ 4789 igc_irq_disable(adapter); 4790 4791 set_bit(__IGC_DOWN, &adapter->state); 4792 4793 return 0; 4794 } 4795 4796 /** 4797 * igc_up - Open the interface and prepare it to handle traffic 4798 * @adapter: board private structure 4799 */ 4800 void igc_up(struct igc_adapter *adapter) 4801 { 4802 struct igc_hw *hw = &adapter->hw; 4803 int i = 0; 4804 4805 /* hardware has been reset, we need to reload some things */ 4806 igc_configure(adapter); 4807 4808 clear_bit(__IGC_DOWN, &adapter->state); 4809 4810 for (i = 0; i < adapter->num_q_vectors; i++) 4811 napi_enable(&adapter->q_vector[i]->napi); 4812 4813 if (adapter->msix_entries) 4814 igc_configure_msix(adapter); 4815 else 4816 igc_assign_vector(adapter->q_vector[0], 0); 4817 4818 /* Clear any pending interrupts. */ 4819 rd32(IGC_ICR); 4820 igc_irq_enable(adapter); 4821 4822 netif_tx_start_all_queues(adapter->netdev); 4823 4824 /* start the watchdog. */ 4825 hw->mac.get_link_status = true; 4826 schedule_work(&adapter->watchdog_task); 4827 } 4828 4829 /** 4830 * igc_update_stats - Update the board statistics counters 4831 * @adapter: board private structure 4832 */ 4833 void igc_update_stats(struct igc_adapter *adapter) 4834 { 4835 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 4836 struct pci_dev *pdev = adapter->pdev; 4837 struct igc_hw *hw = &adapter->hw; 4838 u64 _bytes, _packets; 4839 u64 bytes, packets; 4840 unsigned int start; 4841 u32 mpc; 4842 int i; 4843 4844 /* Prevent stats update while adapter is being reset, or if the pci 4845 * connection is down. 
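 * A link_speed of zero doubles as the "adapter is down or resetting"
 * indication here, since igc_down() clears it before triggering the
 * reset.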
4846 */ 4847 if (adapter->link_speed == 0) 4848 return; 4849 if (pci_channel_offline(pdev)) 4850 return; 4851 4852 packets = 0; 4853 bytes = 0; 4854 4855 rcu_read_lock(); 4856 for (i = 0; i < adapter->num_rx_queues; i++) { 4857 struct igc_ring *ring = adapter->rx_ring[i]; 4858 u32 rqdpc = rd32(IGC_RQDPC(i)); 4859 4860 if (hw->mac.type >= igc_i225) 4861 wr32(IGC_RQDPC(i), 0); 4862 4863 if (rqdpc) { 4864 ring->rx_stats.drops += rqdpc; 4865 net_stats->rx_fifo_errors += rqdpc; 4866 } 4867 4868 do { 4869 start = u64_stats_fetch_begin(&ring->rx_syncp); 4870 _bytes = ring->rx_stats.bytes; 4871 _packets = ring->rx_stats.packets; 4872 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); 4873 bytes += _bytes; 4874 packets += _packets; 4875 } 4876 4877 net_stats->rx_bytes = bytes; 4878 net_stats->rx_packets = packets; 4879 4880 packets = 0; 4881 bytes = 0; 4882 for (i = 0; i < adapter->num_tx_queues; i++) { 4883 struct igc_ring *ring = adapter->tx_ring[i]; 4884 4885 do { 4886 start = u64_stats_fetch_begin(&ring->tx_syncp); 4887 _bytes = ring->tx_stats.bytes; 4888 _packets = ring->tx_stats.packets; 4889 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); 4890 bytes += _bytes; 4891 packets += _packets; 4892 } 4893 net_stats->tx_bytes = bytes; 4894 net_stats->tx_packets = packets; 4895 rcu_read_unlock(); 4896 4897 /* read stats registers */ 4898 adapter->stats.crcerrs += rd32(IGC_CRCERRS); 4899 adapter->stats.gprc += rd32(IGC_GPRC); 4900 adapter->stats.gorc += rd32(IGC_GORCL); 4901 rd32(IGC_GORCH); /* clear GORCL */ 4902 adapter->stats.bprc += rd32(IGC_BPRC); 4903 adapter->stats.mprc += rd32(IGC_MPRC); 4904 adapter->stats.roc += rd32(IGC_ROC); 4905 4906 adapter->stats.prc64 += rd32(IGC_PRC64); 4907 adapter->stats.prc127 += rd32(IGC_PRC127); 4908 adapter->stats.prc255 += rd32(IGC_PRC255); 4909 adapter->stats.prc511 += rd32(IGC_PRC511); 4910 adapter->stats.prc1023 += rd32(IGC_PRC1023); 4911 adapter->stats.prc1522 += rd32(IGC_PRC1522); 4912 adapter->stats.tlpic += rd32(IGC_TLPIC); 4913 adapter->stats.rlpic += rd32(IGC_RLPIC); 4914 adapter->stats.hgptc += rd32(IGC_HGPTC); 4915 4916 mpc = rd32(IGC_MPC); 4917 adapter->stats.mpc += mpc; 4918 net_stats->rx_fifo_errors += mpc; 4919 adapter->stats.scc += rd32(IGC_SCC); 4920 adapter->stats.ecol += rd32(IGC_ECOL); 4921 adapter->stats.mcc += rd32(IGC_MCC); 4922 adapter->stats.latecol += rd32(IGC_LATECOL); 4923 adapter->stats.dc += rd32(IGC_DC); 4924 adapter->stats.rlec += rd32(IGC_RLEC); 4925 adapter->stats.xonrxc += rd32(IGC_XONRXC); 4926 adapter->stats.xontxc += rd32(IGC_XONTXC); 4927 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 4928 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 4929 adapter->stats.fcruc += rd32(IGC_FCRUC); 4930 adapter->stats.gptc += rd32(IGC_GPTC); 4931 adapter->stats.gotc += rd32(IGC_GOTCL); 4932 rd32(IGC_GOTCH); /* clear GOTCL */ 4933 adapter->stats.rnbc += rd32(IGC_RNBC); 4934 adapter->stats.ruc += rd32(IGC_RUC); 4935 adapter->stats.rfc += rd32(IGC_RFC); 4936 adapter->stats.rjc += rd32(IGC_RJC); 4937 adapter->stats.tor += rd32(IGC_TORH); 4938 adapter->stats.tot += rd32(IGC_TOTH); 4939 adapter->stats.tpr += rd32(IGC_TPR); 4940 4941 adapter->stats.ptc64 += rd32(IGC_PTC64); 4942 adapter->stats.ptc127 += rd32(IGC_PTC127); 4943 adapter->stats.ptc255 += rd32(IGC_PTC255); 4944 adapter->stats.ptc511 += rd32(IGC_PTC511); 4945 adapter->stats.ptc1023 += rd32(IGC_PTC1023); 4946 adapter->stats.ptc1522 += rd32(IGC_PTC1522); 4947 4948 adapter->stats.mptc += rd32(IGC_MPTC); 4949 adapter->stats.bptc += rd32(IGC_BPTC); 4950 4951 adapter->stats.tpt += 
rd32(IGC_TPT); 4952 adapter->stats.colc += rd32(IGC_COLC); 4953 adapter->stats.colc += rd32(IGC_RERC); 4954 4955 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 4956 4957 adapter->stats.tsctc += rd32(IGC_TSCTC); 4958 4959 adapter->stats.iac += rd32(IGC_IAC); 4960 4961 /* Fill out the OS statistics structure */ 4962 net_stats->multicast = adapter->stats.mprc; 4963 net_stats->collisions = adapter->stats.colc; 4964 4965 /* Rx Errors */ 4966 4967 /* RLEC on some newer hardware can be incorrect so build 4968 * our own version based on RUC and ROC 4969 */ 4970 net_stats->rx_errors = adapter->stats.rxerrc + 4971 adapter->stats.crcerrs + adapter->stats.algnerrc + 4972 adapter->stats.ruc + adapter->stats.roc + 4973 adapter->stats.cexterr; 4974 net_stats->rx_length_errors = adapter->stats.ruc + 4975 adapter->stats.roc; 4976 net_stats->rx_crc_errors = adapter->stats.crcerrs; 4977 net_stats->rx_frame_errors = adapter->stats.algnerrc; 4978 net_stats->rx_missed_errors = adapter->stats.mpc; 4979 4980 /* Tx Errors */ 4981 net_stats->tx_errors = adapter->stats.ecol + 4982 adapter->stats.latecol; 4983 net_stats->tx_aborted_errors = adapter->stats.ecol; 4984 net_stats->tx_window_errors = adapter->stats.latecol; 4985 net_stats->tx_carrier_errors = adapter->stats.tncrs; 4986 4987 /* Tx Dropped */ 4988 net_stats->tx_dropped = adapter->stats.txdrop; 4989 4990 /* Management Stats */ 4991 adapter->stats.mgptc += rd32(IGC_MGTPTC); 4992 adapter->stats.mgprc += rd32(IGC_MGTPRC); 4993 adapter->stats.mgpdc += rd32(IGC_MGTPDC); 4994 } 4995 4996 /** 4997 * igc_down - Close the interface 4998 * @adapter: board private structure 4999 */ 5000 void igc_down(struct igc_adapter *adapter) 5001 { 5002 struct net_device *netdev = adapter->netdev; 5003 struct igc_hw *hw = &adapter->hw; 5004 u32 tctl, rctl; 5005 int i = 0; 5006 5007 set_bit(__IGC_DOWN, &adapter->state); 5008 5009 igc_ptp_suspend(adapter); 5010 5011 if (pci_device_is_present(adapter->pdev)) { 5012 /* disable receives in the hardware */ 5013 rctl = rd32(IGC_RCTL); 5014 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); 5015 /* flush and sleep below */ 5016 } 5017 /* set trans_start so we don't get spurious watchdogs during reset */ 5018 netif_trans_update(netdev); 5019 5020 netif_carrier_off(netdev); 5021 netif_tx_stop_all_queues(netdev); 5022 5023 if (pci_device_is_present(adapter->pdev)) { 5024 /* disable transmits in the hardware */ 5025 tctl = rd32(IGC_TCTL); 5026 tctl &= ~IGC_TCTL_EN; 5027 wr32(IGC_TCTL, tctl); 5028 /* flush both disables and wait for them to finish */ 5029 wrfl(); 5030 usleep_range(10000, 20000); 5031 5032 igc_irq_disable(adapter); 5033 } 5034 5035 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5036 5037 for (i = 0; i < adapter->num_q_vectors; i++) { 5038 if (adapter->q_vector[i]) { 5039 napi_synchronize(&adapter->q_vector[i]->napi); 5040 napi_disable(&adapter->q_vector[i]->napi); 5041 } 5042 } 5043 5044 del_timer_sync(&adapter->watchdog_timer); 5045 del_timer_sync(&adapter->phy_info_timer); 5046 5047 /* record the stats before reset*/ 5048 spin_lock(&adapter->stats64_lock); 5049 igc_update_stats(adapter); 5050 spin_unlock(&adapter->stats64_lock); 5051 5052 adapter->link_speed = 0; 5053 adapter->link_duplex = 0; 5054 5055 if (!pci_channel_offline(adapter->pdev)) 5056 igc_reset(adapter); 5057 5058 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 5059 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 5060 5061 igc_clean_all_tx_rings(adapter); 5062 igc_clean_all_rx_rings(adapter); 5063 } 5064 5065 void igc_reinit_locked(struct igc_adapter *adapter) 5066 
{ 5067 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 5068 usleep_range(1000, 2000); 5069 igc_down(adapter); 5070 igc_up(adapter); 5071 clear_bit(__IGC_RESETTING, &adapter->state); 5072 } 5073 5074 static void igc_reset_task(struct work_struct *work) 5075 { 5076 struct igc_adapter *adapter; 5077 5078 adapter = container_of(work, struct igc_adapter, reset_task); 5079 5080 rtnl_lock(); 5081 /* If we're already down or resetting, just bail */ 5082 if (test_bit(__IGC_DOWN, &adapter->state) || 5083 test_bit(__IGC_RESETTING, &adapter->state)) { 5084 rtnl_unlock(); 5085 return; 5086 } 5087 5088 igc_rings_dump(adapter); 5089 igc_regs_dump(adapter); 5090 netdev_err(adapter->netdev, "Reset adapter\n"); 5091 igc_reinit_locked(adapter); 5092 rtnl_unlock(); 5093 } 5094 5095 /** 5096 * igc_change_mtu - Change the Maximum Transfer Unit 5097 * @netdev: network interface device structure 5098 * @new_mtu: new value for maximum frame size 5099 * 5100 * Returns 0 on success, negative on failure 5101 */ 5102 static int igc_change_mtu(struct net_device *netdev, int new_mtu) 5103 { 5104 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 5105 struct igc_adapter *adapter = netdev_priv(netdev); 5106 5107 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { 5108 netdev_dbg(netdev, "Jumbo frames not supported with XDP"); 5109 return -EINVAL; 5110 } 5111 5112 /* adjust max frame to be at least the size of a standard frame */ 5113 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 5114 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 5115 5116 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 5117 usleep_range(1000, 2000); 5118 5119 /* igc_down has a dependency on max_frame_size */ 5120 adapter->max_frame_size = max_frame; 5121 5122 if (netif_running(netdev)) 5123 igc_down(adapter); 5124 5125 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5126 netdev->mtu = new_mtu; 5127 5128 if (netif_running(netdev)) 5129 igc_up(adapter); 5130 else 5131 igc_reset(adapter); 5132 5133 clear_bit(__IGC_RESETTING, &adapter->state); 5134 5135 return 0; 5136 } 5137 5138 /** 5139 * igc_tx_timeout - Respond to a Tx Hang 5140 * @netdev: network interface device structure 5141 * @txqueue: queue number that timed out 5142 **/ 5143 static void igc_tx_timeout(struct net_device *netdev, 5144 unsigned int __always_unused txqueue) 5145 { 5146 struct igc_adapter *adapter = netdev_priv(netdev); 5147 struct igc_hw *hw = &adapter->hw; 5148 5149 /* Do the reset outside of interrupt context */ 5150 adapter->tx_timeout_count++; 5151 schedule_work(&adapter->reset_task); 5152 wr32(IGC_EICS, 5153 (adapter->eims_enable_mask & ~adapter->eims_other)); 5154 } 5155 5156 /** 5157 * igc_get_stats64 - Get System Network Statistics 5158 * @netdev: network interface device structure 5159 * @stats: rtnl_link_stats64 pointer 5160 * 5161 * Returns the address of the device statistics structure. 5162 * The statistics are updated here and also from the timer callback. 
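 * While a reset is in progress the refresh is skipped and the values
 * last cached in adapter->stats64 are returned instead, all under
 * stats64_lock.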
5163 */ 5164 static void igc_get_stats64(struct net_device *netdev, 5165 struct rtnl_link_stats64 *stats) 5166 { 5167 struct igc_adapter *adapter = netdev_priv(netdev); 5168 5169 spin_lock(&adapter->stats64_lock); 5170 if (!test_bit(__IGC_RESETTING, &adapter->state)) 5171 igc_update_stats(adapter); 5172 memcpy(stats, &adapter->stats64, sizeof(*stats)); 5173 spin_unlock(&adapter->stats64_lock); 5174 } 5175 5176 static netdev_features_t igc_fix_features(struct net_device *netdev, 5177 netdev_features_t features) 5178 { 5179 /* Since there is no support for separate Rx/Tx vlan accel 5180 * enable/disable make sure Tx flag is always in same state as Rx. 5181 */ 5182 if (features & NETIF_F_HW_VLAN_CTAG_RX) 5183 features |= NETIF_F_HW_VLAN_CTAG_TX; 5184 else 5185 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 5186 5187 return features; 5188 } 5189 5190 static int igc_set_features(struct net_device *netdev, 5191 netdev_features_t features) 5192 { 5193 netdev_features_t changed = netdev->features ^ features; 5194 struct igc_adapter *adapter = netdev_priv(netdev); 5195 5196 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 5197 igc_vlan_mode(netdev, features); 5198 5199 /* Add VLAN support */ 5200 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 5201 return 0; 5202 5203 if (!(features & NETIF_F_NTUPLE)) 5204 igc_flush_nfc_rules(adapter); 5205 5206 netdev->features = features; 5207 5208 if (netif_running(netdev)) 5209 igc_reinit_locked(adapter); 5210 else 5211 igc_reset(adapter); 5212 5213 return 1; 5214 } 5215 5216 static netdev_features_t 5217 igc_features_check(struct sk_buff *skb, struct net_device *dev, 5218 netdev_features_t features) 5219 { 5220 unsigned int network_hdr_len, mac_hdr_len; 5221 5222 /* Make certain the headers can be described by a context descriptor */ 5223 mac_hdr_len = skb_network_header(skb) - skb->data; 5224 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 5225 return features & ~(NETIF_F_HW_CSUM | 5226 NETIF_F_SCTP_CRC | 5227 NETIF_F_HW_VLAN_CTAG_TX | 5228 NETIF_F_TSO | 5229 NETIF_F_TSO6); 5230 5231 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 5232 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 5233 return features & ~(NETIF_F_HW_CSUM | 5234 NETIF_F_SCTP_CRC | 5235 NETIF_F_TSO | 5236 NETIF_F_TSO6); 5237 5238 /* We can only support IPv4 TSO in tunnels if we can mangle the 5239 * inner IP ID field, so strip TSO if MANGLEID is not supported. 
5240 */ 5241 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 5242 features &= ~NETIF_F_TSO; 5243 5244 return features; 5245 } 5246 5247 static void igc_tsync_interrupt(struct igc_adapter *adapter) 5248 { 5249 u32 ack, tsauxc, sec, nsec, tsicr; 5250 struct igc_hw *hw = &adapter->hw; 5251 struct ptp_clock_event event; 5252 struct timespec64 ts; 5253 5254 tsicr = rd32(IGC_TSICR); 5255 ack = 0; 5256 5257 if (tsicr & IGC_TSICR_SYS_WRAP) { 5258 event.type = PTP_CLOCK_PPS; 5259 if (adapter->ptp_caps.pps) 5260 ptp_clock_event(adapter->ptp_clock, &event); 5261 ack |= IGC_TSICR_SYS_WRAP; 5262 } 5263 5264 if (tsicr & IGC_TSICR_TXTS) { 5265 /* retrieve hardware timestamp */ 5266 igc_ptp_tx_tstamp_event(adapter); 5267 ack |= IGC_TSICR_TXTS; 5268 } 5269 5270 if (tsicr & IGC_TSICR_TT0) { 5271 spin_lock(&adapter->tmreg_lock); 5272 ts = timespec64_add(adapter->perout[0].start, 5273 adapter->perout[0].period); 5274 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5275 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); 5276 tsauxc = rd32(IGC_TSAUXC); 5277 tsauxc |= IGC_TSAUXC_EN_TT0; 5278 wr32(IGC_TSAUXC, tsauxc); 5279 adapter->perout[0].start = ts; 5280 spin_unlock(&adapter->tmreg_lock); 5281 ack |= IGC_TSICR_TT0; 5282 } 5283 5284 if (tsicr & IGC_TSICR_TT1) { 5285 spin_lock(&adapter->tmreg_lock); 5286 ts = timespec64_add(adapter->perout[1].start, 5287 adapter->perout[1].period); 5288 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); 5289 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); 5290 tsauxc = rd32(IGC_TSAUXC); 5291 tsauxc |= IGC_TSAUXC_EN_TT1; 5292 wr32(IGC_TSAUXC, tsauxc); 5293 adapter->perout[1].start = ts; 5294 spin_unlock(&adapter->tmreg_lock); 5295 ack |= IGC_TSICR_TT1; 5296 } 5297 5298 if (tsicr & IGC_TSICR_AUTT0) { 5299 nsec = rd32(IGC_AUXSTMPL0); 5300 sec = rd32(IGC_AUXSTMPH0); 5301 event.type = PTP_CLOCK_EXTTS; 5302 event.index = 0; 5303 event.timestamp = sec * NSEC_PER_SEC + nsec; 5304 ptp_clock_event(adapter->ptp_clock, &event); 5305 ack |= IGC_TSICR_AUTT0; 5306 } 5307 5308 if (tsicr & IGC_TSICR_AUTT1) { 5309 nsec = rd32(IGC_AUXSTMPL1); 5310 sec = rd32(IGC_AUXSTMPH1); 5311 event.type = PTP_CLOCK_EXTTS; 5312 event.index = 1; 5313 event.timestamp = sec * NSEC_PER_SEC + nsec; 5314 ptp_clock_event(adapter->ptp_clock, &event); 5315 ack |= IGC_TSICR_AUTT1; 5316 } 5317 5318 /* acknowledge the interrupts */ 5319 wr32(IGC_TSICR, ack); 5320 } 5321 5322 /** 5323 * igc_msix_other - msix other interrupt handler 5324 * @irq: interrupt number 5325 * @data: pointer to a q_vector 5326 */ 5327 static irqreturn_t igc_msix_other(int irq, void *data) 5328 { 5329 struct igc_adapter *adapter = data; 5330 struct igc_hw *hw = &adapter->hw; 5331 u32 icr = rd32(IGC_ICR); 5332 5333 /* reading ICR causes bit 31 of EICR to be cleared */ 5334 if (icr & IGC_ICR_DRSTA) 5335 schedule_work(&adapter->reset_task); 5336 5337 if (icr & IGC_ICR_DOUTSYNC) { 5338 /* HW is reporting DMA is out of sync */ 5339 adapter->stats.doosync++; 5340 } 5341 5342 if (icr & IGC_ICR_LSC) { 5343 hw->mac.get_link_status = true; 5344 /* guard against interrupt when we're going down */ 5345 if (!test_bit(__IGC_DOWN, &adapter->state)) 5346 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5347 } 5348 5349 if (icr & IGC_ICR_TS) 5350 igc_tsync_interrupt(adapter); 5351 5352 wr32(IGC_EIMS, adapter->eims_other); 5353 5354 return IRQ_HANDLED; 5355 } 5356 5357 static void igc_write_itr(struct igc_q_vector *q_vector) 5358 { 5359 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 5360 5361 if (!q_vector->set_itr) 5362 return; 5363 5364 if 
(!itr_val) 5365 itr_val = IGC_ITR_VAL_MASK; 5366 5367 itr_val |= IGC_EITR_CNT_IGNR; 5368 5369 writel(itr_val, q_vector->itr_register); 5370 q_vector->set_itr = 0; 5371 } 5372 5373 static irqreturn_t igc_msix_ring(int irq, void *data) 5374 { 5375 struct igc_q_vector *q_vector = data; 5376 5377 /* Write the ITR value calculated from the previous interrupt. */ 5378 igc_write_itr(q_vector); 5379 5380 napi_schedule(&q_vector->napi); 5381 5382 return IRQ_HANDLED; 5383 } 5384 5385 /** 5386 * igc_request_msix - Initialize MSI-X interrupts 5387 * @adapter: Pointer to adapter structure 5388 * 5389 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 5390 * kernel. 5391 */ 5392 static int igc_request_msix(struct igc_adapter *adapter) 5393 { 5394 unsigned int num_q_vectors = adapter->num_q_vectors; 5395 int i = 0, err = 0, vector = 0, free_vector = 0; 5396 struct net_device *netdev = adapter->netdev; 5397 5398 err = request_irq(adapter->msix_entries[vector].vector, 5399 &igc_msix_other, 0, netdev->name, adapter); 5400 if (err) 5401 goto err_out; 5402 5403 if (num_q_vectors > MAX_Q_VECTORS) { 5404 num_q_vectors = MAX_Q_VECTORS; 5405 dev_warn(&adapter->pdev->dev, 5406 "The number of queue vectors (%d) is higher than max allowed (%d)\n", 5407 adapter->num_q_vectors, MAX_Q_VECTORS); 5408 } 5409 for (i = 0; i < num_q_vectors; i++) { 5410 struct igc_q_vector *q_vector = adapter->q_vector[i]; 5411 5412 vector++; 5413 5414 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 5415 5416 if (q_vector->rx.ring && q_vector->tx.ring) 5417 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 5418 q_vector->rx.ring->queue_index); 5419 else if (q_vector->tx.ring) 5420 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 5421 q_vector->tx.ring->queue_index); 5422 else if (q_vector->rx.ring) 5423 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 5424 q_vector->rx.ring->queue_index); 5425 else 5426 sprintf(q_vector->name, "%s-unused", netdev->name); 5427 5428 err = request_irq(adapter->msix_entries[vector].vector, 5429 igc_msix_ring, 0, q_vector->name, 5430 q_vector); 5431 if (err) 5432 goto err_free; 5433 } 5434 5435 igc_configure_msix(adapter); 5436 return 0; 5437 5438 err_free: 5439 /* free already assigned IRQs */ 5440 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 5441 5442 vector--; 5443 for (i = 0; i < vector; i++) { 5444 free_irq(adapter->msix_entries[free_vector++].vector, 5445 adapter->q_vector[i]); 5446 } 5447 err_out: 5448 return err; 5449 } 5450 5451 /** 5452 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 5453 * @adapter: Pointer to adapter structure 5454 * 5455 * This function resets the device so that it has 0 rx queues, tx queues, and 5456 * MSI-X interrupts allocated. 
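 * It is used, for example, by igc_request_irq() when the MSI-X vectors
 * cannot be requested and the driver falls back to MSI or legacy
 * interrupts.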
5457 */ 5458 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 5459 { 5460 igc_free_q_vectors(adapter); 5461 igc_reset_interrupt_capability(adapter); 5462 } 5463 5464 /* Need to wait a few seconds after link up to get diagnostic information from 5465 * the phy 5466 */ 5467 static void igc_update_phy_info(struct timer_list *t) 5468 { 5469 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 5470 5471 igc_get_phy_info(&adapter->hw); 5472 } 5473 5474 /** 5475 * igc_has_link - check shared code for link and determine up/down 5476 * @adapter: pointer to driver private info 5477 */ 5478 bool igc_has_link(struct igc_adapter *adapter) 5479 { 5480 struct igc_hw *hw = &adapter->hw; 5481 bool link_active = false; 5482 5483 /* get_link_status is set on LSC (link status) interrupt or 5484 * rx sequence error interrupt. get_link_status will stay 5485 * false until the igc_check_for_link establishes link 5486 * for copper adapters ONLY 5487 */ 5488 if (!hw->mac.get_link_status) 5489 return true; 5490 hw->mac.ops.check_for_link(hw); 5491 link_active = !hw->mac.get_link_status; 5492 5493 if (hw->mac.type == igc_i225) { 5494 if (!netif_carrier_ok(adapter->netdev)) { 5495 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5496 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { 5497 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; 5498 adapter->link_check_timeout = jiffies; 5499 } 5500 } 5501 5502 return link_active; 5503 } 5504 5505 /** 5506 * igc_watchdog - Timer Call-back 5507 * @t: timer for the watchdog 5508 */ 5509 static void igc_watchdog(struct timer_list *t) 5510 { 5511 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); 5512 /* Do the rest outside of interrupt context */ 5513 schedule_work(&adapter->watchdog_task); 5514 } 5515 5516 static void igc_watchdog_task(struct work_struct *work) 5517 { 5518 struct igc_adapter *adapter = container_of(work, 5519 struct igc_adapter, 5520 watchdog_task); 5521 struct net_device *netdev = adapter->netdev; 5522 struct igc_hw *hw = &adapter->hw; 5523 struct igc_phy_info *phy = &hw->phy; 5524 u16 phy_data, retry_count = 20; 5525 u32 link; 5526 int i; 5527 5528 link = igc_has_link(adapter); 5529 5530 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 5531 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 5532 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5533 else 5534 link = false; 5535 } 5536 5537 if (link) { 5538 /* Cancel scheduled suspend requests. */ 5539 pm_runtime_resume(netdev->dev.parent); 5540 5541 if (!netif_carrier_ok(netdev)) { 5542 u32 ctrl; 5543 5544 hw->mac.ops.get_speed_and_duplex(hw, 5545 &adapter->link_speed, 5546 &adapter->link_duplex); 5547 5548 ctrl = rd32(IGC_CTRL); 5549 /* Link status message must follow this format */ 5550 netdev_info(netdev, 5551 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5552 adapter->link_speed, 5553 adapter->link_duplex == FULL_DUPLEX ? 5554 "Full" : "Half", 5555 (ctrl & IGC_CTRL_TFCE) && 5556 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 5557 (ctrl & IGC_CTRL_RFCE) ? "RX" : 5558 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 5559 5560 /* disable EEE if enabled */ 5561 if ((adapter->flags & IGC_FLAG_EEE) && 5562 adapter->link_duplex == HALF_DUPLEX) { 5563 netdev_info(netdev, 5564 "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex\n"); 5565 adapter->hw.dev_spec._base.eee_enable = false; 5566 adapter->flags &= ~IGC_FLAG_EEE; 5567 } 5568 5569 /* check if SmartSpeed worked */ 5570 igc_check_downshift(hw); 5571 if (phy->speed_downgraded) 5572 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 5573 5574 /* adjust timeout factor according to speed/duplex */ 5575 adapter->tx_timeout_factor = 1; 5576 switch (adapter->link_speed) { 5577 case SPEED_10: 5578 adapter->tx_timeout_factor = 14; 5579 break; 5580 case SPEED_100: 5581 case SPEED_1000: 5582 case SPEED_2500: 5583 adapter->tx_timeout_factor = 1; 5584 break; 5585 } 5586 5587 /* Once the launch time has been set on the wire, there 5588 * is a delay before the link speed can be determined 5589 * based on link-up activity. Write into the register 5590 * as soon as we know the correct link speed. 5591 */ 5592 igc_tsn_adjust_txtime_offset(adapter); 5593 5594 if (adapter->link_speed != SPEED_1000) 5595 goto no_wait; 5596 5597 /* wait for Remote receiver status OK */ 5598 retry_read_status: 5599 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 5600 &phy_data)) { 5601 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5602 retry_count) { 5603 msleep(100); 5604 retry_count--; 5605 goto retry_read_status; 5606 } else if (!retry_count) { 5607 netdev_err(netdev, "exceeded max 2 second wait for Remote receiver status\n"); 5608 } 5609 } else { 5610 netdev_err(netdev, "failed to read 1000Base-T Status register\n"); 5611 } 5612 no_wait: 5613 netif_carrier_on(netdev); 5614 5615 /* link state has changed, schedule phy info update */ 5616 if (!test_bit(__IGC_DOWN, &adapter->state)) 5617 mod_timer(&adapter->phy_info_timer, 5618 round_jiffies(jiffies + 2 * HZ)); 5619 } 5620 } else { 5621 if (netif_carrier_ok(netdev)) { 5622 adapter->link_speed = 0; 5623 adapter->link_duplex = 0; 5624 5625 /* Link status message must follow this format */ 5626 netdev_info(netdev, "NIC Link is Down\n"); 5627 netif_carrier_off(netdev); 5628 5629 /* link state has changed, schedule phy info update */ 5630 if (!test_bit(__IGC_DOWN, &adapter->state)) 5631 mod_timer(&adapter->phy_info_timer, 5632 round_jiffies(jiffies + 2 * HZ)); 5633 5634 pm_schedule_suspend(netdev->dev.parent, 5635 MSEC_PER_SEC * 5); 5636 } 5637 } 5638 5639 spin_lock(&adapter->stats64_lock); 5640 igc_update_stats(adapter); 5641 spin_unlock(&adapter->stats64_lock); 5642 5643 for (i = 0; i < adapter->num_tx_queues; i++) { 5644 struct igc_ring *tx_ring = adapter->tx_ring[i]; 5645 5646 if (!netif_carrier_ok(netdev)) { 5647 /* We've lost link, so the controller stops DMA, 5648 * but we've got queued Tx work that's never going 5649 * to get done, so reset controller to flush Tx. 5650 * (Do the reset outside of interrupt context).
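 * The descriptor check below treats a ring whose unused count is still
 * short of ring->count - 1 as holding pending work, i.e. the ring is
 * not yet empty.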
5651 */ 5652 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 5653 adapter->tx_timeout_count++; 5654 schedule_work(&adapter->reset_task); 5655 /* return immediately since reset is imminent */ 5656 return; 5657 } 5658 } 5659 5660 /* Force detection of hung controller every watchdog period */ 5661 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5662 } 5663 5664 /* Cause software interrupt to ensure Rx ring is cleaned */ 5665 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5666 u32 eics = 0; 5667 5668 for (i = 0; i < adapter->num_q_vectors; i++) 5669 eics |= adapter->q_vector[i]->eims_value; 5670 wr32(IGC_EICS, eics); 5671 } else { 5672 wr32(IGC_ICS, IGC_ICS_RXDMT0); 5673 } 5674 5675 igc_ptp_tx_hang(adapter); 5676 5677 /* Reset the timer */ 5678 if (!test_bit(__IGC_DOWN, &adapter->state)) { 5679 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 5680 mod_timer(&adapter->watchdog_timer, 5681 round_jiffies(jiffies + HZ)); 5682 else 5683 mod_timer(&adapter->watchdog_timer, 5684 round_jiffies(jiffies + 2 * HZ)); 5685 } 5686 } 5687 5688 /** 5689 * igc_intr_msi - Interrupt Handler 5690 * @irq: interrupt number 5691 * @data: pointer to a network interface device structure 5692 */ 5693 static irqreturn_t igc_intr_msi(int irq, void *data) 5694 { 5695 struct igc_adapter *adapter = data; 5696 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5697 struct igc_hw *hw = &adapter->hw; 5698 /* read ICR disables interrupts using IAM */ 5699 u32 icr = rd32(IGC_ICR); 5700 5701 igc_write_itr(q_vector); 5702 5703 if (icr & IGC_ICR_DRSTA) 5704 schedule_work(&adapter->reset_task); 5705 5706 if (icr & IGC_ICR_DOUTSYNC) { 5707 /* HW is reporting DMA is out of sync */ 5708 adapter->stats.doosync++; 5709 } 5710 5711 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5712 hw->mac.get_link_status = true; 5713 if (!test_bit(__IGC_DOWN, &adapter->state)) 5714 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5715 } 5716 5717 if (icr & IGC_ICR_TS) 5718 igc_tsync_interrupt(adapter); 5719 5720 napi_schedule(&q_vector->napi); 5721 5722 return IRQ_HANDLED; 5723 } 5724 5725 /** 5726 * igc_intr - Legacy Interrupt Handler 5727 * @irq: interrupt number 5728 * @data: pointer to a network interface device structure 5729 */ 5730 static irqreturn_t igc_intr(int irq, void *data) 5731 { 5732 struct igc_adapter *adapter = data; 5733 struct igc_q_vector *q_vector = adapter->q_vector[0]; 5734 struct igc_hw *hw = &adapter->hw; 5735 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No 5736 * need for the IMC write 5737 */ 5738 u32 icr = rd32(IGC_ICR); 5739 5740 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5741 * not set, then the adapter didn't send an interrupt 5742 */ 5743 if (!(icr & IGC_ICR_INT_ASSERTED)) 5744 return IRQ_NONE; 5745 5746 igc_write_itr(q_vector); 5747 5748 if (icr & IGC_ICR_DRSTA) 5749 schedule_work(&adapter->reset_task); 5750 5751 if (icr & IGC_ICR_DOUTSYNC) { 5752 /* HW is reporting DMA is out of sync */ 5753 adapter->stats.doosync++; 5754 } 5755 5756 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 5757 hw->mac.get_link_status = true; 5758 /* guard against interrupt when we're going down */ 5759 if (!test_bit(__IGC_DOWN, &adapter->state)) 5760 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5761 } 5762 5763 if (icr & IGC_ICR_TS) 5764 igc_tsync_interrupt(adapter); 5765 5766 napi_schedule(&q_vector->napi); 5767 5768 return IRQ_HANDLED; 5769 } 5770 5771 static void igc_free_irq(struct igc_adapter *adapter) 5772 { 5773 if (adapter->msix_entries) { 5774 int vector = 0, i; 5775 5776 free_irq(adapter->msix_entries[vector++].vector, adapter); 5777 5778 for (i = 0; i < adapter->num_q_vectors; i++) 5779 free_irq(adapter->msix_entries[vector++].vector, 5780 adapter->q_vector[i]); 5781 } else { 5782 free_irq(adapter->pdev->irq, adapter); 5783 } 5784 } 5785 5786 /** 5787 * igc_request_irq - initialize interrupts 5788 * @adapter: Pointer to adapter structure 5789 * 5790 * Attempts to configure interrupts using the best available 5791 * capabilities of the hardware and kernel. 5792 */ 5793 static int igc_request_irq(struct igc_adapter *adapter) 5794 { 5795 struct net_device *netdev = adapter->netdev; 5796 struct pci_dev *pdev = adapter->pdev; 5797 int err = 0; 5798 5799 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5800 err = igc_request_msix(adapter); 5801 if (!err) 5802 goto request_done; 5803 /* fall back to MSI */ 5804 igc_free_all_tx_resources(adapter); 5805 igc_free_all_rx_resources(adapter); 5806 5807 igc_clear_interrupt_scheme(adapter); 5808 err = igc_init_interrupt_scheme(adapter, false); 5809 if (err) 5810 goto request_done; 5811 igc_setup_all_tx_resources(adapter); 5812 igc_setup_all_rx_resources(adapter); 5813 igc_configure(adapter); 5814 } 5815 5816 igc_assign_vector(adapter->q_vector[0], 0); 5817 5818 if (adapter->flags & IGC_FLAG_HAS_MSI) { 5819 err = request_irq(pdev->irq, &igc_intr_msi, 0, 5820 netdev->name, adapter); 5821 if (!err) 5822 goto request_done; 5823 5824 /* fall back to legacy interrupts */ 5825 igc_reset_interrupt_capability(adapter); 5826 adapter->flags &= ~IGC_FLAG_HAS_MSI; 5827 } 5828 5829 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 5830 netdev->name, adapter); 5831 5832 if (err) 5833 netdev_err(netdev, "Error %d getting interrupt\n", err); 5834 5835 request_done: 5836 return err; 5837 } 5838 5839 /** 5840 * __igc_open - Called when a network interface is made active 5841 * @netdev: network interface device structure 5842 * @resuming: boolean indicating if the device is resuming 5843 * 5844 * Returns 0 on success, negative value on failure 5845 * 5846 * The open entry point is called when a network interface is made 5847 * active by the system (IFF_UP). At this point all resources needed 5848 * for transmit and receive operations are allocated, the interrupt 5849 * handler is registered with the OS, the watchdog timer is started, 5850 * and the stack is notified that the interface is ready. 
5851 */ 5852 static int __igc_open(struct net_device *netdev, bool resuming) 5853 { 5854 struct igc_adapter *adapter = netdev_priv(netdev); 5855 struct pci_dev *pdev = adapter->pdev; 5856 struct igc_hw *hw = &adapter->hw; 5857 int err = 0; 5858 int i = 0; 5859 5860 /* disallow open during test */ 5861 5862 if (test_bit(__IGC_TESTING, &adapter->state)) { 5863 WARN_ON(resuming); 5864 return -EBUSY; 5865 } 5866 5867 if (!resuming) 5868 pm_runtime_get_sync(&pdev->dev); 5869 5870 netif_carrier_off(netdev); 5871 5872 /* allocate transmit descriptors */ 5873 err = igc_setup_all_tx_resources(adapter); 5874 if (err) 5875 goto err_setup_tx; 5876 5877 /* allocate receive descriptors */ 5878 err = igc_setup_all_rx_resources(adapter); 5879 if (err) 5880 goto err_setup_rx; 5881 5882 igc_power_up_link(adapter); 5883 5884 igc_configure(adapter); 5885 5886 err = igc_request_irq(adapter); 5887 if (err) 5888 goto err_req_irq; 5889 5890 /* Notify the stack of the actual queue counts. */ 5891 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 5892 if (err) 5893 goto err_set_queues; 5894 5895 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 5896 if (err) 5897 goto err_set_queues; 5898 5899 clear_bit(__IGC_DOWN, &adapter->state); 5900 5901 for (i = 0; i < adapter->num_q_vectors; i++) 5902 napi_enable(&adapter->q_vector[i]->napi); 5903 5904 /* Clear any pending interrupts. */ 5905 rd32(IGC_ICR); 5906 igc_irq_enable(adapter); 5907 5908 if (!resuming) 5909 pm_runtime_put(&pdev->dev); 5910 5911 netif_tx_start_all_queues(netdev); 5912 5913 /* start the watchdog. */ 5914 hw->mac.get_link_status = true; 5915 schedule_work(&adapter->watchdog_task); 5916 5917 return IGC_SUCCESS; 5918 5919 err_set_queues: 5920 igc_free_irq(adapter); 5921 err_req_irq: 5922 igc_release_hw_control(adapter); 5923 igc_power_down_phy_copper_base(&adapter->hw); 5924 igc_free_all_rx_resources(adapter); 5925 err_setup_rx: 5926 igc_free_all_tx_resources(adapter); 5927 err_setup_tx: 5928 igc_reset(adapter); 5929 if (!resuming) 5930 pm_runtime_put(&pdev->dev); 5931 5932 return err; 5933 } 5934 5935 int igc_open(struct net_device *netdev) 5936 { 5937 return __igc_open(netdev, false); 5938 } 5939 5940 /** 5941 * __igc_close - Disables a network interface 5942 * @netdev: network interface device structure 5943 * @suspending: boolean indicating the device is suspending 5944 * 5945 * Returns 0, this is not allowed to fail 5946 * 5947 * The close entry point is called when an interface is de-activated 5948 * by the OS. The hardware is still under the driver's control, but 5949 * needs to be disabled. A global MAC reset is issued to stop the 5950 * hardware, and all transmit and receive resources are freed. 
5951 */ 5952 static int __igc_close(struct net_device *netdev, bool suspending) 5953 { 5954 struct igc_adapter *adapter = netdev_priv(netdev); 5955 struct pci_dev *pdev = adapter->pdev; 5956 5957 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 5958 5959 if (!suspending) 5960 pm_runtime_get_sync(&pdev->dev); 5961 5962 igc_down(adapter); 5963 5964 igc_release_hw_control(adapter); 5965 5966 igc_free_irq(adapter); 5967 5968 igc_free_all_tx_resources(adapter); 5969 igc_free_all_rx_resources(adapter); 5970 5971 if (!suspending) 5972 pm_runtime_put_sync(&pdev->dev); 5973 5974 return 0; 5975 } 5976 5977 int igc_close(struct net_device *netdev) 5978 { 5979 if (netif_device_present(netdev) || netdev->dismantle) 5980 return __igc_close(netdev, false); 5981 return 0; 5982 } 5983 5984 /** 5985 * igc_ioctl - Access the hwtstamp interface 5986 * @netdev: network interface device structure 5987 * @ifr: interface request data 5988 * @cmd: ioctl command 5989 **/ 5990 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 5991 { 5992 switch (cmd) { 5993 case SIOCGHWTSTAMP: 5994 return igc_ptp_get_ts_config(netdev, ifr); 5995 case SIOCSHWTSTAMP: 5996 return igc_ptp_set_ts_config(netdev, ifr); 5997 default: 5998 return -EOPNOTSUPP; 5999 } 6000 } 6001 6002 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, 6003 bool enable) 6004 { 6005 struct igc_ring *ring; 6006 6007 if (queue < 0 || queue >= adapter->num_tx_queues) 6008 return -EINVAL; 6009 6010 ring = adapter->tx_ring[queue]; 6011 ring->launchtime_enable = enable; 6012 6013 return 0; 6014 } 6015 6016 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 6017 { 6018 struct timespec64 b; 6019 6020 b = ktime_to_timespec64(base_time); 6021 6022 return timespec64_compare(now, &b) > 0; 6023 } 6024 6025 static bool validate_schedule(struct igc_adapter *adapter, 6026 const struct tc_taprio_qopt_offload *qopt) 6027 { 6028 int queue_uses[IGC_MAX_TX_QUEUES] = { }; 6029 struct igc_hw *hw = &adapter->hw; 6030 struct timespec64 now; 6031 size_t n; 6032 6033 if (qopt->cycle_time_extension) 6034 return false; 6035 6036 igc_ptp_read(adapter, &now); 6037 6038 /* If we program the controller's BASET registers with a time 6039 * in the future, it will hold all the packets until that 6040 * time, causing a lot of TX Hangs, so to avoid that, we 6041 * reject schedules that would start in the future. 6042 * Note: Limitation above is no longer in i226. 6043 */ 6044 if (!is_base_time_past(qopt->base_time, &now) && 6045 igc_is_device_id_i225(hw)) 6046 return false; 6047 6048 for (n = 0; n < qopt->num_entries; n++) { 6049 const struct tc_taprio_sched_entry *e, *prev; 6050 int i; 6051 6052 prev = n ? &qopt->entries[n - 1] : NULL; 6053 e = &qopt->entries[n]; 6054 6055 /* i225 only supports "global" frame preemption 6056 * settings. 6057 */ 6058 if (e->command != TC_TAPRIO_CMD_SET_GATES) 6059 return false; 6060 6061 for (i = 0; i < adapter->num_tx_queues; i++) 6062 if (e->gate_mask & BIT(i)) { 6063 queue_uses[i]++; 6064 6065 /* There are limitations: A single queue cannot 6066 * be opened and closed multiple times per cycle 6067 * unless the gate stays open. Check for it. 
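				 * For example, gate masks 0x1, 0x1, 0x2 keep
				 * queue 0 open across consecutive entries and
				 * are accepted, whereas 0x1, 0x2, 0x1 re-opens
				 * queue 0 after it has closed and is rejected.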
6068 */ 6069 if (queue_uses[i] > 1 && 6070 !(prev->gate_mask & BIT(i))) 6071 return false; 6072 } 6073 } 6074 6075 return true; 6076 } 6077 6078 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, 6079 struct tc_etf_qopt_offload *qopt) 6080 { 6081 struct igc_hw *hw = &adapter->hw; 6082 int err; 6083 6084 if (hw->mac.type != igc_i225) 6085 return -EOPNOTSUPP; 6086 6087 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); 6088 if (err) 6089 return err; 6090 6091 return igc_tsn_offload_apply(adapter); 6092 } 6093 6094 static int igc_tsn_clear_schedule(struct igc_adapter *adapter) 6095 { 6096 int i; 6097 6098 adapter->base_time = 0; 6099 adapter->cycle_time = NSEC_PER_SEC; 6100 adapter->taprio_offload_enable = false; 6101 adapter->qbv_config_change_errors = 0; 6102 adapter->qbv_transition = false; 6103 adapter->qbv_count = 0; 6104 6105 for (i = 0; i < adapter->num_tx_queues; i++) { 6106 struct igc_ring *ring = adapter->tx_ring[i]; 6107 6108 ring->start_time = 0; 6109 ring->end_time = NSEC_PER_SEC; 6110 ring->max_sdu = 0; 6111 ring->oper_gate_closed = false; 6112 ring->admin_gate_closed = false; 6113 } 6114 6115 return 0; 6116 } 6117 6118 static int igc_save_qbv_schedule(struct igc_adapter *adapter, 6119 struct tc_taprio_qopt_offload *qopt) 6120 { 6121 bool queue_configured[IGC_MAX_TX_QUEUES] = { }; 6122 struct igc_hw *hw = &adapter->hw; 6123 u32 start_time = 0, end_time = 0; 6124 struct timespec64 now; 6125 size_t n; 6126 int i; 6127 6128 if (qopt->cmd == TAPRIO_CMD_DESTROY) 6129 return igc_tsn_clear_schedule(adapter); 6130 6131 if (qopt->cmd != TAPRIO_CMD_REPLACE) 6132 return -EOPNOTSUPP; 6133 6134 if (qopt->base_time < 0) 6135 return -ERANGE; 6136 6137 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) 6138 return -EALREADY; 6139 6140 if (!validate_schedule(adapter, qopt)) 6141 return -EINVAL; 6142 6143 adapter->cycle_time = qopt->cycle_time; 6144 adapter->base_time = qopt->base_time; 6145 adapter->taprio_offload_enable = true; 6146 6147 igc_ptp_read(adapter, &now); 6148 6149 for (n = 0; n < qopt->num_entries; n++) { 6150 struct tc_taprio_sched_entry *e = &qopt->entries[n]; 6151 6152 end_time += e->interval; 6153 6154 /* If any of the conditions below are true, we need to manually 6155 * control the end time of the cycle. 6156 * 1. Qbv users can specify a cycle time that is not equal 6157 * to the total GCL intervals. Hence, recalculation is 6158 * necessary here to exclude the time interval that 6159 * exceeds the cycle time. 6160 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, 6161 * once the end of the list is reached, it will switch 6162 * to the END_OF_CYCLE state and leave the gates in the 6163 * same state until the next cycle is started. 6164 */ 6165 if (end_time > adapter->cycle_time || 6166 n + 1 == qopt->num_entries) 6167 end_time = adapter->cycle_time; 6168 6169 for (i = 0; i < adapter->num_tx_queues; i++) { 6170 struct igc_ring *ring = adapter->tx_ring[i]; 6171 6172 if (!(e->gate_mask & BIT(i))) 6173 continue; 6174 6175 /* Check whether a queue stays open for more than one 6176 * entry. If so, keep the start and advance the end 6177 * time. 6178 */ 6179 if (!queue_configured[i]) 6180 ring->start_time = start_time; 6181 ring->end_time = end_time; 6182 6183 if (ring->start_time >= adapter->cycle_time) 6184 queue_configured[i] = false; 6185 else 6186 queue_configured[i] = true; 6187 } 6188 6189 start_time += e->interval; 6190 } 6191 6192 /* Check whether a queue gets configured. 
6193 * If not, set the start and end time to be end time. 6194 */ 6195 for (i = 0; i < adapter->num_tx_queues; i++) { 6196 struct igc_ring *ring = adapter->tx_ring[i]; 6197 6198 if (!is_base_time_past(qopt->base_time, &now)) { 6199 ring->admin_gate_closed = false; 6200 } else { 6201 ring->oper_gate_closed = false; 6202 ring->admin_gate_closed = false; 6203 } 6204 6205 if (!queue_configured[i]) { 6206 if (!is_base_time_past(qopt->base_time, &now)) 6207 ring->admin_gate_closed = true; 6208 else 6209 ring->oper_gate_closed = true; 6210 6211 ring->start_time = end_time; 6212 ring->end_time = end_time; 6213 } 6214 } 6215 6216 for (i = 0; i < adapter->num_tx_queues; i++) { 6217 struct igc_ring *ring = adapter->tx_ring[i]; 6218 struct net_device *dev = adapter->netdev; 6219 6220 if (qopt->max_sdu[i]) 6221 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; 6222 else 6223 ring->max_sdu = 0; 6224 } 6225 6226 return 0; 6227 } 6228 6229 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, 6230 struct tc_taprio_qopt_offload *qopt) 6231 { 6232 struct igc_hw *hw = &adapter->hw; 6233 int err; 6234 6235 if (hw->mac.type != igc_i225) 6236 return -EOPNOTSUPP; 6237 6238 err = igc_save_qbv_schedule(adapter, qopt); 6239 if (err) 6240 return err; 6241 6242 return igc_tsn_offload_apply(adapter); 6243 } 6244 6245 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, 6246 bool enable, int idleslope, int sendslope, 6247 int hicredit, int locredit) 6248 { 6249 bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; 6250 struct net_device *netdev = adapter->netdev; 6251 struct igc_ring *ring; 6252 int i; 6253 6254 /* i225 has two sets of credit-based shaper logic. 6255 * Supporting it only on the top two priority queues 6256 */ 6257 if (queue < 0 || queue > 1) 6258 return -EINVAL; 6259 6260 ring = adapter->tx_ring[queue]; 6261 6262 for (i = 0; i < IGC_MAX_SR_QUEUES; i++) 6263 if (adapter->tx_ring[i]) 6264 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; 6265 6266 /* CBS should be enabled on the highest priority queue first in order 6267 * for the CBS algorithm to operate as intended. 
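	 * That is, enable queue 0 before queue 1 and disable queue 1 before
	 * queue 0; the checks below reject any other ordering.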
6268 */ 6269 if (enable) { 6270 if (queue == 1 && !cbs_status[0]) { 6271 netdev_err(netdev, 6272 "Enabling CBS on queue1 before queue0\n"); 6273 return -EINVAL; 6274 } 6275 } else { 6276 if (queue == 0 && cbs_status[1]) { 6277 netdev_err(netdev, 6278 "Disabling CBS on queue0 before queue1\n"); 6279 return -EINVAL; 6280 } 6281 } 6282 6283 ring->cbs_enable = enable; 6284 ring->idleslope = idleslope; 6285 ring->sendslope = sendslope; 6286 ring->hicredit = hicredit; 6287 ring->locredit = locredit; 6288 6289 return 0; 6290 } 6291 6292 static int igc_tsn_enable_cbs(struct igc_adapter *adapter, 6293 struct tc_cbs_qopt_offload *qopt) 6294 { 6295 struct igc_hw *hw = &adapter->hw; 6296 int err; 6297 6298 if (hw->mac.type != igc_i225) 6299 return -EOPNOTSUPP; 6300 6301 if (qopt->queue < 0 || qopt->queue > 1) 6302 return -EINVAL; 6303 6304 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, 6305 qopt->idleslope, qopt->sendslope, 6306 qopt->hicredit, qopt->locredit); 6307 if (err) 6308 return err; 6309 6310 return igc_tsn_offload_apply(adapter); 6311 } 6312 6313 static int igc_tc_query_caps(struct igc_adapter *adapter, 6314 struct tc_query_caps_base *base) 6315 { 6316 struct igc_hw *hw = &adapter->hw; 6317 6318 switch (base->type) { 6319 case TC_SETUP_QDISC_TAPRIO: { 6320 struct tc_taprio_caps *caps = base->caps; 6321 6322 caps->broken_mqprio = true; 6323 6324 if (hw->mac.type == igc_i225) { 6325 caps->supports_queue_max_sdu = true; 6326 caps->gate_mask_per_txq = true; 6327 } 6328 6329 return 0; 6330 } 6331 default: 6332 return -EOPNOTSUPP; 6333 } 6334 } 6335 6336 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, 6337 void *type_data) 6338 { 6339 struct igc_adapter *adapter = netdev_priv(dev); 6340 6341 adapter->tc_setup_type = type; 6342 6343 switch (type) { 6344 case TC_QUERY_CAPS: 6345 return igc_tc_query_caps(adapter, type_data); 6346 case TC_SETUP_QDISC_TAPRIO: 6347 return igc_tsn_enable_qbv_scheduling(adapter, type_data); 6348 6349 case TC_SETUP_QDISC_ETF: 6350 return igc_tsn_enable_launchtime(adapter, type_data); 6351 6352 case TC_SETUP_QDISC_CBS: 6353 return igc_tsn_enable_cbs(adapter, type_data); 6354 6355 default: 6356 return -EOPNOTSUPP; 6357 } 6358 } 6359 6360 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) 6361 { 6362 struct igc_adapter *adapter = netdev_priv(dev); 6363 6364 switch (bpf->command) { 6365 case XDP_SETUP_PROG: 6366 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); 6367 case XDP_SETUP_XSK_POOL: 6368 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, 6369 bpf->xsk.queue_id); 6370 default: 6371 return -EOPNOTSUPP; 6372 } 6373 } 6374 6375 static int igc_xdp_xmit(struct net_device *dev, int num_frames, 6376 struct xdp_frame **frames, u32 flags) 6377 { 6378 struct igc_adapter *adapter = netdev_priv(dev); 6379 int cpu = smp_processor_id(); 6380 struct netdev_queue *nq; 6381 struct igc_ring *ring; 6382 int i, drops; 6383 6384 if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) 6385 return -ENETDOWN; 6386 6387 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6388 return -EINVAL; 6389 6390 ring = igc_xdp_get_tx_ring(adapter, cpu); 6391 nq = txring_txq(ring); 6392 6393 __netif_tx_lock(nq, cpu); 6394 6395 /* Avoid transmit queue timeout since we share it with the slow path */ 6396 txq_trans_cond_update(nq); 6397 6398 drops = 0; 6399 for (i = 0; i < num_frames; i++) { 6400 int err; 6401 struct xdp_frame *xdpf = frames[i]; 6402 6403 err = igc_xdp_init_tx_descriptor(ring, xdpf); 6404 if (err) { 6405 xdp_return_frame_rx_napi(xdpf); 6406 
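			/* count the frame as dropped so the caller learns how
			 * many frames were actually queued for transmission
			 */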
drops++; 6407 } 6408 } 6409 6410 if (flags & XDP_XMIT_FLUSH) 6411 igc_flush_tx_descriptors(ring); 6412 6413 __netif_tx_unlock(nq); 6414 6415 return num_frames - drops; 6416 } 6417 6418 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, 6419 struct igc_q_vector *q_vector) 6420 { 6421 struct igc_hw *hw = &adapter->hw; 6422 u32 eics = 0; 6423 6424 eics |= q_vector->eims_value; 6425 wr32(IGC_EICS, eics); 6426 } 6427 6428 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 6429 { 6430 struct igc_adapter *adapter = netdev_priv(dev); 6431 struct igc_q_vector *q_vector; 6432 struct igc_ring *ring; 6433 6434 if (test_bit(__IGC_DOWN, &adapter->state)) 6435 return -ENETDOWN; 6436 6437 if (!igc_xdp_is_enabled(adapter)) 6438 return -ENXIO; 6439 6440 if (queue_id >= adapter->num_rx_queues) 6441 return -EINVAL; 6442 6443 ring = adapter->rx_ring[queue_id]; 6444 6445 if (!ring->xsk_pool) 6446 return -ENXIO; 6447 6448 q_vector = adapter->q_vector[queue_id]; 6449 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 6450 igc_trigger_rxtxq_interrupt(adapter, q_vector); 6451 6452 return 0; 6453 } 6454 6455 static const struct net_device_ops igc_netdev_ops = { 6456 .ndo_open = igc_open, 6457 .ndo_stop = igc_close, 6458 .ndo_start_xmit = igc_xmit_frame, 6459 .ndo_set_rx_mode = igc_set_rx_mode, 6460 .ndo_set_mac_address = igc_set_mac, 6461 .ndo_change_mtu = igc_change_mtu, 6462 .ndo_tx_timeout = igc_tx_timeout, 6463 .ndo_get_stats64 = igc_get_stats64, 6464 .ndo_fix_features = igc_fix_features, 6465 .ndo_set_features = igc_set_features, 6466 .ndo_features_check = igc_features_check, 6467 .ndo_eth_ioctl = igc_ioctl, 6468 .ndo_setup_tc = igc_setup_tc, 6469 .ndo_bpf = igc_bpf, 6470 .ndo_xdp_xmit = igc_xdp_xmit, 6471 .ndo_xsk_wakeup = igc_xsk_wakeup, 6472 }; 6473 6474 /* PCIe configuration access */ 6475 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6476 { 6477 struct igc_adapter *adapter = hw->back; 6478 6479 pci_read_config_word(adapter->pdev, reg, value); 6480 } 6481 6482 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6483 { 6484 struct igc_adapter *adapter = hw->back; 6485 6486 pci_write_config_word(adapter->pdev, reg, *value); 6487 } 6488 6489 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6490 { 6491 struct igc_adapter *adapter = hw->back; 6492 6493 if (!pci_is_pcie(adapter->pdev)) 6494 return -IGC_ERR_CONFIG; 6495 6496 pcie_capability_read_word(adapter->pdev, reg, value); 6497 6498 return IGC_SUCCESS; 6499 } 6500 6501 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6502 { 6503 struct igc_adapter *adapter = hw->back; 6504 6505 if (!pci_is_pcie(adapter->pdev)) 6506 return -IGC_ERR_CONFIG; 6507 6508 pcie_capability_write_word(adapter->pdev, reg, *value); 6509 6510 return IGC_SUCCESS; 6511 } 6512 6513 u32 igc_rd32(struct igc_hw *hw, u32 reg) 6514 { 6515 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 6516 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 6517 u32 value = 0; 6518 6519 if (IGC_REMOVED(hw_addr)) 6520 return ~value; 6521 6522 value = readl(&hw_addr[reg]); 6523 6524 /* reads should not return all F's */ 6525 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 6526 struct net_device *netdev = igc->netdev; 6527 6528 hw->hw_addr = NULL; 6529 netif_device_detach(netdev); 6530 netdev_err(netdev, "PCIe link lost, device now detached\n"); 6531 WARN(pci_device_is_present(igc->pdev), 6532 "igc: Failed to read reg 0x%x!\n", reg); 6533 } 6534 6535 return value; 6536 } 6537 6538 /* Mapping HW RSS Type 
to enum xdp_rss_hash_type */
static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
	[IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
	[IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP,
	[IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4,
	[IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP,
	[IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
	[IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6,
	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
	[IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP,
	[IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP,
	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
	[10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
	[11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
	[12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
	[13] = XDP_RSS_TYPE_NONE,
	[14] = XDP_RSS_TYPE_NONE,
	[15] = XDP_RSS_TYPE_NONE,
};

static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
			   enum xdp_rss_hash_type *rss_type)
{
	const struct igc_xdp_buff *ctx = (void *)_ctx;

	if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
		return -ENODATA;

	*hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
	*rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];

	return 0;
}

static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct igc_xdp_buff *ctx = (void *)_ctx;

	if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
		*timestamp = ctx->rx_ts;

		return 0;
	}

	return -ENODATA;
}

static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
	.xmo_rx_hash = igc_xdp_rx_hash,
	.xmo_rx_timestamp = igc_xdp_rx_timestamp,
};

static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
{
	struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
						   hrtimer);
	unsigned int i;

	adapter->qbv_transition = true;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *tx_ring = adapter->tx_ring[i];

		if (tx_ring->admin_gate_closed) {
			tx_ring->admin_gate_closed = false;
			tx_ring->oper_gate_closed = true;
		} else {
			tx_ring->oper_gate_closed = false;
		}
	}
	adapter->qbv_transition = false;
	return HRTIMER_NORESTART;
}

/**
 * igc_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igc_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring the adapter private structure,
 * and a hardware reset occur.
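 * On success the net device is registered with the kernel; igc_remove()
 * reverses this initialization.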
6621 */ 6622 static int igc_probe(struct pci_dev *pdev, 6623 const struct pci_device_id *ent) 6624 { 6625 struct igc_adapter *adapter; 6626 struct net_device *netdev; 6627 struct igc_hw *hw; 6628 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 6629 int err; 6630 6631 err = pci_enable_device_mem(pdev); 6632 if (err) 6633 return err; 6634 6635 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 6636 if (err) { 6637 dev_err(&pdev->dev, 6638 "No usable DMA configuration, aborting\n"); 6639 goto err_dma; 6640 } 6641 6642 err = pci_request_mem_regions(pdev, igc_driver_name); 6643 if (err) 6644 goto err_pci_reg; 6645 6646 err = pci_enable_ptm(pdev, NULL); 6647 if (err < 0) 6648 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); 6649 6650 pci_set_master(pdev); 6651 6652 err = -ENOMEM; 6653 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 6654 IGC_MAX_TX_QUEUES); 6655 6656 if (!netdev) 6657 goto err_alloc_etherdev; 6658 6659 SET_NETDEV_DEV(netdev, &pdev->dev); 6660 6661 pci_set_drvdata(pdev, netdev); 6662 adapter = netdev_priv(netdev); 6663 adapter->netdev = netdev; 6664 adapter->pdev = pdev; 6665 hw = &adapter->hw; 6666 hw->back = adapter; 6667 adapter->port_num = hw->bus.func; 6668 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 6669 6670 err = pci_save_state(pdev); 6671 if (err) 6672 goto err_ioremap; 6673 6674 err = -EIO; 6675 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 6676 pci_resource_len(pdev, 0)); 6677 if (!adapter->io_addr) 6678 goto err_ioremap; 6679 6680 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 6681 hw->hw_addr = adapter->io_addr; 6682 6683 netdev->netdev_ops = &igc_netdev_ops; 6684 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; 6685 igc_ethtool_set_ops(netdev); 6686 netdev->watchdog_timeo = 5 * HZ; 6687 6688 netdev->mem_start = pci_resource_start(pdev, 0); 6689 netdev->mem_end = pci_resource_end(pdev, 0); 6690 6691 /* PCI config space info */ 6692 hw->vendor_id = pdev->vendor; 6693 hw->device_id = pdev->device; 6694 hw->revision_id = pdev->revision; 6695 hw->subsystem_vendor_id = pdev->subsystem_vendor; 6696 hw->subsystem_device_id = pdev->subsystem_device; 6697 6698 /* Copy the default MAC and PHY function pointers */ 6699 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 6700 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 6701 6702 /* Initialize skew-specific constants */ 6703 err = ei->get_invariants(hw); 6704 if (err) 6705 goto err_sw_init; 6706 6707 /* Add supported features to the features list*/ 6708 netdev->features |= NETIF_F_SG; 6709 netdev->features |= NETIF_F_TSO; 6710 netdev->features |= NETIF_F_TSO6; 6711 netdev->features |= NETIF_F_TSO_ECN; 6712 netdev->features |= NETIF_F_RXHASH; 6713 netdev->features |= NETIF_F_RXCSUM; 6714 netdev->features |= NETIF_F_HW_CSUM; 6715 netdev->features |= NETIF_F_SCTP_CRC; 6716 netdev->features |= NETIF_F_HW_TC; 6717 6718 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ 6719 NETIF_F_GSO_GRE_CSUM | \ 6720 NETIF_F_GSO_IPXIP4 | \ 6721 NETIF_F_GSO_IPXIP6 | \ 6722 NETIF_F_GSO_UDP_TUNNEL | \ 6723 NETIF_F_GSO_UDP_TUNNEL_CSUM) 6724 6725 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; 6726 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; 6727 6728 /* setup the private structure */ 6729 err = igc_sw_init(adapter); 6730 if (err) 6731 goto err_sw_init; 6732 6733 /* copy netdev features into list of user selectable features */ 6734 netdev->hw_features |= NETIF_F_NTUPLE; 6735 netdev->hw_features |= 
NETIF_F_HW_VLAN_CTAG_TX; 6736 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 6737 netdev->hw_features |= netdev->features; 6738 6739 netdev->features |= NETIF_F_HIGHDMA; 6740 6741 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; 6742 netdev->mpls_features |= NETIF_F_HW_CSUM; 6743 netdev->hw_enc_features |= netdev->vlan_features; 6744 6745 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 6746 NETDEV_XDP_ACT_XSK_ZEROCOPY; 6747 6748 /* MTU range: 68 - 9216 */ 6749 netdev->min_mtu = ETH_MIN_MTU; 6750 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; 6751 6752 /* before reading the NVM, reset the controller to put the device in a 6753 * known good starting state 6754 */ 6755 hw->mac.ops.reset_hw(hw); 6756 6757 if (igc_get_flash_presence_i225(hw)) { 6758 if (hw->nvm.ops.validate(hw) < 0) { 6759 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); 6760 err = -EIO; 6761 goto err_eeprom; 6762 } 6763 } 6764 6765 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { 6766 /* copy the MAC address out of the NVM */ 6767 if (hw->mac.ops.read_mac_addr(hw)) 6768 dev_err(&pdev->dev, "NVM Read Error\n"); 6769 } 6770 6771 eth_hw_addr_set(netdev, hw->mac.addr); 6772 6773 if (!is_valid_ether_addr(netdev->dev_addr)) { 6774 dev_err(&pdev->dev, "Invalid MAC Address\n"); 6775 err = -EIO; 6776 goto err_eeprom; 6777 } 6778 6779 /* configure RXPBSIZE and TXPBSIZE */ 6780 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); 6781 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); 6782 6783 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); 6784 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); 6785 6786 INIT_WORK(&adapter->reset_task, igc_reset_task); 6787 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); 6788 6789 hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6790 adapter->hrtimer.function = &igc_qbv_scheduling_timer; 6791 6792 /* Initialize link properties that are user-changeable */ 6793 adapter->fc_autoneg = true; 6794 hw->mac.autoneg = true; 6795 hw->phy.autoneg_advertised = 0xaf; 6796 6797 hw->fc.requested_mode = igc_fc_default; 6798 hw->fc.current_mode = igc_fc_default; 6799 6800 /* By default, support wake on port A */ 6801 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; 6802 6803 /* initialize the wol settings based on the eeprom settings */ 6804 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) 6805 adapter->wol |= IGC_WUFC_MAG; 6806 6807 device_set_wakeup_enable(&adapter->pdev->dev, 6808 adapter->flags & IGC_FLAG_WOL_SUPPORTED); 6809 6810 igc_ptp_init(adapter); 6811 6812 igc_tsn_clear_schedule(adapter); 6813 6814 /* reset the hardware with the new settings */ 6815 igc_reset(adapter); 6816 6817 /* let the f/w know that the h/w is now under the control of the 6818 * driver. 
6819 */ 6820 igc_get_hw_control(adapter); 6821 6822 strncpy(netdev->name, "eth%d", IFNAMSIZ); 6823 err = register_netdev(netdev); 6824 if (err) 6825 goto err_register; 6826 6827 /* carrier off reporting is important to ethtool even BEFORE open */ 6828 netif_carrier_off(netdev); 6829 6830 /* Check if Media Autosense is enabled */ 6831 adapter->ei = *ei; 6832 6833 /* print pcie link status and MAC address */ 6834 pcie_print_link_status(pdev); 6835 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); 6836 6837 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); 6838 /* Disable EEE for internal PHY devices */ 6839 hw->dev_spec._base.eee_enable = false; 6840 adapter->flags &= ~IGC_FLAG_EEE; 6841 igc_set_eee_i225(hw, false, false, false); 6842 6843 pm_runtime_put_noidle(&pdev->dev); 6844 6845 return 0; 6846 6847 err_register: 6848 igc_release_hw_control(adapter); 6849 err_eeprom: 6850 if (!igc_check_reset_block(hw)) 6851 igc_reset_phy(hw); 6852 err_sw_init: 6853 igc_clear_interrupt_scheme(adapter); 6854 iounmap(adapter->io_addr); 6855 err_ioremap: 6856 free_netdev(netdev); 6857 err_alloc_etherdev: 6858 pci_release_mem_regions(pdev); 6859 err_pci_reg: 6860 err_dma: 6861 pci_disable_device(pdev); 6862 return err; 6863 } 6864 6865 /** 6866 * igc_remove - Device Removal Routine 6867 * @pdev: PCI device information struct 6868 * 6869 * igc_remove is called by the PCI subsystem to alert the driver 6870 * that it should release a PCI device. This could be caused by a 6871 * Hot-Plug event, or because the driver is going to be removed from 6872 * memory. 6873 */ 6874 static void igc_remove(struct pci_dev *pdev) 6875 { 6876 struct net_device *netdev = pci_get_drvdata(pdev); 6877 struct igc_adapter *adapter = netdev_priv(netdev); 6878 6879 pm_runtime_get_noresume(&pdev->dev); 6880 6881 igc_flush_nfc_rules(adapter); 6882 6883 igc_ptp_stop(adapter); 6884 6885 pci_disable_ptm(pdev); 6886 pci_clear_master(pdev); 6887 6888 set_bit(__IGC_DOWN, &adapter->state); 6889 6890 del_timer_sync(&adapter->watchdog_timer); 6891 del_timer_sync(&adapter->phy_info_timer); 6892 6893 cancel_work_sync(&adapter->reset_task); 6894 cancel_work_sync(&adapter->watchdog_task); 6895 hrtimer_cancel(&adapter->hrtimer); 6896 6897 /* Release control of h/w to f/w. If f/w is AMT enabled, this 6898 * would have already happened in close and is redundant. 6899 */ 6900 igc_release_hw_control(adapter); 6901 unregister_netdev(netdev); 6902 6903 igc_clear_interrupt_scheme(adapter); 6904 pci_iounmap(pdev, adapter->io_addr); 6905 pci_release_mem_regions(pdev); 6906 6907 free_netdev(netdev); 6908 6909 pci_disable_device(pdev); 6910 } 6911 6912 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, 6913 bool runtime) 6914 { 6915 struct net_device *netdev = pci_get_drvdata(pdev); 6916 struct igc_adapter *adapter = netdev_priv(netdev); 6917 u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol; 6918 struct igc_hw *hw = &adapter->hw; 6919 u32 ctrl, rctl, status; 6920 bool wake; 6921 6922 rtnl_lock(); 6923 netif_device_detach(netdev); 6924 6925 if (netif_running(netdev)) 6926 __igc_close(netdev, true); 6927 6928 igc_ptp_suspend(adapter); 6929 6930 igc_clear_interrupt_scheme(adapter); 6931 rtnl_unlock(); 6932 6933 status = rd32(IGC_STATUS); 6934 if (status & IGC_STATUS_LU) 6935 wufc &= ~IGC_WUFC_LNKC; 6936 6937 if (wufc) { 6938 igc_setup_rctl(adapter); 6939 igc_set_rx_mode(netdev); 6940 6941 /* turn on all-multi mode if wake on multicast is enabled */ 6942 if (wufc & IGC_WUFC_MC) { 6943 rctl = rd32(IGC_RCTL); 6944 rctl |= IGC_RCTL_MPE; 6945 wr32(IGC_RCTL, rctl); 6946 } 6947 6948 ctrl = rd32(IGC_CTRL); 6949 ctrl |= IGC_CTRL_ADVD3WUC; 6950 wr32(IGC_CTRL, ctrl); 6951 6952 /* Allow time for pending master requests to run */ 6953 igc_disable_pcie_master(hw); 6954 6955 wr32(IGC_WUC, IGC_WUC_PME_EN); 6956 wr32(IGC_WUFC, wufc); 6957 } else { 6958 wr32(IGC_WUC, 0); 6959 wr32(IGC_WUFC, 0); 6960 } 6961 6962 wake = wufc || adapter->en_mng_pt; 6963 if (!wake) 6964 igc_power_down_phy_copper_base(&adapter->hw); 6965 else 6966 igc_power_up_link(adapter); 6967 6968 if (enable_wake) 6969 *enable_wake = wake; 6970 6971 /* Release control of h/w to f/w. If f/w is AMT enabled, this 6972 * would have already happened in close and is redundant. 6973 */ 6974 igc_release_hw_control(adapter); 6975 6976 pci_disable_device(pdev); 6977 6978 return 0; 6979 } 6980 6981 #ifdef CONFIG_PM 6982 static int __maybe_unused igc_runtime_suspend(struct device *dev) 6983 { 6984 return __igc_shutdown(to_pci_dev(dev), NULL, 1); 6985 } 6986 6987 static void igc_deliver_wake_packet(struct net_device *netdev) 6988 { 6989 struct igc_adapter *adapter = netdev_priv(netdev); 6990 struct igc_hw *hw = &adapter->hw; 6991 struct sk_buff *skb; 6992 u32 wupl; 6993 6994 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; 6995 6996 /* WUPM stores only the first 128 bytes of the wake packet. 6997 * Read the packet only if we have the whole thing. 6998 */ 6999 if (wupl == 0 || wupl > IGC_WUPM_BYTES) 7000 return; 7001 7002 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); 7003 if (!skb) 7004 return; 7005 7006 skb_put(skb, wupl); 7007 7008 /* Ensure reads are 32-bit aligned */ 7009 wupl = roundup(wupl, 4); 7010 7011 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); 7012 7013 skb->protocol = eth_type_trans(skb, netdev); 7014 netif_rx(skb); 7015 } 7016 7017 static int __maybe_unused igc_resume(struct device *dev) 7018 { 7019 struct pci_dev *pdev = to_pci_dev(dev); 7020 struct net_device *netdev = pci_get_drvdata(pdev); 7021 struct igc_adapter *adapter = netdev_priv(netdev); 7022 struct igc_hw *hw = &adapter->hw; 7023 u32 err, val; 7024 7025 pci_set_power_state(pdev, PCI_D0); 7026 pci_restore_state(pdev); 7027 pci_save_state(pdev); 7028 7029 if (!pci_device_is_present(pdev)) 7030 return -ENODEV; 7031 err = pci_enable_device_mem(pdev); 7032 if (err) { 7033 netdev_err(netdev, "Cannot enable PCI device from suspend\n"); 7034 return err; 7035 } 7036 pci_set_master(pdev); 7037 7038 pci_enable_wake(pdev, PCI_D3hot, 0); 7039 pci_enable_wake(pdev, PCI_D3cold, 0); 7040 7041 if (igc_init_interrupt_scheme(adapter, true)) { 7042 netdev_err(netdev, "Unable to allocate memory for queues\n"); 7043 return -ENOMEM; 7044 } 7045 7046 igc_reset(adapter); 7047 7048 /* let the f/w know that the h/w is now under the control of the 7049 * driver. 
7050 */ 7051 igc_get_hw_control(adapter); 7052 7053 val = rd32(IGC_WUS); 7054 if (val & WAKE_PKT_WUS) 7055 igc_deliver_wake_packet(netdev); 7056 7057 wr32(IGC_WUS, ~0); 7058 7059 rtnl_lock(); 7060 if (!err && netif_running(netdev)) 7061 err = __igc_open(netdev, true); 7062 7063 if (!err) 7064 netif_device_attach(netdev); 7065 rtnl_unlock(); 7066 7067 return err; 7068 } 7069 7070 static int __maybe_unused igc_runtime_resume(struct device *dev) 7071 { 7072 return igc_resume(dev); 7073 } 7074 7075 static int __maybe_unused igc_suspend(struct device *dev) 7076 { 7077 return __igc_shutdown(to_pci_dev(dev), NULL, 0); 7078 } 7079 7080 static int __maybe_unused igc_runtime_idle(struct device *dev) 7081 { 7082 struct net_device *netdev = dev_get_drvdata(dev); 7083 struct igc_adapter *adapter = netdev_priv(netdev); 7084 7085 if (!igc_has_link(adapter)) 7086 pm_schedule_suspend(dev, MSEC_PER_SEC * 5); 7087 7088 return -EBUSY; 7089 } 7090 #endif /* CONFIG_PM */ 7091 7092 static void igc_shutdown(struct pci_dev *pdev) 7093 { 7094 bool wake; 7095 7096 __igc_shutdown(pdev, &wake, 0); 7097 7098 if (system_state == SYSTEM_POWER_OFF) { 7099 pci_wake_from_d3(pdev, wake); 7100 pci_set_power_state(pdev, PCI_D3hot); 7101 } 7102 } 7103 7104 /** 7105 * igc_io_error_detected - called when PCI error is detected 7106 * @pdev: Pointer to PCI device 7107 * @state: The current PCI connection state 7108 * 7109 * This function is called after a PCI bus error affecting 7110 * this device has been detected. 7111 **/ 7112 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, 7113 pci_channel_state_t state) 7114 { 7115 struct net_device *netdev = pci_get_drvdata(pdev); 7116 struct igc_adapter *adapter = netdev_priv(netdev); 7117 7118 netif_device_detach(netdev); 7119 7120 if (state == pci_channel_io_perm_failure) 7121 return PCI_ERS_RESULT_DISCONNECT; 7122 7123 if (netif_running(netdev)) 7124 igc_down(adapter); 7125 pci_disable_device(pdev); 7126 7127 /* Request a slot reset. */ 7128 return PCI_ERS_RESULT_NEED_RESET; 7129 } 7130 7131 /** 7132 * igc_io_slot_reset - called after the PCI bus has been reset. 7133 * @pdev: Pointer to PCI device 7134 * 7135 * Restart the card from scratch, as if from a cold-boot. Implementation 7136 * resembles the first-half of the igc_resume routine. 7137 **/ 7138 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) 7139 { 7140 struct net_device *netdev = pci_get_drvdata(pdev); 7141 struct igc_adapter *adapter = netdev_priv(netdev); 7142 struct igc_hw *hw = &adapter->hw; 7143 pci_ers_result_t result; 7144 7145 if (pci_enable_device_mem(pdev)) { 7146 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); 7147 result = PCI_ERS_RESULT_DISCONNECT; 7148 } else { 7149 pci_set_master(pdev); 7150 pci_restore_state(pdev); 7151 pci_save_state(pdev); 7152 7153 pci_enable_wake(pdev, PCI_D3hot, 0); 7154 pci_enable_wake(pdev, PCI_D3cold, 0); 7155 7156 /* In case of PCI error, adapter loses its HW address 7157 * so we should re-assign it here. 7158 */ 7159 hw->hw_addr = adapter->io_addr; 7160 7161 igc_reset(adapter); 7162 wr32(IGC_WUS, ~0); 7163 result = PCI_ERS_RESULT_RECOVERED; 7164 } 7165 7166 return result; 7167 } 7168 7169 /** 7170 * igc_io_resume - called when traffic can start to flow again. 7171 * @pdev: Pointer to PCI device 7172 * 7173 * This callback is called when the error recovery driver tells us that 7174 * its OK to resume normal operation. Implementation resembles the 7175 * second-half of the igc_resume routine. 
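 * The netdev was detached in igc_io_error_detected() and is re-attached
 * here after the interface, if it was running, has been re-opened.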
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif

static struct pci_driver igc_driver = {
	.name = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe = igc_probe,
	.remove = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};

/**
 * igc_reinit_queues - reinitialize the interrupt scheme and queues
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative value on failure
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}

/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}

static void igc_disable_rx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 rxdctl;

	rxdctl = rd32(IGC_RXDCTL(idx));
	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGC_RXDCTL_SWFLUSH;
	wr32(IGC_RXDCTL(idx), rxdctl);
}

void igc_disable_rx_ring(struct igc_ring *ring)
{
	igc_disable_rx_ring_hw(ring);
	igc_clean_rx_ring(ring);
}

void igc_enable_rx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_rx_ring(adapter, ring);

	if (ring->xsk_pool)
		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
	else
		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}

static void igc_disable_tx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 txdctl;

	txdctl = rd32(IGC_TXDCTL(idx));
	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
	txdctl |= IGC_TXDCTL_SWFLUSH;
	wr32(IGC_TXDCTL(idx), txdctl);
}

void igc_disable_tx_ring(struct igc_ring *ring)
{
	igc_disable_tx_ring_hw(ring);
	igc_clean_tx_ring(ring);
}

void igc_enable_tx_ring(struct igc_ring *ring)
{
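	/* Unlike igc_enable_rx_ring(), there are no buffers to pre-allocate
	 * here; Tx descriptors are filled in as frames are queued.
	 */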
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_tx_ring(adapter, ring);
}

/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);
	return ret;
}

module_init(igc_init_module);

/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);
/* igc_main.c */