// SPDX-License-Identifier: GPL-2.0
/* Copyright (c)  2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"

#define DRV_VERSION	"0.0.1-k"
#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
char igc_driver_version[] = DRV_VERSION;
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igc_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igc_power_down_link(struct igc_adapter *adapter)
{
	if (adapter->hw.phy.media_type == igc_media_type_copper)
		igc_power_down_phy_copper_base(&adapter->hw);
}

void igc_reset(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (!netif_running(adapter->netdev))
		igc_power_down_link(adapter);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	igc_get_phy_info(hw);
}
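/* Worked example of the flow control thresholds computed in igc_reset()
 * above, assuming IGC_PBA_34K is 34 (packet buffer size in KB), a
 * 1522-byte max_frame_size and a 9216-byte MAX_JUMBO_FRAME_SIZE (values
 * chosen for illustration, not taken from igc_defines.h):
 *
 *   hwm            = (34 << 10) - (1522 + 9216) = 34816 - 10738 = 24078
 *   fc->high_water = 24078 & 0xFFFFFFF0         = 24064
 *   fc->low_water  = 24064 - 16                 = 24048
 *
 * i.e. XOFF is requested once the Rx packet buffer fill level leaves only
 * enough room for roughly one in-flight Tx frame plus one maximum-sized
 * Rx frame.
 */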
/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == igc_media_type_copper)
		igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
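/* Sizing note for the descriptor ring allocations above (illustrative
 * arithmetic, assuming the default ring length of 256 descriptors): both
 * union igc_adv_tx_desc and union igc_adv_rx_desc are 16 bytes, so a
 * 256-entry ring needs 256 * 16 = 4096 bytes, and the ALIGN(..., 4096)
 * simply rounds the DMA allocation up to a whole page.
 */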
/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* set descriptor configuration */
	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}
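/* Illustration of the SRRCTL packet-buffer-size encoding used above,
 * assuming IGC_SRRCTL_BSIZEPKT_SHIFT is 10 (i.e. the field is expressed in
 * 1 KiB units, as on other e1000-family parts):
 *
 *   IGC_RXBUFFER_2048 >> 10 = 2  ->  2 KiB data buffers
 *   IGC_RXBUFFER_3072 >> 10 = 3  ->  3 KiB data buffers (large-buffer mode)
 *
 * The header size goes into a separate field via
 * IGC_SRRCTL_BSIZEHDRSIZE_SHIFT, and one-buffer advanced descriptors are
 * then selected with IGC_SRRCTL_DESCTYPE_ADV_ONEBUF.
 */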
585 */ 586 static void igc_configure_tx_ring(struct igc_adapter *adapter, 587 struct igc_ring *ring) 588 { 589 struct igc_hw *hw = &adapter->hw; 590 int reg_idx = ring->reg_idx; 591 u64 tdba = ring->dma; 592 u32 txdctl = 0; 593 594 /* disable the queue */ 595 wr32(IGC_TXDCTL(reg_idx), 0); 596 wrfl(); 597 mdelay(10); 598 599 wr32(IGC_TDLEN(reg_idx), 600 ring->count * sizeof(union igc_adv_tx_desc)); 601 wr32(IGC_TDBAL(reg_idx), 602 tdba & 0x00000000ffffffffULL); 603 wr32(IGC_TDBAH(reg_idx), tdba >> 32); 604 605 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); 606 wr32(IGC_TDH(reg_idx), 0); 607 writel(0, ring->tail); 608 609 txdctl |= IGC_TX_PTHRESH; 610 txdctl |= IGC_TX_HTHRESH << 8; 611 txdctl |= IGC_TX_WTHRESH << 16; 612 613 txdctl |= IGC_TXDCTL_QUEUE_ENABLE; 614 wr32(IGC_TXDCTL(reg_idx), txdctl); 615 } 616 617 /** 618 * igc_configure_tx - Configure transmit Unit after Reset 619 * @adapter: board private structure 620 * 621 * Configure the Tx unit of the MAC after a reset. 622 */ 623 static void igc_configure_tx(struct igc_adapter *adapter) 624 { 625 int i; 626 627 for (i = 0; i < adapter->num_tx_queues; i++) 628 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); 629 } 630 631 /** 632 * igc_setup_mrqc - configure the multiple receive queue control registers 633 * @adapter: Board private structure 634 */ 635 static void igc_setup_mrqc(struct igc_adapter *adapter) 636 { 637 struct igc_hw *hw = &adapter->hw; 638 u32 j, num_rx_queues; 639 u32 mrqc, rxcsum; 640 u32 rss_key[10]; 641 642 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 643 for (j = 0; j < 10; j++) 644 wr32(IGC_RSSRK(j), rss_key[j]); 645 646 num_rx_queues = adapter->rss_queues; 647 648 if (adapter->rss_indir_tbl_init != num_rx_queues) { 649 for (j = 0; j < IGC_RETA_SIZE; j++) 650 adapter->rss_indir_tbl[j] = 651 (j * num_rx_queues) / IGC_RETA_SIZE; 652 adapter->rss_indir_tbl_init = num_rx_queues; 653 } 654 igc_write_rss_indir_tbl(adapter); 655 656 /* Disable raw packet checksumming so that RSS hash is placed in 657 * descriptor on writeback. No need to enable TCP/UDP/IP checksum 658 * offloads as they are enabled by default 659 */ 660 rxcsum = rd32(IGC_RXCSUM); 661 rxcsum |= IGC_RXCSUM_PCSD; 662 663 /* Enable Receive Checksum Offload for SCTP */ 664 rxcsum |= IGC_RXCSUM_CRCOFL; 665 666 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 667 wr32(IGC_RXCSUM, rxcsum); 668 669 /* Generate RSS hash based on packet types, TCP/UDP 670 * port numbers and/or IPv4/v6 src and dst addresses 671 */ 672 mrqc = IGC_MRQC_RSS_FIELD_IPV4 | 673 IGC_MRQC_RSS_FIELD_IPV4_TCP | 674 IGC_MRQC_RSS_FIELD_IPV6 | 675 IGC_MRQC_RSS_FIELD_IPV6_TCP | 676 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; 677 678 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) 679 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; 680 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) 681 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; 682 683 mrqc |= IGC_MRQC_ENABLE_RSS_MQ; 684 685 wr32(IGC_MRQC, mrqc); 686 } 687 688 /** 689 * igc_setup_rctl - configure the receive control registers 690 * @adapter: Board private structure 691 */ 692 static void igc_setup_rctl(struct igc_adapter *adapter) 693 { 694 struct igc_hw *hw = &adapter->hw; 695 u32 rctl; 696 697 rctl = rd32(IGC_RCTL); 698 699 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 700 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); 701 702 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | 703 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 704 705 /* enable stripping of CRC. Newer features require 706 * that the HW strips the CRC. 
707 */ 708 rctl |= IGC_RCTL_SECRC; 709 710 /* disable store bad packets and clear size bits. */ 711 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); 712 713 /* enable LPE to allow for reception of jumbo frames */ 714 rctl |= IGC_RCTL_LPE; 715 716 /* disable queue 0 to prevent tail write w/o re-config */ 717 wr32(IGC_RXDCTL(0), 0); 718 719 /* This is useful for sniffing bad packets. */ 720 if (adapter->netdev->features & NETIF_F_RXALL) { 721 /* UPE and MPE will be handled by normal PROMISC logic 722 * in set_rx_mode 723 */ 724 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ 725 IGC_RCTL_BAM | /* RX All Bcast Pkts */ 726 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 727 728 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ 729 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ 730 } 731 732 wr32(IGC_RCTL, rctl); 733 } 734 735 /** 736 * igc_setup_tctl - configure the transmit control registers 737 * @adapter: Board private structure 738 */ 739 static void igc_setup_tctl(struct igc_adapter *adapter) 740 { 741 struct igc_hw *hw = &adapter->hw; 742 u32 tctl; 743 744 /* disable queue 0 which icould be enabled by default */ 745 wr32(IGC_TXDCTL(0), 0); 746 747 /* Program the Transmit Control Register */ 748 tctl = rd32(IGC_TCTL); 749 tctl &= ~IGC_TCTL_CT; 750 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | 751 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); 752 753 /* Enable transmits */ 754 tctl |= IGC_TCTL_EN; 755 756 wr32(IGC_TCTL, tctl); 757 } 758 759 /** 760 * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table 761 * @adapter: address of board private structure 762 * @index: Index of the RAR entry which need to be synced with MAC table 763 */ 764 static void igc_rar_set_index(struct igc_adapter *adapter, u32 index) 765 { 766 u8 *addr = adapter->mac_table[index].addr; 767 struct igc_hw *hw = &adapter->hw; 768 u32 rar_low, rar_high; 769 770 /* HW expects these to be in network order when they are plugged 771 * into the registers which are little endian. In order to guarantee 772 * that ordering we need to do an leXX_to_cpup here in order to be 773 * ready for the byteswap that occurs with writel 774 */ 775 rar_low = le32_to_cpup((__le32 *)(addr)); 776 rar_high = le16_to_cpup((__le16 *)(addr + 4)); 777 778 /* Indicate to hardware the Address is Valid. 
/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct igc_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;

	igc_rar_set_index(adapter, 0);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    struct igc_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;
	struct timespec64 ts;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid Tx time available. Invalid times
	 * should have been handled by the upper layers.
	 */
	if (tx_ring->launchtime_enable) {
		ts = ktime_to_timespec64(first->skb->tstamp);
		first->skb->tstamp = ktime_set(0, 0);
		context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
	} else {
		context_desc->launch_time = 0;
	}
}
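/* Note on the launch_time written above: the descriptor field appears to be
 * expressed in 32 ns units within the current second, hence the
 * ts.tv_nsec / 32 conversion. Illustrative example only: a skb->tstamp
 * whose nanosecond part is 1,000,000 (1 ms into the second) is programmed
 * as 31250.
 */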
static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if ((first->protocol == htons(ETH_P_IP) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    (first->protocol == htons(ETH_P_IPV6) &&
		     igc_ipv6_csum_is_sctp(skb))) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: make the queue stop visible to other CPUs before
	 * re-reading the free descriptor count below.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
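/* IGC_SET_FLAG() above relocates a single flag bit from _input to the bit
 * position used by _result without branching: when the destination bit is
 * higher than the source bit it multiplies by the (power-of-two) ratio,
 * otherwise it divides. Illustrative example with made-up bit values: with
 * _flag == BIT(1) and _result == BIT(9), a set flag yields
 * 0x2 * (0x200 / 0x2) = 0x200 and a clear flag yields 0. Both _flag and
 * _result are expected to be single bits for the ratio to be exact.
 */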
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);

	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
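/* Note on the "cmd_type ^ size" writes in igc_tx_map() above: assuming the
 * buffer length occupies the low bits of cmd_type_len and that cmd_type
 * itself never sets any of those bits, XOR is just a cheap way of OR-ing
 * the length into the descriptor, since size never exceeds
 * IGC_MAX_DATA_PER_TXD. Illustrative example, assuming
 * IGC_MAX_DATA_PER_TXD == 32768: a 1514-byte buffer ends up as
 * cmd_type | 1514, and a maximum-sized chunk as cmd_type | 32768.
 */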
static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
			type_tucmd, mss_l4len_idx);

	return 1;
}
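/* Accounting example for igc_tso() above (made-up numbers): a TSO skb with
 * a 54-byte Ethernet+IP+TCP header and 4380 bytes of payload at an MSS of
 * 1460 is reported by the stack as gso_segs = 3, so
 *
 *   first->bytecount = 4434 + (3 - 1) * 54 = 4542
 *
 * which matches the three 1514-byte frames that actually hit the wire,
 * since the header is replicated for every segment after the first.
 * csum_replace_by_diff() removes the payload length from the pseudo-header
 * checksum seeded by the stack, because the hardware re-inserts the
 * per-segment length when it splits the packet.
 */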
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	u32 tx_flags = 0;
	unsigned short f;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_TCPE |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}
/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TS) &&
	    !igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))
		igc_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset += truesize;
#endif
}
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 union igc_adv_rx_desc *rx_desc,
					 unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
		igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
		va += IGC_TS_HDR_LEN;
		size -= IGC_TS_HDR_LEN;
	}

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
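/* Buffer recycling note for the Rx paths above: with PAGE_SIZE < 8192 each
 * receive buffer is half a page (truesize = igc_rx_pg_size() / 2, i.e.
 * 2048 bytes on 4 KiB pages), and "page_offset ^= truesize" simply toggles
 * between the two halves, so one half can be handed to the stack while the
 * other is given back to the hardware. On larger page sizes the offset
 * advances by the aligned buffer size instead and the page is reused until
 * it runs out of room (see IGC_LAST_OFFSET below).
 */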
/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

static inline bool igc_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(igc_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
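/* Sketch of how pagecnt_bias is used above: the driver keeps the page's
 * refcount artificially high and tracks its own share in pagecnt_bias,
 * decrementing the bias each time a buffer is handed toward the stack
 * (igc_get_rx_buffer()). At reuse time,
 *
 *   page_ref_count(page) - pagecnt_bias == 1
 *
 * means that apart from the fragment just handed up no earlier fragment is
 * still held elsewhere, so the other half of the page can safely go back
 * to the hardware. When the bias reaches zero, both the refcount and the
 * bias are bumped by USHRT_MAX in one go, which is far cheaper than a
 * get_page()/put_page() pair per received frame.
 */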
/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely((igc_test_staterr(rx_desc,
				       IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer)
{
	if (igc_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;

	return true;
}
/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
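/* Index handling example for igc_alloc_rx_buffers() above (illustrative
 * numbers): with a 256-entry ring and next_to_use == 250, i starts at
 * 250 - 256 = -6 (held in a u16, so modular arithmetic applies). The loop
 * then only needs "if (!i)" to detect the wrap back to descriptor 0
 * instead of comparing against rx_ring->count on every iteration, and the
 * final "i += rx_ring->count" turns the offset back into a normal ring
 * index before it is stored in next_to_use and written to the tail
 * register.
 */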
igc_alloc_rx_buffers(rx_ring, cleaned_count); 1949 1950 return total_packets; 1951 } 1952 1953 /** 1954 * igc_clean_tx_irq - Reclaim resources after transmit completes 1955 * @q_vector: pointer to q_vector containing needed info 1956 * @napi_budget: Used to determine if we are in netpoll 1957 * 1958 * returns true if ring is completely cleaned 1959 */ 1960 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 1961 { 1962 struct igc_adapter *adapter = q_vector->adapter; 1963 unsigned int total_bytes = 0, total_packets = 0; 1964 unsigned int budget = q_vector->tx.work_limit; 1965 struct igc_ring *tx_ring = q_vector->tx.ring; 1966 unsigned int i = tx_ring->next_to_clean; 1967 struct igc_tx_buffer *tx_buffer; 1968 union igc_adv_tx_desc *tx_desc; 1969 1970 if (test_bit(__IGC_DOWN, &adapter->state)) 1971 return true; 1972 1973 tx_buffer = &tx_ring->tx_buffer_info[i]; 1974 tx_desc = IGC_TX_DESC(tx_ring, i); 1975 i -= tx_ring->count; 1976 1977 do { 1978 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 1979 1980 /* if next_to_watch is not set then there is no work pending */ 1981 if (!eop_desc) 1982 break; 1983 1984 /* prevent any other reads prior to eop_desc */ 1985 smp_rmb(); 1986 1987 /* if DD is not set pending work has not been completed */ 1988 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 1989 break; 1990 1991 /* clear next_to_watch to prevent false hangs */ 1992 tx_buffer->next_to_watch = NULL; 1993 1994 /* update the statistics for this packet */ 1995 total_bytes += tx_buffer->bytecount; 1996 total_packets += tx_buffer->gso_segs; 1997 1998 /* free the skb */ 1999 napi_consume_skb(tx_buffer->skb, napi_budget); 2000 2001 /* unmap skb header data */ 2002 dma_unmap_single(tx_ring->dev, 2003 dma_unmap_addr(tx_buffer, dma), 2004 dma_unmap_len(tx_buffer, len), 2005 DMA_TO_DEVICE); 2006 2007 /* clear tx_buffer data */ 2008 dma_unmap_len_set(tx_buffer, len, 0); 2009 2010 /* clear last DMA location and unmap remaining buffers */ 2011 while (tx_desc != eop_desc) { 2012 tx_buffer++; 2013 tx_desc++; 2014 i++; 2015 if (unlikely(!i)) { 2016 i -= tx_ring->count; 2017 tx_buffer = tx_ring->tx_buffer_info; 2018 tx_desc = IGC_TX_DESC(tx_ring, 0); 2019 } 2020 2021 /* unmap any remaining paged data */ 2022 if (dma_unmap_len(tx_buffer, len)) { 2023 dma_unmap_page(tx_ring->dev, 2024 dma_unmap_addr(tx_buffer, dma), 2025 dma_unmap_len(tx_buffer, len), 2026 DMA_TO_DEVICE); 2027 dma_unmap_len_set(tx_buffer, len, 0); 2028 } 2029 } 2030 2031 /* move us one more past the eop_desc for start of next pkt */ 2032 tx_buffer++; 2033 tx_desc++; 2034 i++; 2035 if (unlikely(!i)) { 2036 i -= tx_ring->count; 2037 tx_buffer = tx_ring->tx_buffer_info; 2038 tx_desc = IGC_TX_DESC(tx_ring, 0); 2039 } 2040 2041 /* issue prefetch for next Tx descriptor */ 2042 prefetch(tx_desc); 2043 2044 /* update budget accounting */ 2045 budget--; 2046 } while (likely(budget)); 2047 2048 netdev_tx_completed_queue(txring_txq(tx_ring), 2049 total_packets, total_bytes); 2050 2051 i += tx_ring->count; 2052 tx_ring->next_to_clean = i; 2053 u64_stats_update_begin(&tx_ring->tx_syncp); 2054 tx_ring->tx_stats.bytes += total_bytes; 2055 tx_ring->tx_stats.packets += total_packets; 2056 u64_stats_update_end(&tx_ring->tx_syncp); 2057 q_vector->tx.total_bytes += total_bytes; 2058 q_vector->tx.total_packets += total_packets; 2059 2060 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 2061 struct igc_hw *hw = &adapter->hw; 2062 2063 /* Detect a transmit hang in hardware, this serializes the 2064 * check 
with the clearing of time_stamp and movement of i 2065 */ 2066 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 2067 if (tx_buffer->next_to_watch && 2068 time_after(jiffies, tx_buffer->time_stamp + 2069 (adapter->tx_timeout_factor * HZ)) && 2070 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { 2071 /* detected Tx unit hang */ 2072 dev_err(tx_ring->dev, 2073 "Detected Tx Unit Hang\n" 2074 " Tx Queue <%d>\n" 2075 " TDH <%x>\n" 2076 " TDT <%x>\n" 2077 " next_to_use <%x>\n" 2078 " next_to_clean <%x>\n" 2079 "buffer_info[next_to_clean]\n" 2080 " time_stamp <%lx>\n" 2081 " next_to_watch <%p>\n" 2082 " jiffies <%lx>\n" 2083 " desc.status <%x>\n", 2084 tx_ring->queue_index, 2085 rd32(IGC_TDH(tx_ring->reg_idx)), 2086 readl(tx_ring->tail), 2087 tx_ring->next_to_use, 2088 tx_ring->next_to_clean, 2089 tx_buffer->time_stamp, 2090 tx_buffer->next_to_watch, 2091 jiffies, 2092 tx_buffer->next_to_watch->wb.status); 2093 netif_stop_subqueue(tx_ring->netdev, 2094 tx_ring->queue_index); 2095 2096 /* we are about to reset, no point in enabling stuff */ 2097 return true; 2098 } 2099 } 2100 2101 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 2102 if (unlikely(total_packets && 2103 netif_carrier_ok(tx_ring->netdev) && 2104 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 2105 /* Make sure that anybody stopping the queue after this 2106 * sees the new next_to_clean. 2107 */ 2108 smp_mb(); 2109 if (__netif_subqueue_stopped(tx_ring->netdev, 2110 tx_ring->queue_index) && 2111 !(test_bit(__IGC_DOWN, &adapter->state))) { 2112 netif_wake_subqueue(tx_ring->netdev, 2113 tx_ring->queue_index); 2114 2115 u64_stats_update_begin(&tx_ring->tx_syncp); 2116 tx_ring->tx_stats.restart_queue++; 2117 u64_stats_update_end(&tx_ring->tx_syncp); 2118 } 2119 } 2120 2121 return !!budget; 2122 } 2123 2124 static void igc_nfc_filter_restore(struct igc_adapter *adapter) 2125 { 2126 struct igc_nfc_filter *rule; 2127 2128 spin_lock(&adapter->nfc_lock); 2129 2130 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) 2131 igc_add_filter(adapter, rule); 2132 2133 spin_unlock(&adapter->nfc_lock); 2134 } 2135 2136 /* If the filter to be added and an already existing filter express 2137 * the same address and address type, it should be possible to only 2138 * override the other configurations, for example the queue to steer 2139 * traffic. 2140 */ 2141 static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry, 2142 const u8 *addr, const u8 flags) 2143 { 2144 if (!(entry->state & IGC_MAC_STATE_IN_USE)) 2145 return true; 2146 2147 if ((entry->state & IGC_MAC_STATE_SRC_ADDR) != 2148 (flags & IGC_MAC_STATE_SRC_ADDR)) 2149 return false; 2150 2151 if (!ether_addr_equal(addr, entry->addr)) 2152 return false; 2153 2154 return true; 2155 } 2156 2157 /* Add a MAC filter for 'addr' directing matching traffic to 'queue', 2158 * 'flags' is used to indicate what kind of match is made, match is by 2159 * default for the destination address, if matching by source address 2160 * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used. 2161 */ 2162 static int igc_add_mac_filter(struct igc_adapter *adapter, 2163 const u8 *addr, const u8 queue) 2164 { 2165 struct igc_hw *hw = &adapter->hw; 2166 int rar_entries = hw->mac.rar_entry_count; 2167 int i; 2168 2169 if (is_zero_ether_addr(addr)) 2170 return -EINVAL; 2171 2172 /* Search for the first empty entry in the MAC table. 2173 * Do not touch entries at the end of the table reserved for the VF MAC 2174 * addresses. 
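	 * Note that igc_mac_entry_can_be_used() also accepts an in-use entry
	 * whose address and DA/SA match type already agree, so re-adding an
	 * existing address simply refreshes its queue assignment instead of
	 * consuming another RAR slot.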
2175 */ 2176 for (i = 0; i < rar_entries; i++) { 2177 if (!igc_mac_entry_can_be_used(&adapter->mac_table[i], 2178 addr, 0)) 2179 continue; 2180 2181 ether_addr_copy(adapter->mac_table[i].addr, addr); 2182 adapter->mac_table[i].queue = queue; 2183 adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE; 2184 2185 igc_rar_set_index(adapter, i); 2186 return i; 2187 } 2188 2189 return -ENOSPC; 2190 } 2191 2192 /* Remove a MAC filter for 'addr' directing matching traffic to 2193 * 'queue', 'flags' is used to indicate what kind of match need to be 2194 * removed, match is by default for the destination address, if 2195 * matching by source address is to be removed the flag 2196 * IGC_MAC_STATE_SRC_ADDR can be used. 2197 */ 2198 static int igc_del_mac_filter(struct igc_adapter *adapter, 2199 const u8 *addr, const u8 queue) 2200 { 2201 struct igc_hw *hw = &adapter->hw; 2202 int rar_entries = hw->mac.rar_entry_count; 2203 int i; 2204 2205 if (is_zero_ether_addr(addr)) 2206 return -EINVAL; 2207 2208 /* Search for matching entry in the MAC table based on given address 2209 * and queue. Do not touch entries at the end of the table reserved 2210 * for the VF MAC addresses. 2211 */ 2212 for (i = 0; i < rar_entries; i++) { 2213 if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE)) 2214 continue; 2215 if (adapter->mac_table[i].state != 0) 2216 continue; 2217 if (adapter->mac_table[i].queue != queue) 2218 continue; 2219 if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) 2220 continue; 2221 2222 /* When a filter for the default address is "deleted", 2223 * we return it to its initial configuration 2224 */ 2225 if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) { 2226 adapter->mac_table[i].state = 2227 IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; 2228 adapter->mac_table[i].queue = 0; 2229 } else { 2230 adapter->mac_table[i].state = 0; 2231 adapter->mac_table[i].queue = 0; 2232 memset(adapter->mac_table[i].addr, 0, ETH_ALEN); 2233 } 2234 2235 igc_rar_set_index(adapter, i); 2236 return 0; 2237 } 2238 2239 return -ENOENT; 2240 } 2241 2242 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 2243 { 2244 struct igc_adapter *adapter = netdev_priv(netdev); 2245 int ret; 2246 2247 ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues); 2248 2249 return min_t(int, ret, 0); 2250 } 2251 2252 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) 2253 { 2254 struct igc_adapter *adapter = netdev_priv(netdev); 2255 2256 igc_del_mac_filter(adapter, addr, adapter->num_rx_queues); 2257 2258 return 0; 2259 } 2260 2261 /** 2262 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2263 * @netdev: network interface device structure 2264 * 2265 * The set_rx_mode entry point is called whenever the unicast or multicast 2266 * address lists or the network interface flags are updated. This routine is 2267 * responsible for configuring the hardware for proper unicast, multicast, 2268 * promiscuous mode, and all-multi behavior. 
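 * If the multicast table or the available RAR entries overflow, the code
 * below falls back to all-multicast (MPE) or unicast promiscuous (UPE)
 * mode respectively, rather than silently dropping addresses.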
 */
static void igc_set_rx_mode(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	if (netdev->flags & IFF_PROMISC) {
		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IGC_RCTL_MPE;
		} else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igc_write_mc_addr_list(netdev);
			if (count < 0)
				rctl |= IGC_RCTL_MPE;
		}
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
		rctl |= IGC_RCTL_UPE;

	/* update state of unicast and multicast */
	rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
	wr32(IGC_RCTL, rctl);

#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
		rlpml = IGC_MAX_FRAME_BUILD_SKB;
#endif
	wr32(IGC_RLPML, rlpml);
}

/**
 * igc_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 */
static void igc_configure(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i = 0;

	igc_get_hw_control(adapter);
	igc_set_rx_mode(netdev);

	igc_setup_tctl(adapter);
	igc_setup_mrqc(adapter);
	igc_setup_rctl(adapter);

	igc_nfc_filter_restore(adapter);
	igc_configure_tx(adapter);
	igc_configure_rx(adapter);

	igc_rx_fifo_flush_base(&adapter->hw);

	/* call igc_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];

		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
	}
}

/**
 * igc_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be multiple of 8
 *
 * The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
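 * As an example of the index/offset math used by igc_assign_vector():
 * Rx queue 2 maps to IVAR register 1 bits 7:0, Tx queue 2 to bits 15:8,
 * Rx queue 3 to bits 23:16 and Tx queue 3 to bits 31:24, each entry being
 * the MSI-X vector number OR'd with IGC_IVAR_VALID.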
2355 */ 2356 static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 2357 int index, int offset) 2358 { 2359 u32 ivar = array_rd32(IGC_IVAR0, index); 2360 2361 /* clear any bits that are currently set */ 2362 ivar &= ~((u32)0xFF << offset); 2363 2364 /* write vector and valid bit */ 2365 ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 2366 2367 array_wr32(IGC_IVAR0, index, ivar); 2368 } 2369 2370 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 2371 { 2372 struct igc_adapter *adapter = q_vector->adapter; 2373 struct igc_hw *hw = &adapter->hw; 2374 int rx_queue = IGC_N0_QUEUE; 2375 int tx_queue = IGC_N0_QUEUE; 2376 2377 if (q_vector->rx.ring) 2378 rx_queue = q_vector->rx.ring->reg_idx; 2379 if (q_vector->tx.ring) 2380 tx_queue = q_vector->tx.ring->reg_idx; 2381 2382 switch (hw->mac.type) { 2383 case igc_i225: 2384 if (rx_queue > IGC_N0_QUEUE) 2385 igc_write_ivar(hw, msix_vector, 2386 rx_queue >> 1, 2387 (rx_queue & 0x1) << 4); 2388 if (tx_queue > IGC_N0_QUEUE) 2389 igc_write_ivar(hw, msix_vector, 2390 tx_queue >> 1, 2391 ((tx_queue & 0x1) << 4) + 8); 2392 q_vector->eims_value = BIT(msix_vector); 2393 break; 2394 default: 2395 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 2396 break; 2397 } 2398 2399 /* add q_vector eims value to global eims_enable_mask */ 2400 adapter->eims_enable_mask |= q_vector->eims_value; 2401 2402 /* configure q_vector to set itr on first interrupt */ 2403 q_vector->set_itr = 1; 2404 } 2405 2406 /** 2407 * igc_configure_msix - Configure MSI-X hardware 2408 * @adapter: Pointer to adapter structure 2409 * 2410 * igc_configure_msix sets up the hardware to properly 2411 * generate MSI-X interrupts. 2412 */ 2413 static void igc_configure_msix(struct igc_adapter *adapter) 2414 { 2415 struct igc_hw *hw = &adapter->hw; 2416 int i, vector = 0; 2417 u32 tmp; 2418 2419 adapter->eims_enable_mask = 0; 2420 2421 /* set vector for other causes, i.e. link changes */ 2422 switch (hw->mac.type) { 2423 case igc_i225: 2424 /* Turn on MSI-X capability first, or our settings 2425 * won't stick. And it will take days to debug. 
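	 * Vector 0 is reserved below for the "other" causes (link status
	 * changes and the like); queue vectors are then assigned starting
	 * at 1, which matches the msix_entries layout requested later by
	 * igc_request_msix().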
2426 */ 2427 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 2428 IGC_GPIE_PBA | IGC_GPIE_EIAME | 2429 IGC_GPIE_NSICR); 2430 2431 /* enable msix_other interrupt */ 2432 adapter->eims_other = BIT(vector); 2433 tmp = (vector++ | IGC_IVAR_VALID) << 8; 2434 2435 wr32(IGC_IVAR_MISC, tmp); 2436 break; 2437 default: 2438 /* do nothing, since nothing else supports MSI-X */ 2439 break; 2440 } /* switch (hw->mac.type) */ 2441 2442 adapter->eims_enable_mask |= adapter->eims_other; 2443 2444 for (i = 0; i < adapter->num_q_vectors; i++) 2445 igc_assign_vector(adapter->q_vector[i], vector++); 2446 2447 wrfl(); 2448 } 2449 2450 /** 2451 * igc_irq_enable - Enable default interrupt generation settings 2452 * @adapter: board private structure 2453 */ 2454 static void igc_irq_enable(struct igc_adapter *adapter) 2455 { 2456 struct igc_hw *hw = &adapter->hw; 2457 2458 if (adapter->msix_entries) { 2459 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 2460 u32 regval = rd32(IGC_EIAC); 2461 2462 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 2463 regval = rd32(IGC_EIAM); 2464 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 2465 wr32(IGC_EIMS, adapter->eims_enable_mask); 2466 wr32(IGC_IMS, ims); 2467 } else { 2468 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 2469 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); 2470 } 2471 } 2472 2473 /** 2474 * igc_irq_disable - Mask off interrupt generation on the NIC 2475 * @adapter: board private structure 2476 */ 2477 static void igc_irq_disable(struct igc_adapter *adapter) 2478 { 2479 struct igc_hw *hw = &adapter->hw; 2480 2481 if (adapter->msix_entries) { 2482 u32 regval = rd32(IGC_EIAM); 2483 2484 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); 2485 wr32(IGC_EIMC, adapter->eims_enable_mask); 2486 regval = rd32(IGC_EIAC); 2487 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); 2488 } 2489 2490 wr32(IGC_IAM, 0); 2491 wr32(IGC_IMC, ~0); 2492 wrfl(); 2493 2494 if (adapter->msix_entries) { 2495 int vector = 0, i; 2496 2497 synchronize_irq(adapter->msix_entries[vector++].vector); 2498 2499 for (i = 0; i < adapter->num_q_vectors; i++) 2500 synchronize_irq(adapter->msix_entries[vector++].vector); 2501 } else { 2502 synchronize_irq(adapter->pdev->irq); 2503 } 2504 } 2505 2506 void igc_set_flag_queue_pairs(struct igc_adapter *adapter, 2507 const u32 max_rss_queues) 2508 { 2509 /* Determine if we need to pair queues. */ 2510 /* If rss_queues > half of max_rss_queues, pair the queues in 2511 * order to conserve interrupts due to limited supply. 2512 */ 2513 if (adapter->rss_queues > (max_rss_queues / 2)) 2514 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 2515 else 2516 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; 2517 } 2518 2519 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) 2520 { 2521 unsigned int max_rss_queues; 2522 2523 /* Determine the maximum number of RSS queues supported. */ 2524 max_rss_queues = IGC_MAX_RX_QUEUES; 2525 2526 return max_rss_queues; 2527 } 2528 2529 static void igc_init_queue_configuration(struct igc_adapter *adapter) 2530 { 2531 u32 max_rss_queues; 2532 2533 max_rss_queues = igc_get_max_rss_queues(adapter); 2534 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); 2535 2536 igc_set_flag_queue_pairs(adapter, max_rss_queues); 2537 } 2538 2539 /** 2540 * igc_reset_q_vector - Reset config for interrupt vector 2541 * @adapter: board private structure to initialize 2542 * @v_idx: Index of vector to be reset 2543 * 2544 * If NAPI is enabled it will delete any references to the 2545 * NAPI struct. 
This is preparation for igc_free_q_vector. 2546 */ 2547 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 2548 { 2549 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 2550 2551 /* if we're coming from igc_set_interrupt_capability, the vectors are 2552 * not yet allocated 2553 */ 2554 if (!q_vector) 2555 return; 2556 2557 if (q_vector->tx.ring) 2558 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 2559 2560 if (q_vector->rx.ring) 2561 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 2562 2563 netif_napi_del(&q_vector->napi); 2564 } 2565 2566 /** 2567 * igc_free_q_vector - Free memory allocated for specific interrupt vector 2568 * @adapter: board private structure to initialize 2569 * @v_idx: Index of vector to be freed 2570 * 2571 * This function frees the memory allocated to the q_vector. 2572 */ 2573 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 2574 { 2575 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 2576 2577 adapter->q_vector[v_idx] = NULL; 2578 2579 /* igc_get_stats64() might access the rings on this vector, 2580 * we must wait a grace period before freeing it. 2581 */ 2582 if (q_vector) 2583 kfree_rcu(q_vector, rcu); 2584 } 2585 2586 /** 2587 * igc_free_q_vectors - Free memory allocated for interrupt vectors 2588 * @adapter: board private structure to initialize 2589 * 2590 * This function frees the memory allocated to the q_vectors. In addition if 2591 * NAPI is enabled it will delete any references to the NAPI struct prior 2592 * to freeing the q_vector. 2593 */ 2594 static void igc_free_q_vectors(struct igc_adapter *adapter) 2595 { 2596 int v_idx = adapter->num_q_vectors; 2597 2598 adapter->num_tx_queues = 0; 2599 adapter->num_rx_queues = 0; 2600 adapter->num_q_vectors = 0; 2601 2602 while (v_idx--) { 2603 igc_reset_q_vector(adapter, v_idx); 2604 igc_free_q_vector(adapter, v_idx); 2605 } 2606 } 2607 2608 /** 2609 * igc_update_itr - update the dynamic ITR value based on statistics 2610 * @q_vector: pointer to q_vector 2611 * @ring_container: ring info to update the itr for 2612 * 2613 * Stores a new ITR value based on packets and byte 2614 * counts during the last interrupt. The advantage of per interrupt 2615 * computation is faster updates and more accurate ITR for the current 2616 * traffic pattern. Constants in this function were computed 2617 * based on theoretical maximum wire speed and thresholds were set based 2618 * on testing data as well as attempting to minimize response time 2619 * while increasing bulk throughput. 2620 * NOTE: These calculations are only valid when operating in a single- 2621 * queue environment. 
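 * As a rough example of the thresholds used below: an interrupt window of
 * 20 packets totalling 16000 bytes (800 bytes/packet) keeps a ring in
 * low_latency, a TSO-heavy window averaging more than 8000 bytes/packet
 * pushes it to bulk_latency, and one or two small frames totalling less
 * than 512 bytes drop it back toward lowest_latency.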
2622 */ 2623 static void igc_update_itr(struct igc_q_vector *q_vector, 2624 struct igc_ring_container *ring_container) 2625 { 2626 unsigned int packets = ring_container->total_packets; 2627 unsigned int bytes = ring_container->total_bytes; 2628 u8 itrval = ring_container->itr; 2629 2630 /* no packets, exit with status unchanged */ 2631 if (packets == 0) 2632 return; 2633 2634 switch (itrval) { 2635 case lowest_latency: 2636 /* handle TSO and jumbo frames */ 2637 if (bytes / packets > 8000) 2638 itrval = bulk_latency; 2639 else if ((packets < 5) && (bytes > 512)) 2640 itrval = low_latency; 2641 break; 2642 case low_latency: /* 50 usec aka 20000 ints/s */ 2643 if (bytes > 10000) { 2644 /* this if handles the TSO accounting */ 2645 if (bytes / packets > 8000) 2646 itrval = bulk_latency; 2647 else if ((packets < 10) || ((bytes / packets) > 1200)) 2648 itrval = bulk_latency; 2649 else if ((packets > 35)) 2650 itrval = lowest_latency; 2651 } else if (bytes / packets > 2000) { 2652 itrval = bulk_latency; 2653 } else if (packets <= 2 && bytes < 512) { 2654 itrval = lowest_latency; 2655 } 2656 break; 2657 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2658 if (bytes > 25000) { 2659 if (packets > 35) 2660 itrval = low_latency; 2661 } else if (bytes < 1500) { 2662 itrval = low_latency; 2663 } 2664 break; 2665 } 2666 2667 /* clear work counters since we have the values we need */ 2668 ring_container->total_bytes = 0; 2669 ring_container->total_packets = 0; 2670 2671 /* write updated itr to ring container */ 2672 ring_container->itr = itrval; 2673 } 2674 2675 static void igc_set_itr(struct igc_q_vector *q_vector) 2676 { 2677 struct igc_adapter *adapter = q_vector->adapter; 2678 u32 new_itr = q_vector->itr_val; 2679 u8 current_itr = 0; 2680 2681 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2682 switch (adapter->link_speed) { 2683 case SPEED_10: 2684 case SPEED_100: 2685 current_itr = 0; 2686 new_itr = IGC_4K_ITR; 2687 goto set_itr_now; 2688 default: 2689 break; 2690 } 2691 2692 igc_update_itr(q_vector, &q_vector->tx); 2693 igc_update_itr(q_vector, &q_vector->rx); 2694 2695 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 2696 2697 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2698 if (current_itr == lowest_latency && 2699 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 2700 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 2701 current_itr = low_latency; 2702 2703 switch (current_itr) { 2704 /* counts and packets in update_itr are dependent on these numbers */ 2705 case lowest_latency: 2706 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ 2707 break; 2708 case low_latency: 2709 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ 2710 break; 2711 case bulk_latency: 2712 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ 2713 break; 2714 default: 2715 break; 2716 } 2717 2718 set_itr_now: 2719 if (new_itr != q_vector->itr_val) { 2720 /* this attempts to bias the interrupt rate towards Bulk 2721 * by adding intermediate steps when interrupt rate is 2722 * increasing 2723 */ 2724 new_itr = new_itr > q_vector->itr_val ? 2725 max((new_itr * q_vector->itr_val) / 2726 (new_itr + (q_vector->itr_val >> 2)), 2727 new_itr) : new_itr; 2728 /* Don't write the value here; it resets the adapter's 2729 * internal timer, and causes us to delay far longer than 2730 * we should between interrupts. Instead, we write the ITR 2731 * value at the beginning of the next interrupt so the timing 2732 * ends up being correct. 
2733 */ 2734 q_vector->itr_val = new_itr; 2735 q_vector->set_itr = 1; 2736 } 2737 } 2738 2739 static void igc_reset_interrupt_capability(struct igc_adapter *adapter) 2740 { 2741 int v_idx = adapter->num_q_vectors; 2742 2743 if (adapter->msix_entries) { 2744 pci_disable_msix(adapter->pdev); 2745 kfree(adapter->msix_entries); 2746 adapter->msix_entries = NULL; 2747 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { 2748 pci_disable_msi(adapter->pdev); 2749 } 2750 2751 while (v_idx--) 2752 igc_reset_q_vector(adapter, v_idx); 2753 } 2754 2755 /** 2756 * igc_set_interrupt_capability - set MSI or MSI-X if supported 2757 * @adapter: Pointer to adapter structure 2758 * @msix: boolean value for MSI-X capability 2759 * 2760 * Attempt to configure interrupts using the best available 2761 * capabilities of the hardware and kernel. 2762 */ 2763 static void igc_set_interrupt_capability(struct igc_adapter *adapter, 2764 bool msix) 2765 { 2766 int numvecs, i; 2767 int err; 2768 2769 if (!msix) 2770 goto msi_only; 2771 adapter->flags |= IGC_FLAG_HAS_MSIX; 2772 2773 /* Number of supported queues. */ 2774 adapter->num_rx_queues = adapter->rss_queues; 2775 2776 adapter->num_tx_queues = adapter->rss_queues; 2777 2778 /* start with one vector for every Rx queue */ 2779 numvecs = adapter->num_rx_queues; 2780 2781 /* if Tx handler is separate add 1 for every Tx queue */ 2782 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) 2783 numvecs += adapter->num_tx_queues; 2784 2785 /* store the number of vectors reserved for queues */ 2786 adapter->num_q_vectors = numvecs; 2787 2788 /* add 1 vector for link status interrupts */ 2789 numvecs++; 2790 2791 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 2792 GFP_KERNEL); 2793 2794 if (!adapter->msix_entries) 2795 return; 2796 2797 /* populate entry values */ 2798 for (i = 0; i < numvecs; i++) 2799 adapter->msix_entries[i].entry = i; 2800 2801 err = pci_enable_msix_range(adapter->pdev, 2802 adapter->msix_entries, 2803 numvecs, 2804 numvecs); 2805 if (err > 0) 2806 return; 2807 2808 kfree(adapter->msix_entries); 2809 adapter->msix_entries = NULL; 2810 2811 igc_reset_interrupt_capability(adapter); 2812 2813 msi_only: 2814 adapter->flags &= ~IGC_FLAG_HAS_MSIX; 2815 2816 adapter->rss_queues = 1; 2817 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; 2818 adapter->num_rx_queues = 1; 2819 adapter->num_tx_queues = 1; 2820 adapter->num_q_vectors = 1; 2821 if (!pci_enable_msi(adapter->pdev)) 2822 adapter->flags |= IGC_FLAG_HAS_MSI; 2823 } 2824 2825 /** 2826 * igc_update_ring_itr - update the dynamic ITR value based on packet size 2827 * @q_vector: pointer to q_vector 2828 * 2829 * Stores a new ITR value based on strictly on packet size. This 2830 * algorithm is less sophisticated than that used in igc_update_itr, 2831 * due to the difficulty of synchronizing statistics across multiple 2832 * receive rings. The divisors and thresholds used by this function 2833 * were determined based on theoretical maximum wire speed and testing 2834 * data, in order to minimize response time while increasing bulk 2835 * throughput. 2836 * NOTE: This function is called only when operating in a multiqueue 2837 * receive environment. 2838 */ 2839 static void igc_update_ring_itr(struct igc_q_vector *q_vector) 2840 { 2841 struct igc_adapter *adapter = q_vector->adapter; 2842 int new_val = q_vector->itr_val; 2843 int avg_wire_size = 0; 2844 unsigned int packets; 2845 2846 /* For non-gigabit speeds, just fix the interrupt rate at 4000 2847 * ints/sec - ITR timer value of 120 ticks. 
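	 * For gigabit and above, the avg_wire_size heuristic below is used
	 * instead: e.g. ~600-byte frames (624 including overhead) fall in
	 * the 300-1200 byte band and yield an ITR value of 624 / 3 = 208,
	 * while full-sized 1500-byte frames give 1524 / 2 = 762.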
2848 */ 2849 switch (adapter->link_speed) { 2850 case SPEED_10: 2851 case SPEED_100: 2852 new_val = IGC_4K_ITR; 2853 goto set_itr_val; 2854 default: 2855 break; 2856 } 2857 2858 packets = q_vector->rx.total_packets; 2859 if (packets) 2860 avg_wire_size = q_vector->rx.total_bytes / packets; 2861 2862 packets = q_vector->tx.total_packets; 2863 if (packets) 2864 avg_wire_size = max_t(u32, avg_wire_size, 2865 q_vector->tx.total_bytes / packets); 2866 2867 /* if avg_wire_size isn't set no work was done */ 2868 if (!avg_wire_size) 2869 goto clear_counts; 2870 2871 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 2872 avg_wire_size += 24; 2873 2874 /* Don't starve jumbo frames */ 2875 avg_wire_size = min(avg_wire_size, 3000); 2876 2877 /* Give a little boost to mid-size frames */ 2878 if (avg_wire_size > 300 && avg_wire_size < 1200) 2879 new_val = avg_wire_size / 3; 2880 else 2881 new_val = avg_wire_size / 2; 2882 2883 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2884 if (new_val < IGC_20K_ITR && 2885 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || 2886 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) 2887 new_val = IGC_20K_ITR; 2888 2889 set_itr_val: 2890 if (new_val != q_vector->itr_val) { 2891 q_vector->itr_val = new_val; 2892 q_vector->set_itr = 1; 2893 } 2894 clear_counts: 2895 q_vector->rx.total_bytes = 0; 2896 q_vector->rx.total_packets = 0; 2897 q_vector->tx.total_bytes = 0; 2898 q_vector->tx.total_packets = 0; 2899 } 2900 2901 static void igc_ring_irq_enable(struct igc_q_vector *q_vector) 2902 { 2903 struct igc_adapter *adapter = q_vector->adapter; 2904 struct igc_hw *hw = &adapter->hw; 2905 2906 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || 2907 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { 2908 if (adapter->num_q_vectors == 1) 2909 igc_set_itr(q_vector); 2910 else 2911 igc_update_ring_itr(q_vector); 2912 } 2913 2914 if (!test_bit(__IGC_DOWN, &adapter->state)) { 2915 if (adapter->msix_entries) 2916 wr32(IGC_EIMS, q_vector->eims_value); 2917 else 2918 igc_irq_enable(adapter); 2919 } 2920 } 2921 2922 static void igc_add_ring(struct igc_ring *ring, 2923 struct igc_ring_container *head) 2924 { 2925 head->ring = ring; 2926 head->count++; 2927 } 2928 2929 /** 2930 * igc_cache_ring_register - Descriptor ring to register mapping 2931 * @adapter: board private structure to initialize 2932 * 2933 * Once we know the feature-set enabled for the device, we'll cache 2934 * the register offset the descriptor ring is assigned to. 
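 * For i225 (and the default case below) the mapping is simply the
 * identity: Rx ring i uses register index i and Tx ring j uses register
 * index j.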
2935 */ 2936 static void igc_cache_ring_register(struct igc_adapter *adapter) 2937 { 2938 int i = 0, j = 0; 2939 2940 switch (adapter->hw.mac.type) { 2941 case igc_i225: 2942 /* Fall through */ 2943 default: 2944 for (; i < adapter->num_rx_queues; i++) 2945 adapter->rx_ring[i]->reg_idx = i; 2946 for (; j < adapter->num_tx_queues; j++) 2947 adapter->tx_ring[j]->reg_idx = j; 2948 break; 2949 } 2950 } 2951 2952 /** 2953 * igc_poll - NAPI Rx polling callback 2954 * @napi: napi polling structure 2955 * @budget: count of how many packets we should handle 2956 */ 2957 static int igc_poll(struct napi_struct *napi, int budget) 2958 { 2959 struct igc_q_vector *q_vector = container_of(napi, 2960 struct igc_q_vector, 2961 napi); 2962 bool clean_complete = true; 2963 int work_done = 0; 2964 2965 if (q_vector->tx.ring) 2966 clean_complete = igc_clean_tx_irq(q_vector, budget); 2967 2968 if (q_vector->rx.ring) { 2969 int cleaned = igc_clean_rx_irq(q_vector, budget); 2970 2971 work_done += cleaned; 2972 if (cleaned >= budget) 2973 clean_complete = false; 2974 } 2975 2976 /* If all work not completed, return budget and keep polling */ 2977 if (!clean_complete) 2978 return budget; 2979 2980 /* Exit the polling mode, but don't re-enable interrupts if stack might 2981 * poll us due to busy-polling 2982 */ 2983 if (likely(napi_complete_done(napi, work_done))) 2984 igc_ring_irq_enable(q_vector); 2985 2986 return min(work_done, budget - 1); 2987 } 2988 2989 /** 2990 * igc_alloc_q_vector - Allocate memory for a single interrupt vector 2991 * @adapter: board private structure to initialize 2992 * @v_count: q_vectors allocated on adapter, used for ring interleaving 2993 * @v_idx: index of vector in adapter struct 2994 * @txr_count: total number of Tx rings to allocate 2995 * @txr_idx: index of first Tx ring to allocate 2996 * @rxr_count: total number of Rx rings to allocate 2997 * @rxr_idx: index of first Rx ring to allocate 2998 * 2999 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
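 * A vector carries at most one Tx and one Rx ring (enforced by the
 * txr_count/rxr_count check below): with IGC_FLAG_QUEUE_PAIRS set it
 * services one ring of each type, otherwise a single ring.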
3000 */ 3001 static int igc_alloc_q_vector(struct igc_adapter *adapter, 3002 unsigned int v_count, unsigned int v_idx, 3003 unsigned int txr_count, unsigned int txr_idx, 3004 unsigned int rxr_count, unsigned int rxr_idx) 3005 { 3006 struct igc_q_vector *q_vector; 3007 struct igc_ring *ring; 3008 int ring_count; 3009 3010 /* igc only supports 1 Tx and/or 1 Rx queue per vector */ 3011 if (txr_count > 1 || rxr_count > 1) 3012 return -ENOMEM; 3013 3014 ring_count = txr_count + rxr_count; 3015 3016 /* allocate q_vector and rings */ 3017 q_vector = adapter->q_vector[v_idx]; 3018 if (!q_vector) 3019 q_vector = kzalloc(struct_size(q_vector, ring, ring_count), 3020 GFP_KERNEL); 3021 else 3022 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 3023 if (!q_vector) 3024 return -ENOMEM; 3025 3026 /* initialize NAPI */ 3027 netif_napi_add(adapter->netdev, &q_vector->napi, 3028 igc_poll, 64); 3029 3030 /* tie q_vector and adapter together */ 3031 adapter->q_vector[v_idx] = q_vector; 3032 q_vector->adapter = adapter; 3033 3034 /* initialize work limits */ 3035 q_vector->tx.work_limit = adapter->tx_work_limit; 3036 3037 /* initialize ITR configuration */ 3038 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); 3039 q_vector->itr_val = IGC_START_ITR; 3040 3041 /* initialize pointer to rings */ 3042 ring = q_vector->ring; 3043 3044 /* initialize ITR */ 3045 if (rxr_count) { 3046 /* rx or rx/tx vector */ 3047 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) 3048 q_vector->itr_val = adapter->rx_itr_setting; 3049 } else { 3050 /* tx only vector */ 3051 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) 3052 q_vector->itr_val = adapter->tx_itr_setting; 3053 } 3054 3055 if (txr_count) { 3056 /* assign generic ring traits */ 3057 ring->dev = &adapter->pdev->dev; 3058 ring->netdev = adapter->netdev; 3059 3060 /* configure backlink on ring */ 3061 ring->q_vector = q_vector; 3062 3063 /* update q_vector Tx values */ 3064 igc_add_ring(ring, &q_vector->tx); 3065 3066 /* apply Tx specific ring traits */ 3067 ring->count = adapter->tx_ring_count; 3068 ring->queue_index = txr_idx; 3069 3070 /* assign ring to adapter */ 3071 adapter->tx_ring[txr_idx] = ring; 3072 3073 /* push pointer to next ring */ 3074 ring++; 3075 } 3076 3077 if (rxr_count) { 3078 /* assign generic ring traits */ 3079 ring->dev = &adapter->pdev->dev; 3080 ring->netdev = adapter->netdev; 3081 3082 /* configure backlink on ring */ 3083 ring->q_vector = q_vector; 3084 3085 /* update q_vector Rx values */ 3086 igc_add_ring(ring, &q_vector->rx); 3087 3088 /* apply Rx specific ring traits */ 3089 ring->count = adapter->rx_ring_count; 3090 ring->queue_index = rxr_idx; 3091 3092 /* assign ring to adapter */ 3093 adapter->rx_ring[rxr_idx] = ring; 3094 } 3095 3096 return 0; 3097 } 3098 3099 /** 3100 * igc_alloc_q_vectors - Allocate memory for interrupt vectors 3101 * @adapter: board private structure to initialize 3102 * 3103 * We allocate one q_vector per queue interrupt. If allocation fails we 3104 * return -ENOMEM. 
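 * For example, with 4 Rx and 4 Tx queues and 8 vectors available, the
 * first loop below creates four Rx-only vectors and the second loop four
 * Tx-only vectors; with only 4 vectors (paired queues) each vector gets
 * one Tx and one Rx ring from the second loop.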
3105 */ 3106 static int igc_alloc_q_vectors(struct igc_adapter *adapter) 3107 { 3108 int rxr_remaining = adapter->num_rx_queues; 3109 int txr_remaining = adapter->num_tx_queues; 3110 int rxr_idx = 0, txr_idx = 0, v_idx = 0; 3111 int q_vectors = adapter->num_q_vectors; 3112 int err; 3113 3114 if (q_vectors >= (rxr_remaining + txr_remaining)) { 3115 for (; rxr_remaining; v_idx++) { 3116 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 3117 0, 0, 1, rxr_idx); 3118 3119 if (err) 3120 goto err_out; 3121 3122 /* update counts and index */ 3123 rxr_remaining--; 3124 rxr_idx++; 3125 } 3126 } 3127 3128 for (; v_idx < q_vectors; v_idx++) { 3129 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 3130 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 3131 3132 err = igc_alloc_q_vector(adapter, q_vectors, v_idx, 3133 tqpv, txr_idx, rqpv, rxr_idx); 3134 3135 if (err) 3136 goto err_out; 3137 3138 /* update counts and index */ 3139 rxr_remaining -= rqpv; 3140 txr_remaining -= tqpv; 3141 rxr_idx++; 3142 txr_idx++; 3143 } 3144 3145 return 0; 3146 3147 err_out: 3148 adapter->num_tx_queues = 0; 3149 adapter->num_rx_queues = 0; 3150 adapter->num_q_vectors = 0; 3151 3152 while (v_idx--) 3153 igc_free_q_vector(adapter, v_idx); 3154 3155 return -ENOMEM; 3156 } 3157 3158 /** 3159 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 3160 * @adapter: Pointer to adapter structure 3161 * @msix: boolean for MSI-X capability 3162 * 3163 * This function initializes the interrupts and allocates all of the queues. 3164 */ 3165 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) 3166 { 3167 struct pci_dev *pdev = adapter->pdev; 3168 int err = 0; 3169 3170 igc_set_interrupt_capability(adapter, msix); 3171 3172 err = igc_alloc_q_vectors(adapter); 3173 if (err) { 3174 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); 3175 goto err_alloc_q_vectors; 3176 } 3177 3178 igc_cache_ring_register(adapter); 3179 3180 return 0; 3181 3182 err_alloc_q_vectors: 3183 igc_reset_interrupt_capability(adapter); 3184 return err; 3185 } 3186 3187 /** 3188 * igc_sw_init - Initialize general software structures (struct igc_adapter) 3189 * @adapter: board private structure to initialize 3190 * 3191 * igc_sw_init initializes the Adapter private data structure. 3192 * Fields are initialized based on PCI device information and 3193 * OS network device settings (MTU size). 
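 * For a standard 1500-byte MTU the max_frame_size computed below is
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes.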
3194 */ 3195 static int igc_sw_init(struct igc_adapter *adapter) 3196 { 3197 struct net_device *netdev = adapter->netdev; 3198 struct pci_dev *pdev = adapter->pdev; 3199 struct igc_hw *hw = &adapter->hw; 3200 3201 int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count; 3202 3203 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 3204 3205 /* set default ring sizes */ 3206 adapter->tx_ring_count = IGC_DEFAULT_TXD; 3207 adapter->rx_ring_count = IGC_DEFAULT_RXD; 3208 3209 /* set default ITR values */ 3210 adapter->rx_itr_setting = IGC_DEFAULT_ITR; 3211 adapter->tx_itr_setting = IGC_DEFAULT_ITR; 3212 3213 /* set default work limits */ 3214 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; 3215 3216 /* adjust max frame to be at least the size of a standard frame */ 3217 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + 3218 VLAN_HLEN; 3219 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3220 3221 spin_lock_init(&adapter->nfc_lock); 3222 spin_lock_init(&adapter->stats64_lock); 3223 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ 3224 adapter->flags |= IGC_FLAG_HAS_MSIX; 3225 3226 adapter->mac_table = kzalloc(size, GFP_ATOMIC); 3227 if (!adapter->mac_table) 3228 return -ENOMEM; 3229 3230 igc_init_queue_configuration(adapter); 3231 3232 /* This call may decrease the number of queues */ 3233 if (igc_init_interrupt_scheme(adapter, true)) { 3234 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 3235 return -ENOMEM; 3236 } 3237 3238 /* Explicitly disable IRQ since the NIC can be in any state. */ 3239 igc_irq_disable(adapter); 3240 3241 set_bit(__IGC_DOWN, &adapter->state); 3242 3243 return 0; 3244 } 3245 3246 /** 3247 * igc_up - Open the interface and prepare it to handle traffic 3248 * @adapter: board private structure 3249 */ 3250 void igc_up(struct igc_adapter *adapter) 3251 { 3252 struct igc_hw *hw = &adapter->hw; 3253 int i = 0; 3254 3255 /* hardware has been reset, we need to reload some things */ 3256 igc_configure(adapter); 3257 3258 clear_bit(__IGC_DOWN, &adapter->state); 3259 3260 for (i = 0; i < adapter->num_q_vectors; i++) 3261 napi_enable(&adapter->q_vector[i]->napi); 3262 3263 if (adapter->msix_entries) 3264 igc_configure_msix(adapter); 3265 else 3266 igc_assign_vector(adapter->q_vector[0], 0); 3267 3268 /* Clear any pending interrupts. */ 3269 rd32(IGC_ICR); 3270 igc_irq_enable(adapter); 3271 3272 netif_tx_start_all_queues(adapter->netdev); 3273 3274 /* start the watchdog. */ 3275 hw->mac.get_link_status = 1; 3276 schedule_work(&adapter->watchdog_task); 3277 } 3278 3279 /** 3280 * igc_update_stats - Update the board statistics counters 3281 * @adapter: board private structure 3282 */ 3283 void igc_update_stats(struct igc_adapter *adapter) 3284 { 3285 struct rtnl_link_stats64 *net_stats = &adapter->stats64; 3286 struct pci_dev *pdev = adapter->pdev; 3287 struct igc_hw *hw = &adapter->hw; 3288 u64 _bytes, _packets; 3289 u64 bytes, packets; 3290 unsigned int start; 3291 u32 mpc; 3292 int i; 3293 3294 /* Prevent stats update while adapter is being reset, or if the pci 3295 * connection is down. 
3296 */ 3297 if (adapter->link_speed == 0) 3298 return; 3299 if (pci_channel_offline(pdev)) 3300 return; 3301 3302 packets = 0; 3303 bytes = 0; 3304 3305 rcu_read_lock(); 3306 for (i = 0; i < adapter->num_rx_queues; i++) { 3307 struct igc_ring *ring = adapter->rx_ring[i]; 3308 u32 rqdpc = rd32(IGC_RQDPC(i)); 3309 3310 if (hw->mac.type >= igc_i225) 3311 wr32(IGC_RQDPC(i), 0); 3312 3313 if (rqdpc) { 3314 ring->rx_stats.drops += rqdpc; 3315 net_stats->rx_fifo_errors += rqdpc; 3316 } 3317 3318 do { 3319 start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 3320 _bytes = ring->rx_stats.bytes; 3321 _packets = ring->rx_stats.packets; 3322 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 3323 bytes += _bytes; 3324 packets += _packets; 3325 } 3326 3327 net_stats->rx_bytes = bytes; 3328 net_stats->rx_packets = packets; 3329 3330 packets = 0; 3331 bytes = 0; 3332 for (i = 0; i < adapter->num_tx_queues; i++) { 3333 struct igc_ring *ring = adapter->tx_ring[i]; 3334 3335 do { 3336 start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 3337 _bytes = ring->tx_stats.bytes; 3338 _packets = ring->tx_stats.packets; 3339 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 3340 bytes += _bytes; 3341 packets += _packets; 3342 } 3343 net_stats->tx_bytes = bytes; 3344 net_stats->tx_packets = packets; 3345 rcu_read_unlock(); 3346 3347 /* read stats registers */ 3348 adapter->stats.crcerrs += rd32(IGC_CRCERRS); 3349 adapter->stats.gprc += rd32(IGC_GPRC); 3350 adapter->stats.gorc += rd32(IGC_GORCL); 3351 rd32(IGC_GORCH); /* clear GORCL */ 3352 adapter->stats.bprc += rd32(IGC_BPRC); 3353 adapter->stats.mprc += rd32(IGC_MPRC); 3354 adapter->stats.roc += rd32(IGC_ROC); 3355 3356 adapter->stats.prc64 += rd32(IGC_PRC64); 3357 adapter->stats.prc127 += rd32(IGC_PRC127); 3358 adapter->stats.prc255 += rd32(IGC_PRC255); 3359 adapter->stats.prc511 += rd32(IGC_PRC511); 3360 adapter->stats.prc1023 += rd32(IGC_PRC1023); 3361 adapter->stats.prc1522 += rd32(IGC_PRC1522); 3362 adapter->stats.symerrs += rd32(IGC_SYMERRS); 3363 adapter->stats.sec += rd32(IGC_SEC); 3364 3365 mpc = rd32(IGC_MPC); 3366 adapter->stats.mpc += mpc; 3367 net_stats->rx_fifo_errors += mpc; 3368 adapter->stats.scc += rd32(IGC_SCC); 3369 adapter->stats.ecol += rd32(IGC_ECOL); 3370 adapter->stats.mcc += rd32(IGC_MCC); 3371 adapter->stats.latecol += rd32(IGC_LATECOL); 3372 adapter->stats.dc += rd32(IGC_DC); 3373 adapter->stats.rlec += rd32(IGC_RLEC); 3374 adapter->stats.xonrxc += rd32(IGC_XONRXC); 3375 adapter->stats.xontxc += rd32(IGC_XONTXC); 3376 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 3377 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 3378 adapter->stats.fcruc += rd32(IGC_FCRUC); 3379 adapter->stats.gptc += rd32(IGC_GPTC); 3380 adapter->stats.gotc += rd32(IGC_GOTCL); 3381 rd32(IGC_GOTCH); /* clear GOTCL */ 3382 adapter->stats.rnbc += rd32(IGC_RNBC); 3383 adapter->stats.ruc += rd32(IGC_RUC); 3384 adapter->stats.rfc += rd32(IGC_RFC); 3385 adapter->stats.rjc += rd32(IGC_RJC); 3386 adapter->stats.tor += rd32(IGC_TORH); 3387 adapter->stats.tot += rd32(IGC_TOTH); 3388 adapter->stats.tpr += rd32(IGC_TPR); 3389 3390 adapter->stats.ptc64 += rd32(IGC_PTC64); 3391 adapter->stats.ptc127 += rd32(IGC_PTC127); 3392 adapter->stats.ptc255 += rd32(IGC_PTC255); 3393 adapter->stats.ptc511 += rd32(IGC_PTC511); 3394 adapter->stats.ptc1023 += rd32(IGC_PTC1023); 3395 adapter->stats.ptc1522 += rd32(IGC_PTC1522); 3396 3397 adapter->stats.mptc += rd32(IGC_MPTC); 3398 adapter->stats.bptc += rd32(IGC_BPTC); 3399 3400 adapter->stats.tpt += rd32(IGC_TPT); 3401 
adapter->stats.colc += rd32(IGC_COLC); 3402 3403 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 3404 3405 adapter->stats.tsctc += rd32(IGC_TSCTC); 3406 adapter->stats.tsctfc += rd32(IGC_TSCTFC); 3407 3408 adapter->stats.iac += rd32(IGC_IAC); 3409 adapter->stats.icrxoc += rd32(IGC_ICRXOC); 3410 adapter->stats.icrxptc += rd32(IGC_ICRXPTC); 3411 adapter->stats.icrxatc += rd32(IGC_ICRXATC); 3412 adapter->stats.ictxptc += rd32(IGC_ICTXPTC); 3413 adapter->stats.ictxatc += rd32(IGC_ICTXATC); 3414 adapter->stats.ictxqec += rd32(IGC_ICTXQEC); 3415 adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); 3416 adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); 3417 3418 /* Fill out the OS statistics structure */ 3419 net_stats->multicast = adapter->stats.mprc; 3420 net_stats->collisions = adapter->stats.colc; 3421 3422 /* Rx Errors */ 3423 3424 /* RLEC on some newer hardware can be incorrect so build 3425 * our own version based on RUC and ROC 3426 */ 3427 net_stats->rx_errors = adapter->stats.rxerrc + 3428 adapter->stats.crcerrs + adapter->stats.algnerrc + 3429 adapter->stats.ruc + adapter->stats.roc + 3430 adapter->stats.cexterr; 3431 net_stats->rx_length_errors = adapter->stats.ruc + 3432 adapter->stats.roc; 3433 net_stats->rx_crc_errors = adapter->stats.crcerrs; 3434 net_stats->rx_frame_errors = adapter->stats.algnerrc; 3435 net_stats->rx_missed_errors = adapter->stats.mpc; 3436 3437 /* Tx Errors */ 3438 net_stats->tx_errors = adapter->stats.ecol + 3439 adapter->stats.latecol; 3440 net_stats->tx_aborted_errors = adapter->stats.ecol; 3441 net_stats->tx_window_errors = adapter->stats.latecol; 3442 net_stats->tx_carrier_errors = adapter->stats.tncrs; 3443 3444 /* Tx Dropped needs to be maintained elsewhere */ 3445 3446 /* Management Stats */ 3447 adapter->stats.mgptc += rd32(IGC_MGTPTC); 3448 adapter->stats.mgprc += rd32(IGC_MGTPRC); 3449 adapter->stats.mgpdc += rd32(IGC_MGTPDC); 3450 } 3451 3452 static void igc_nfc_filter_exit(struct igc_adapter *adapter) 3453 { 3454 struct igc_nfc_filter *rule; 3455 3456 spin_lock(&adapter->nfc_lock); 3457 3458 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) 3459 igc_erase_filter(adapter, rule); 3460 3461 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) 3462 igc_erase_filter(adapter, rule); 3463 3464 spin_unlock(&adapter->nfc_lock); 3465 } 3466 3467 /** 3468 * igc_down - Close the interface 3469 * @adapter: board private structure 3470 */ 3471 void igc_down(struct igc_adapter *adapter) 3472 { 3473 struct net_device *netdev = adapter->netdev; 3474 struct igc_hw *hw = &adapter->hw; 3475 u32 tctl, rctl; 3476 int i = 0; 3477 3478 set_bit(__IGC_DOWN, &adapter->state); 3479 3480 /* disable receives in the hardware */ 3481 rctl = rd32(IGC_RCTL); 3482 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); 3483 /* flush and sleep below */ 3484 3485 igc_nfc_filter_exit(adapter); 3486 3487 /* set trans_start so we don't get spurious watchdogs during reset */ 3488 netif_trans_update(netdev); 3489 3490 netif_carrier_off(netdev); 3491 netif_tx_stop_all_queues(netdev); 3492 3493 /* disable transmits in the hardware */ 3494 tctl = rd32(IGC_TCTL); 3495 tctl &= ~IGC_TCTL_EN; 3496 wr32(IGC_TCTL, tctl); 3497 /* flush both disables and wait for them to finish */ 3498 wrfl(); 3499 usleep_range(10000, 20000); 3500 3501 igc_irq_disable(adapter); 3502 3503 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 3504 3505 for (i = 0; i < adapter->num_q_vectors; i++) { 3506 if (adapter->q_vector[i]) { 3507 napi_synchronize(&adapter->q_vector[i]->napi); 3508 napi_disable(&adapter->q_vector[i]->napi); 
3509 } 3510 } 3511 3512 del_timer_sync(&adapter->watchdog_timer); 3513 del_timer_sync(&adapter->phy_info_timer); 3514 3515 /* record the stats before reset*/ 3516 spin_lock(&adapter->stats64_lock); 3517 igc_update_stats(adapter); 3518 spin_unlock(&adapter->stats64_lock); 3519 3520 adapter->link_speed = 0; 3521 adapter->link_duplex = 0; 3522 3523 if (!pci_channel_offline(adapter->pdev)) 3524 igc_reset(adapter); 3525 3526 /* clear VLAN promisc flag so VFTA will be updated if necessary */ 3527 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 3528 3529 igc_clean_all_tx_rings(adapter); 3530 igc_clean_all_rx_rings(adapter); 3531 } 3532 3533 void igc_reinit_locked(struct igc_adapter *adapter) 3534 { 3535 WARN_ON(in_interrupt()); 3536 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 3537 usleep_range(1000, 2000); 3538 igc_down(adapter); 3539 igc_up(adapter); 3540 clear_bit(__IGC_RESETTING, &adapter->state); 3541 } 3542 3543 static void igc_reset_task(struct work_struct *work) 3544 { 3545 struct igc_adapter *adapter; 3546 3547 adapter = container_of(work, struct igc_adapter, reset_task); 3548 3549 igc_rings_dump(adapter); 3550 igc_regs_dump(adapter); 3551 netdev_err(adapter->netdev, "Reset adapter\n"); 3552 igc_reinit_locked(adapter); 3553 } 3554 3555 /** 3556 * igc_change_mtu - Change the Maximum Transfer Unit 3557 * @netdev: network interface device structure 3558 * @new_mtu: new value for maximum frame size 3559 * 3560 * Returns 0 on success, negative on failure 3561 */ 3562 static int igc_change_mtu(struct net_device *netdev, int new_mtu) 3563 { 3564 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 3565 struct igc_adapter *adapter = netdev_priv(netdev); 3566 3567 /* adjust max frame to be at least the size of a standard frame */ 3568 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) 3569 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; 3570 3571 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 3572 usleep_range(1000, 2000); 3573 3574 /* igc_down has a dependency on max_frame_size */ 3575 adapter->max_frame_size = max_frame; 3576 3577 if (netif_running(netdev)) 3578 igc_down(adapter); 3579 3580 netdev_dbg(netdev, "changing MTU from %d to %d\n", 3581 netdev->mtu, new_mtu); 3582 netdev->mtu = new_mtu; 3583 3584 if (netif_running(netdev)) 3585 igc_up(adapter); 3586 else 3587 igc_reset(adapter); 3588 3589 clear_bit(__IGC_RESETTING, &adapter->state); 3590 3591 return 0; 3592 } 3593 3594 /** 3595 * igc_get_stats - Get System Network Statistics 3596 * @netdev: network interface device structure 3597 * 3598 * Returns the address of the device statistics structure. 3599 * The statistics are updated here and also from the timer callback. 3600 */ 3601 static struct net_device_stats *igc_get_stats(struct net_device *netdev) 3602 { 3603 struct igc_adapter *adapter = netdev_priv(netdev); 3604 3605 if (!test_bit(__IGC_RESETTING, &adapter->state)) 3606 igc_update_stats(adapter); 3607 3608 /* only return the current stats */ 3609 return &netdev->stats; 3610 } 3611 3612 static netdev_features_t igc_fix_features(struct net_device *netdev, 3613 netdev_features_t features) 3614 { 3615 /* Since there is no support for separate Rx/Tx vlan accel 3616 * enable/disable make sure Tx flag is always in same state as Rx. 
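	 * (igc_set_features() below only reacts to RXALL/NTUPLE changes, so
	 * the VLAN coupling enforced here needs no further handling there.)
	 */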
3617 */ 3618 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3619 features |= NETIF_F_HW_VLAN_CTAG_TX; 3620 else 3621 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 3622 3623 return features; 3624 } 3625 3626 static int igc_set_features(struct net_device *netdev, 3627 netdev_features_t features) 3628 { 3629 netdev_features_t changed = netdev->features ^ features; 3630 struct igc_adapter *adapter = netdev_priv(netdev); 3631 3632 /* Add VLAN support */ 3633 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) 3634 return 0; 3635 3636 if (!(features & NETIF_F_NTUPLE)) { 3637 struct hlist_node *node2; 3638 struct igc_nfc_filter *rule; 3639 3640 spin_lock(&adapter->nfc_lock); 3641 hlist_for_each_entry_safe(rule, node2, 3642 &adapter->nfc_filter_list, nfc_node) { 3643 igc_erase_filter(adapter, rule); 3644 hlist_del(&rule->nfc_node); 3645 kfree(rule); 3646 } 3647 spin_unlock(&adapter->nfc_lock); 3648 adapter->nfc_filter_count = 0; 3649 } 3650 3651 netdev->features = features; 3652 3653 if (netif_running(netdev)) 3654 igc_reinit_locked(adapter); 3655 else 3656 igc_reset(adapter); 3657 3658 return 1; 3659 } 3660 3661 static netdev_features_t 3662 igc_features_check(struct sk_buff *skb, struct net_device *dev, 3663 netdev_features_t features) 3664 { 3665 unsigned int network_hdr_len, mac_hdr_len; 3666 3667 /* Make certain the headers can be described by a context descriptor */ 3668 mac_hdr_len = skb_network_header(skb) - skb->data; 3669 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 3670 return features & ~(NETIF_F_HW_CSUM | 3671 NETIF_F_SCTP_CRC | 3672 NETIF_F_HW_VLAN_CTAG_TX | 3673 NETIF_F_TSO | 3674 NETIF_F_TSO6); 3675 3676 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 3677 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 3678 return features & ~(NETIF_F_HW_CSUM | 3679 NETIF_F_SCTP_CRC | 3680 NETIF_F_TSO | 3681 NETIF_F_TSO6); 3682 3683 /* We can only support IPv4 TSO in tunnels if we can mangle the 3684 * inner IP ID field, so strip TSO if MANGLEID is not supported. 3685 */ 3686 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 3687 features &= ~NETIF_F_TSO; 3688 3689 return features; 3690 } 3691 3692 /* Add a MAC filter for 'addr' directing matching traffic to 'queue', 3693 * 'flags' is used to indicate what kind of match is made, match is by 3694 * default for the destination address, if matching by source address 3695 * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used. 3696 */ 3697 static int igc_add_mac_filter_flags(struct igc_adapter *adapter, 3698 const u8 *addr, const u8 queue, 3699 const u8 flags) 3700 { 3701 struct igc_hw *hw = &adapter->hw; 3702 int rar_entries = hw->mac.rar_entry_count; 3703 int i; 3704 3705 if (is_zero_ether_addr(addr)) 3706 return -EINVAL; 3707 3708 /* Search for the first empty entry in the MAC table. 3709 * Do not touch entries at the end of the table reserved for the VF MAC 3710 * addresses. 
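	 * igc_add_mac_steering_filter() below wraps this helper and adds
	 * IGC_MAC_STATE_QUEUE_STEERING to the flags so matched traffic can
	 * be directed to the requested Rx queue.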
3711 */ 3712 for (i = 0; i < rar_entries; i++) { 3713 if (!igc_mac_entry_can_be_used(&adapter->mac_table[i], 3714 addr, flags)) 3715 continue; 3716 3717 ether_addr_copy(adapter->mac_table[i].addr, addr); 3718 adapter->mac_table[i].queue = queue; 3719 adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags; 3720 3721 igc_rar_set_index(adapter, i); 3722 return i; 3723 } 3724 3725 return -ENOSPC; 3726 } 3727 3728 int igc_add_mac_steering_filter(struct igc_adapter *adapter, 3729 const u8 *addr, u8 queue, u8 flags) 3730 { 3731 return igc_add_mac_filter_flags(adapter, addr, queue, 3732 IGC_MAC_STATE_QUEUE_STEERING | flags); 3733 } 3734 3735 /* Remove a MAC filter for 'addr' directing matching traffic to 3736 * 'queue', 'flags' is used to indicate what kind of match need to be 3737 * removed, match is by default for the destination address, if 3738 * matching by source address is to be removed the flag 3739 * IGC_MAC_STATE_SRC_ADDR can be used. 3740 */ 3741 static int igc_del_mac_filter_flags(struct igc_adapter *adapter, 3742 const u8 *addr, const u8 queue, 3743 const u8 flags) 3744 { 3745 struct igc_hw *hw = &adapter->hw; 3746 int rar_entries = hw->mac.rar_entry_count; 3747 int i; 3748 3749 if (is_zero_ether_addr(addr)) 3750 return -EINVAL; 3751 3752 /* Search for matching entry in the MAC table based on given address 3753 * and queue. Do not touch entries at the end of the table reserved 3754 * for the VF MAC addresses. 3755 */ 3756 for (i = 0; i < rar_entries; i++) { 3757 if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE)) 3758 continue; 3759 if ((adapter->mac_table[i].state & flags) != flags) 3760 continue; 3761 if (adapter->mac_table[i].queue != queue) 3762 continue; 3763 if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) 3764 continue; 3765 3766 /* When a filter for the default address is "deleted", 3767 * we return it to its initial configuration 3768 */ 3769 if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) { 3770 adapter->mac_table[i].state = 3771 IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; 3772 } else { 3773 adapter->mac_table[i].state = 0; 3774 adapter->mac_table[i].queue = 0; 3775 memset(adapter->mac_table[i].addr, 0, ETH_ALEN); 3776 } 3777 3778 igc_rar_set_index(adapter, i); 3779 return 0; 3780 } 3781 3782 return -ENOENT; 3783 } 3784 3785 int igc_del_mac_steering_filter(struct igc_adapter *adapter, 3786 const u8 *addr, u8 queue, u8 flags) 3787 { 3788 return igc_del_mac_filter_flags(adapter, addr, queue, 3789 IGC_MAC_STATE_QUEUE_STEERING | flags); 3790 } 3791 3792 static void igc_tsync_interrupt(struct igc_adapter *adapter) 3793 { 3794 struct igc_hw *hw = &adapter->hw; 3795 u32 tsicr = rd32(IGC_TSICR); 3796 u32 ack = 0; 3797 3798 if (tsicr & IGC_TSICR_TXTS) { 3799 /* retrieve hardware timestamp */ 3800 schedule_work(&adapter->ptp_tx_work); 3801 ack |= IGC_TSICR_TXTS; 3802 } 3803 3804 /* acknowledge the interrupts */ 3805 wr32(IGC_TSICR, ack); 3806 } 3807 3808 /** 3809 * igc_msix_other - msix other interrupt handler 3810 * @irq: interrupt number 3811 * @data: pointer to a q_vector 3812 */ 3813 static irqreturn_t igc_msix_other(int irq, void *data) 3814 { 3815 struct igc_adapter *adapter = data; 3816 struct igc_hw *hw = &adapter->hw; 3817 u32 icr = rd32(IGC_ICR); 3818 3819 /* reading ICR causes bit 31 of EICR to be cleared */ 3820 if (icr & IGC_ICR_DRSTA) 3821 schedule_work(&adapter->reset_task); 3822 3823 if (icr & IGC_ICR_DOUTSYNC) { 3824 /* HW is reporting DMA is out of sync */ 3825 adapter->stats.doosync++; 3826 } 3827 3828 if (icr & IGC_ICR_LSC) { 3829 
hw->mac.get_link_status = 1; 3830 /* guard against interrupt when we're going down */ 3831 if (!test_bit(__IGC_DOWN, &adapter->state)) 3832 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3833 } 3834 3835 if (icr & IGC_ICR_TS) 3836 igc_tsync_interrupt(adapter); 3837 3838 wr32(IGC_EIMS, adapter->eims_other); 3839 3840 return IRQ_HANDLED; 3841 } 3842 3843 static void igc_write_itr(struct igc_q_vector *q_vector) 3844 { 3845 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; 3846 3847 if (!q_vector->set_itr) 3848 return; 3849 3850 if (!itr_val) 3851 itr_val = IGC_ITR_VAL_MASK; 3852 3853 itr_val |= IGC_EITR_CNT_IGNR; 3854 3855 writel(itr_val, q_vector->itr_register); 3856 q_vector->set_itr = 0; 3857 } 3858 3859 static irqreturn_t igc_msix_ring(int irq, void *data) 3860 { 3861 struct igc_q_vector *q_vector = data; 3862 3863 /* Write the ITR value calculated from the previous interrupt. */ 3864 igc_write_itr(q_vector); 3865 3866 napi_schedule(&q_vector->napi); 3867 3868 return IRQ_HANDLED; 3869 } 3870 3871 /** 3872 * igc_request_msix - Initialize MSI-X interrupts 3873 * @adapter: Pointer to adapter structure 3874 * 3875 * igc_request_msix allocates MSI-X vectors and requests interrupts from the 3876 * kernel. 3877 */ 3878 static int igc_request_msix(struct igc_adapter *adapter) 3879 { 3880 int i = 0, err = 0, vector = 0, free_vector = 0; 3881 struct net_device *netdev = adapter->netdev; 3882 3883 err = request_irq(adapter->msix_entries[vector].vector, 3884 &igc_msix_other, 0, netdev->name, adapter); 3885 if (err) 3886 goto err_out; 3887 3888 for (i = 0; i < adapter->num_q_vectors; i++) { 3889 struct igc_q_vector *q_vector = adapter->q_vector[i]; 3890 3891 vector++; 3892 3893 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); 3894 3895 if (q_vector->rx.ring && q_vector->tx.ring) 3896 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, 3897 q_vector->rx.ring->queue_index); 3898 else if (q_vector->tx.ring) 3899 sprintf(q_vector->name, "%s-tx-%u", netdev->name, 3900 q_vector->tx.ring->queue_index); 3901 else if (q_vector->rx.ring) 3902 sprintf(q_vector->name, "%s-rx-%u", netdev->name, 3903 q_vector->rx.ring->queue_index); 3904 else 3905 sprintf(q_vector->name, "%s-unused", netdev->name); 3906 3907 err = request_irq(adapter->msix_entries[vector].vector, 3908 igc_msix_ring, 0, q_vector->name, 3909 q_vector); 3910 if (err) 3911 goto err_free; 3912 } 3913 3914 igc_configure_msix(adapter); 3915 return 0; 3916 3917 err_free: 3918 /* free already assigned IRQs */ 3919 free_irq(adapter->msix_entries[free_vector++].vector, adapter); 3920 3921 vector--; 3922 for (i = 0; i < vector; i++) { 3923 free_irq(adapter->msix_entries[free_vector++].vector, 3924 adapter->q_vector[i]); 3925 } 3926 err_out: 3927 return err; 3928 } 3929 3930 /** 3931 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts 3932 * @adapter: Pointer to adapter structure 3933 * 3934 * This function resets the device so that it has 0 rx queues, tx queues, and 3935 * MSI-X interrupts allocated. 
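 *
 * This undoes igc_init_interrupt_scheme(): the q_vectors are freed and
 * the interrupt capability is reset, so a later call to
 * igc_init_interrupt_scheme() can rebuild everything from scratch.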
3936 */ 3937 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) 3938 { 3939 igc_free_q_vectors(adapter); 3940 igc_reset_interrupt_capability(adapter); 3941 } 3942 3943 /* Need to wait a few seconds after link up to get diagnostic information from 3944 * the phy 3945 */ 3946 static void igc_update_phy_info(struct timer_list *t) 3947 { 3948 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); 3949 3950 igc_get_phy_info(&adapter->hw); 3951 } 3952 3953 /** 3954 * igc_has_link - check shared code for link and determine up/down 3955 * @adapter: pointer to driver private info 3956 */ 3957 bool igc_has_link(struct igc_adapter *adapter) 3958 { 3959 struct igc_hw *hw = &adapter->hw; 3960 bool link_active = false; 3961 3962 /* get_link_status is set on LSC (link status) interrupt or 3963 * rx sequence error interrupt. get_link_status will stay 3964 * false until the igc_check_for_link establishes link 3965 * for copper adapters ONLY 3966 */ 3967 switch (hw->phy.media_type) { 3968 case igc_media_type_copper: 3969 if (!hw->mac.get_link_status) 3970 return true; 3971 hw->mac.ops.check_for_link(hw); 3972 link_active = !hw->mac.get_link_status; 3973 break; 3974 default: 3975 case igc_media_type_unknown: 3976 break; 3977 } 3978 3979 if (hw->mac.type == igc_i225 && 3980 hw->phy.id == I225_I_PHY_ID) { 3981 if (!netif_carrier_ok(adapter->netdev)) { 3982 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 3983 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { 3984 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; 3985 adapter->link_check_timeout = jiffies; 3986 } 3987 } 3988 3989 return link_active; 3990 } 3991 3992 /** 3993 * igc_watchdog - Timer Call-back 3994 * @t: timer for the watchdog 3995 */ 3996 static void igc_watchdog(struct timer_list *t) 3997 { 3998 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); 3999 /* Do the rest outside of interrupt context */ 4000 schedule_work(&adapter->watchdog_task); 4001 } 4002 4003 static void igc_watchdog_task(struct work_struct *work) 4004 { 4005 struct igc_adapter *adapter = container_of(work, 4006 struct igc_adapter, 4007 watchdog_task); 4008 struct net_device *netdev = adapter->netdev; 4009 struct igc_hw *hw = &adapter->hw; 4010 struct igc_phy_info *phy = &hw->phy; 4011 u16 phy_data, retry_count = 20; 4012 u32 connsw; 4013 u32 link; 4014 int i; 4015 4016 link = igc_has_link(adapter); 4017 4018 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 4019 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 4020 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 4021 else 4022 link = false; 4023 } 4024 4025 /* Force link down if we have fiber to swap to */ 4026 if (adapter->flags & IGC_FLAG_MAS_ENABLE) { 4027 if (hw->phy.media_type == igc_media_type_copper) { 4028 connsw = rd32(IGC_CONNSW); 4029 if (!(connsw & IGC_CONNSW_AUTOSENSE_EN)) 4030 link = 0; 4031 } 4032 } 4033 if (link) { 4034 /* Cancel scheduled suspend requests. */ 4035 pm_runtime_resume(netdev->dev.parent); 4036 4037 if (!netif_carrier_ok(netdev)) { 4038 u32 ctrl; 4039 4040 hw->mac.ops.get_speed_and_duplex(hw, 4041 &adapter->link_speed, 4042 &adapter->link_duplex); 4043 4044 ctrl = rd32(IGC_CTRL); 4045 /* Link status message must follow this format */ 4046 netdev_info(netdev, 4047 "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 4048 netdev->name, 4049 adapter->link_speed, 4050 adapter->link_duplex == FULL_DUPLEX ? 4051 "Full" : "Half", 4052 (ctrl & IGC_CTRL_TFCE) && 4053 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 4054 (ctrl & IGC_CTRL_RFCE) ? 
"RX" : 4055 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); 4056 4057 /* check if SmartSpeed worked */ 4058 igc_check_downshift(hw); 4059 if (phy->speed_downgraded) 4060 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 4061 4062 /* adjust timeout factor according to speed/duplex */ 4063 adapter->tx_timeout_factor = 1; 4064 switch (adapter->link_speed) { 4065 case SPEED_10: 4066 adapter->tx_timeout_factor = 14; 4067 break; 4068 case SPEED_100: 4069 /* maybe add some timeout factor ? */ 4070 break; 4071 } 4072 4073 if (adapter->link_speed != SPEED_1000) 4074 goto no_wait; 4075 4076 /* wait for Remote receiver status OK */ 4077 retry_read_status: 4078 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 4079 &phy_data)) { 4080 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 4081 retry_count) { 4082 msleep(100); 4083 retry_count--; 4084 goto retry_read_status; 4085 } else if (!retry_count) { 4086 dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); 4087 } 4088 } else { 4089 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); 4090 } 4091 no_wait: 4092 netif_carrier_on(netdev); 4093 4094 /* link state has changed, schedule phy info update */ 4095 if (!test_bit(__IGC_DOWN, &adapter->state)) 4096 mod_timer(&adapter->phy_info_timer, 4097 round_jiffies(jiffies + 2 * HZ)); 4098 } 4099 } else { 4100 if (netif_carrier_ok(netdev)) { 4101 adapter->link_speed = 0; 4102 adapter->link_duplex = 0; 4103 4104 /* Links status message must follow this format */ 4105 netdev_info(netdev, "igc: %s NIC Link is Down\n", 4106 netdev->name); 4107 netif_carrier_off(netdev); 4108 4109 /* link state has changed, schedule phy info update */ 4110 if (!test_bit(__IGC_DOWN, &adapter->state)) 4111 mod_timer(&adapter->phy_info_timer, 4112 round_jiffies(jiffies + 2 * HZ)); 4113 4114 /* link is down, time to check for alternate media */ 4115 if (adapter->flags & IGC_FLAG_MAS_ENABLE) { 4116 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 4117 schedule_work(&adapter->reset_task); 4118 /* return immediately */ 4119 return; 4120 } 4121 } 4122 pm_schedule_suspend(netdev->dev.parent, 4123 MSEC_PER_SEC * 5); 4124 4125 /* also check for alternate media here */ 4126 } else if (!netif_carrier_ok(netdev) && 4127 (adapter->flags & IGC_FLAG_MAS_ENABLE)) { 4128 if (adapter->flags & IGC_FLAG_MEDIA_RESET) { 4129 schedule_work(&adapter->reset_task); 4130 /* return immediately */ 4131 return; 4132 } 4133 } 4134 } 4135 4136 spin_lock(&adapter->stats64_lock); 4137 igc_update_stats(adapter); 4138 spin_unlock(&adapter->stats64_lock); 4139 4140 for (i = 0; i < adapter->num_tx_queues; i++) { 4141 struct igc_ring *tx_ring = adapter->tx_ring[i]; 4142 4143 if (!netif_carrier_ok(netdev)) { 4144 /* We've lost link, so the controller stops DMA, 4145 * but we've got queued Tx work that's never going 4146 * to get done, so reset controller to flush Tx. 4147 * (Do the reset outside of interrupt context). 
4148 */ 4149 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 4150 adapter->tx_timeout_count++; 4151 schedule_work(&adapter->reset_task); 4152 /* return immediately since reset is imminent */ 4153 return; 4154 } 4155 } 4156 4157 /* Force detection of hung controller every watchdog period */ 4158 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 4159 } 4160 4161 /* Cause software interrupt to ensure Rx ring is cleaned */ 4162 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 4163 u32 eics = 0; 4164 4165 for (i = 0; i < adapter->num_q_vectors; i++) 4166 eics |= adapter->q_vector[i]->eims_value; 4167 wr32(IGC_EICS, eics); 4168 } else { 4169 wr32(IGC_ICS, IGC_ICS_RXDMT0); 4170 } 4171 4172 igc_ptp_tx_hang(adapter); 4173 4174 /* Reset the timer */ 4175 if (!test_bit(__IGC_DOWN, &adapter->state)) { 4176 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 4177 mod_timer(&adapter->watchdog_timer, 4178 round_jiffies(jiffies + HZ)); 4179 else 4180 mod_timer(&adapter->watchdog_timer, 4181 round_jiffies(jiffies + 2 * HZ)); 4182 } 4183 } 4184 4185 /** 4186 * igc_intr_msi - Interrupt Handler 4187 * @irq: interrupt number 4188 * @data: pointer to a network interface device structure 4189 */ 4190 static irqreturn_t igc_intr_msi(int irq, void *data) 4191 { 4192 struct igc_adapter *adapter = data; 4193 struct igc_q_vector *q_vector = adapter->q_vector[0]; 4194 struct igc_hw *hw = &adapter->hw; 4195 /* read ICR disables interrupts using IAM */ 4196 u32 icr = rd32(IGC_ICR); 4197 4198 igc_write_itr(q_vector); 4199 4200 if (icr & IGC_ICR_DRSTA) 4201 schedule_work(&adapter->reset_task); 4202 4203 if (icr & IGC_ICR_DOUTSYNC) { 4204 /* HW is reporting DMA is out of sync */ 4205 adapter->stats.doosync++; 4206 } 4207 4208 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 4209 hw->mac.get_link_status = 1; 4210 if (!test_bit(__IGC_DOWN, &adapter->state)) 4211 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4212 } 4213 4214 napi_schedule(&q_vector->napi); 4215 4216 return IRQ_HANDLED; 4217 } 4218 4219 /** 4220 * igc_intr - Legacy Interrupt Handler 4221 * @irq: interrupt number 4222 * @data: pointer to a network interface device structure 4223 */ 4224 static irqreturn_t igc_intr(int irq, void *data) 4225 { 4226 struct igc_adapter *adapter = data; 4227 struct igc_q_vector *q_vector = adapter->q_vector[0]; 4228 struct igc_hw *hw = &adapter->hw; 4229 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No 4230 * need for the IMC write 4231 */ 4232 u32 icr = rd32(IGC_ICR); 4233 4234 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 4235 * not set, then the adapter didn't send an interrupt 4236 */ 4237 if (!(icr & IGC_ICR_INT_ASSERTED)) 4238 return IRQ_NONE; 4239 4240 igc_write_itr(q_vector); 4241 4242 if (icr & IGC_ICR_DRSTA) 4243 schedule_work(&adapter->reset_task); 4244 4245 if (icr & IGC_ICR_DOUTSYNC) { 4246 /* HW is reporting DMA is out of sync */ 4247 adapter->stats.doosync++; 4248 } 4249 4250 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { 4251 hw->mac.get_link_status = 1; 4252 /* guard against interrupt when we're going down */ 4253 if (!test_bit(__IGC_DOWN, &adapter->state)) 4254 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4255 } 4256 4257 napi_schedule(&q_vector->napi); 4258 4259 return IRQ_HANDLED; 4260 } 4261 4262 static void igc_free_irq(struct igc_adapter *adapter) 4263 { 4264 if (adapter->msix_entries) { 4265 int vector = 0, i; 4266 4267 free_irq(adapter->msix_entries[vector++].vector, adapter); 4268 4269 for (i = 0; i < adapter->num_q_vectors; i++) 4270 free_irq(adapter->msix_entries[vector++].vector, 4271 adapter->q_vector[i]); 4272 } else { 4273 free_irq(adapter->pdev->irq, adapter); 4274 } 4275 } 4276 4277 /** 4278 * igc_request_irq - initialize interrupts 4279 * @adapter: Pointer to adapter structure 4280 * 4281 * Attempts to configure interrupts using the best available 4282 * capabilities of the hardware and kernel. 4283 */ 4284 static int igc_request_irq(struct igc_adapter *adapter) 4285 { 4286 struct net_device *netdev = adapter->netdev; 4287 struct pci_dev *pdev = adapter->pdev; 4288 int err = 0; 4289 4290 if (adapter->flags & IGC_FLAG_HAS_MSIX) { 4291 err = igc_request_msix(adapter); 4292 if (!err) 4293 goto request_done; 4294 /* fall back to MSI */ 4295 igc_free_all_tx_resources(adapter); 4296 igc_free_all_rx_resources(adapter); 4297 4298 igc_clear_interrupt_scheme(adapter); 4299 err = igc_init_interrupt_scheme(adapter, false); 4300 if (err) 4301 goto request_done; 4302 igc_setup_all_tx_resources(adapter); 4303 igc_setup_all_rx_resources(adapter); 4304 igc_configure(adapter); 4305 } 4306 4307 igc_assign_vector(adapter->q_vector[0], 0); 4308 4309 if (adapter->flags & IGC_FLAG_HAS_MSI) { 4310 err = request_irq(pdev->irq, &igc_intr_msi, 0, 4311 netdev->name, adapter); 4312 if (!err) 4313 goto request_done; 4314 4315 /* fall back to legacy interrupts */ 4316 igc_reset_interrupt_capability(adapter); 4317 adapter->flags &= ~IGC_FLAG_HAS_MSI; 4318 } 4319 4320 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, 4321 netdev->name, adapter); 4322 4323 if (err) 4324 dev_err(&pdev->dev, "Error %d getting interrupt\n", 4325 err); 4326 4327 request_done: 4328 return err; 4329 } 4330 4331 /** 4332 * __igc_open - Called when a network interface is made active 4333 * @netdev: network interface device structure 4334 * @resuming: boolean indicating if the device is resuming 4335 * 4336 * Returns 0 on success, negative value on failure 4337 * 4338 * The open entry point is called when a network interface is made 4339 * active by the system (IFF_UP). At this point all resources needed 4340 * for transmit and receive operations are allocated, the interrupt 4341 * handler is registered with the OS, the watchdog timer is started, 4342 * and the stack is notified that the interface is ready. 
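 *
 * When @resuming is true the call comes from the resume path rather
 * than from ndo_open, so the runtime PM get/put pair around the open
 * sequence is skipped.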
4343 */ 4344 static int __igc_open(struct net_device *netdev, bool resuming) 4345 { 4346 struct igc_adapter *adapter = netdev_priv(netdev); 4347 struct pci_dev *pdev = adapter->pdev; 4348 struct igc_hw *hw = &adapter->hw; 4349 int err = 0; 4350 int i = 0; 4351 4352 /* disallow open during test */ 4353 4354 if (test_bit(__IGC_TESTING, &adapter->state)) { 4355 WARN_ON(resuming); 4356 return -EBUSY; 4357 } 4358 4359 if (!resuming) 4360 pm_runtime_get_sync(&pdev->dev); 4361 4362 netif_carrier_off(netdev); 4363 4364 /* allocate transmit descriptors */ 4365 err = igc_setup_all_tx_resources(adapter); 4366 if (err) 4367 goto err_setup_tx; 4368 4369 /* allocate receive descriptors */ 4370 err = igc_setup_all_rx_resources(adapter); 4371 if (err) 4372 goto err_setup_rx; 4373 4374 igc_power_up_link(adapter); 4375 4376 igc_configure(adapter); 4377 4378 err = igc_request_irq(adapter); 4379 if (err) 4380 goto err_req_irq; 4381 4382 /* Notify the stack of the actual queue counts. */ 4383 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 4384 if (err) 4385 goto err_set_queues; 4386 4387 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 4388 if (err) 4389 goto err_set_queues; 4390 4391 clear_bit(__IGC_DOWN, &adapter->state); 4392 4393 for (i = 0; i < adapter->num_q_vectors; i++) 4394 napi_enable(&adapter->q_vector[i]->napi); 4395 4396 /* Clear any pending interrupts. */ 4397 rd32(IGC_ICR); 4398 igc_irq_enable(adapter); 4399 4400 if (!resuming) 4401 pm_runtime_put(&pdev->dev); 4402 4403 netif_tx_start_all_queues(netdev); 4404 4405 /* start the watchdog. */ 4406 hw->mac.get_link_status = 1; 4407 schedule_work(&adapter->watchdog_task); 4408 4409 return IGC_SUCCESS; 4410 4411 err_set_queues: 4412 igc_free_irq(adapter); 4413 err_req_irq: 4414 igc_release_hw_control(adapter); 4415 igc_power_down_link(adapter); 4416 igc_free_all_rx_resources(adapter); 4417 err_setup_rx: 4418 igc_free_all_tx_resources(adapter); 4419 err_setup_tx: 4420 igc_reset(adapter); 4421 if (!resuming) 4422 pm_runtime_put(&pdev->dev); 4423 4424 return err; 4425 } 4426 4427 static int igc_open(struct net_device *netdev) 4428 { 4429 return __igc_open(netdev, false); 4430 } 4431 4432 /** 4433 * __igc_close - Disables a network interface 4434 * @netdev: network interface device structure 4435 * @suspending: boolean indicating the device is suspending 4436 * 4437 * Returns 0, this is not allowed to fail 4438 * 4439 * The close entry point is called when an interface is de-activated 4440 * by the OS. The hardware is still under the driver's control, but 4441 * needs to be disabled. A global MAC reset is issued to stop the 4442 * hardware, and all transmit and receive resources are freed. 
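 *
 * As with __igc_open(), @suspending skips the runtime PM get/put pair
 * when the close is driven from the suspend path instead of ndo_stop.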
4443 */ 4444 static int __igc_close(struct net_device *netdev, bool suspending) 4445 { 4446 struct igc_adapter *adapter = netdev_priv(netdev); 4447 struct pci_dev *pdev = adapter->pdev; 4448 4449 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); 4450 4451 if (!suspending) 4452 pm_runtime_get_sync(&pdev->dev); 4453 4454 igc_down(adapter); 4455 4456 igc_release_hw_control(adapter); 4457 4458 igc_free_irq(adapter); 4459 4460 igc_free_all_tx_resources(adapter); 4461 igc_free_all_rx_resources(adapter); 4462 4463 if (!suspending) 4464 pm_runtime_put_sync(&pdev->dev); 4465 4466 return 0; 4467 } 4468 4469 static int igc_close(struct net_device *netdev) 4470 { 4471 if (netif_device_present(netdev) || netdev->dismantle) 4472 return __igc_close(netdev, false); 4473 return 0; 4474 } 4475 4476 /** 4477 * igc_ioctl - Access the hwtstamp interface 4478 * @netdev: network interface device structure 4479 * @ifreq: interface request data 4480 * @cmd: ioctl command 4481 **/ 4482 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4483 { 4484 switch (cmd) { 4485 case SIOCGHWTSTAMP: 4486 return igc_ptp_get_ts_config(netdev, ifr); 4487 case SIOCSHWTSTAMP: 4488 return igc_ptp_set_ts_config(netdev, ifr); 4489 default: 4490 return -EOPNOTSUPP; 4491 } 4492 } 4493 4494 static const struct net_device_ops igc_netdev_ops = { 4495 .ndo_open = igc_open, 4496 .ndo_stop = igc_close, 4497 .ndo_start_xmit = igc_xmit_frame, 4498 .ndo_set_rx_mode = igc_set_rx_mode, 4499 .ndo_set_mac_address = igc_set_mac, 4500 .ndo_change_mtu = igc_change_mtu, 4501 .ndo_get_stats = igc_get_stats, 4502 .ndo_fix_features = igc_fix_features, 4503 .ndo_set_features = igc_set_features, 4504 .ndo_features_check = igc_features_check, 4505 .ndo_do_ioctl = igc_ioctl, 4506 }; 4507 4508 /* PCIe configuration access */ 4509 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 4510 { 4511 struct igc_adapter *adapter = hw->back; 4512 4513 pci_read_config_word(adapter->pdev, reg, value); 4514 } 4515 4516 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 4517 { 4518 struct igc_adapter *adapter = hw->back; 4519 4520 pci_write_config_word(adapter->pdev, reg, *value); 4521 } 4522 4523 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 4524 { 4525 struct igc_adapter *adapter = hw->back; 4526 4527 if (!pci_is_pcie(adapter->pdev)) 4528 return -IGC_ERR_CONFIG; 4529 4530 pcie_capability_read_word(adapter->pdev, reg, value); 4531 4532 return IGC_SUCCESS; 4533 } 4534 4535 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 4536 { 4537 struct igc_adapter *adapter = hw->back; 4538 4539 if (!pci_is_pcie(adapter->pdev)) 4540 return -IGC_ERR_CONFIG; 4541 4542 pcie_capability_write_word(adapter->pdev, reg, *value); 4543 4544 return IGC_SUCCESS; 4545 } 4546 4547 u32 igc_rd32(struct igc_hw *hw, u32 reg) 4548 { 4549 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 4550 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 4551 u32 value = 0; 4552 4553 if (IGC_REMOVED(hw_addr)) 4554 return ~value; 4555 4556 value = readl(&hw_addr[reg]); 4557 4558 /* reads should not return all F's */ 4559 if (!(~value) && (!reg || !(~readl(hw_addr)))) { 4560 struct net_device *netdev = igc->netdev; 4561 4562 hw->hw_addr = NULL; 4563 netif_device_detach(netdev); 4564 netdev_err(netdev, "PCIe link lost, device now detached\n"); 4565 WARN(pci_device_is_present(igc->pdev), 4566 "igc: Failed to read reg 0x%x!\n", reg); 4567 } 4568 4569 return value; 4570 } 4571 4572 int igc_set_spd_dplx(struct igc_adapter 
*adapter, u32 spd, u8 dplx) 4573 { 4574 struct pci_dev *pdev = adapter->pdev; 4575 struct igc_mac_info *mac = &adapter->hw.mac; 4576 4577 mac->autoneg = 0; 4578 4579 /* Make sure dplx is at most 1 bit and lsb of speed is not set 4580 * for the switch() below to work 4581 */ 4582 if ((spd & 1) || (dplx & ~1)) 4583 goto err_inval; 4584 4585 switch (spd + dplx) { 4586 case SPEED_10 + DUPLEX_HALF: 4587 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4588 break; 4589 case SPEED_10 + DUPLEX_FULL: 4590 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4591 break; 4592 case SPEED_100 + DUPLEX_HALF: 4593 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4594 break; 4595 case SPEED_100 + DUPLEX_FULL: 4596 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4597 break; 4598 case SPEED_1000 + DUPLEX_FULL: 4599 mac->autoneg = 1; 4600 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 4601 break; 4602 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 4603 goto err_inval; 4604 case SPEED_2500 + DUPLEX_FULL: 4605 mac->autoneg = 1; 4606 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 4607 break; 4608 case SPEED_2500 + DUPLEX_HALF: /* not supported */ 4609 default: 4610 goto err_inval; 4611 } 4612 4613 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ 4614 adapter->hw.phy.mdix = AUTO_ALL_MODES; 4615 4616 return 0; 4617 4618 err_inval: 4619 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); 4620 return -EINVAL; 4621 } 4622 4623 /** 4624 * igc_probe - Device Initialization Routine 4625 * @pdev: PCI device information struct 4626 * @ent: entry in igc_pci_tbl 4627 * 4628 * Returns 0 on success, negative on failure 4629 * 4630 * igc_probe initializes an adapter identified by a pci_dev structure. 4631 * The OS initialization, configuration of the adapter private structure, 4632 * and a hardware reset occur.
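 *
 * On success the net device is registered with carrier off; the
 * watchdog task brings carrier up once link is detected.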
4633 */ 4634 static int igc_probe(struct pci_dev *pdev, 4635 const struct pci_device_id *ent) 4636 { 4637 struct igc_adapter *adapter; 4638 struct net_device *netdev; 4639 struct igc_hw *hw; 4640 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; 4641 int err, pci_using_dac; 4642 4643 err = pci_enable_device_mem(pdev); 4644 if (err) 4645 return err; 4646 4647 pci_using_dac = 0; 4648 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4649 if (!err) { 4650 pci_using_dac = 1; 4651 } else { 4652 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 4653 if (err) { 4654 dev_err(&pdev->dev, 4655 "No usable DMA configuration, aborting\n"); 4656 goto err_dma; 4657 } 4658 } 4659 4660 err = pci_request_mem_regions(pdev, igc_driver_name); 4661 if (err) 4662 goto err_pci_reg; 4663 4664 pci_enable_pcie_error_reporting(pdev); 4665 4666 pci_set_master(pdev); 4667 4668 err = -ENOMEM; 4669 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), 4670 IGC_MAX_TX_QUEUES); 4671 4672 if (!netdev) 4673 goto err_alloc_etherdev; 4674 4675 SET_NETDEV_DEV(netdev, &pdev->dev); 4676 4677 pci_set_drvdata(pdev, netdev); 4678 adapter = netdev_priv(netdev); 4679 adapter->netdev = netdev; 4680 adapter->pdev = pdev; 4681 hw = &adapter->hw; 4682 hw->back = adapter; 4683 adapter->port_num = hw->bus.func; 4684 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 4685 4686 err = pci_save_state(pdev); 4687 if (err) 4688 goto err_ioremap; 4689 4690 err = -EIO; 4691 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), 4692 pci_resource_len(pdev, 0)); 4693 if (!adapter->io_addr) 4694 goto err_ioremap; 4695 4696 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ 4697 hw->hw_addr = adapter->io_addr; 4698 4699 netdev->netdev_ops = &igc_netdev_ops; 4700 igc_set_ethtool_ops(netdev); 4701 netdev->watchdog_timeo = 5 * HZ; 4702 4703 netdev->mem_start = pci_resource_start(pdev, 0); 4704 netdev->mem_end = pci_resource_end(pdev, 0); 4705 4706 /* PCI config space info */ 4707 hw->vendor_id = pdev->vendor; 4708 hw->device_id = pdev->device; 4709 hw->revision_id = pdev->revision; 4710 hw->subsystem_vendor_id = pdev->subsystem_vendor; 4711 hw->subsystem_device_id = pdev->subsystem_device; 4712 4713 /* Copy the default MAC and PHY function pointers */ 4714 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 4715 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 4716 4717 /* Initialize skew-specific constants */ 4718 err = ei->get_invariants(hw); 4719 if (err) 4720 goto err_sw_init; 4721 4722 /* Add supported features to the features list*/ 4723 netdev->features |= NETIF_F_SG; 4724 netdev->features |= NETIF_F_TSO; 4725 netdev->features |= NETIF_F_TSO6; 4726 netdev->features |= NETIF_F_RXCSUM; 4727 netdev->features |= NETIF_F_HW_CSUM; 4728 netdev->features |= NETIF_F_SCTP_CRC; 4729 4730 /* setup the private structure */ 4731 err = igc_sw_init(adapter); 4732 if (err) 4733 goto err_sw_init; 4734 4735 /* copy netdev features into list of user selectable features */ 4736 netdev->hw_features |= NETIF_F_NTUPLE; 4737 netdev->hw_features |= netdev->features; 4738 4739 if (pci_using_dac) 4740 netdev->features |= NETIF_F_HIGHDMA; 4741 4742 /* MTU range: 68 - 9216 */ 4743 netdev->min_mtu = ETH_MIN_MTU; 4744 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; 4745 4746 /* before reading the NVM, reset the controller to put the device in a 4747 * known good starting state 4748 */ 4749 hw->mac.ops.reset_hw(hw); 4750 4751 if (igc_get_flash_presence_i225(hw)) { 4752 if (hw->nvm.ops.validate(hw) < 0) { 
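			/* An invalid checksum most likely indicates a bad or
			 * unprogrammed NVM image; fail the probe rather than
			 * run with questionable defaults.
			 */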
4753 dev_err(&pdev->dev, 4754 "The NVM Checksum Is Not Valid\n"); 4755 err = -EIO; 4756 goto err_eeprom; 4757 } 4758 } 4759 4760 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { 4761 /* copy the MAC address out of the NVM */ 4762 if (hw->mac.ops.read_mac_addr(hw)) 4763 dev_err(&pdev->dev, "NVM Read Error\n"); 4764 } 4765 4766 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 4767 4768 if (!is_valid_ether_addr(netdev->dev_addr)) { 4769 dev_err(&pdev->dev, "Invalid MAC Address\n"); 4770 err = -EIO; 4771 goto err_eeprom; 4772 } 4773 4774 /* configure RXPBSIZE and TXPBSIZE */ 4775 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); 4776 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); 4777 4778 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); 4779 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); 4780 4781 INIT_WORK(&adapter->reset_task, igc_reset_task); 4782 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); 4783 4784 /* Initialize link properties that are user-changeable */ 4785 adapter->fc_autoneg = true; 4786 hw->mac.autoneg = true; 4787 hw->phy.autoneg_advertised = 0xaf; 4788 4789 hw->fc.requested_mode = igc_fc_default; 4790 hw->fc.current_mode = igc_fc_default; 4791 4792 /* By default, support wake on port A */ 4793 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; 4794 4795 /* initialize the wol settings based on the eeprom settings */ 4796 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) 4797 adapter->wol |= IGC_WUFC_MAG; 4798 4799 device_set_wakeup_enable(&adapter->pdev->dev, 4800 adapter->flags & IGC_FLAG_WOL_SUPPORTED); 4801 4802 /* reset the hardware with the new settings */ 4803 igc_reset(adapter); 4804 4805 /* let the f/w know that the h/w is now under the control of the 4806 * driver. 4807 */ 4808 igc_get_hw_control(adapter); 4809 4810 strncpy(netdev->name, "eth%d", IFNAMSIZ); 4811 err = register_netdev(netdev); 4812 if (err) 4813 goto err_register; 4814 4815 /* carrier off reporting is important to ethtool even BEFORE open */ 4816 netif_carrier_off(netdev); 4817 4818 /* do hw tstamp init after resetting */ 4819 igc_ptp_init(adapter); 4820 4821 /* Check if Media Autosense is enabled */ 4822 adapter->ei = *ei; 4823 4824 /* print pcie link status and MAC address */ 4825 pcie_print_link_status(pdev); 4826 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); 4827 4828 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); 4829 4830 pm_runtime_put_noidle(&pdev->dev); 4831 4832 return 0; 4833 4834 err_register: 4835 igc_release_hw_control(adapter); 4836 err_eeprom: 4837 if (!igc_check_reset_block(hw)) 4838 igc_reset_phy(hw); 4839 err_sw_init: 4840 igc_clear_interrupt_scheme(adapter); 4841 iounmap(adapter->io_addr); 4842 err_ioremap: 4843 free_netdev(netdev); 4844 err_alloc_etherdev: 4845 pci_release_mem_regions(pdev); 4846 err_pci_reg: 4847 err_dma: 4848 pci_disable_device(pdev); 4849 return err; 4850 } 4851 4852 /** 4853 * igc_remove - Device Removal Routine 4854 * @pdev: PCI device information struct 4855 * 4856 * igc_remove is called by the PCI subsystem to alert the driver 4857 * that it should release a PCI device. This could be caused by a 4858 * Hot-Plug event, or because the driver is going to be removed from 4859 * memory. 
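 *
 * Teardown mirrors igc_probe(): timers and work items are stopped,
 * the netdev is unregistered, and the register mapping and PCI
 * resources are released.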
4860 */ 4861 static void igc_remove(struct pci_dev *pdev) 4862 { 4863 struct net_device *netdev = pci_get_drvdata(pdev); 4864 struct igc_adapter *adapter = netdev_priv(netdev); 4865 4866 pm_runtime_get_noresume(&pdev->dev); 4867 4868 igc_ptp_stop(adapter); 4869 4870 set_bit(__IGC_DOWN, &adapter->state); 4871 4872 del_timer_sync(&adapter->watchdog_timer); 4873 del_timer_sync(&adapter->phy_info_timer); 4874 4875 cancel_work_sync(&adapter->reset_task); 4876 cancel_work_sync(&adapter->watchdog_task); 4877 4878 /* Release control of h/w to f/w. If f/w is AMT enabled, this 4879 * would have already happened in close and is redundant. 4880 */ 4881 igc_release_hw_control(adapter); 4882 unregister_netdev(netdev); 4883 4884 igc_clear_interrupt_scheme(adapter); 4885 pci_iounmap(pdev, adapter->io_addr); 4886 pci_release_mem_regions(pdev); 4887 4888 kfree(adapter->mac_table); 4889 free_netdev(netdev); 4890 4891 pci_disable_pcie_error_reporting(pdev); 4892 4893 pci_disable_device(pdev); 4894 } 4895 4896 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, 4897 bool runtime) 4898 { 4899 struct net_device *netdev = pci_get_drvdata(pdev); 4900 struct igc_adapter *adapter = netdev_priv(netdev); 4901 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; 4902 struct igc_hw *hw = &adapter->hw; 4903 u32 ctrl, rctl, status; 4904 bool wake; 4905 4906 rtnl_lock(); 4907 netif_device_detach(netdev); 4908 4909 if (netif_running(netdev)) 4910 __igc_close(netdev, true); 4911 4912 igc_ptp_suspend(adapter); 4913 4914 igc_clear_interrupt_scheme(adapter); 4915 rtnl_unlock(); 4916 4917 status = rd32(IGC_STATUS); 4918 if (status & IGC_STATUS_LU) 4919 wufc &= ~IGC_WUFC_LNKC; 4920 4921 if (wufc) { 4922 igc_setup_rctl(adapter); 4923 igc_set_rx_mode(netdev); 4924 4925 /* turn on all-multi mode if wake on multicast is enabled */ 4926 if (wufc & IGC_WUFC_MC) { 4927 rctl = rd32(IGC_RCTL); 4928 rctl |= IGC_RCTL_MPE; 4929 wr32(IGC_RCTL, rctl); 4930 } 4931 4932 ctrl = rd32(IGC_CTRL); 4933 ctrl |= IGC_CTRL_ADVD3WUC; 4934 wr32(IGC_CTRL, ctrl); 4935 4936 /* Allow time for pending master requests to run */ 4937 igc_disable_pcie_master(hw); 4938 4939 wr32(IGC_WUC, IGC_WUC_PME_EN); 4940 wr32(IGC_WUFC, wufc); 4941 } else { 4942 wr32(IGC_WUC, 0); 4943 wr32(IGC_WUFC, 0); 4944 } 4945 4946 wake = wufc || adapter->en_mng_pt; 4947 if (!wake) 4948 igc_power_down_link(adapter); 4949 else 4950 igc_power_up_link(adapter); 4951 4952 if (enable_wake) 4953 *enable_wake = wake; 4954 4955 /* Release control of h/w to f/w. If f/w is AMT enabled, this 4956 * would have already happened in close and is redundant. 4957 */ 4958 igc_release_hw_control(adapter); 4959 4960 pci_disable_device(pdev); 4961 4962 return 0; 4963 } 4964 4965 #ifdef CONFIG_PM 4966 static int __maybe_unused igc_runtime_suspend(struct device *dev) 4967 { 4968 return __igc_shutdown(to_pci_dev(dev), NULL, 1); 4969 } 4970 4971 static void igc_deliver_wake_packet(struct net_device *netdev) 4972 { 4973 struct igc_adapter *adapter = netdev_priv(netdev); 4974 struct igc_hw *hw = &adapter->hw; 4975 struct sk_buff *skb; 4976 u32 wupl; 4977 4978 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; 4979 4980 /* WUPM stores only the first 128 bytes of the wake packet. 4981 * Read the packet only if we have the whole thing. 
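 * A stored length of zero or greater than IGC_WUPM_BYTES means there is
 * no complete copy of the packet in WUPM, so nothing is delivered to
 * the stack.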
4982 */ 4983 if (wupl == 0 || wupl > IGC_WUPM_BYTES) 4984 return; 4985 4986 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); 4987 if (!skb) 4988 return; 4989 4990 skb_put(skb, wupl); 4991 4992 /* Ensure reads are 32-bit aligned */ 4993 wupl = roundup(wupl, 4); 4994 4995 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); 4996 4997 skb->protocol = eth_type_trans(skb, netdev); 4998 netif_rx(skb); 4999 } 5000 5001 static int __maybe_unused igc_resume(struct device *dev) 5002 { 5003 struct pci_dev *pdev = to_pci_dev(dev); 5004 struct net_device *netdev = pci_get_drvdata(pdev); 5005 struct igc_adapter *adapter = netdev_priv(netdev); 5006 struct igc_hw *hw = &adapter->hw; 5007 u32 err, val; 5008 5009 pci_set_power_state(pdev, PCI_D0); 5010 pci_restore_state(pdev); 5011 pci_save_state(pdev); 5012 5013 if (!pci_device_is_present(pdev)) 5014 return -ENODEV; 5015 err = pci_enable_device_mem(pdev); 5016 if (err) { 5017 dev_err(&pdev->dev, 5018 "igc: Cannot enable PCI device from suspend\n"); 5019 return err; 5020 } 5021 pci_set_master(pdev); 5022 5023 pci_enable_wake(pdev, PCI_D3hot, 0); 5024 pci_enable_wake(pdev, PCI_D3cold, 0); 5025 5026 if (igc_init_interrupt_scheme(adapter, true)) { 5027 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 5028 return -ENOMEM; 5029 } 5030 5031 igc_reset(adapter); 5032 5033 /* let the f/w know that the h/w is now under the control of the 5034 * driver. 5035 */ 5036 igc_get_hw_control(adapter); 5037 5038 val = rd32(IGC_WUS); 5039 if (val & WAKE_PKT_WUS) 5040 igc_deliver_wake_packet(netdev); 5041 5042 wr32(IGC_WUS, ~0); 5043 5044 rtnl_lock(); 5045 if (!err && netif_running(netdev)) 5046 err = __igc_open(netdev, true); 5047 5048 if (!err) 5049 netif_device_attach(netdev); 5050 rtnl_unlock(); 5051 5052 return err; 5053 } 5054 5055 static int __maybe_unused igc_runtime_resume(struct device *dev) 5056 { 5057 return igc_resume(dev); 5058 } 5059 5060 static int __maybe_unused igc_suspend(struct device *dev) 5061 { 5062 return __igc_shutdown(to_pci_dev(dev), NULL, 0); 5063 } 5064 5065 static int __maybe_unused igc_runtime_idle(struct device *dev) 5066 { 5067 struct net_device *netdev = dev_get_drvdata(dev); 5068 struct igc_adapter *adapter = netdev_priv(netdev); 5069 5070 if (!igc_has_link(adapter)) 5071 pm_schedule_suspend(dev, MSEC_PER_SEC * 5); 5072 5073 return -EBUSY; 5074 } 5075 #endif /* CONFIG_PM */ 5076 5077 static void igc_shutdown(struct pci_dev *pdev) 5078 { 5079 bool wake; 5080 5081 __igc_shutdown(pdev, &wake, 0); 5082 5083 if (system_state == SYSTEM_POWER_OFF) { 5084 pci_wake_from_d3(pdev, wake); 5085 pci_set_power_state(pdev, PCI_D3hot); 5086 } 5087 } 5088 5089 /** 5090 * igc_io_error_detected - called when PCI error is detected 5091 * @pdev: Pointer to PCI device 5092 * @state: The current PCI connection state 5093 * 5094 * This function is called after a PCI bus error affecting 5095 * this device has been detected. 5096 **/ 5097 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, 5098 pci_channel_state_t state) 5099 { 5100 struct net_device *netdev = pci_get_drvdata(pdev); 5101 struct igc_adapter *adapter = netdev_priv(netdev); 5102 5103 netif_device_detach(netdev); 5104 5105 if (state == pci_channel_io_perm_failure) 5106 return PCI_ERS_RESULT_DISCONNECT; 5107 5108 if (netif_running(netdev)) 5109 igc_down(adapter); 5110 pci_disable_device(pdev); 5111 5112 /* Request a slot reset. */ 5113 return PCI_ERS_RESULT_NEED_RESET; 5114 } 5115 5116 /** 5117 * igc_io_slot_reset - called after the PCI bus has been reset. 
5118 * @pdev: Pointer to PCI device 5119 * 5120 * Restart the card from scratch, as if from a cold-boot. Implementation 5121 * resembles the first half of the igc_resume routine. 5122 **/ 5123 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) 5124 { 5125 struct net_device *netdev = pci_get_drvdata(pdev); 5126 struct igc_adapter *adapter = netdev_priv(netdev); 5127 struct igc_hw *hw = &adapter->hw; 5128 pci_ers_result_t result; 5129 5130 if (pci_enable_device_mem(pdev)) { 5131 dev_err(&pdev->dev, 5132 "Could not re-enable PCI device after reset.\n"); 5133 result = PCI_ERS_RESULT_DISCONNECT; 5134 } else { 5135 pci_set_master(pdev); 5136 pci_restore_state(pdev); 5137 pci_save_state(pdev); 5138 5139 pci_enable_wake(pdev, PCI_D3hot, 0); 5140 pci_enable_wake(pdev, PCI_D3cold, 0); 5141 5142 /* In case of PCI error, adapter loses its HW address 5143 * so we should re-assign it here. 5144 */ 5145 hw->hw_addr = adapter->io_addr; 5146 5147 igc_reset(adapter); 5148 wr32(IGC_WUS, ~0); 5149 result = PCI_ERS_RESULT_RECOVERED; 5150 } 5151 5152 return result; 5153 } 5154 5155 /** 5156 * igc_io_resume - called when traffic can start to flow again. 5157 * @pdev: Pointer to PCI device 5158 * 5159 * This callback is called when the error recovery driver tells us that 5160 * it's OK to resume normal operation. Implementation resembles the 5161 * second half of the igc_resume routine. 5162 */ 5163 static void igc_io_resume(struct pci_dev *pdev) 5164 { 5165 struct net_device *netdev = pci_get_drvdata(pdev); 5166 struct igc_adapter *adapter = netdev_priv(netdev); 5167 5168 rtnl_lock(); 5169 if (netif_running(netdev)) { 5170 if (igc_open(netdev)) { 5171 dev_err(&pdev->dev, "igc_open failed after reset\n"); rtnl_unlock(); 5172 return; 5173 } 5174 } 5175 5176 netif_device_attach(netdev); 5177 5178 /* let the f/w know that the h/w is now under the control of the 5179 * driver.
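 * This mirrors what igc_probe() and igc_resume() do once the hardware
 * has been re-initialized.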
5180 */ 5181 igc_get_hw_control(adapter); 5182 rtnl_unlock(); 5183 } 5184 5185 static const struct pci_error_handlers igc_err_handler = { 5186 .error_detected = igc_io_error_detected, 5187 .slot_reset = igc_io_slot_reset, 5188 .resume = igc_io_resume, 5189 }; 5190 5191 #ifdef CONFIG_PM 5192 static const struct dev_pm_ops igc_pm_ops = { 5193 SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) 5194 SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, 5195 igc_runtime_idle) 5196 }; 5197 #endif 5198 5199 static struct pci_driver igc_driver = { 5200 .name = igc_driver_name, 5201 .id_table = igc_pci_tbl, 5202 .probe = igc_probe, 5203 .remove = igc_remove, 5204 #ifdef CONFIG_PM 5205 .driver.pm = &igc_pm_ops, 5206 #endif 5207 .shutdown = igc_shutdown, 5208 .err_handler = &igc_err_handler, 5209 }; 5210 5211 /** 5212 * igc_reinit_queues - reinitialize the adapter queues and interrupt scheme 5213 * @adapter: pointer to adapter structure 5214 */ 5215 int igc_reinit_queues(struct igc_adapter *adapter) 5216 { 5217 struct net_device *netdev = adapter->netdev; 5218 struct pci_dev *pdev = adapter->pdev; 5219 int err = 0; 5220 5221 if (netif_running(netdev)) 5222 igc_close(netdev); 5223 5224 igc_reset_interrupt_capability(adapter); 5225 5226 if (igc_init_interrupt_scheme(adapter, true)) { 5227 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 5228 return -ENOMEM; 5229 } 5230 5231 if (netif_running(netdev)) 5232 err = igc_open(netdev); 5233 5234 return err; 5235 } 5236 5237 /** 5238 * igc_get_hw_dev - return device 5239 * @hw: pointer to hardware structure 5240 * 5241 * Used by the hardware layer to print debugging information 5242 */ 5243 struct net_device *igc_get_hw_dev(struct igc_hw *hw) 5244 { 5245 struct igc_adapter *adapter = hw->back; 5246 5247 return adapter->netdev; 5248 } 5249 5250 /** 5251 * igc_init_module - Driver Registration Routine 5252 * 5253 * igc_init_module is the first routine called when the driver is 5254 * loaded. All it does is register with the PCI subsystem. 5255 */ 5256 static int __init igc_init_module(void) 5257 { 5258 int ret; 5259 5260 pr_info("%s - version %s\n", 5261 igc_driver_string, igc_driver_version); 5262 5263 pr_info("%s\n", igc_copyright); 5264 5265 ret = pci_register_driver(&igc_driver); 5266 return ret; 5267 } 5268 5269 module_init(igc_init_module); 5270 5271 /** 5272 * igc_exit_module - Driver Exit Cleanup Routine 5273 * 5274 * igc_exit_module is called just before the driver is removed 5275 * from memory. 5276 */ 5277 static void __exit igc_exit_module(void) 5278 { 5279 pci_unregister_driver(&igc_driver); 5280 } 5281 5282 module_exit(igc_exit_module); 5283 /* igc_main.c */ 5284