// SPDX-License-Identifier: GPL-2.0
/* Copyright (c)  2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

/* Verdict codes returned by the XDP program dispatch path; combinable bits
 * except IGC_XDP_PASS which means "hand the frame to the network stack".
 */
#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

/* -1 selects the driver default message level (see DEFAULT_MSG_ENABLE) */
static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
				"Copyright(c) 2018 Intel Corporation.";

/* Per-board feature/ops table; all supported parts use the "base" info */
static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

/* PCI device IDs this driver binds to (I220/I221/I225/I226 families) */
static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

/* Buckets used by the interrupt-moderation heuristics elsewhere in the
 * driver; latency_invalid marks an uninitialized/unknown range.
 */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igc_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Recomputes flow-control watermarks, resets and re-initializes the MAC,
 * then re-applies the settings that a reset clears (EEE, VLAN ethertype,
 * PTP, TSN offloads). If the interface is down, the copper PHY is powered
 * down to save energy.
 */
void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	/* Interface is down: no traffic expected, so power the PHY down */
	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_reset(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Skip the register access if the device was surprise-removed */
	if (!pci_device_is_present(adapter->pdev))
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/* Undo the streaming DMA mapping of a Tx buffer and clear its length so the
 * buffer is not unmapped a second time.
 */
static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	/* Walk every outstanding (not yet cleaned) descriptor */
	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		/* Release the head buffer of the packet according to how it
		 * was submitted (XSK zero-copy, XDP frame, or plain skb).
		 */
		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			/* XSK buffers are returned in bulk below */
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* Zero out the buffer ring */
	memset(tx_ring->tx_buffer_info, 0,
	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_disable_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/* Disable one Tx queue in hardware: clear the queue-enable bit and request
 * a software flush of any pending descriptors (TXDCTL.SWFLUSH).
 */
static void igc_disable_tx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 txdctl;

	txdctl = rd32(IGC_TXDCTL(idx));
	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
	txdctl |= IGC_TXDCTL_SWFLUSH;
	wr32(IGC_TXDCTL(idx), txdctl);
}

/**
 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
 * @adapter: board private structure
 */
static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *tx_ring = adapter->tx_ring[i];

		igc_disable_tx_ring_hw(tx_ring);
	}
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	/* vfree(NULL) is a no-op, so this is safe on either failure path */
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			/* unwind the queues that were already set up */
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/* Free Rx buffers for a ring that uses the page-shared (non-XSK) memory
 * model: sync each page back to the CPU, unmap it and drop the page-frag
 * reference that the driver holds.
 */
static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}
}

/* Free Rx buffers for a ring backed by an AF_XDP zero-copy buffer pool:
 * return every outstanding xdp buffer to the pool.
 */
static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
{
	struct igc_rx_buffer *bi;
	u16 i;

	for (i = 0; i < ring->count; i++) {
		bi = &ring->rx_buffer_info[i];
		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *ring)
{
	/* dispatch on the ring's current memory model */
	if (ring->xsk_pool)
		igc_clean_rx_ring_xsk_pool(ring);
	else
		igc_clean_rx_ring_page_shared(ring);

	clear_ring_uses_large_buffer(ring);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	/* XDP RX-queue info */
	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	/* undo the xdp_rxq registration done above; vfree(NULL) is safe */
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			/* unwind the queues that were already set up */
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/* Return the AF_XDP buffer pool bound to this ring's queue, or NULL when
 * XDP is not enabled or the ring is not flagged for zero-copy.
 */
static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	/* Re-register the XDP memory model to match the (possibly changed)
	 * buffer source: XSK zero-copy pool vs. page-shared.
	 */
	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	/* program buffer sizes and descriptor type, preserving other bits */
	srrctl = rd32(IGC_SRRCTL(reg_idx));
	srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
		    IGC_SRRCTL_DESCTYPE_MASK);
	srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
	srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	/* prefetch/host/writeback thresholds for descriptor fetching */
	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	/* refresh the zero-copy pool binding for this Tx queue */
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue and flush posted writes before touching the
	 * base-address registers
	 */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	/* prefetch/host/writeback thresholds for the descriptor engine */
	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	/* program a fresh random RSS hash key */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	/* (Re)build the indirection table only when the queue count has
	 * changed, spreading entries evenly across the RSS queues.
	 */
	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	/* UDP hashing is opt-in via adapter flags */
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	/* clear multicast-offset and loopback-mode bits */
	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in
 *			     hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	   assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	/* RAL takes the low 4 bytes of the address, RAH the high 2,
	 * both in little-endian register layout.
	 */
	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	/* select source-address matching instead of the default
	 * destination-address matching
	 */
	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	/* mark the entry valid */
	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	/* zeroing RAH clears the address-valid bit, disabling the entry */
	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success,
negative on failure 981c9a11c23SSasha Neftin */ 982c9a11c23SSasha Neftin static int igc_set_mac(struct net_device *netdev, void *p) 983c9a11c23SSasha Neftin { 984c9a11c23SSasha Neftin struct igc_adapter *adapter = netdev_priv(netdev); 985c9a11c23SSasha Neftin struct igc_hw *hw = &adapter->hw; 986c9a11c23SSasha Neftin struct sockaddr *addr = p; 987c9a11c23SSasha Neftin 988c9a11c23SSasha Neftin if (!is_valid_ether_addr(addr->sa_data)) 989c9a11c23SSasha Neftin return -EADDRNOTAVAIL; 990c9a11c23SSasha Neftin 991a05e4c0aSJakub Kicinski eth_hw_addr_set(netdev, addr->sa_data); 992c9a11c23SSasha Neftin memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 993c9a11c23SSasha Neftin 994c9a11c23SSasha Neftin /* set the correct pool for the new PF MAC address in entry 0 */ 995c9a11c23SSasha Neftin igc_set_default_mac_filter(adapter); 996c9a11c23SSasha Neftin 997c9a11c23SSasha Neftin return 0; 998c9a11c23SSasha Neftin } 999c9a11c23SSasha Neftin 10007f839684SSasha Neftin /** 10017f839684SSasha Neftin * igc_write_mc_addr_list - write multicast addresses to MTA 10027f839684SSasha Neftin * @netdev: network interface device structure 10037f839684SSasha Neftin * 10047f839684SSasha Neftin * Writes multicast address list to the MTA hash table. 
10057f839684SSasha Neftin * Returns: -ENOMEM on failure 10067f839684SSasha Neftin * 0 on no addresses written 10077f839684SSasha Neftin * X on writing X addresses to MTA 10087f839684SSasha Neftin **/ 10097f839684SSasha Neftin static int igc_write_mc_addr_list(struct net_device *netdev) 10107f839684SSasha Neftin { 10117f839684SSasha Neftin struct igc_adapter *adapter = netdev_priv(netdev); 10127f839684SSasha Neftin struct igc_hw *hw = &adapter->hw; 10137f839684SSasha Neftin struct netdev_hw_addr *ha; 10147f839684SSasha Neftin u8 *mta_list; 10157f839684SSasha Neftin int i; 10167f839684SSasha Neftin 10177f839684SSasha Neftin if (netdev_mc_empty(netdev)) { 10187f839684SSasha Neftin /* nothing to program, so clear mc list */ 10197f839684SSasha Neftin igc_update_mc_addr_list(hw, NULL, 0); 10207f839684SSasha Neftin return 0; 10217f839684SSasha Neftin } 10227f839684SSasha Neftin 10237f839684SSasha Neftin mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); 10247f839684SSasha Neftin if (!mta_list) 10257f839684SSasha Neftin return -ENOMEM; 10267f839684SSasha Neftin 10277f839684SSasha Neftin /* The shared function expects a packed array of only addresses. 
*/ 10287f839684SSasha Neftin i = 0; 10297f839684SSasha Neftin netdev_for_each_mc_addr(ha, netdev) 10307f839684SSasha Neftin memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 10317f839684SSasha Neftin 10327f839684SSasha Neftin igc_update_mc_addr_list(hw, mta_list, i); 10337f839684SSasha Neftin kfree(mta_list); 10347f839684SSasha Neftin 10357f839684SSasha Neftin return netdev_mc_count(netdev); 10367f839684SSasha Neftin } 10377f839684SSasha Neftin 1038db0b124fSVinicius Costa Gomes static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, 1039db0b124fSVinicius Costa Gomes bool *first_flag, bool *insert_empty) 104082faa9b7SVinicius Costa Gomes { 1041db0b124fSVinicius Costa Gomes struct igc_adapter *adapter = netdev_priv(ring->netdev); 104282faa9b7SVinicius Costa Gomes ktime_t cycle_time = adapter->cycle_time; 104382faa9b7SVinicius Costa Gomes ktime_t base_time = adapter->base_time; 1044db0b124fSVinicius Costa Gomes ktime_t now = ktime_get_clocktai(); 1045db0b124fSVinicius Costa Gomes ktime_t baset_est, end_of_cycle; 1046c1bca9acSFlorian Kauer s32 launchtime; 1047db0b124fSVinicius Costa Gomes s64 n; 104882faa9b7SVinicius Costa Gomes 1049db0b124fSVinicius Costa Gomes n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); 1050db0b124fSVinicius Costa Gomes 1051db0b124fSVinicius Costa Gomes baset_est = ktime_add_ns(base_time, cycle_time * (n)); 1052db0b124fSVinicius Costa Gomes end_of_cycle = ktime_add_ns(baset_est, cycle_time); 1053db0b124fSVinicius Costa Gomes 1054db0b124fSVinicius Costa Gomes if (ktime_compare(txtime, end_of_cycle) >= 0) { 1055db0b124fSVinicius Costa Gomes if (baset_est != ring->last_ff_cycle) { 1056db0b124fSVinicius Costa Gomes *first_flag = true; 1057db0b124fSVinicius Costa Gomes ring->last_ff_cycle = baset_est; 1058db0b124fSVinicius Costa Gomes 10590bcc6285SFlorian Kauer if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) 1060db0b124fSVinicius Costa Gomes *insert_empty = true; 1061db0b124fSVinicius Costa Gomes } 
1062db0b124fSVinicius Costa Gomes } 1063db0b124fSVinicius Costa Gomes 1064db0b124fSVinicius Costa Gomes /* Introducing a window at end of cycle on which packets 1065db0b124fSVinicius Costa Gomes * potentially not honor launchtime. Window of 5us chosen 1066db0b124fSVinicius Costa Gomes * considering software update the tail pointer and packets 1067db0b124fSVinicius Costa Gomes * are dma'ed to packet buffer. 106882faa9b7SVinicius Costa Gomes */ 1069db0b124fSVinicius Costa Gomes if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) 1070db0b124fSVinicius Costa Gomes netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", 1071db0b124fSVinicius Costa Gomes txtime); 1072db0b124fSVinicius Costa Gomes 1073db0b124fSVinicius Costa Gomes ring->last_tx_cycle = end_of_cycle; 1074db0b124fSVinicius Costa Gomes 1075db0b124fSVinicius Costa Gomes launchtime = ktime_sub_ns(txtime, baset_est); 1076db0b124fSVinicius Costa Gomes if (launchtime > 0) 1077db0b124fSVinicius Costa Gomes div_s64_rem(launchtime, cycle_time, &launchtime); 1078db0b124fSVinicius Costa Gomes else 1079db0b124fSVinicius Costa Gomes launchtime = 0; 108082faa9b7SVinicius Costa Gomes 108182faa9b7SVinicius Costa Gomes return cpu_to_le32(launchtime); 108282faa9b7SVinicius Costa Gomes } 108382faa9b7SVinicius Costa Gomes 1084db0b124fSVinicius Costa Gomes static int igc_init_empty_frame(struct igc_ring *ring, 1085db0b124fSVinicius Costa Gomes struct igc_tx_buffer *buffer, 1086db0b124fSVinicius Costa Gomes struct sk_buff *skb) 1087db0b124fSVinicius Costa Gomes { 1088db0b124fSVinicius Costa Gomes unsigned int size; 1089db0b124fSVinicius Costa Gomes dma_addr_t dma; 1090db0b124fSVinicius Costa Gomes 1091db0b124fSVinicius Costa Gomes size = skb_headlen(skb); 1092db0b124fSVinicius Costa Gomes 1093db0b124fSVinicius Costa Gomes dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); 1094db0b124fSVinicius Costa Gomes if (dma_mapping_error(ring->dev, dma)) { 1095db0b124fSVinicius Costa Gomes 
netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); 1096db0b124fSVinicius Costa Gomes return -ENOMEM; 1097db0b124fSVinicius Costa Gomes } 1098db0b124fSVinicius Costa Gomes 1099db0b124fSVinicius Costa Gomes buffer->skb = skb; 1100db0b124fSVinicius Costa Gomes buffer->protocol = 0; 1101db0b124fSVinicius Costa Gomes buffer->bytecount = skb->len; 1102db0b124fSVinicius Costa Gomes buffer->gso_segs = 1; 1103db0b124fSVinicius Costa Gomes buffer->time_stamp = jiffies; 1104db0b124fSVinicius Costa Gomes dma_unmap_len_set(buffer, len, skb->len); 1105db0b124fSVinicius Costa Gomes dma_unmap_addr_set(buffer, dma, dma); 1106db0b124fSVinicius Costa Gomes 1107db0b124fSVinicius Costa Gomes return 0; 1108db0b124fSVinicius Costa Gomes } 1109db0b124fSVinicius Costa Gomes 1110db0b124fSVinicius Costa Gomes static int igc_init_tx_empty_descriptor(struct igc_ring *ring, 1111db0b124fSVinicius Costa Gomes struct sk_buff *skb, 1112db0b124fSVinicius Costa Gomes struct igc_tx_buffer *first) 1113db0b124fSVinicius Costa Gomes { 1114db0b124fSVinicius Costa Gomes union igc_adv_tx_desc *desc; 1115db0b124fSVinicius Costa Gomes u32 cmd_type, olinfo_status; 1116db0b124fSVinicius Costa Gomes int err; 1117db0b124fSVinicius Costa Gomes 1118db0b124fSVinicius Costa Gomes if (!igc_desc_unused(ring)) 1119db0b124fSVinicius Costa Gomes return -EBUSY; 1120db0b124fSVinicius Costa Gomes 1121db0b124fSVinicius Costa Gomes err = igc_init_empty_frame(ring, first, skb); 1122db0b124fSVinicius Costa Gomes if (err) 1123db0b124fSVinicius Costa Gomes return err; 1124db0b124fSVinicius Costa Gomes 1125db0b124fSVinicius Costa Gomes cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 1126db0b124fSVinicius Costa Gomes IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 1127db0b124fSVinicius Costa Gomes first->bytecount; 1128db0b124fSVinicius Costa Gomes olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; 1129db0b124fSVinicius Costa Gomes 1130db0b124fSVinicius Costa Gomes desc = IGC_TX_DESC(ring, ring->next_to_use); 
1131db0b124fSVinicius Costa Gomes desc->read.cmd_type_len = cpu_to_le32(cmd_type); 1132db0b124fSVinicius Costa Gomes desc->read.olinfo_status = cpu_to_le32(olinfo_status); 1133db0b124fSVinicius Costa Gomes desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); 1134db0b124fSVinicius Costa Gomes 1135db0b124fSVinicius Costa Gomes netdev_tx_sent_queue(txring_txq(ring), skb->len); 1136db0b124fSVinicius Costa Gomes 1137db0b124fSVinicius Costa Gomes first->next_to_watch = desc; 1138db0b124fSVinicius Costa Gomes 1139db0b124fSVinicius Costa Gomes ring->next_to_use++; 1140db0b124fSVinicius Costa Gomes if (ring->next_to_use == ring->count) 1141db0b124fSVinicius Costa Gomes ring->next_to_use = 0; 1142db0b124fSVinicius Costa Gomes 1143db0b124fSVinicius Costa Gomes return 0; 1144db0b124fSVinicius Costa Gomes } 1145db0b124fSVinicius Costa Gomes 1146db0b124fSVinicius Costa Gomes #define IGC_EMPTY_FRAME_SIZE 60 1147db0b124fSVinicius Costa Gomes 1148d3ae3cfbSSasha Neftin static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, 1149db0b124fSVinicius Costa Gomes __le32 launch_time, bool first_flag, 1150d3ae3cfbSSasha Neftin u32 vlan_macip_lens, u32 type_tucmd, 1151d3ae3cfbSSasha Neftin u32 mss_l4len_idx) 1152d3ae3cfbSSasha Neftin { 1153d3ae3cfbSSasha Neftin struct igc_adv_tx_context_desc *context_desc; 1154d3ae3cfbSSasha Neftin u16 i = tx_ring->next_to_use; 1155d3ae3cfbSSasha Neftin 1156d3ae3cfbSSasha Neftin context_desc = IGC_TX_CTXTDESC(tx_ring, i); 1157d3ae3cfbSSasha Neftin 1158d3ae3cfbSSasha Neftin i++; 1159d3ae3cfbSSasha Neftin tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 1160d3ae3cfbSSasha Neftin 1161d3ae3cfbSSasha Neftin /* set bits to identify this as an advanced context descriptor */ 1162d3ae3cfbSSasha Neftin type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; 1163d3ae3cfbSSasha Neftin 116493d85dc5SSasha Neftin /* For i225, context index must be unique per ring. 
*/ 1165d3ae3cfbSSasha Neftin if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) 1166d3ae3cfbSSasha Neftin mss_l4len_idx |= tx_ring->reg_idx << 4; 1167d3ae3cfbSSasha Neftin 1168db0b124fSVinicius Costa Gomes if (first_flag) 1169db0b124fSVinicius Costa Gomes mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; 1170db0b124fSVinicius Costa Gomes 1171d3ae3cfbSSasha Neftin context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 1172d3ae3cfbSSasha Neftin context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 1173d3ae3cfbSSasha Neftin context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1174db0b124fSVinicius Costa Gomes context_desc->launch_time = launch_time; 1175d3ae3cfbSSasha Neftin } 1176d3ae3cfbSSasha Neftin 1177db0b124fSVinicius Costa Gomes static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, 1178db0b124fSVinicius Costa Gomes __le32 launch_time, bool first_flag) 1179c9a11c23SSasha Neftin { 1180d3ae3cfbSSasha Neftin struct sk_buff *skb = first->skb; 1181d3ae3cfbSSasha Neftin u32 vlan_macip_lens = 0; 1182d3ae3cfbSSasha Neftin u32 type_tucmd = 0; 1183d3ae3cfbSSasha Neftin 1184d3ae3cfbSSasha Neftin if (skb->ip_summed != CHECKSUM_PARTIAL) { 1185d3ae3cfbSSasha Neftin csum_failed: 1186d3ae3cfbSSasha Neftin if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && 1187d3ae3cfbSSasha Neftin !tx_ring->launchtime_enable) 1188d3ae3cfbSSasha Neftin return; 1189d3ae3cfbSSasha Neftin goto no_csum; 1190d3ae3cfbSSasha Neftin } 1191d3ae3cfbSSasha Neftin 1192d3ae3cfbSSasha Neftin switch (skb->csum_offset) { 1193d3ae3cfbSSasha Neftin case offsetof(struct tcphdr, check): 1194d3ae3cfbSSasha Neftin type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; 11955463fce6SJeff Kirsher fallthrough; 1196d3ae3cfbSSasha Neftin case offsetof(struct udphdr, check): 1197d3ae3cfbSSasha Neftin break; 1198d3ae3cfbSSasha Neftin case offsetof(struct sctphdr, checksum): 1199d3ae3cfbSSasha Neftin /* validate that this is actually an SCTP request */ 1200609d29a9SXin Long if (skb_csum_is_sctp(skb)) { 
1201d3ae3cfbSSasha Neftin type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; 1202d3ae3cfbSSasha Neftin break; 1203d3ae3cfbSSasha Neftin } 12045463fce6SJeff Kirsher fallthrough; 1205d3ae3cfbSSasha Neftin default: 1206d3ae3cfbSSasha Neftin skb_checksum_help(skb); 1207d3ae3cfbSSasha Neftin goto csum_failed; 1208d3ae3cfbSSasha Neftin } 1209d3ae3cfbSSasha Neftin 1210d3ae3cfbSSasha Neftin /* update TX checksum flag */ 1211d3ae3cfbSSasha Neftin first->tx_flags |= IGC_TX_FLAGS_CSUM; 1212d3ae3cfbSSasha Neftin vlan_macip_lens = skb_checksum_start_offset(skb) - 1213d3ae3cfbSSasha Neftin skb_network_offset(skb); 1214d3ae3cfbSSasha Neftin no_csum: 1215d3ae3cfbSSasha Neftin vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; 1216d3ae3cfbSSasha Neftin vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; 1217d3ae3cfbSSasha Neftin 1218db0b124fSVinicius Costa Gomes igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, 1219db0b124fSVinicius Costa Gomes vlan_macip_lens, type_tucmd, 0); 12200507ef8aSSasha Neftin } 12210507ef8aSSasha Neftin 12220507ef8aSSasha Neftin static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 12230507ef8aSSasha Neftin { 12240507ef8aSSasha Neftin struct net_device *netdev = tx_ring->netdev; 12250507ef8aSSasha Neftin 12260507ef8aSSasha Neftin netif_stop_subqueue(netdev, tx_ring->queue_index); 12270507ef8aSSasha Neftin 12280507ef8aSSasha Neftin /* memory barriier comment */ 12290507ef8aSSasha Neftin smp_mb(); 12300507ef8aSSasha Neftin 12310507ef8aSSasha Neftin /* We need to check again in a case another CPU has just 12320507ef8aSSasha Neftin * made room available. 12330507ef8aSSasha Neftin */ 12340507ef8aSSasha Neftin if (igc_desc_unused(tx_ring) < size) 12350507ef8aSSasha Neftin return -EBUSY; 12360507ef8aSSasha Neftin 12370507ef8aSSasha Neftin /* A reprieve! 
*/ 12380507ef8aSSasha Neftin netif_wake_subqueue(netdev, tx_ring->queue_index); 12390507ef8aSSasha Neftin 12400507ef8aSSasha Neftin u64_stats_update_begin(&tx_ring->tx_syncp2); 12410507ef8aSSasha Neftin tx_ring->tx_stats.restart_queue2++; 12420507ef8aSSasha Neftin u64_stats_update_end(&tx_ring->tx_syncp2); 12430507ef8aSSasha Neftin 12440507ef8aSSasha Neftin return 0; 12450507ef8aSSasha Neftin } 12460507ef8aSSasha Neftin 12470507ef8aSSasha Neftin static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) 12480507ef8aSSasha Neftin { 12490507ef8aSSasha Neftin if (igc_desc_unused(tx_ring) >= size) 12500507ef8aSSasha Neftin return 0; 12510507ef8aSSasha Neftin return __igc_maybe_stop_tx(tx_ring, size); 12520507ef8aSSasha Neftin } 12530507ef8aSSasha Neftin 12542c344ae2SVinicius Costa Gomes #define IGC_SET_FLAG(_input, _flag, _result) \ 12552c344ae2SVinicius Costa Gomes (((_flag) <= (_result)) ? \ 12562c344ae2SVinicius Costa Gomes ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ 12572c344ae2SVinicius Costa Gomes ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) 12582c344ae2SVinicius Costa Gomes 12598d744963SMuhammad Husaini Zulkifli static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) 12600507ef8aSSasha Neftin { 12610507ef8aSSasha Neftin /* set type for advanced descriptor with frame checksum insertion */ 12620507ef8aSSasha Neftin u32 cmd_type = IGC_ADVTXD_DTYP_DATA | 12630507ef8aSSasha Neftin IGC_ADVTXD_DCMD_DEXT | 12640507ef8aSSasha Neftin IGC_ADVTXD_DCMD_IFCS; 12650507ef8aSSasha Neftin 12668d744963SMuhammad Husaini Zulkifli /* set HW vlan bit if vlan is present */ 12678d744963SMuhammad Husaini Zulkifli cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, 12688d744963SMuhammad Husaini Zulkifli IGC_ADVTXD_DCMD_VLE); 12698d744963SMuhammad Husaini Zulkifli 1270f38b782dSSasha Neftin /* set segmentation bits for TSO */ 1271f38b782dSSasha Neftin cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, 1272f38b782dSSasha Neftin 
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present, will select the register set
	 * based on the _TSTAMP(_X) bit.
	 */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1,
				 (IGC_ADVTXD_TSTAMP_REG_1));

	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2,
				 (IGC_ADVTXD_TSTAMP_REG_2));

	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3,
				 (IGC_ADVTXD_TSTAMP_REG_3));

	/* insert frame checksum; the XOR clears IFCS again when no_fcs is set */
	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

/* Fill the olinfo_status field of an advanced Tx descriptor: payload
 * length plus the L4 (TXSM) and IPv4 (IXSM) checksum-offload popts bits.
 */
static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

/* DMA-map the skb head and all fragments and post Tx descriptors for them,
 * splitting any buffer larger than IGC_MAX_DATA_PER_TXD across multiple
 * descriptors.  On success advances next_to_use and writes the tail
 * doorbell; on a mapping error unwinds every mapping made so far, frees
 * the skb and returns -1.
 */
static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = igc_tx_cmd_type(skb, tx_flags);
	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* split oversized buffers into IGC_MAX_DATA_PER_TXD chunks */
		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* ring the doorbell unless more frames are about to be queued */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

	/* tx_buffer == first here; drop the skb so the caller need not */
	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

/* Prepare a TSO context descriptor for a GSO skb.  Returns 1 when TSO was
 * set up, 0 when the skb needs no segmentation, or a negative errno from
 * skb_cow_head().
 */
static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   __le32 launch_time, bool first_flag,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* headers are modified below, so make sure they are writable */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

/* Claim a free hardware Tx timestamp register slot for @skb.  On success
 * holds an extra skb reference in the slot, reports the slot's descriptor
 * flags through @flags and returns true; returns false when every slot is
 * busy.  Caller holds adapter->ptp_tx_lock.
 */
static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags)
{
	int i;

	for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
		struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];

		if (tstamp->skb)
			continue;

		tstamp->skb = skb_get(skb);
		tstamp->start = jiffies;
		*flags = tstamp->flags;

		return true;
	}

	return false;
}

/* Per-ring transmit path: reserve ring space, handle launch time and
 * timestamping, then hand the skb to igc_tso()/igc_tx_csum() and
 * igc_tx_map().
 */
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
	bool first_flag = false, insert_empty = false;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	__le32
	       launch_time = 0;
	u32 tx_flags = 0;
	unsigned short f;
	ktime_t txtime;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (!tx_ring->launchtime_enable)
		goto done;

	txtime = skb->tstamp;
	skb->tstamp = ktime_set(0, 0);
	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);

	/* queue a zero-length filler frame when the launch-time logic asks
	 * for one; allocation failure just skips the filler
	 */
	if (insert_empty) {
		struct igc_tx_buffer *empty_info;
		struct sk_buff *empty;
		void *data;

		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
		if (!empty)
			goto done;

		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
		memset(data, 0, IGC_EMPTY_FRAME_SIZE);

		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);

		if (igc_init_tx_empty_descriptor(tx_ring,
						 empty,
						 empty_info) < 0)
			dev_kfree_skb_any(empty);
	}

done:
	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->type = IGC_TX_BUFFER_TYPE_SKB;
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (adapter->qbv_transition || tx_ring->oper_gate_closed)
		goto out_drop;

	/* enforce the Qbv per-queue max SDU, if configured */
	if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
		adapter->stats.txdrop++;
		goto out_drop;
	}

	if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		unsigned long flags;
		u32 tstamp_flags;

		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
		if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}

		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGC_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first, launch_time, first_flag);

	/* on mapping failure igc_tx_map() unwinds and frees the skb itself */
	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/* Map an skb's queue_mapping onto a valid Tx ring of this adapter. */
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

/* ndo_start_xmit entry point: pad undersized frames, then hand the skb to
 * the per-ring transmit path.
 */
static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

/* Propagate the hardware's Rx checksum verdict from the descriptor into
 * skb->ip_summed, honouring the ethtool RXCSUM setting.
 */
static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

/* Mapping HW RSS Type to enum pkt_hash_types */
static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX]	= PKT_HASH_TYPE_L4,
	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */
	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions      */
	[13] = PKT_HASH_TYPE_NONE,
	[14] = PKT_HASH_TYPE_NONE,
	[15] = PKT_HASH_TYPE_NONE,
};

/* Copy the hardware RSS hash from the Rx descriptor into the skb, with
 * the hash type translated via igc_rss_type_table.
 */
static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH) {
		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		u32 rss_type = igc_rss_type(rx_desc);

		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
	}
}

/* Extract a hardware-stripped VLAN tag from the Rx descriptor into the
 * skb; loopback packets need a byte swap on affected rings.
 */
static void igc_rx_vlan(struct igc_ring *rx_ring,
			union igc_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	igc_rx_vlan(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/* Enable or disable hardware VLAN tag insert/strip (CTRL.VME) according
 * to the CTAG_RX feature flag.
 */
static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl |= IGC_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~IGC_CTRL_VME;
	}
	wr32(IGC_CTRL, ctrl);
}

/* Re-apply the current VLAN offload mode, e.g. after a reset. */
static void igc_restore_vlan(struct igc_adapter *adapter)
{
	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
}

/* Fetch the next Rx buffer to clean, report its page refcount through
 * @rx_buffer_pgcnt (only tracked for small pages), sync it for CPU
 * access and take a pagecnt_bias reference.
 */
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
rx_buffer->page_offset, 18740507ef8aSSasha Neftin size, 18750507ef8aSSasha Neftin DMA_FROM_DEVICE); 18760507ef8aSSasha Neftin 18770507ef8aSSasha Neftin rx_buffer->pagecnt_bias--; 18780507ef8aSSasha Neftin 18790507ef8aSSasha Neftin return rx_buffer; 18800507ef8aSSasha Neftin } 18810507ef8aSSasha Neftin 1882613cf199SAndre Guedes static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, 1883613cf199SAndre Guedes unsigned int truesize) 1884613cf199SAndre Guedes { 1885613cf199SAndre Guedes #if (PAGE_SIZE < 8192) 1886613cf199SAndre Guedes buffer->page_offset ^= truesize; 1887613cf199SAndre Guedes #else 1888613cf199SAndre Guedes buffer->page_offset += truesize; 1889613cf199SAndre Guedes #endif 1890613cf199SAndre Guedes } 1891613cf199SAndre Guedes 1892a39f5e53SAndre Guedes static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, 1893a39f5e53SAndre Guedes unsigned int size) 1894a39f5e53SAndre Guedes { 1895a39f5e53SAndre Guedes unsigned int truesize; 1896a39f5e53SAndre Guedes 1897a39f5e53SAndre Guedes #if (PAGE_SIZE < 8192) 1898a39f5e53SAndre Guedes truesize = igc_rx_pg_size(ring) / 2; 1899a39f5e53SAndre Guedes #else 1900a39f5e53SAndre Guedes truesize = ring_uses_build_skb(ring) ? 
1901a39f5e53SAndre Guedes SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1902a39f5e53SAndre Guedes SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 1903a39f5e53SAndre Guedes SKB_DATA_ALIGN(size); 1904a39f5e53SAndre Guedes #endif 1905a39f5e53SAndre Guedes return truesize; 1906a39f5e53SAndre Guedes } 1907a39f5e53SAndre Guedes 19080507ef8aSSasha Neftin /** 19090507ef8aSSasha Neftin * igc_add_rx_frag - Add contents of Rx buffer to sk_buff 19100507ef8aSSasha Neftin * @rx_ring: rx descriptor ring to transact packets on 19110507ef8aSSasha Neftin * @rx_buffer: buffer containing page to add 19120507ef8aSSasha Neftin * @skb: sk_buff to place the data into 19130507ef8aSSasha Neftin * @size: size of buffer to be added 19140507ef8aSSasha Neftin * 19150507ef8aSSasha Neftin * This function will add the data contained in rx_buffer->page to the skb. 19160507ef8aSSasha Neftin */ 19170507ef8aSSasha Neftin static void igc_add_rx_frag(struct igc_ring *rx_ring, 19180507ef8aSSasha Neftin struct igc_rx_buffer *rx_buffer, 19190507ef8aSSasha Neftin struct sk_buff *skb, 19200507ef8aSSasha Neftin unsigned int size) 19210507ef8aSSasha Neftin { 1922613cf199SAndre Guedes unsigned int truesize; 19230507ef8aSSasha Neftin 1924613cf199SAndre Guedes #if (PAGE_SIZE < 8192) 1925613cf199SAndre Guedes truesize = igc_rx_pg_size(rx_ring) / 2; 19260507ef8aSSasha Neftin #else 1927613cf199SAndre Guedes truesize = ring_uses_build_skb(rx_ring) ? 
19280507ef8aSSasha Neftin SKB_DATA_ALIGN(IGC_SKB_PAD + size) : 19290507ef8aSSasha Neftin SKB_DATA_ALIGN(size); 1930613cf199SAndre Guedes #endif 19310507ef8aSSasha Neftin skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 19320507ef8aSSasha Neftin rx_buffer->page_offset, size, truesize); 1933613cf199SAndre Guedes 1934613cf199SAndre Guedes igc_rx_buffer_flip(rx_buffer, truesize); 19350507ef8aSSasha Neftin } 19360507ef8aSSasha Neftin 19370507ef8aSSasha Neftin static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, 19380507ef8aSSasha Neftin struct igc_rx_buffer *rx_buffer, 1939f51b5e2bSJesper Dangaard Brouer struct xdp_buff *xdp) 19400507ef8aSSasha Neftin { 1941f51b5e2bSJesper Dangaard Brouer unsigned int size = xdp->data_end - xdp->data; 1942a39f5e53SAndre Guedes unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); 1943f51b5e2bSJesper Dangaard Brouer unsigned int metasize = xdp->data - xdp->data_meta; 19440507ef8aSSasha Neftin struct sk_buff *skb; 19450507ef8aSSasha Neftin 19460507ef8aSSasha Neftin /* prefetch first cache line of first page */ 1947f51b5e2bSJesper Dangaard Brouer net_prefetch(xdp->data_meta); 19480507ef8aSSasha Neftin 19490507ef8aSSasha Neftin /* build an skb around the page buffer */ 19504dd330a7SAlexander Lobakin skb = napi_build_skb(xdp->data_hard_start, truesize); 19510507ef8aSSasha Neftin if (unlikely(!skb)) 19520507ef8aSSasha Neftin return NULL; 19530507ef8aSSasha Neftin 19540507ef8aSSasha Neftin /* update pointers within the skb to store the data */ 1955f51b5e2bSJesper Dangaard Brouer skb_reserve(skb, xdp->data - xdp->data_hard_start); 19560507ef8aSSasha Neftin __skb_put(skb, size); 1957f51b5e2bSJesper Dangaard Brouer if (metasize) 1958f51b5e2bSJesper Dangaard Brouer skb_metadata_set(skb, metasize); 19590507ef8aSSasha Neftin 1960613cf199SAndre Guedes igc_rx_buffer_flip(rx_buffer, truesize); 19610507ef8aSSasha Neftin return skb; 19620507ef8aSSasha Neftin } 19630507ef8aSSasha Neftin 19640507ef8aSSasha Neftin 
/* Allocate a fresh skb and copy the packet headers (and XDP metadata)
 * into its linear area; any remaining payload is attached as a page
 * fragment.  @timestamp, when non-zero, is stored as the hardware
 * receive timestamp.  Returns NULL on allocation failure.
 */
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	void *va = xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
			     IGC_RX_HDR_LEN + metasize);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
	       ALIGN(headlen + metasize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		/* payload left over after the header copy: hand the page
		 * to the skb as a fragment and flip the buffer for reuse
		 */
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
		igc_rx_buffer_flip(rx_buffer, truesize);
	} else {
		/* everything was copied; give the reference back */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

/* Decide whether the page backing @rx_buffer can be recycled onto the
 * ring: it must be local and non-pfmemalloc, and either the driver must
 * be the sole owner (small pages) or there must still be room for
 * another buffer (large pages).  Re-inflates pagecnt_bias/page refcount
 * when the bias pool has been drained.
 */
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
				  int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
		struct net_device *netdev = rx_ring->netdev;

		/* drop errored frames unless the user asked for them all */
		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/* Release a consumed Rx buffer: recycle the page onto the ring when
 * possible, otherwise unmap it and drop the driver's outstanding
 * references.  The rx_buffer slot is cleared either way.
 */
static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer,
			      int rx_buffer_pgcnt)
{
	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

/* Headroom reserved in front of received data: IGC_SKB_PAD in build_skb
 * mode, XDP_PACKET_HEADROOM when an XDP program is attached, else none.
 */
static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	struct igc_adapter *adapter = rx_ring->q_vector->adapter;

	if (ring_uses_build_skb(rx_ring))
		return IGC_SKB_PAD;
	if (igc_xdp_is_enabled(adapter))
		return XDP_PACKET_HEADROOM;

	return 0;
}

/* Ensure @bi has a DMA-mapped page, allocating and mapping one if the
 * slot is empty.  The pagecnt_bias is pre-charged to USHRT_MAX so that
 * per-frame recycling only has to decrement it.  Returns false (and
 * bumps alloc_failed) on allocation or mapping failure.
 */
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	/* keep i negative-offset so "!i" detects ring wraparound below */
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/* AF_XDP zero-copy variant of igc_alloc_rx_buffers(): fill up to @count
 * descriptors with buffers from the ring's XSK pool.  Returns false if
 * the pool ran out before @count buffers were placed.
 */
static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
{
	union igc_adv_rx_desc *desc;
	u16 i = ring->next_to_use;
	struct igc_rx_buffer *bi;
	dma_addr_t dma;
	bool ok = true;

	if (!count)
		return ok;

	XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);

	desc = IGC_RX_DESC(ring, i);
	bi = &ring->rx_buffer_info[i];
	/* keep i negative-offset so "!i" detects ring wraparound below */
	i -= ring->count;

	do {
		bi->xdp = xsk_buff_alloc(ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);
		desc->read.pkt_addr = cpu_to_le64(dma);

		desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			desc = IGC_RX_DESC(ring, 0);
			bi = ring->rx_buffer_info;
			i -= ring->count;
		}

		/* Clear the length for the next_to_use descriptor. */
		desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += ring->count;

	if (ring->next_to_use != i) {
		ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, ring->tail);
	}

	return ok;
}

/* This function requires __netif_tx_lock is held by the caller.
 *
 * Map an XDP frame (linear part plus any fragments) onto Tx descriptors.
 * Returns 0 on success, -EBUSY if the ring lacks space, or -ENOMEM on a
 * DMA mapping failure (after unwinding all mappings made so far).
 */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
				      struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
	u16 count, index = ring->next_to_use;
	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
	struct igc_tx_buffer *buffer = head;
	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
	u32 olinfo_status, len = xdpf->len, cmd_type;
	void *data = xdpf->data;
	u16 i;

	count = TXD_USE_COUNT(len);
	for (i = 0; i < nr_frags; i++)
		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));

	if (igc_maybe_stop_tx(ring, count + 3)) {
		/* this is a hard error */
		return -EBUSY;
	}

	i = 0;
	head->bytecount = xdp_get_frame_len(xdpf);
	head->type = IGC_TX_BUFFER_TYPE_XDP;
	head->gso_segs = 1;
	head->xdpf = xdpf;

	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);

	/* map the linear part first, then each fragment in turn */
	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(ring->dev, dma)) {
			netdev_err_once(ring->netdev,
					"Failed to map DMA for TX\n");
			goto unmap;
		}

		dma_unmap_len_set(buffer, len, len);
		dma_unmap_addr_set(buffer, dma, dma);

		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
			   IGC_ADVTXD_DCMD_IFCS | len;

		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		desc->read.buffer_addr = cpu_to_le64(dma);

		buffer->protocol = 0;

		if (++index == ring->count)
			index = 0;

		if (i == nr_frags)
			break;

		buffer = &ring->tx_buffer_info[index];
		desc = IGC_TX_DESC(ring, index);
		desc->read.olinfo_status = 0;

		data = skb_frag_address(&sinfo->frags[i]);
		len = skb_frag_size(&sinfo->frags[i]);
		i++;
	}
	/* mark the last descriptor of the packet */
	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);

	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
	/* set the timestamp */
	head->time_stamp = jiffies;
	/* set next_to_watch value indicating a packet is present */
	head->next_to_watch = desc;
	ring->next_to_use = index;

	return 0;

unmap:
	/* walk backwards from the failed slot, unmapping until @head */
	for (;;) {
		buffer = &ring->tx_buffer_info[index];
		if (dma_unmap_len(buffer, len))
			dma_unmap_page(ring->dev,
				       dma_unmap_addr(buffer, dma),
				       dma_unmap_len(buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(buffer, len, 0);
		if (buffer == head)
			break;

		if (!index)
			index += ring->count;
		index--;
	}

	return -ENOMEM;
}

/* Pick the Tx ring used for XDP transmission on @cpu, wrapping the CPU
 * id into the valid queue range.
 */
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
					    int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= adapter->num_tx_queues)
		index -= adapter->num_tx_queues;

	return adapter->tx_ring[index];
}

/* Transmit an XDP_TX buffer: convert it to an xdp_frame and queue it on
 * this CPU's XDP Tx ring under the queue's tx lock.  Returns 0 on
 * success or a negative error.
 */
static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;
	int res;

	if (unlikely(!xdpf))
		return -EFAULT;

	ring = igc_xdp_get_tx_ring(adapter, cpu);
	nq = txring_txq(ring);

	__netif_tx_lock(nq, cpu);
	/* Avoid transmit queue timeout since we share it with the slow path */
	txq_trans_cond_update(nq);
	res = igc_xdp_init_tx_descriptor(ring, xdpf);
	__netif_tx_unlock(nq);
	return res;
}

/* This function assumes rcu_read_lock() is held by the caller.
 *
 * Run @prog on @xdp and translate the BPF verdict into the driver's
 * IGC_XDP_* result codes; failed TX/REDIRECT fall through to the
 * exception trace and are consumed.
 */
static int __igc_xdp_run_prog(struct igc_adapter *adapter,
			      struct bpf_prog *prog,
			      struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return IGC_XDP_PASS;
	case XDP_TX:
		if (igc_xdp_xmit_back(adapter, xdp) < 0)
			goto out_failure;
		return IGC_XDP_TX;
	case XDP_REDIRECT:
		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		return IGC_XDP_REDIRECT;
		break;
	default:
		bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		return IGC_XDP_CONSUMED;
	}
}

/* Run the attached XDP program (if any) on @xdp.  The IGC_XDP_* result
 * is negated and encoded as an ERR_PTR so the Rx path can distinguish
 * verdicts from real skbs; no program behaves like XDP_PASS.
 */
static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
					struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(adapter->xdp_prog);
	if (!prog) {
		res = IGC_XDP_PASS;
		goto out;
	}

	res = __igc_xdp_run_prog(adapter, prog, xdp);

out:
	return ERR_PTR(-res);
}

/* This function assumes __netif_tx_lock is held by the caller. */
static void igc_flush_tx_descriptors(struct igc_ring *ring)
{
	/* Once tail pointer is updated, hardware can fetch the descriptors
	 * any time so we issue a write membar here to ensure all memory
	 * writes are complete before the tail pointer is updated.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}

/* End-of-poll XDP housekeeping: kick the Tx tail if any XDP_TX frames
 * were queued this cycle, and flush pending redirect maps if any frame
 * was redirected.  @status is the OR of the cycle's IGC_XDP_* verdicts.
 */
static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
{
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;

	if (status & IGC_XDP_TX) {
		ring = igc_xdp_get_tx_ring(adapter, cpu);
		nq = txring_txq(ring);

		__netif_tx_lock(nq, cpu);
		igc_flush_tx_descriptors(ring);
		__netif_tx_unlock(nq);
	}

	if (status & IGC_XDP_REDIRECT)
		xdp_do_flush();
}

/* Fold @packets/@bytes into the ring's u64 stats (under the syncp
 * seqcount) and into the q_vector's per-poll totals.
 */
static void igc_update_rx_stats(struct igc_q_vector *q_vector,
				unsigned int packets, unsigned int bytes)
{
	struct igc_ring *ring = q_vector->rx.ring;

	u64_stats_update_begin(&ring->rx_syncp);
	ring->rx_stats.packets += packets;
	ring->rx_stats.bytes += bytes;
	u64_stats_update_end(&ring->rx_syncp);

	q_vector->rx.total_packets += packets;
	q_vector->rx.total_bytes += bytes;
}

static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
	unsigned int total_bytes = 0, total_packets = 0;
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = igc_desc_unused(rx_ring);
	int xdp_status = 0, rx_buffer_pgcnt;

	while (likely(total_packets < budget)) {
		union igc_adv_rx_desc *rx_desc;
		struct igc_rx_buffer *rx_buffer;
		unsigned int size, truesize;
		struct igc_xdp_buff ctx;
		ktime_t timestamp = 0;
		int pkt_offset = 0;
		void *pktbuf;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
		truesize = igc_get_rx_frame_truesize(rx_ring, size);

		pktbuf = page_address(rx_buffer->page) +
rx_buffer->page_offset; 2615e1ed4f92SAndre Guedes 261626575105SAndre Guedes if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { 2617e1ed4f92SAndre Guedes timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2618e1ed4f92SAndre Guedes pktbuf); 2619d6772667SJesper Dangaard Brouer ctx.rx_ts = timestamp; 2620e1ed4f92SAndre Guedes pkt_offset = IGC_TS_HDR_LEN; 2621e1ed4f92SAndre Guedes size -= IGC_TS_HDR_LEN; 2622e1ed4f92SAndre Guedes } 2623e1ed4f92SAndre Guedes 262426575105SAndre Guedes if (!skb) { 262573b7123dSJesper Dangaard Brouer xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); 262673b7123dSJesper Dangaard Brouer xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), 2627f51b5e2bSJesper Dangaard Brouer igc_rx_offset(rx_ring) + pkt_offset, 2628f51b5e2bSJesper Dangaard Brouer size, true); 262973b7123dSJesper Dangaard Brouer xdp_buff_clear_frags_flag(&ctx.xdp); 26308416814fSJesper Dangaard Brouer ctx.rx_desc = rx_desc; 263126575105SAndre Guedes 263273b7123dSJesper Dangaard Brouer skb = igc_xdp_run_prog(adapter, &ctx.xdp); 263326575105SAndre Guedes } 263426575105SAndre Guedes 263526575105SAndre Guedes if (IS_ERR(skb)) { 263673f1071cSAndre Guedes unsigned int xdp_res = -PTR_ERR(skb); 263773f1071cSAndre Guedes 263873f1071cSAndre Guedes switch (xdp_res) { 263973f1071cSAndre Guedes case IGC_XDP_CONSUMED: 264026575105SAndre Guedes rx_buffer->pagecnt_bias++; 264173f1071cSAndre Guedes break; 264273f1071cSAndre Guedes case IGC_XDP_TX: 26434ff32036SAndre Guedes case IGC_XDP_REDIRECT: 264473f1071cSAndre Guedes igc_rx_buffer_flip(rx_buffer, truesize); 264573f1071cSAndre Guedes xdp_status |= xdp_res; 264673f1071cSAndre Guedes break; 264773f1071cSAndre Guedes } 264873f1071cSAndre Guedes 264926575105SAndre Guedes total_packets++; 265026575105SAndre Guedes total_bytes += size; 265126575105SAndre Guedes } else if (skb) 26520507ef8aSSasha Neftin igc_add_rx_frag(rx_ring, rx_buffer, skb, size); 26530507ef8aSSasha Neftin else if (ring_uses_build_skb(rx_ring)) 265473b7123dSJesper 
Dangaard Brouer skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); 26550507ef8aSSasha Neftin else 265673b7123dSJesper Dangaard Brouer skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp, 265726575105SAndre Guedes timestamp); 26580507ef8aSSasha Neftin 26590507ef8aSSasha Neftin /* exit if we failed to retrieve a buffer */ 26600507ef8aSSasha Neftin if (!skb) { 26610507ef8aSSasha Neftin rx_ring->rx_stats.alloc_failed++; 26620507ef8aSSasha Neftin rx_buffer->pagecnt_bias++; 26630507ef8aSSasha Neftin break; 26640507ef8aSSasha Neftin } 26650507ef8aSSasha Neftin 26664ff32036SAndre Guedes igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); 26670507ef8aSSasha Neftin cleaned_count++; 26680507ef8aSSasha Neftin 26690507ef8aSSasha Neftin /* fetch next buffer in frame if non-eop */ 26700507ef8aSSasha Neftin if (igc_is_non_eop(rx_ring, rx_desc)) 26710507ef8aSSasha Neftin continue; 26720507ef8aSSasha Neftin 26730507ef8aSSasha Neftin /* verify the packet layout is correct */ 26740507ef8aSSasha Neftin if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { 26750507ef8aSSasha Neftin skb = NULL; 26760507ef8aSSasha Neftin continue; 26770507ef8aSSasha Neftin } 26780507ef8aSSasha Neftin 26790507ef8aSSasha Neftin /* probably a little skewed due to removing CRC */ 26800507ef8aSSasha Neftin total_bytes += skb->len; 26810507ef8aSSasha Neftin 26823a66abe9SAndre Guedes /* populate checksum, VLAN, and protocol */ 26830507ef8aSSasha Neftin igc_process_skb_fields(rx_ring, rx_desc, skb); 26840507ef8aSSasha Neftin 26850507ef8aSSasha Neftin napi_gro_receive(&q_vector->napi, skb); 26860507ef8aSSasha Neftin 26870507ef8aSSasha Neftin /* reset skb pointer */ 26880507ef8aSSasha Neftin skb = NULL; 26890507ef8aSSasha Neftin 26900507ef8aSSasha Neftin /* update budget accounting */ 26910507ef8aSSasha Neftin total_packets++; 26920507ef8aSSasha Neftin } 26930507ef8aSSasha Neftin 269473f1071cSAndre Guedes if (xdp_status) 269573f1071cSAndre Guedes igc_finalize_xdp(adapter, xdp_status); 269673f1071cSAndre Guedes 
26970507ef8aSSasha Neftin /* place incomplete frames back on ring for completion */ 26980507ef8aSSasha Neftin rx_ring->skb = skb; 26990507ef8aSSasha Neftin 2700a27e6e73SAndre Guedes igc_update_rx_stats(q_vector, total_packets, total_bytes); 27010507ef8aSSasha Neftin 27020507ef8aSSasha Neftin if (cleaned_count) 27030507ef8aSSasha Neftin igc_alloc_rx_buffers(rx_ring, cleaned_count); 27040507ef8aSSasha Neftin 27050507ef8aSSasha Neftin return total_packets; 27060507ef8aSSasha Neftin } 27070507ef8aSSasha Neftin 2708fc9df2a0SAndre Guedes static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, 2709fc9df2a0SAndre Guedes struct xdp_buff *xdp) 2710fc9df2a0SAndre Guedes { 2711f9e61d36SAlexander Lobakin unsigned int totalsize = xdp->data_end - xdp->data_meta; 2712fc9df2a0SAndre Guedes unsigned int metasize = xdp->data - xdp->data_meta; 2713fc9df2a0SAndre Guedes struct sk_buff *skb; 2714fc9df2a0SAndre Guedes 2715f9e61d36SAlexander Lobakin net_prefetch(xdp->data_meta); 2716f9e61d36SAlexander Lobakin 2717f9e61d36SAlexander Lobakin skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, 2718fc9df2a0SAndre Guedes GFP_ATOMIC | __GFP_NOWARN); 2719fc9df2a0SAndre Guedes if (unlikely(!skb)) 2720fc9df2a0SAndre Guedes return NULL; 2721fc9df2a0SAndre Guedes 2722f9e61d36SAlexander Lobakin memcpy(__skb_put(skb, totalsize), xdp->data_meta, 2723f9e61d36SAlexander Lobakin ALIGN(totalsize, sizeof(long))); 2724f9e61d36SAlexander Lobakin 27254fa8fcd3SJesper Dangaard Brouer if (metasize) { 2726fc9df2a0SAndre Guedes skb_metadata_set(skb, metasize); 27274fa8fcd3SJesper Dangaard Brouer __skb_pull(skb, metasize); 27284fa8fcd3SJesper Dangaard Brouer } 2729fc9df2a0SAndre Guedes 2730fc9df2a0SAndre Guedes return skb; 2731fc9df2a0SAndre Guedes } 2732fc9df2a0SAndre Guedes 2733fc9df2a0SAndre Guedes static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, 2734fc9df2a0SAndre Guedes union igc_adv_rx_desc *desc, 2735fc9df2a0SAndre Guedes struct xdp_buff *xdp, 2736fc9df2a0SAndre Guedes ktime_t 
timestamp) 2737fc9df2a0SAndre Guedes { 2738fc9df2a0SAndre Guedes struct igc_ring *ring = q_vector->rx.ring; 2739fc9df2a0SAndre Guedes struct sk_buff *skb; 2740fc9df2a0SAndre Guedes 2741fc9df2a0SAndre Guedes skb = igc_construct_skb_zc(ring, xdp); 2742fc9df2a0SAndre Guedes if (!skb) { 2743fc9df2a0SAndre Guedes ring->rx_stats.alloc_failed++; 2744fc9df2a0SAndre Guedes return; 2745fc9df2a0SAndre Guedes } 2746fc9df2a0SAndre Guedes 2747fc9df2a0SAndre Guedes if (timestamp) 2748fc9df2a0SAndre Guedes skb_hwtstamps(skb)->hwtstamp = timestamp; 2749fc9df2a0SAndre Guedes 2750fc9df2a0SAndre Guedes if (igc_cleanup_headers(ring, desc, skb)) 2751fc9df2a0SAndre Guedes return; 2752fc9df2a0SAndre Guedes 2753fc9df2a0SAndre Guedes igc_process_skb_fields(ring, desc, skb); 2754fc9df2a0SAndre Guedes napi_gro_receive(&q_vector->napi, skb); 2755fc9df2a0SAndre Guedes } 2756fc9df2a0SAndre Guedes 27578416814fSJesper Dangaard Brouer static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) 27588416814fSJesper Dangaard Brouer { 27598416814fSJesper Dangaard Brouer /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. 
The 27608416814fSJesper Dangaard Brouer * igc_xdp_buff shares its layout with xdp_buff_xsk and private 27618416814fSJesper Dangaard Brouer * igc_xdp_buff fields fall into xdp_buff_xsk->cb 27628416814fSJesper Dangaard Brouer */ 27638416814fSJesper Dangaard Brouer return (struct igc_xdp_buff *)xdp; 27648416814fSJesper Dangaard Brouer } 27658416814fSJesper Dangaard Brouer 2766fc9df2a0SAndre Guedes static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) 2767fc9df2a0SAndre Guedes { 2768fc9df2a0SAndre Guedes struct igc_adapter *adapter = q_vector->adapter; 2769fc9df2a0SAndre Guedes struct igc_ring *ring = q_vector->rx.ring; 2770fc9df2a0SAndre Guedes u16 cleaned_count = igc_desc_unused(ring); 2771fc9df2a0SAndre Guedes int total_bytes = 0, total_packets = 0; 2772fc9df2a0SAndre Guedes u16 ntc = ring->next_to_clean; 2773fc9df2a0SAndre Guedes struct bpf_prog *prog; 2774fc9df2a0SAndre Guedes bool failure = false; 2775fc9df2a0SAndre Guedes int xdp_status = 0; 2776fc9df2a0SAndre Guedes 2777fc9df2a0SAndre Guedes rcu_read_lock(); 2778fc9df2a0SAndre Guedes 2779fc9df2a0SAndre Guedes prog = READ_ONCE(adapter->xdp_prog); 2780fc9df2a0SAndre Guedes 2781fc9df2a0SAndre Guedes while (likely(total_packets < budget)) { 2782fc9df2a0SAndre Guedes union igc_adv_rx_desc *desc; 2783fc9df2a0SAndre Guedes struct igc_rx_buffer *bi; 27848416814fSJesper Dangaard Brouer struct igc_xdp_buff *ctx; 2785fc9df2a0SAndre Guedes ktime_t timestamp = 0; 2786fc9df2a0SAndre Guedes unsigned int size; 2787fc9df2a0SAndre Guedes int res; 2788fc9df2a0SAndre Guedes 2789fc9df2a0SAndre Guedes desc = IGC_RX_DESC(ring, ntc); 2790fc9df2a0SAndre Guedes size = le16_to_cpu(desc->wb.upper.length); 2791fc9df2a0SAndre Guedes if (!size) 2792fc9df2a0SAndre Guedes break; 2793fc9df2a0SAndre Guedes 2794fc9df2a0SAndre Guedes /* This memory barrier is needed to keep us from reading 2795fc9df2a0SAndre Guedes * any other fields out of the rx_desc until we know the 2796fc9df2a0SAndre Guedes * descriptor has been 
written back 2797fc9df2a0SAndre Guedes */ 2798fc9df2a0SAndre Guedes dma_rmb(); 2799fc9df2a0SAndre Guedes 2800fc9df2a0SAndre Guedes bi = &ring->rx_buffer_info[ntc]; 2801fc9df2a0SAndre Guedes 28028416814fSJesper Dangaard Brouer ctx = xsk_buff_to_igc_ctx(bi->xdp); 28038416814fSJesper Dangaard Brouer ctx->rx_desc = desc; 28048416814fSJesper Dangaard Brouer 2805fc9df2a0SAndre Guedes if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { 2806fc9df2a0SAndre Guedes timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, 2807fc9df2a0SAndre Guedes bi->xdp->data); 2808d6772667SJesper Dangaard Brouer ctx->rx_ts = timestamp; 2809fc9df2a0SAndre Guedes 2810fc9df2a0SAndre Guedes bi->xdp->data += IGC_TS_HDR_LEN; 2811fc9df2a0SAndre Guedes 2812fc9df2a0SAndre Guedes /* HW timestamp has been copied into local variable. Metadata 2813fc9df2a0SAndre Guedes * length when XDP program is called should be 0. 2814fc9df2a0SAndre Guedes */ 2815fc9df2a0SAndre Guedes bi->xdp->data_meta += IGC_TS_HDR_LEN; 2816fc9df2a0SAndre Guedes size -= IGC_TS_HDR_LEN; 2817fc9df2a0SAndre Guedes } 2818fc9df2a0SAndre Guedes 2819fc9df2a0SAndre Guedes bi->xdp->data_end = bi->xdp->data + size; 2820fc9df2a0SAndre Guedes xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); 2821fc9df2a0SAndre Guedes 2822fc9df2a0SAndre Guedes res = __igc_xdp_run_prog(adapter, prog, bi->xdp); 2823fc9df2a0SAndre Guedes switch (res) { 2824fc9df2a0SAndre Guedes case IGC_XDP_PASS: 2825fc9df2a0SAndre Guedes igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); 2826fc9df2a0SAndre Guedes fallthrough; 2827fc9df2a0SAndre Guedes case IGC_XDP_CONSUMED: 2828fc9df2a0SAndre Guedes xsk_buff_free(bi->xdp); 2829fc9df2a0SAndre Guedes break; 2830fc9df2a0SAndre Guedes case IGC_XDP_TX: 2831fc9df2a0SAndre Guedes case IGC_XDP_REDIRECT: 2832fc9df2a0SAndre Guedes xdp_status |= res; 2833fc9df2a0SAndre Guedes break; 2834fc9df2a0SAndre Guedes } 2835fc9df2a0SAndre Guedes 2836fc9df2a0SAndre Guedes bi->xdp = NULL; 2837fc9df2a0SAndre Guedes total_bytes += size; 
2838fc9df2a0SAndre Guedes total_packets++; 2839fc9df2a0SAndre Guedes cleaned_count++; 2840fc9df2a0SAndre Guedes ntc++; 2841fc9df2a0SAndre Guedes if (ntc == ring->count) 2842fc9df2a0SAndre Guedes ntc = 0; 2843fc9df2a0SAndre Guedes } 2844fc9df2a0SAndre Guedes 2845fc9df2a0SAndre Guedes ring->next_to_clean = ntc; 2846fc9df2a0SAndre Guedes rcu_read_unlock(); 2847fc9df2a0SAndre Guedes 2848fc9df2a0SAndre Guedes if (cleaned_count >= IGC_RX_BUFFER_WRITE) 2849fc9df2a0SAndre Guedes failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); 2850fc9df2a0SAndre Guedes 2851fc9df2a0SAndre Guedes if (xdp_status) 2852fc9df2a0SAndre Guedes igc_finalize_xdp(adapter, xdp_status); 2853fc9df2a0SAndre Guedes 2854fc9df2a0SAndre Guedes igc_update_rx_stats(q_vector, total_packets, total_bytes); 2855fc9df2a0SAndre Guedes 2856fc9df2a0SAndre Guedes if (xsk_uses_need_wakeup(ring->xsk_pool)) { 2857fc9df2a0SAndre Guedes if (failure || ring->next_to_clean == ring->next_to_use) 2858fc9df2a0SAndre Guedes xsk_set_rx_need_wakeup(ring->xsk_pool); 2859fc9df2a0SAndre Guedes else 2860fc9df2a0SAndre Guedes xsk_clear_rx_need_wakeup(ring->xsk_pool); 2861fc9df2a0SAndre Guedes return total_packets; 2862fc9df2a0SAndre Guedes } 2863fc9df2a0SAndre Guedes 2864fc9df2a0SAndre Guedes return failure ? 
budget : total_packets; 2865fc9df2a0SAndre Guedes } 2866fc9df2a0SAndre Guedes 2867a27e6e73SAndre Guedes static void igc_update_tx_stats(struct igc_q_vector *q_vector, 2868a27e6e73SAndre Guedes unsigned int packets, unsigned int bytes) 2869a27e6e73SAndre Guedes { 2870a27e6e73SAndre Guedes struct igc_ring *ring = q_vector->tx.ring; 2871a27e6e73SAndre Guedes 2872a27e6e73SAndre Guedes u64_stats_update_begin(&ring->tx_syncp); 2873a27e6e73SAndre Guedes ring->tx_stats.bytes += bytes; 2874a27e6e73SAndre Guedes ring->tx_stats.packets += packets; 2875a27e6e73SAndre Guedes u64_stats_update_end(&ring->tx_syncp); 2876a27e6e73SAndre Guedes 2877a27e6e73SAndre Guedes q_vector->tx.total_bytes += bytes; 2878a27e6e73SAndre Guedes q_vector->tx.total_packets += packets; 2879a27e6e73SAndre Guedes } 2880a27e6e73SAndre Guedes 28819acf59a7SAndre Guedes static void igc_xdp_xmit_zc(struct igc_ring *ring) 28829acf59a7SAndre Guedes { 28839acf59a7SAndre Guedes struct xsk_buff_pool *pool = ring->xsk_pool; 28849acf59a7SAndre Guedes struct netdev_queue *nq = txring_txq(ring); 28859acf59a7SAndre Guedes union igc_adv_tx_desc *tx_desc = NULL; 28869acf59a7SAndre Guedes int cpu = smp_processor_id(); 28879acf59a7SAndre Guedes struct xdp_desc xdp_desc; 288878adb4bcSFlorian Kauer u16 budget, ntu; 28899acf59a7SAndre Guedes 28909acf59a7SAndre Guedes if (!netif_carrier_ok(ring->netdev)) 28919acf59a7SAndre Guedes return; 28929acf59a7SAndre Guedes 28939acf59a7SAndre Guedes __netif_tx_lock(nq, cpu); 28949acf59a7SAndre Guedes 289595b68148SKurt Kanzenbach /* Avoid transmit queue timeout since we share it with the slow path */ 289695b68148SKurt Kanzenbach txq_trans_cond_update(nq); 289795b68148SKurt Kanzenbach 289878adb4bcSFlorian Kauer ntu = ring->next_to_use; 28999acf59a7SAndre Guedes budget = igc_desc_unused(ring); 29009acf59a7SAndre Guedes 29019acf59a7SAndre Guedes while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { 29029acf59a7SAndre Guedes u32 cmd_type, olinfo_status; 29039acf59a7SAndre Guedes struct 
igc_tx_buffer *bi; 29049acf59a7SAndre Guedes dma_addr_t dma; 29059acf59a7SAndre Guedes 29069acf59a7SAndre Guedes cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | 29079acf59a7SAndre Guedes IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | 29089acf59a7SAndre Guedes xdp_desc.len; 29099acf59a7SAndre Guedes olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; 29109acf59a7SAndre Guedes 29119acf59a7SAndre Guedes dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 29129acf59a7SAndre Guedes xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); 29139acf59a7SAndre Guedes 29149acf59a7SAndre Guedes tx_desc = IGC_TX_DESC(ring, ntu); 29159acf59a7SAndre Guedes tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); 29169acf59a7SAndre Guedes tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 29179acf59a7SAndre Guedes tx_desc->read.buffer_addr = cpu_to_le64(dma); 29189acf59a7SAndre Guedes 29199acf59a7SAndre Guedes bi = &ring->tx_buffer_info[ntu]; 29209acf59a7SAndre Guedes bi->type = IGC_TX_BUFFER_TYPE_XSK; 29219acf59a7SAndre Guedes bi->protocol = 0; 29229acf59a7SAndre Guedes bi->bytecount = xdp_desc.len; 29239acf59a7SAndre Guedes bi->gso_segs = 1; 29249acf59a7SAndre Guedes bi->time_stamp = jiffies; 29259acf59a7SAndre Guedes bi->next_to_watch = tx_desc; 29269acf59a7SAndre Guedes 29279acf59a7SAndre Guedes netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); 29289acf59a7SAndre Guedes 29299acf59a7SAndre Guedes ntu++; 29309acf59a7SAndre Guedes if (ntu == ring->count) 29319acf59a7SAndre Guedes ntu = 0; 29329acf59a7SAndre Guedes } 29339acf59a7SAndre Guedes 29349acf59a7SAndre Guedes ring->next_to_use = ntu; 29359acf59a7SAndre Guedes if (tx_desc) { 29369acf59a7SAndre Guedes igc_flush_tx_descriptors(ring); 29379acf59a7SAndre Guedes xsk_tx_release(pool); 29389acf59a7SAndre Guedes } 29399acf59a7SAndre Guedes 29409acf59a7SAndre Guedes __netif_tx_unlock(nq); 29419acf59a7SAndre Guedes } 29429acf59a7SAndre Guedes 29430507ef8aSSasha Neftin /** 29440507ef8aSSasha Neftin * igc_clean_tx_irq - 
Reclaim resources after transmit completes 29450507ef8aSSasha Neftin * @q_vector: pointer to q_vector containing needed info 29460507ef8aSSasha Neftin * @napi_budget: Used to determine if we are in netpoll 29470507ef8aSSasha Neftin * 29480507ef8aSSasha Neftin * returns true if ring is completely cleaned 29490507ef8aSSasha Neftin */ 29500507ef8aSSasha Neftin static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) 29510507ef8aSSasha Neftin { 29520507ef8aSSasha Neftin struct igc_adapter *adapter = q_vector->adapter; 29530507ef8aSSasha Neftin unsigned int total_bytes = 0, total_packets = 0; 29540507ef8aSSasha Neftin unsigned int budget = q_vector->tx.work_limit; 29550507ef8aSSasha Neftin struct igc_ring *tx_ring = q_vector->tx.ring; 29560507ef8aSSasha Neftin unsigned int i = tx_ring->next_to_clean; 29570507ef8aSSasha Neftin struct igc_tx_buffer *tx_buffer; 29580507ef8aSSasha Neftin union igc_adv_tx_desc *tx_desc; 29599acf59a7SAndre Guedes u32 xsk_frames = 0; 29600507ef8aSSasha Neftin 29610507ef8aSSasha Neftin if (test_bit(__IGC_DOWN, &adapter->state)) 29620507ef8aSSasha Neftin return true; 29630507ef8aSSasha Neftin 29640507ef8aSSasha Neftin tx_buffer = &tx_ring->tx_buffer_info[i]; 29650507ef8aSSasha Neftin tx_desc = IGC_TX_DESC(tx_ring, i); 29660507ef8aSSasha Neftin i -= tx_ring->count; 29670507ef8aSSasha Neftin 29680507ef8aSSasha Neftin do { 29690507ef8aSSasha Neftin union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 29700507ef8aSSasha Neftin 29710507ef8aSSasha Neftin /* if next_to_watch is not set then there is no work pending */ 29720507ef8aSSasha Neftin if (!eop_desc) 29730507ef8aSSasha Neftin break; 29740507ef8aSSasha Neftin 29750507ef8aSSasha Neftin /* prevent any other reads prior to eop_desc */ 29760507ef8aSSasha Neftin smp_rmb(); 29770507ef8aSSasha Neftin 29780507ef8aSSasha Neftin /* if DD is not set pending work has not been completed */ 29790507ef8aSSasha Neftin if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) 
29800507ef8aSSasha Neftin break; 29810507ef8aSSasha Neftin 29820507ef8aSSasha Neftin /* clear next_to_watch to prevent false hangs */ 29830507ef8aSSasha Neftin tx_buffer->next_to_watch = NULL; 29840507ef8aSSasha Neftin 29850507ef8aSSasha Neftin /* update the statistics for this packet */ 29860507ef8aSSasha Neftin total_bytes += tx_buffer->bytecount; 29870507ef8aSSasha Neftin total_packets += tx_buffer->gso_segs; 29880507ef8aSSasha Neftin 2989859b4dfaSAndre Guedes switch (tx_buffer->type) { 29909acf59a7SAndre Guedes case IGC_TX_BUFFER_TYPE_XSK: 29919acf59a7SAndre Guedes xsk_frames++; 29929acf59a7SAndre Guedes break; 2993859b4dfaSAndre Guedes case IGC_TX_BUFFER_TYPE_XDP: 299473f1071cSAndre Guedes xdp_return_frame(tx_buffer->xdpf); 29959acf59a7SAndre Guedes igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 2996859b4dfaSAndre Guedes break; 2997859b4dfaSAndre Guedes case IGC_TX_BUFFER_TYPE_SKB: 29980507ef8aSSasha Neftin napi_consume_skb(tx_buffer->skb, napi_budget); 29999acf59a7SAndre Guedes igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); 3000859b4dfaSAndre Guedes break; 3001859b4dfaSAndre Guedes default: 3002859b4dfaSAndre Guedes netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); 3003859b4dfaSAndre Guedes break; 3004859b4dfaSAndre Guedes } 30050507ef8aSSasha Neftin 30060507ef8aSSasha Neftin /* clear last DMA location and unmap remaining buffers */ 30070507ef8aSSasha Neftin while (tx_desc != eop_desc) { 30080507ef8aSSasha Neftin tx_buffer++; 30090507ef8aSSasha Neftin tx_desc++; 30100507ef8aSSasha Neftin i++; 30110507ef8aSSasha Neftin if (unlikely(!i)) { 30120507ef8aSSasha Neftin i -= tx_ring->count; 30130507ef8aSSasha Neftin tx_buffer = tx_ring->tx_buffer_info; 30140507ef8aSSasha Neftin tx_desc = IGC_TX_DESC(tx_ring, 0); 30150507ef8aSSasha Neftin } 30160507ef8aSSasha Neftin 30170507ef8aSSasha Neftin /* unmap any remaining paged data */ 301861234295SAndre Guedes if (dma_unmap_len(tx_buffer, len)) 301961234295SAndre Guedes igc_unmap_tx_buffer(tx_ring->dev, 
tx_buffer); 30200507ef8aSSasha Neftin } 30210507ef8aSSasha Neftin 30220507ef8aSSasha Neftin /* move us one more past the eop_desc for start of next pkt */ 30230507ef8aSSasha Neftin tx_buffer++; 30240507ef8aSSasha Neftin tx_desc++; 30250507ef8aSSasha Neftin i++; 30260507ef8aSSasha Neftin if (unlikely(!i)) { 30270507ef8aSSasha Neftin i -= tx_ring->count; 30280507ef8aSSasha Neftin tx_buffer = tx_ring->tx_buffer_info; 30290507ef8aSSasha Neftin tx_desc = IGC_TX_DESC(tx_ring, 0); 30300507ef8aSSasha Neftin } 30310507ef8aSSasha Neftin 30320507ef8aSSasha Neftin /* issue prefetch for next Tx descriptor */ 30330507ef8aSSasha Neftin prefetch(tx_desc); 30340507ef8aSSasha Neftin 30350507ef8aSSasha Neftin /* update budget accounting */ 30360507ef8aSSasha Neftin budget--; 30370507ef8aSSasha Neftin } while (likely(budget)); 30380507ef8aSSasha Neftin 30390507ef8aSSasha Neftin netdev_tx_completed_queue(txring_txq(tx_ring), 30400507ef8aSSasha Neftin total_packets, total_bytes); 30410507ef8aSSasha Neftin 30420507ef8aSSasha Neftin i += tx_ring->count; 30430507ef8aSSasha Neftin tx_ring->next_to_clean = i; 3044a27e6e73SAndre Guedes 3045a27e6e73SAndre Guedes igc_update_tx_stats(q_vector, total_packets, total_bytes); 30460507ef8aSSasha Neftin 30479acf59a7SAndre Guedes if (tx_ring->xsk_pool) { 30489acf59a7SAndre Guedes if (xsk_frames) 30499acf59a7SAndre Guedes xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); 30509acf59a7SAndre Guedes if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) 30519acf59a7SAndre Guedes xsk_set_tx_need_wakeup(tx_ring->xsk_pool); 30529acf59a7SAndre Guedes igc_xdp_xmit_zc(tx_ring); 30539acf59a7SAndre Guedes } 30549acf59a7SAndre Guedes 30550507ef8aSSasha Neftin if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 30560507ef8aSSasha Neftin struct igc_hw *hw = &adapter->hw; 30570507ef8aSSasha Neftin 30580507ef8aSSasha Neftin /* Detect a transmit hang in hardware, this serializes the 30590507ef8aSSasha Neftin * check with the clearing of time_stamp and movement of i 
30600507ef8aSSasha Neftin */ 30610507ef8aSSasha Neftin clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 30620507ef8aSSasha Neftin if (tx_buffer->next_to_watch && 30630507ef8aSSasha Neftin time_after(jiffies, tx_buffer->time_stamp + 30640507ef8aSSasha Neftin (adapter->tx_timeout_factor * HZ)) && 30659b275176SSasha Neftin !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && 3066175c2412SMuhammad Husaini Zulkifli (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && 3067175c2412SMuhammad Husaini Zulkifli !tx_ring->oper_gate_closed) { 30680507ef8aSSasha Neftin /* detected Tx unit hang */ 306925f06effSAndre Guedes netdev_err(tx_ring->netdev, 30700507ef8aSSasha Neftin "Detected Tx Unit Hang\n" 30710507ef8aSSasha Neftin " Tx Queue <%d>\n" 30720507ef8aSSasha Neftin " TDH <%x>\n" 30730507ef8aSSasha Neftin " TDT <%x>\n" 30740507ef8aSSasha Neftin " next_to_use <%x>\n" 30750507ef8aSSasha Neftin " next_to_clean <%x>\n" 30760507ef8aSSasha Neftin "buffer_info[next_to_clean]\n" 30770507ef8aSSasha Neftin " time_stamp <%lx>\n" 30780507ef8aSSasha Neftin " next_to_watch <%p>\n" 30790507ef8aSSasha Neftin " jiffies <%lx>\n" 30800507ef8aSSasha Neftin " desc.status <%x>\n", 30810507ef8aSSasha Neftin tx_ring->queue_index, 30820507ef8aSSasha Neftin rd32(IGC_TDH(tx_ring->reg_idx)), 30830507ef8aSSasha Neftin readl(tx_ring->tail), 30840507ef8aSSasha Neftin tx_ring->next_to_use, 30850507ef8aSSasha Neftin tx_ring->next_to_clean, 30860507ef8aSSasha Neftin tx_buffer->time_stamp, 30870507ef8aSSasha Neftin tx_buffer->next_to_watch, 30880507ef8aSSasha Neftin jiffies, 30890507ef8aSSasha Neftin tx_buffer->next_to_watch->wb.status); 30900507ef8aSSasha Neftin netif_stop_subqueue(tx_ring->netdev, 30910507ef8aSSasha Neftin tx_ring->queue_index); 30920507ef8aSSasha Neftin 30930507ef8aSSasha Neftin /* we are about to reset, no point in enabling stuff */ 30940507ef8aSSasha Neftin return true; 30950507ef8aSSasha Neftin } 30960507ef8aSSasha Neftin } 30970507ef8aSSasha Neftin 30980507ef8aSSasha Neftin 
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	/* Re-arm the Tx queue if we cleaned packets, the link is up and enough
	 * descriptors were reclaimed to accept another worst-case frame.
	 */
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGC_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

/* Scan the RAL/RAH register pairs for an enabled entry that matches both
 * @addr and the filter @type (source vs destination address matching).
 *
 * Return: index of the matching entry, or -1 if none is found.
 */
static int igc_find_mac_filter(struct igc_adapter *adapter,
			       enum igc_mac_filter_type type, const u8 *addr)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 ral, rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		ral = rd32(IGC_RAL(i));
		rah = rd32(IGC_RAH(i));

		/* Skip entries whose Address Valid bit is clear */
		if (!(rah & IGC_RAH_AV))
			continue;
		/* ASEL bit set means the entry matches on source address */
		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
			continue;
		/* RAH holds the upper 2 bytes, RAL the lower 4 bytes of the
		 * little-endian MAC address.
		 */
		if ((rah & IGC_RAH_RAH_MASK) !=
		    le16_to_cpup((__le16 *)(addr + 4)))
			continue;
		if (ral != le32_to_cpup((__le32 *)(addr)))
			continue;

		return i;
	}

	return -1;
}

/* Return the index of the first RAL/RAH entry whose Address Valid bit is
 * clear, or -1 if the whole table is in use.
 */
static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		rah = rd32(IGC_RAH(i));

		if (!(rah & IGC_RAH_AV))
			return i;
	}

	return -1;
}

/**
 * igc_add_mac_filter() - Add MAC address filter
 * @adapter: Pointer to adapter where the filter should be added
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
 *         assignment is disabled.
 *
 * Return: 0 in case of success, negative errno code otherwise.
317686a4de66SSasha Neftin */ 317736fa2152SAndre Guedes static int igc_add_mac_filter(struct igc_adapter *adapter, 3178750433d0SAndre Guedes enum igc_mac_filter_type type, const u8 *addr, 3179750433d0SAndre Guedes int queue) 318086a4de66SSasha Neftin { 3181949b922eSAndre Guedes struct net_device *dev = adapter->netdev; 3182794e5bc8SAndre Guedes int index; 318386a4de66SSasha Neftin 3184750433d0SAndre Guedes index = igc_find_mac_filter(adapter, type, addr); 3185794e5bc8SAndre Guedes if (index >= 0) 3186d66358caSAndre Guedes goto update_filter; 318786a4de66SSasha Neftin 3188794e5bc8SAndre Guedes index = igc_get_avail_mac_filter_slot(adapter); 3189794e5bc8SAndre Guedes if (index < 0) 319086a4de66SSasha Neftin return -ENOSPC; 3191794e5bc8SAndre Guedes 3192750433d0SAndre Guedes netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", 3193750433d0SAndre Guedes index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 3194750433d0SAndre Guedes addr, queue); 3195949b922eSAndre Guedes 3196d66358caSAndre Guedes update_filter: 3197750433d0SAndre Guedes igc_set_mac_filter_hw(adapter, index, type, addr, queue); 319886a4de66SSasha Neftin return 0; 319986a4de66SSasha Neftin } 320086a4de66SSasha Neftin 3201c6aae591SAndre Guedes /** 3202c6aae591SAndre Guedes * igc_del_mac_filter() - Delete MAC address filter 3203c6aae591SAndre Guedes * @adapter: Pointer to adapter where the filter should be deleted from 3204750433d0SAndre Guedes * @type: MAC address filter type (source or destination) 3205c6aae591SAndre Guedes * @addr: MAC address 320686a4de66SSasha Neftin */ 3207acda576fSAndre Guedes static void igc_del_mac_filter(struct igc_adapter *adapter, 3208750433d0SAndre Guedes enum igc_mac_filter_type type, const u8 *addr) 320986a4de66SSasha Neftin { 3210949b922eSAndre Guedes struct net_device *dev = adapter->netdev; 32115f930713SAndre Guedes int index; 321286a4de66SSasha Neftin 3213750433d0SAndre Guedes index = igc_find_mac_filter(adapter, type, addr); 
32145f930713SAndre Guedes if (index < 0) 3215acda576fSAndre Guedes return; 321686a4de66SSasha Neftin 3217d66358caSAndre Guedes if (index == 0) { 32185f930713SAndre Guedes /* If this is the default filter, we don't actually delete it. 32195f930713SAndre Guedes * We just reset to its default value i.e. disable queue 32205f930713SAndre Guedes * assignment. 322186a4de66SSasha Neftin */ 3222949b922eSAndre Guedes netdev_dbg(dev, "Disable default MAC filter queue assignment"); 3223949b922eSAndre Guedes 3224750433d0SAndre Guedes igc_set_mac_filter_hw(adapter, 0, type, addr, -1); 322586a4de66SSasha Neftin } else { 3226750433d0SAndre Guedes netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", 3227750433d0SAndre Guedes index, 3228750433d0SAndre Guedes type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", 3229750433d0SAndre Guedes addr); 3230949b922eSAndre Guedes 32315f930713SAndre Guedes igc_clear_mac_filter_hw(adapter, index); 323286a4de66SSasha Neftin } 323386a4de66SSasha Neftin } 323486a4de66SSasha Neftin 323512ddee68SAndre Guedes /** 323612ddee68SAndre Guedes * igc_add_vlan_prio_filter() - Add VLAN priority filter 323712ddee68SAndre Guedes * @adapter: Pointer to adapter where the filter should be added 323812ddee68SAndre Guedes * @prio: VLAN priority value 323912ddee68SAndre Guedes * @queue: Queue number which matching frames are assigned to 324012ddee68SAndre Guedes * 324112ddee68SAndre Guedes * Return: 0 in case of success, negative errno code otherwise. 
324212ddee68SAndre Guedes */ 324336fa2152SAndre Guedes static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, 324436fa2152SAndre Guedes int queue) 324512ddee68SAndre Guedes { 324612ddee68SAndre Guedes struct net_device *dev = adapter->netdev; 324712ddee68SAndre Guedes struct igc_hw *hw = &adapter->hw; 324812ddee68SAndre Guedes u32 vlanpqf; 324912ddee68SAndre Guedes 325012ddee68SAndre Guedes vlanpqf = rd32(IGC_VLANPQF); 325112ddee68SAndre Guedes 325212ddee68SAndre Guedes if (vlanpqf & IGC_VLANPQF_VALID(prio)) { 325312ddee68SAndre Guedes netdev_dbg(dev, "VLAN priority filter already in use\n"); 325412ddee68SAndre Guedes return -EEXIST; 325512ddee68SAndre Guedes } 325612ddee68SAndre Guedes 325712ddee68SAndre Guedes vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); 325812ddee68SAndre Guedes vlanpqf |= IGC_VLANPQF_VALID(prio); 325912ddee68SAndre Guedes 326012ddee68SAndre Guedes wr32(IGC_VLANPQF, vlanpqf); 326112ddee68SAndre Guedes 326212ddee68SAndre Guedes netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", 326312ddee68SAndre Guedes prio, queue); 326412ddee68SAndre Guedes return 0; 326512ddee68SAndre Guedes } 326612ddee68SAndre Guedes 326712ddee68SAndre Guedes /** 326812ddee68SAndre Guedes * igc_del_vlan_prio_filter() - Delete VLAN priority filter 326912ddee68SAndre Guedes * @adapter: Pointer to adapter where the filter should be deleted from 327012ddee68SAndre Guedes * @prio: VLAN priority value 327112ddee68SAndre Guedes */ 327236fa2152SAndre Guedes static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) 327312ddee68SAndre Guedes { 327412ddee68SAndre Guedes struct igc_hw *hw = &adapter->hw; 327512ddee68SAndre Guedes u32 vlanpqf; 327612ddee68SAndre Guedes 327712ddee68SAndre Guedes vlanpqf = rd32(IGC_VLANPQF); 327812ddee68SAndre Guedes 327912ddee68SAndre Guedes vlanpqf &= ~IGC_VLANPQF_VALID(prio); 328012ddee68SAndre Guedes vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); 328112ddee68SAndre Guedes 328212ddee68SAndre 
Guedes wr32(IGC_VLANPQF, vlanpqf); 328312ddee68SAndre Guedes 328412ddee68SAndre Guedes netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", 328512ddee68SAndre Guedes prio); 328612ddee68SAndre Guedes } 328712ddee68SAndre Guedes 3288aa7ca726SAndre Guedes static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) 3289aa7ca726SAndre Guedes { 3290aa7ca726SAndre Guedes struct igc_hw *hw = &adapter->hw; 3291aa7ca726SAndre Guedes int i; 3292aa7ca726SAndre Guedes 3293aa7ca726SAndre Guedes for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3294aa7ca726SAndre Guedes u32 etqf = rd32(IGC_ETQF(i)); 3295aa7ca726SAndre Guedes 3296aa7ca726SAndre Guedes if (!(etqf & IGC_ETQF_FILTER_ENABLE)) 3297aa7ca726SAndre Guedes return i; 3298aa7ca726SAndre Guedes } 3299aa7ca726SAndre Guedes 3300aa7ca726SAndre Guedes return -1; 3301aa7ca726SAndre Guedes } 3302aa7ca726SAndre Guedes 3303aa7ca726SAndre Guedes /** 3304aa7ca726SAndre Guedes * igc_add_etype_filter() - Add ethertype filter 3305aa7ca726SAndre Guedes * @adapter: Pointer to adapter where the filter should be added 3306aa7ca726SAndre Guedes * @etype: Ethertype value 3307aa7ca726SAndre Guedes * @queue: If non-negative, queue assignment feature is enabled and frames 3308aa7ca726SAndre Guedes * matching the filter are enqueued onto 'queue'. Otherwise, queue 3309aa7ca726SAndre Guedes * assignment is disabled. 3310aa7ca726SAndre Guedes * 3311aa7ca726SAndre Guedes * Return: 0 in case of success, negative errno code otherwise. 
3312aa7ca726SAndre Guedes */ 331336fa2152SAndre Guedes static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, 331436fa2152SAndre Guedes int queue) 3315aa7ca726SAndre Guedes { 3316aa7ca726SAndre Guedes struct igc_hw *hw = &adapter->hw; 3317aa7ca726SAndre Guedes int index; 3318aa7ca726SAndre Guedes u32 etqf; 3319aa7ca726SAndre Guedes 3320aa7ca726SAndre Guedes index = igc_get_avail_etype_filter_slot(adapter); 3321aa7ca726SAndre Guedes if (index < 0) 3322aa7ca726SAndre Guedes return -ENOSPC; 3323aa7ca726SAndre Guedes 3324aa7ca726SAndre Guedes etqf = rd32(IGC_ETQF(index)); 3325aa7ca726SAndre Guedes 3326aa7ca726SAndre Guedes etqf &= ~IGC_ETQF_ETYPE_MASK; 3327aa7ca726SAndre Guedes etqf |= etype; 3328aa7ca726SAndre Guedes 3329aa7ca726SAndre Guedes if (queue >= 0) { 3330aa7ca726SAndre Guedes etqf &= ~IGC_ETQF_QUEUE_MASK; 3331aa7ca726SAndre Guedes etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); 3332aa7ca726SAndre Guedes etqf |= IGC_ETQF_QUEUE_ENABLE; 3333aa7ca726SAndre Guedes } 3334aa7ca726SAndre Guedes 3335aa7ca726SAndre Guedes etqf |= IGC_ETQF_FILTER_ENABLE; 3336aa7ca726SAndre Guedes 3337aa7ca726SAndre Guedes wr32(IGC_ETQF(index), etqf); 3338aa7ca726SAndre Guedes 3339aa7ca726SAndre Guedes netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", 3340aa7ca726SAndre Guedes etype, queue); 3341aa7ca726SAndre Guedes return 0; 3342aa7ca726SAndre Guedes } 3343aa7ca726SAndre Guedes 3344aa7ca726SAndre Guedes static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) 3345aa7ca726SAndre Guedes { 3346aa7ca726SAndre Guedes struct igc_hw *hw = &adapter->hw; 3347aa7ca726SAndre Guedes int i; 3348aa7ca726SAndre Guedes 3349aa7ca726SAndre Guedes for (i = 0; i < MAX_ETYPE_FILTER; i++) { 3350aa7ca726SAndre Guedes u32 etqf = rd32(IGC_ETQF(i)); 3351aa7ca726SAndre Guedes 3352aa7ca726SAndre Guedes if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) 3353aa7ca726SAndre Guedes return i; 3354aa7ca726SAndre Guedes } 3355aa7ca726SAndre Guedes 3356aa7ca726SAndre Guedes 
return -1; 3357aa7ca726SAndre Guedes } 3358aa7ca726SAndre Guedes 3359aa7ca726SAndre Guedes /** 3360aa7ca726SAndre Guedes * igc_del_etype_filter() - Delete ethertype filter 3361aa7ca726SAndre Guedes * @adapter: Pointer to adapter where the filter should be deleted from 3362aa7ca726SAndre Guedes * @etype: Ethertype value 3363aa7ca726SAndre Guedes */ 3364acda576fSAndre Guedes static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) 3365aa7ca726SAndre Guedes { 3366aa7ca726SAndre Guedes struct igc_hw *hw = &adapter->hw; 3367aa7ca726SAndre Guedes int index; 3368aa7ca726SAndre Guedes 3369aa7ca726SAndre Guedes index = igc_find_etype_filter(adapter, etype); 3370aa7ca726SAndre Guedes if (index < 0) 3371acda576fSAndre Guedes return; 3372aa7ca726SAndre Guedes 3373aa7ca726SAndre Guedes wr32(IGC_ETQF(index), 0); 3374aa7ca726SAndre Guedes 3375aa7ca726SAndre Guedes netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", 3376aa7ca726SAndre Guedes etype); 3377aa7ca726SAndre Guedes } 3378aa7ca726SAndre Guedes 33796574631bSKurt Kanzenbach static int igc_flex_filter_select(struct igc_adapter *adapter, 33806574631bSKurt Kanzenbach struct igc_flex_filter *input, 33816574631bSKurt Kanzenbach u32 *fhft) 33826574631bSKurt Kanzenbach { 33836574631bSKurt Kanzenbach struct igc_hw *hw = &adapter->hw; 33846574631bSKurt Kanzenbach u8 fhft_index; 33856574631bSKurt Kanzenbach u32 fhftsl; 33866574631bSKurt Kanzenbach 33876574631bSKurt Kanzenbach if (input->index >= MAX_FLEX_FILTER) { 33886574631bSKurt Kanzenbach dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); 33896574631bSKurt Kanzenbach return -EINVAL; 33906574631bSKurt Kanzenbach } 33916574631bSKurt Kanzenbach 33926574631bSKurt Kanzenbach /* Indirect table select register */ 33936574631bSKurt Kanzenbach fhftsl = rd32(IGC_FHFTSL); 33946574631bSKurt Kanzenbach fhftsl &= ~IGC_FHFTSL_FTSL_MASK; 33956574631bSKurt Kanzenbach switch (input->index) { 33966574631bSKurt Kanzenbach case 0 ... 
7: 33976574631bSKurt Kanzenbach fhftsl |= 0x00; 33986574631bSKurt Kanzenbach break; 33996574631bSKurt Kanzenbach case 8 ... 15: 34006574631bSKurt Kanzenbach fhftsl |= 0x01; 34016574631bSKurt Kanzenbach break; 34026574631bSKurt Kanzenbach case 16 ... 23: 34036574631bSKurt Kanzenbach fhftsl |= 0x02; 34046574631bSKurt Kanzenbach break; 34056574631bSKurt Kanzenbach case 24 ... 31: 34066574631bSKurt Kanzenbach fhftsl |= 0x03; 34076574631bSKurt Kanzenbach break; 34086574631bSKurt Kanzenbach } 34096574631bSKurt Kanzenbach wr32(IGC_FHFTSL, fhftsl); 34106574631bSKurt Kanzenbach 34116574631bSKurt Kanzenbach /* Normalize index down to host table register */ 34126574631bSKurt Kanzenbach fhft_index = input->index % 8; 34136574631bSKurt Kanzenbach 34146574631bSKurt Kanzenbach *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : 34156574631bSKurt Kanzenbach IGC_FHFT_EXT(fhft_index - 4); 34166574631bSKurt Kanzenbach 34176574631bSKurt Kanzenbach return 0; 34186574631bSKurt Kanzenbach } 34196574631bSKurt Kanzenbach 34202b477d05SKurt Kanzenbach static int igc_write_flex_filter_ll(struct igc_adapter *adapter, 34216574631bSKurt Kanzenbach struct igc_flex_filter *input) 34226574631bSKurt Kanzenbach { 34236574631bSKurt Kanzenbach struct device *dev = &adapter->pdev->dev; 34246574631bSKurt Kanzenbach struct igc_hw *hw = &adapter->hw; 34256574631bSKurt Kanzenbach u8 *data = input->data; 34266574631bSKurt Kanzenbach u8 *mask = input->mask; 34276574631bSKurt Kanzenbach u32 queuing; 34286574631bSKurt Kanzenbach u32 fhft; 34296574631bSKurt Kanzenbach u32 wufc; 34306574631bSKurt Kanzenbach int ret; 34316574631bSKurt Kanzenbach int i; 34326574631bSKurt Kanzenbach 34336574631bSKurt Kanzenbach /* Length has to be aligned to 8. Otherwise the filter will fail. Bail 34346574631bSKurt Kanzenbach * out early to avoid surprises later. 
34356574631bSKurt Kanzenbach */ 34366574631bSKurt Kanzenbach if (input->length % 8 != 0) { 34376574631bSKurt Kanzenbach dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); 34386574631bSKurt Kanzenbach return -EINVAL; 34396574631bSKurt Kanzenbach } 34406574631bSKurt Kanzenbach 34416574631bSKurt Kanzenbach /* Select corresponding flex filter register and get base for host table. */ 34426574631bSKurt Kanzenbach ret = igc_flex_filter_select(adapter, input, &fhft); 34436574631bSKurt Kanzenbach if (ret) 34446574631bSKurt Kanzenbach return ret; 34456574631bSKurt Kanzenbach 34466574631bSKurt Kanzenbach /* When adding a filter globally disable flex filter feature. That is 34476574631bSKurt Kanzenbach * recommended within the datasheet. 34486574631bSKurt Kanzenbach */ 34496574631bSKurt Kanzenbach wufc = rd32(IGC_WUFC); 34506574631bSKurt Kanzenbach wufc &= ~IGC_WUFC_FLEX_HQ; 34516574631bSKurt Kanzenbach wr32(IGC_WUFC, wufc); 34526574631bSKurt Kanzenbach 34536574631bSKurt Kanzenbach /* Configure filter */ 34546574631bSKurt Kanzenbach queuing = input->length & IGC_FHFT_LENGTH_MASK; 34556574631bSKurt Kanzenbach queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; 34566574631bSKurt Kanzenbach queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; 34576574631bSKurt Kanzenbach 34586574631bSKurt Kanzenbach if (input->immediate_irq) 34596574631bSKurt Kanzenbach queuing |= IGC_FHFT_IMM_INT; 34606574631bSKurt Kanzenbach 34616574631bSKurt Kanzenbach if (input->drop) 34626574631bSKurt Kanzenbach queuing |= IGC_FHFT_DROP; 34636574631bSKurt Kanzenbach 34646574631bSKurt Kanzenbach wr32(fhft + 0xFC, queuing); 34656574631bSKurt Kanzenbach 34666574631bSKurt Kanzenbach /* Write data (128 byte) and mask (128 bit) */ 34676574631bSKurt Kanzenbach for (i = 0; i < 16; ++i) { 34686574631bSKurt Kanzenbach const size_t data_idx = i * 8; 34696574631bSKurt Kanzenbach const size_t row_idx = i * 16; 34706574631bSKurt Kanzenbach u32 dw0 = 
34716574631bSKurt Kanzenbach (data[data_idx + 0] << 0) | 34726574631bSKurt Kanzenbach (data[data_idx + 1] << 8) | 34736574631bSKurt Kanzenbach (data[data_idx + 2] << 16) | 34746574631bSKurt Kanzenbach (data[data_idx + 3] << 24); 34756574631bSKurt Kanzenbach u32 dw1 = 34766574631bSKurt Kanzenbach (data[data_idx + 4] << 0) | 34776574631bSKurt Kanzenbach (data[data_idx + 5] << 8) | 34786574631bSKurt Kanzenbach (data[data_idx + 6] << 16) | 34796574631bSKurt Kanzenbach (data[data_idx + 7] << 24); 34806574631bSKurt Kanzenbach u32 tmp; 34816574631bSKurt Kanzenbach 34826574631bSKurt Kanzenbach /* Write row: dw0, dw1 and mask */ 34836574631bSKurt Kanzenbach wr32(fhft + row_idx, dw0); 34846574631bSKurt Kanzenbach wr32(fhft + row_idx + 4, dw1); 34856574631bSKurt Kanzenbach 34866574631bSKurt Kanzenbach /* mask is only valid for MASK(7, 0) */ 34876574631bSKurt Kanzenbach tmp = rd32(fhft + row_idx + 8); 34886574631bSKurt Kanzenbach tmp &= ~GENMASK(7, 0); 34896574631bSKurt Kanzenbach tmp |= mask[i]; 34906574631bSKurt Kanzenbach wr32(fhft + row_idx + 8, tmp); 34916574631bSKurt Kanzenbach } 34926574631bSKurt Kanzenbach 34936574631bSKurt Kanzenbach /* Enable filter. */ 34946574631bSKurt Kanzenbach wufc |= IGC_WUFC_FLEX_HQ; 34956574631bSKurt Kanzenbach if (input->index > 8) { 34966574631bSKurt Kanzenbach /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. 
*/ 34976574631bSKurt Kanzenbach u32 wufc_ext = rd32(IGC_WUFC_EXT); 34986574631bSKurt Kanzenbach 34996574631bSKurt Kanzenbach wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); 35006574631bSKurt Kanzenbach 35016574631bSKurt Kanzenbach wr32(IGC_WUFC_EXT, wufc_ext); 35026574631bSKurt Kanzenbach } else { 35036574631bSKurt Kanzenbach wufc |= (IGC_WUFC_FLX0 << input->index); 35046574631bSKurt Kanzenbach } 35056574631bSKurt Kanzenbach wr32(IGC_WUFC, wufc); 35066574631bSKurt Kanzenbach 35076574631bSKurt Kanzenbach dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", 35086574631bSKurt Kanzenbach input->index); 35096574631bSKurt Kanzenbach 35106574631bSKurt Kanzenbach return 0; 35116574631bSKurt Kanzenbach } 35126574631bSKurt Kanzenbach 35132b477d05SKurt Kanzenbach static void igc_flex_filter_add_field(struct igc_flex_filter *flex, 35142b477d05SKurt Kanzenbach const void *src, unsigned int offset, 35152b477d05SKurt Kanzenbach size_t len, const void *mask) 35162b477d05SKurt Kanzenbach { 35172b477d05SKurt Kanzenbach int i; 35182b477d05SKurt Kanzenbach 35192b477d05SKurt Kanzenbach /* data */ 35202b477d05SKurt Kanzenbach memcpy(&flex->data[offset], src, len); 35212b477d05SKurt Kanzenbach 35222b477d05SKurt Kanzenbach /* mask */ 35232b477d05SKurt Kanzenbach for (i = 0; i < len; ++i) { 35242b477d05SKurt Kanzenbach const unsigned int idx = i + offset; 35252b477d05SKurt Kanzenbach const u8 *ptr = mask; 35262b477d05SKurt Kanzenbach 35272b477d05SKurt Kanzenbach if (mask) { 35282b477d05SKurt Kanzenbach if (ptr[i] & 0xff) 35292b477d05SKurt Kanzenbach flex->mask[idx / 8] |= BIT(idx % 8); 35302b477d05SKurt Kanzenbach 35312b477d05SKurt Kanzenbach continue; 35322b477d05SKurt Kanzenbach } 35332b477d05SKurt Kanzenbach 35342b477d05SKurt Kanzenbach flex->mask[idx / 8] |= BIT(idx % 8); 35352b477d05SKurt Kanzenbach } 35362b477d05SKurt Kanzenbach } 35372b477d05SKurt Kanzenbach 35382b477d05SKurt Kanzenbach static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) 
35392b477d05SKurt Kanzenbach { 35402b477d05SKurt Kanzenbach struct igc_hw *hw = &adapter->hw; 35412b477d05SKurt Kanzenbach u32 wufc, wufc_ext; 35422b477d05SKurt Kanzenbach int i; 35432b477d05SKurt Kanzenbach 35442b477d05SKurt Kanzenbach wufc = rd32(IGC_WUFC); 35452b477d05SKurt Kanzenbach wufc_ext = rd32(IGC_WUFC_EXT); 35462b477d05SKurt Kanzenbach 35472b477d05SKurt Kanzenbach for (i = 0; i < MAX_FLEX_FILTER; i++) { 35482b477d05SKurt Kanzenbach if (i < 8) { 35492b477d05SKurt Kanzenbach if (!(wufc & (IGC_WUFC_FLX0 << i))) 35502b477d05SKurt Kanzenbach return i; 35512b477d05SKurt Kanzenbach } else { 35522b477d05SKurt Kanzenbach if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) 35532b477d05SKurt Kanzenbach return i; 35542b477d05SKurt Kanzenbach } 35552b477d05SKurt Kanzenbach } 35562b477d05SKurt Kanzenbach 35572b477d05SKurt Kanzenbach return -ENOSPC; 35582b477d05SKurt Kanzenbach } 35592b477d05SKurt Kanzenbach 35602b477d05SKurt Kanzenbach static bool igc_flex_filter_in_use(struct igc_adapter *adapter) 35612b477d05SKurt Kanzenbach { 35622b477d05SKurt Kanzenbach struct igc_hw *hw = &adapter->hw; 35632b477d05SKurt Kanzenbach u32 wufc, wufc_ext; 35642b477d05SKurt Kanzenbach 35652b477d05SKurt Kanzenbach wufc = rd32(IGC_WUFC); 35662b477d05SKurt Kanzenbach wufc_ext = rd32(IGC_WUFC_EXT); 35672b477d05SKurt Kanzenbach 35682b477d05SKurt Kanzenbach if (wufc & IGC_WUFC_FILTER_MASK) 35692b477d05SKurt Kanzenbach return true; 35702b477d05SKurt Kanzenbach 35712b477d05SKurt Kanzenbach if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) 35722b477d05SKurt Kanzenbach return true; 35732b477d05SKurt Kanzenbach 35742b477d05SKurt Kanzenbach return false; 35752b477d05SKurt Kanzenbach } 35762b477d05SKurt Kanzenbach 35772b477d05SKurt Kanzenbach static int igc_add_flex_filter(struct igc_adapter *adapter, 35782b477d05SKurt Kanzenbach struct igc_nfc_rule *rule) 35792b477d05SKurt Kanzenbach { 35802b477d05SKurt Kanzenbach struct igc_flex_filter flex = { }; 35812b477d05SKurt Kanzenbach struct igc_nfc_filter *filter = 
&rule->filter; 35822b477d05SKurt Kanzenbach unsigned int eth_offset, user_offset; 35832b477d05SKurt Kanzenbach int ret, index; 35842b477d05SKurt Kanzenbach bool vlan; 35852b477d05SKurt Kanzenbach 35862b477d05SKurt Kanzenbach index = igc_find_avail_flex_filter_slot(adapter); 35872b477d05SKurt Kanzenbach if (index < 0) 35882b477d05SKurt Kanzenbach return -ENOSPC; 35892b477d05SKurt Kanzenbach 35902b477d05SKurt Kanzenbach /* Construct the flex filter: 35912b477d05SKurt Kanzenbach * -> dest_mac [6] 35922b477d05SKurt Kanzenbach * -> src_mac [6] 35932b477d05SKurt Kanzenbach * -> tpid [2] 35942b477d05SKurt Kanzenbach * -> vlan tci [2] 35952b477d05SKurt Kanzenbach * -> ether type [2] 35962b477d05SKurt Kanzenbach * -> user data [8] 35972b477d05SKurt Kanzenbach * -> = 26 bytes => 32 length 35982b477d05SKurt Kanzenbach */ 35992b477d05SKurt Kanzenbach flex.index = index; 36002b477d05SKurt Kanzenbach flex.length = 32; 36012b477d05SKurt Kanzenbach flex.rx_queue = rule->action; 36022b477d05SKurt Kanzenbach 36032b477d05SKurt Kanzenbach vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; 36042b477d05SKurt Kanzenbach eth_offset = vlan ? 16 : 12; 36052b477d05SKurt Kanzenbach user_offset = vlan ? 
18 : 14; 36062b477d05SKurt Kanzenbach 36072b477d05SKurt Kanzenbach /* Add destination MAC */ 36082b477d05SKurt Kanzenbach if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) 36092b477d05SKurt Kanzenbach igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, 36102b477d05SKurt Kanzenbach ETH_ALEN, NULL); 36112b477d05SKurt Kanzenbach 36122b477d05SKurt Kanzenbach /* Add source MAC */ 36132b477d05SKurt Kanzenbach if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) 36142b477d05SKurt Kanzenbach igc_flex_filter_add_field(&flex, &filter->src_addr, 6, 36152b477d05SKurt Kanzenbach ETH_ALEN, NULL); 36162b477d05SKurt Kanzenbach 36172b477d05SKurt Kanzenbach /* Add VLAN etype */ 36182b477d05SKurt Kanzenbach if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) 36192b477d05SKurt Kanzenbach igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, 36202b477d05SKurt Kanzenbach sizeof(filter->vlan_etype), 36212b477d05SKurt Kanzenbach NULL); 36222b477d05SKurt Kanzenbach 36232b477d05SKurt Kanzenbach /* Add VLAN TCI */ 36242b477d05SKurt Kanzenbach if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) 36252b477d05SKurt Kanzenbach igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, 36262b477d05SKurt Kanzenbach sizeof(filter->vlan_tci), NULL); 36272b477d05SKurt Kanzenbach 36282b477d05SKurt Kanzenbach /* Add Ether type */ 36292b477d05SKurt Kanzenbach if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 36302b477d05SKurt Kanzenbach __be16 etype = cpu_to_be16(filter->etype); 36312b477d05SKurt Kanzenbach 36322b477d05SKurt Kanzenbach igc_flex_filter_add_field(&flex, &etype, eth_offset, 36332b477d05SKurt Kanzenbach sizeof(etype), NULL); 36342b477d05SKurt Kanzenbach } 36352b477d05SKurt Kanzenbach 36362b477d05SKurt Kanzenbach /* Add user data */ 36372b477d05SKurt Kanzenbach if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) 36382b477d05SKurt Kanzenbach igc_flex_filter_add_field(&flex, &filter->user_data, 36392b477d05SKurt Kanzenbach user_offset, 
36402b477d05SKurt Kanzenbach sizeof(filter->user_data), 36412b477d05SKurt Kanzenbach filter->user_mask); 36422b477d05SKurt Kanzenbach 36432b477d05SKurt Kanzenbach /* Add it down to the hardware and enable it. */ 36442b477d05SKurt Kanzenbach ret = igc_write_flex_filter_ll(adapter, &flex); 36452b477d05SKurt Kanzenbach if (ret) 36462b477d05SKurt Kanzenbach return ret; 36472b477d05SKurt Kanzenbach 36482b477d05SKurt Kanzenbach filter->flex_index = index; 36492b477d05SKurt Kanzenbach 36502b477d05SKurt Kanzenbach return 0; 36512b477d05SKurt Kanzenbach } 36522b477d05SKurt Kanzenbach 36532b477d05SKurt Kanzenbach static void igc_del_flex_filter(struct igc_adapter *adapter, 36542b477d05SKurt Kanzenbach u16 reg_index) 36552b477d05SKurt Kanzenbach { 36562b477d05SKurt Kanzenbach struct igc_hw *hw = &adapter->hw; 36572b477d05SKurt Kanzenbach u32 wufc; 36582b477d05SKurt Kanzenbach 36592b477d05SKurt Kanzenbach /* Just disable the filter. The filter table itself is kept 36602b477d05SKurt Kanzenbach * intact. Another flex_filter_add() should override the "old" data 36612b477d05SKurt Kanzenbach * then. 
36622b477d05SKurt Kanzenbach */ 36632b477d05SKurt Kanzenbach if (reg_index > 8) { 36642b477d05SKurt Kanzenbach u32 wufc_ext = rd32(IGC_WUFC_EXT); 36652b477d05SKurt Kanzenbach 36662b477d05SKurt Kanzenbach wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); 36672b477d05SKurt Kanzenbach wr32(IGC_WUFC_EXT, wufc_ext); 36682b477d05SKurt Kanzenbach } else { 36692b477d05SKurt Kanzenbach wufc = rd32(IGC_WUFC); 36702b477d05SKurt Kanzenbach 36712b477d05SKurt Kanzenbach wufc &= ~(IGC_WUFC_FLX0 << reg_index); 36722b477d05SKurt Kanzenbach wr32(IGC_WUFC, wufc); 36732b477d05SKurt Kanzenbach } 36742b477d05SKurt Kanzenbach 36752b477d05SKurt Kanzenbach if (igc_flex_filter_in_use(adapter)) 36762b477d05SKurt Kanzenbach return; 36772b477d05SKurt Kanzenbach 36782b477d05SKurt Kanzenbach /* No filters are in use, we may disable flex filters */ 36792b477d05SKurt Kanzenbach wufc = rd32(IGC_WUFC); 36802b477d05SKurt Kanzenbach wufc &= ~IGC_WUFC_FLEX_HQ; 36812b477d05SKurt Kanzenbach wr32(IGC_WUFC, wufc); 36822b477d05SKurt Kanzenbach } 36832b477d05SKurt Kanzenbach 368436fa2152SAndre Guedes static int igc_enable_nfc_rule(struct igc_adapter *adapter, 36852b477d05SKurt Kanzenbach struct igc_nfc_rule *rule) 368636fa2152SAndre Guedes { 368736fa2152SAndre Guedes int err; 368836fa2152SAndre Guedes 368973744262SKurt Kanzenbach if (rule->flex) { 369073744262SKurt Kanzenbach return igc_add_flex_filter(adapter, rule); 36912b477d05SKurt Kanzenbach } 36922b477d05SKurt Kanzenbach 369336fa2152SAndre Guedes if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { 369436fa2152SAndre Guedes err = igc_add_etype_filter(adapter, rule->filter.etype, 369536fa2152SAndre Guedes rule->action); 369636fa2152SAndre Guedes if (err) 369736fa2152SAndre Guedes return err; 369836fa2152SAndre Guedes } 369936fa2152SAndre Guedes 370036fa2152SAndre Guedes if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { 370136fa2152SAndre Guedes err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, 370236fa2152SAndre Guedes 
					 rule->filter.src_addr, rule->action);
		if (err)
			return err;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
					 rule->filter.dst_addr, rule->action);
		if (err)
			return err;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
			   VLAN_PRIO_SHIFT;

		err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
		if (err)
			return err;
	}

	return 0;
}

/* Remove NFC @rule from hardware, undoing every filter kind that
 * igc_enable_nfc_rule() may have programmed for it.
 */
static void igc_disable_nfc_rule(struct igc_adapter *adapter,
				 const struct igc_nfc_rule *rule)
{
	/* A flex rule maps to exactly one flex filter slot */
	if (rule->flex) {
		igc_del_flex_filter(adapter, rule->filter.flex_index);
		return;
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
		igc_del_etype_filter(adapter, rule->filter.etype);

	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
			   VLAN_PRIO_SHIFT;

		igc_del_vlan_prio_filter(adapter, prio);
	}

	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
				   rule->filter.src_addr);

	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
				   rule->filter.dst_addr);
}

/**
 * igc_get_nfc_rule() - Get NFC rule
 * @adapter: Pointer to adapter
 * @location: Rule location
 *
 * Context: Expects adapter->nfc_rule_lock to be held by caller.
 *
 * Return: Pointer to NFC rule at @location. If not found, NULL.
 */
struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
				      u32 location)
{
	struct igc_nfc_rule *rule;

	/* The list is kept sorted by location, so stop early once past it */
	list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
		if (rule->location == location)
			return rule;
		if (rule->location > location)
			break;
	}

	return NULL;
}

/**
 * igc_del_nfc_rule() - Delete NFC rule
 * @adapter: Pointer to adapter
 * @rule: Pointer to rule to be deleted
 *
 * Disable NFC rule in hardware and delete it from adapter.
 *
 * Context: Expects adapter->nfc_rule_lock to be held by caller.
 */
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
{
	igc_disable_nfc_rule(adapter, rule);

	list_del(&rule->list);
	adapter->nfc_rule_count--;

	kfree(rule);
}

/* Delete every NFC rule from hardware and from the adapter's list.
 * Takes nfc_rule_lock itself, so must not be called with it held.
 */
static void igc_flush_nfc_rules(struct igc_adapter *adapter)
{
	struct igc_nfc_rule *rule, *tmp;

	mutex_lock(&adapter->nfc_rule_lock);

	list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
		igc_del_nfc_rule(adapter, rule);

	mutex_unlock(&adapter->nfc_rule_lock);
}

/**
 * igc_add_nfc_rule() - Add NFC rule
 * @adapter: Pointer to adapter
 * @rule: Pointer to rule to be added
 *
 * Enable NFC rule in hardware and add it to adapter.
 *
 * Context: Expects adapter->nfc_rule_lock to be held by caller.
 *
 * Return: 0 on success, negative errno on failure.
381836fa2152SAndre Guedes */ 381936fa2152SAndre Guedes int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) 382036fa2152SAndre Guedes { 382136fa2152SAndre Guedes struct igc_nfc_rule *pred, *cur; 382236fa2152SAndre Guedes int err; 382336fa2152SAndre Guedes 382436fa2152SAndre Guedes err = igc_enable_nfc_rule(adapter, rule); 382536fa2152SAndre Guedes if (err) 382636fa2152SAndre Guedes return err; 382736fa2152SAndre Guedes 382836fa2152SAndre Guedes pred = NULL; 382936fa2152SAndre Guedes list_for_each_entry(cur, &adapter->nfc_rule_list, list) { 383036fa2152SAndre Guedes if (cur->location >= rule->location) 383136fa2152SAndre Guedes break; 383236fa2152SAndre Guedes pred = cur; 383336fa2152SAndre Guedes } 383436fa2152SAndre Guedes 383536fa2152SAndre Guedes list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); 383636fa2152SAndre Guedes adapter->nfc_rule_count++; 383736fa2152SAndre Guedes return 0; 383836fa2152SAndre Guedes } 383936fa2152SAndre Guedes 384036fa2152SAndre Guedes static void igc_restore_nfc_rules(struct igc_adapter *adapter) 384136fa2152SAndre Guedes { 384236fa2152SAndre Guedes struct igc_nfc_rule *rule; 384336fa2152SAndre Guedes 384442fc5dc0SAndre Guedes mutex_lock(&adapter->nfc_rule_lock); 384536fa2152SAndre Guedes 384636fa2152SAndre Guedes list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) 384736fa2152SAndre Guedes igc_enable_nfc_rule(adapter, rule); 384836fa2152SAndre Guedes 384942fc5dc0SAndre Guedes mutex_unlock(&adapter->nfc_rule_lock); 385086a4de66SSasha Neftin } 385186a4de66SSasha Neftin 385286a4de66SSasha Neftin static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) 385386a4de66SSasha Neftin { 385486a4de66SSasha Neftin struct igc_adapter *adapter = netdev_priv(netdev); 385586a4de66SSasha Neftin 3856750433d0SAndre Guedes return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); 385786a4de66SSasha Neftin } 385886a4de66SSasha Neftin 385986a4de66SSasha Neftin static 
/* __dev_uc_sync() unsync callback: drop the unicast DST MAC filter. */
static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
	return 0;
}

/**
 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void igc_set_rx_mode(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	if (netdev->flags & IFF_PROMISC) {
		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IGC_RCTL_MPE;
		} else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igc_write_mc_addr_list(netdev);
			if (count < 0)
				rctl |= IGC_RCTL_MPE;
		}
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
		rctl |= IGC_RCTL_UPE;

	/* update state of unicast and multicast: keep every RCTL bit we
	 * don't own, replace only the UPE/MPE pair computed above
	 */
	rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
	wr32(IGC_RCTL, rctl);

	/* NOTE(review): on small-page systems the max packet length is
	 * capped so received frames still fit the build_skb Rx buffers —
	 * presumed from IGC_MAX_FRAME_BUILD_SKB; confirm.
	 */
#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
		rlpml = IGC_MAX_FRAME_BUILD_SKB;
#endif
	wr32(IGC_RLPML, rlpml);
}

/**
 * igc_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 *
 * NOTE(review): the call sequence below (hw control, Rx mode, VLAN,
 * TCTL/MRQC/RCTL, filters, Tx/Rx rings, FIFO flush, buffer allocation)
 * appears order-sensitive; confirm before reordering.
 */
static void igc_configure(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i = 0;

	igc_get_hw_control(adapter);
	igc_set_rx_mode(netdev);

	igc_restore_vlan(adapter);

	igc_setup_tctl(adapter);
	igc_setup_mrqc(adapter);
	igc_setup_rctl(adapter);

	igc_set_default_mac_filter(adapter);
	igc_restore_nfc_rules(adapter);

	igc_configure_tx(adapter);
	igc_configure_rx(adapter);

	igc_rx_fifo_flush_base(&adapter->hw);

	/* call igc_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];

		/* AF_XDP zero-copy queues fill from the XSK pool instead
		 * of the page-based buffer allocator
		 */
		if (ring->xsk_pool)
			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
		else
			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
	}
}
3968f817fa05SSasha Neftin */ 3969f817fa05SSasha Neftin static void igc_write_ivar(struct igc_hw *hw, int msix_vector, 3970f817fa05SSasha Neftin int index, int offset) 3971f817fa05SSasha Neftin { 3972f817fa05SSasha Neftin u32 ivar = array_rd32(IGC_IVAR0, index); 3973f817fa05SSasha Neftin 3974f817fa05SSasha Neftin /* clear any bits that are currently set */ 3975f817fa05SSasha Neftin ivar &= ~((u32)0xFF << offset); 3976f817fa05SSasha Neftin 3977f817fa05SSasha Neftin /* write vector and valid bit */ 3978f817fa05SSasha Neftin ivar |= (msix_vector | IGC_IVAR_VALID) << offset; 3979f817fa05SSasha Neftin 3980f817fa05SSasha Neftin array_wr32(IGC_IVAR0, index, ivar); 3981f817fa05SSasha Neftin } 3982f817fa05SSasha Neftin 3983f817fa05SSasha Neftin static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) 3984f817fa05SSasha Neftin { 3985f817fa05SSasha Neftin struct igc_adapter *adapter = q_vector->adapter; 3986f817fa05SSasha Neftin struct igc_hw *hw = &adapter->hw; 3987f817fa05SSasha Neftin int rx_queue = IGC_N0_QUEUE; 3988f817fa05SSasha Neftin int tx_queue = IGC_N0_QUEUE; 3989f817fa05SSasha Neftin 3990f817fa05SSasha Neftin if (q_vector->rx.ring) 3991f817fa05SSasha Neftin rx_queue = q_vector->rx.ring->reg_idx; 3992f817fa05SSasha Neftin if (q_vector->tx.ring) 3993f817fa05SSasha Neftin tx_queue = q_vector->tx.ring->reg_idx; 3994f817fa05SSasha Neftin 3995f817fa05SSasha Neftin switch (hw->mac.type) { 3996f817fa05SSasha Neftin case igc_i225: 3997f817fa05SSasha Neftin if (rx_queue > IGC_N0_QUEUE) 3998f817fa05SSasha Neftin igc_write_ivar(hw, msix_vector, 3999f817fa05SSasha Neftin rx_queue >> 1, 4000f817fa05SSasha Neftin (rx_queue & 0x1) << 4); 4001f817fa05SSasha Neftin if (tx_queue > IGC_N0_QUEUE) 4002f817fa05SSasha Neftin igc_write_ivar(hw, msix_vector, 4003f817fa05SSasha Neftin tx_queue >> 1, 4004f817fa05SSasha Neftin ((tx_queue & 0x1) << 4) + 8); 4005f817fa05SSasha Neftin q_vector->eims_value = BIT(msix_vector); 4006f817fa05SSasha Neftin break; 4007f817fa05SSasha 
Neftin default: 4008f817fa05SSasha Neftin WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); 4009f817fa05SSasha Neftin break; 4010f817fa05SSasha Neftin } 4011f817fa05SSasha Neftin 4012f817fa05SSasha Neftin /* add q_vector eims value to global eims_enable_mask */ 4013f817fa05SSasha Neftin adapter->eims_enable_mask |= q_vector->eims_value; 4014f817fa05SSasha Neftin 4015f817fa05SSasha Neftin /* configure q_vector to set itr on first interrupt */ 4016f817fa05SSasha Neftin q_vector->set_itr = 1; 4017f817fa05SSasha Neftin } 4018f817fa05SSasha Neftin 4019f817fa05SSasha Neftin /** 4020a146ea02SSasha Neftin * igc_configure_msix - Configure MSI-X hardware 4021a146ea02SSasha Neftin * @adapter: Pointer to adapter structure 4022a146ea02SSasha Neftin * 4023a146ea02SSasha Neftin * igc_configure_msix sets up the hardware to properly 4024a146ea02SSasha Neftin * generate MSI-X interrupts. 4025a146ea02SSasha Neftin */ 4026a146ea02SSasha Neftin static void igc_configure_msix(struct igc_adapter *adapter) 4027a146ea02SSasha Neftin { 4028a146ea02SSasha Neftin struct igc_hw *hw = &adapter->hw; 4029a146ea02SSasha Neftin int i, vector = 0; 4030a146ea02SSasha Neftin u32 tmp; 4031a146ea02SSasha Neftin 4032a146ea02SSasha Neftin adapter->eims_enable_mask = 0; 4033a146ea02SSasha Neftin 4034a146ea02SSasha Neftin /* set vector for other causes, i.e. link changes */ 4035a146ea02SSasha Neftin switch (hw->mac.type) { 4036a146ea02SSasha Neftin case igc_i225: 4037a146ea02SSasha Neftin /* Turn on MSI-X capability first, or our settings 4038a146ea02SSasha Neftin * won't stick. And it will take days to debug. 
4039a146ea02SSasha Neftin */ 4040a146ea02SSasha Neftin wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | 4041a146ea02SSasha Neftin IGC_GPIE_PBA | IGC_GPIE_EIAME | 4042a146ea02SSasha Neftin IGC_GPIE_NSICR); 4043a146ea02SSasha Neftin 4044a146ea02SSasha Neftin /* enable msix_other interrupt */ 4045a146ea02SSasha Neftin adapter->eims_other = BIT(vector); 4046a146ea02SSasha Neftin tmp = (vector++ | IGC_IVAR_VALID) << 8; 4047a146ea02SSasha Neftin 4048a146ea02SSasha Neftin wr32(IGC_IVAR_MISC, tmp); 4049a146ea02SSasha Neftin break; 4050a146ea02SSasha Neftin default: 4051a146ea02SSasha Neftin /* do nothing, since nothing else supports MSI-X */ 4052a146ea02SSasha Neftin break; 4053a146ea02SSasha Neftin } /* switch (hw->mac.type) */ 4054a146ea02SSasha Neftin 4055a146ea02SSasha Neftin adapter->eims_enable_mask |= adapter->eims_other; 4056a146ea02SSasha Neftin 4057a146ea02SSasha Neftin for (i = 0; i < adapter->num_q_vectors; i++) 4058a146ea02SSasha Neftin igc_assign_vector(adapter->q_vector[i], vector++); 4059a146ea02SSasha Neftin 4060a146ea02SSasha Neftin wrfl(); 4061a146ea02SSasha Neftin } 4062a146ea02SSasha Neftin 4063a146ea02SSasha Neftin /** 4064fccf939eSSasha Neftin * igc_irq_enable - Enable default interrupt generation settings 4065fccf939eSSasha Neftin * @adapter: board private structure 4066fccf939eSSasha Neftin */ 4067fccf939eSSasha Neftin static void igc_irq_enable(struct igc_adapter *adapter) 4068fccf939eSSasha Neftin { 4069fccf939eSSasha Neftin struct igc_hw *hw = &adapter->hw; 4070fccf939eSSasha Neftin 4071fccf939eSSasha Neftin if (adapter->msix_entries) { 4072fccf939eSSasha Neftin u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; 4073fccf939eSSasha Neftin u32 regval = rd32(IGC_EIAC); 4074fccf939eSSasha Neftin 4075fccf939eSSasha Neftin wr32(IGC_EIAC, regval | adapter->eims_enable_mask); 4076fccf939eSSasha Neftin regval = rd32(IGC_EIAM); 4077fccf939eSSasha Neftin wr32(IGC_EIAM, regval | adapter->eims_enable_mask); 4078fccf939eSSasha Neftin wr32(IGC_EIMS, 
/**
 * igc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks all causes, flushes the writes, then waits for any in-flight
 * interrupt handlers to finish via synchronize_irq().
 */
static void igc_irq_disable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(IGC_EIAM);

		/* strip our vectors out of the auto-mask/auto-clear
		 * registers, then mask them in EIMC
		 */
		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(IGC_EIMC, adapter->eims_enable_mask);
		regval = rd32(IGC_EIAC);
		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
	}

	/* mask every legacy cause and flush before synchronizing */
	wr32(IGC_IAM, 0);
	wr32(IGC_IMC, ~0);
	wrfl();

	if (adapter->msix_entries) {
		int vector = 0, i;

		/* vector 0 is the "other" (link etc.) interrupt */
		synchronize_irq(adapter->msix_entries[vector++].vector);

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/* Decide whether Rx and Tx queues share interrupt vectors. */
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
			      const u32 max_rss_queues)
{
	/* Determine if we need to pair queues. */
	/* If rss_queues > half of max_rss_queues, pair the queues in
	 * order to conserve interrupts due to limited supply.
	 */
	if (adapter->rss_queues > (max_rss_queues / 2))
		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	else
		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
}

/* All i225/i226 parts support the same maximum; @adapter is unused. */
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
	return IGC_MAX_RX_QUEUES;
}

/* Size the RSS queue count from CPUs available and set queue pairing. */
static void igc_init_queue_configuration(struct igc_adapter *adapter)
{
	u32 max_rss_queues;

	max_rss_queues = igc_get_max_rss_queues(adapter);
	/* never more queues than online CPUs */
	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	igc_set_flag_queue_pairs(adapter, max_rss_queues);
}
415463c92c9dSSasha Neftin */ 415563c92c9dSSasha Neftin static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) 415663c92c9dSSasha Neftin { 415763c92c9dSSasha Neftin struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 415863c92c9dSSasha Neftin 415963c92c9dSSasha Neftin /* if we're coming from igc_set_interrupt_capability, the vectors are 416063c92c9dSSasha Neftin * not yet allocated 416163c92c9dSSasha Neftin */ 416263c92c9dSSasha Neftin if (!q_vector) 416363c92c9dSSasha Neftin return; 416463c92c9dSSasha Neftin 416563c92c9dSSasha Neftin if (q_vector->tx.ring) 416663c92c9dSSasha Neftin adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 416763c92c9dSSasha Neftin 416863c92c9dSSasha Neftin if (q_vector->rx.ring) 416963c92c9dSSasha Neftin adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; 417063c92c9dSSasha Neftin 417163c92c9dSSasha Neftin netif_napi_del(&q_vector->napi); 417263c92c9dSSasha Neftin } 417363c92c9dSSasha Neftin 417463c92c9dSSasha Neftin /** 417563c92c9dSSasha Neftin * igc_free_q_vector - Free memory allocated for specific interrupt vector 417663c92c9dSSasha Neftin * @adapter: board private structure to initialize 417763c92c9dSSasha Neftin * @v_idx: Index of vector to be freed 417863c92c9dSSasha Neftin * 417963c92c9dSSasha Neftin * This function frees the memory allocated to the q_vector. 418063c92c9dSSasha Neftin */ 418163c92c9dSSasha Neftin static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) 418263c92c9dSSasha Neftin { 418363c92c9dSSasha Neftin struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; 418463c92c9dSSasha Neftin 418563c92c9dSSasha Neftin adapter->q_vector[v_idx] = NULL; 418663c92c9dSSasha Neftin 418763c92c9dSSasha Neftin /* igc_get_stats64() might access the rings on this vector, 418863c92c9dSSasha Neftin * we must wait a grace period before freeing it. 
418963c92c9dSSasha Neftin */ 419063c92c9dSSasha Neftin if (q_vector) 419163c92c9dSSasha Neftin kfree_rcu(q_vector, rcu); 419263c92c9dSSasha Neftin } 419363c92c9dSSasha Neftin 419463c92c9dSSasha Neftin /** 419563c92c9dSSasha Neftin * igc_free_q_vectors - Free memory allocated for interrupt vectors 419663c92c9dSSasha Neftin * @adapter: board private structure to initialize 419763c92c9dSSasha Neftin * 419863c92c9dSSasha Neftin * This function frees the memory allocated to the q_vectors. In addition if 419963c92c9dSSasha Neftin * NAPI is enabled it will delete any references to the NAPI struct prior 420063c92c9dSSasha Neftin * to freeing the q_vector. 420163c92c9dSSasha Neftin */ 420263c92c9dSSasha Neftin static void igc_free_q_vectors(struct igc_adapter *adapter) 420363c92c9dSSasha Neftin { 420463c92c9dSSasha Neftin int v_idx = adapter->num_q_vectors; 420563c92c9dSSasha Neftin 420663c92c9dSSasha Neftin adapter->num_tx_queues = 0; 420763c92c9dSSasha Neftin adapter->num_rx_queues = 0; 420863c92c9dSSasha Neftin adapter->num_q_vectors = 0; 420963c92c9dSSasha Neftin 421063c92c9dSSasha Neftin while (v_idx--) { 421163c92c9dSSasha Neftin igc_reset_q_vector(adapter, v_idx); 421263c92c9dSSasha Neftin igc_free_q_vector(adapter, v_idx); 421363c92c9dSSasha Neftin } 421463c92c9dSSasha Neftin } 421563c92c9dSSasha Neftin 421663c92c9dSSasha Neftin /** 421763c92c9dSSasha Neftin * igc_update_itr - update the dynamic ITR value based on statistics 421863c92c9dSSasha Neftin * @q_vector: pointer to q_vector 421963c92c9dSSasha Neftin * @ring_container: ring info to update the itr for 422063c92c9dSSasha Neftin * 422163c92c9dSSasha Neftin * Stores a new ITR value based on packets and byte 422263c92c9dSSasha Neftin * counts during the last interrupt. The advantage of per interrupt 422363c92c9dSSasha Neftin * computation is faster updates and more accurate ITR for the current 422463c92c9dSSasha Neftin * traffic pattern. 
/**
 * igc_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 */
static void igc_update_itr(struct igc_q_vector *q_vector,
			   struct igc_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	/* The transitions below move at most one latency class per call;
	 * the byte/packet thresholds are tuned values — do not "clean up".
	 */
	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes / packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes / packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes / packets) > 1200))
				itrval = bulk_latency;
			else if ((packets > 35))
				itrval = lowest_latency;
		} else if (bytes / packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

/* Fold the per-ring ITR classes into a new interrupt throttle rate. */
static void igc_set_itr(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		current_itr = 0;
		new_itr = IGC_4K_ITR;
		goto set_itr_now;
	default:
		break;
	}

	igc_update_itr(q_vector, &q_vector->tx);
	igc_update_itr(q_vector, &q_vector->rx);

	/* the more latency-sensitive class of the two wins */
	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			  (new_itr + (q_vector->itr_val >> 2)),
			  new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts. Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

/* Undo MSI-X/MSI setup and reset every q_vector's configuration. */
static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}

	while (v_idx--)
		igc_reset_q_vector(adapter, v_idx);
}
 */
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix)
{
	int numvecs, i;
	int err;

	/* Caller asked for plain MSI (or legacy): skip all MSI-X setup. */
	if (!msix)
		goto msi_only;
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;

	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;

	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	/* NOTE(review): on kcalloc failure we return with IGC_FLAG_HAS_MSIX
	 * still set and without falling back to MSI — presumably the caller
	 * copes via msix_entries == NULL; confirm before relying on the flag.
	 */
	if (!adapter->msix_entries)
		return;

	/* populate entry values */
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	/* min == max == numvecs, so success returns exactly numvecs (> 0) */
	if (err > 0)
		return;

	/* MSI-X allocation failed: release entries and fall through to MSI */
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;

	igc_reset_interrupt_capability(adapter);

msi_only:
	/* Single-vector MSI fallback: collapse to one paired Tx/Rx queue. */
	adapter->flags &= ~IGC_FLAG_HAS_MSIX;

	adapter->rss_queues = 1;
	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGC_FLAG_HAS_MSI;
}

/**
 * igc_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based on strictly on packet size. This
 * algorithm is less sophisticated than that used in igc_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings. The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 */
static void igc_update_ring_itr(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		new_val = IGC_4K_ITR;
		goto set_itr_val;
	default:
		break;
	}

	/* Average wire size is the larger of the Rx and Tx per-packet
	 * averages accumulated since the last ITR update.
	 */
	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if (avg_wire_size > 300 && avg_wire_size < 1200)
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGC_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGC_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	/* Reset the byte/packet accumulators for the next interval. */
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

/* igc_ring_irq_enable - recompute dynamic ITR if enabled, then re-arm the
 * vector's interrupt (EIMS for MSI-X, global enable otherwise) unless the
 * adapter is going down.
 */
static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_hw *hw = &adapter->hw;

	/* itr_setting values 1-3 select a dynamic ITR mode */
	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if (adapter->num_q_vectors == 1)
			igc_set_itr(q_vector);
		else
			igc_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGC_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(IGC_EIMS, q_vector->eims_value);
		else
			igc_irq_enable(adapter);
	}
}

/* igc_add_ring - attach a ring to a q_vector's Tx or Rx ring container */
static void igc_add_ring(struct igc_ring *ring,
			 struct igc_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igc_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 */
static void igc_cache_ring_register(struct igc_adapter *adapter)
{
	int i = 0, j = 0;

	switch (adapter->hw.mac.type) {
	case igc_i225:
	default:
		/* i225: identity mapping — ring N uses register index N */
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = j;
		break;
	}
}

/**
 * igc_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 */
static int igc_poll(struct napi_struct *napi, int budget)
{
	struct igc_q_vector *q_vector = container_of(napi,
						     struct igc_q_vector,
						     napi);
	struct igc_ring *rx_ring = q_vector->rx.ring;
	bool clean_complete = true;
	int work_done = 0;

	if (q_vector->tx.ring)
		clean_complete = igc_clean_tx_irq(q_vector, budget);

	if (rx_ring) {
		/* AF_XDP zero-copy rings use the dedicated _zc clean path */
		int cleaned = rx_ring->xsk_pool ?
			      igc_clean_rx_irq_zc(q_vector, budget) :
			      igc_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		igc_ring_irq_enable(q_vector);

	/* must report strictly less than budget to stay in NAPI contract */
	return min(work_done, budget - 1);
}

/**
 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int igc_alloc_q_vector(struct igc_adapter *adapter,
			      unsigned int v_count, unsigned int v_idx,
			      unsigned int txr_count, unsigned int txr_idx,
			      unsigned int rxr_count, unsigned int rxr_idx)
{
	struct igc_q_vector *q_vector;
	struct igc_ring *ring;
	int ring_count;

	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;

	/* allocate q_vector and rings; reuse an existing allocation if the
	 * vector was set up before (e.g. after a queue reconfiguration)
	 */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	else
		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
	q_vector->itr_val = IGC_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR: settings 1-3 request dynamic ITR (keep START_ITR);
	 * 0 or a value > 3 is taken as a fixed ITR value
	 */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	/* the flexible ring array holds the Tx ring first, then the Rx ring */
	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igc_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igc_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
static int igc_alloc_q_vectors(struct igc_adapter *adapter)
{
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int q_vectors = adapter->num_q_vectors;
	int err;

	/* If we have enough vectors for one per queue, give each Rx queue
	 * its own Rx-only vector; Tx queues are handled by the loop below.
	 */
	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	/* Distribute the remaining Tx/Rx queues evenly over the remaining
	 * vectors (each vector gets at most one of each — see
	 * igc_alloc_q_vector).
	 */
	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	/* unwind: drop queue bookkeeping and free every vector built so far */
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igc_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: Pointer to adapter structure
 * @msix: boolean for MSI-X capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 */
static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
{
	struct net_device *dev = adapter->netdev;
	int err = 0;

	igc_set_interrupt_capability(adapter, msix);

	err = igc_alloc_q_vectors(adapter);
	if (err) {
		netdev_err(dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igc_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igc_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igc_sw_init - Initialize general software structures (struct igc_adapter)
 * @adapter: board private structure to initialize
 *
 * igc_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int igc_sw_init(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGC_DEFAULT_TXD;
	adapter->rx_ring_count = IGC_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGC_DEFAULT_ITR;
	adapter->tx_itr_setting = IGC_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;

	/* adjust max frame to be at least the size of a standard frame */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	mutex_init(&adapter->nfc_rule_lock);
	INIT_LIST_HEAD(&adapter->nfc_rule_list);
	adapter->nfc_rule_count = 0;

	spin_lock_init(&adapter->stats64_lock);
	spin_lock_init(&adapter->qbv_tx_lock);
	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	igc_init_queue_configuration(adapter);

	/* This call may decrease the number of queues */
	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igc_irq_disable(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	return 0;
}

/**
 * igc_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 */
void igc_up(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i = 0;

	/* hardware has been reset, we need to reload some things */
	igc_configure(adapter);

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	if (adapter->msix_entries)
		igc_configure_msix(adapter);
	else
		igc_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = true;
	schedule_work(&adapter->watchdog_task);
}

/**
 * igc_update_stats - Update the board statistics counters
 * @adapter: board private structure
 */
void igc_update_stats(struct igc_adapter *adapter)
{
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	u64 _bytes, _packets;
	u64 bytes, packets;
	unsigned int start;
	u32 mpc;
	int i;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
490336b9fea6SSasha Neftin */ 490436b9fea6SSasha Neftin if (adapter->link_speed == 0) 490536b9fea6SSasha Neftin return; 490636b9fea6SSasha Neftin if (pci_channel_offline(pdev)) 490736b9fea6SSasha Neftin return; 490836b9fea6SSasha Neftin 490936b9fea6SSasha Neftin packets = 0; 491036b9fea6SSasha Neftin bytes = 0; 491136b9fea6SSasha Neftin 491236b9fea6SSasha Neftin rcu_read_lock(); 491336b9fea6SSasha Neftin for (i = 0; i < adapter->num_rx_queues; i++) { 491436b9fea6SSasha Neftin struct igc_ring *ring = adapter->rx_ring[i]; 491536b9fea6SSasha Neftin u32 rqdpc = rd32(IGC_RQDPC(i)); 491636b9fea6SSasha Neftin 491736b9fea6SSasha Neftin if (hw->mac.type >= igc_i225) 491836b9fea6SSasha Neftin wr32(IGC_RQDPC(i), 0); 491936b9fea6SSasha Neftin 492036b9fea6SSasha Neftin if (rqdpc) { 492136b9fea6SSasha Neftin ring->rx_stats.drops += rqdpc; 492236b9fea6SSasha Neftin net_stats->rx_fifo_errors += rqdpc; 492336b9fea6SSasha Neftin } 492436b9fea6SSasha Neftin 492536b9fea6SSasha Neftin do { 4926068c38adSThomas Gleixner start = u64_stats_fetch_begin(&ring->rx_syncp); 492736b9fea6SSasha Neftin _bytes = ring->rx_stats.bytes; 492836b9fea6SSasha Neftin _packets = ring->rx_stats.packets; 4929068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); 493036b9fea6SSasha Neftin bytes += _bytes; 493136b9fea6SSasha Neftin packets += _packets; 493236b9fea6SSasha Neftin } 493336b9fea6SSasha Neftin 493436b9fea6SSasha Neftin net_stats->rx_bytes = bytes; 493536b9fea6SSasha Neftin net_stats->rx_packets = packets; 493636b9fea6SSasha Neftin 493736b9fea6SSasha Neftin packets = 0; 493836b9fea6SSasha Neftin bytes = 0; 493936b9fea6SSasha Neftin for (i = 0; i < adapter->num_tx_queues; i++) { 494036b9fea6SSasha Neftin struct igc_ring *ring = adapter->tx_ring[i]; 494136b9fea6SSasha Neftin 494236b9fea6SSasha Neftin do { 4943068c38adSThomas Gleixner start = u64_stats_fetch_begin(&ring->tx_syncp); 494436b9fea6SSasha Neftin _bytes = ring->tx_stats.bytes; 494536b9fea6SSasha Neftin _packets = 
ring->tx_stats.packets; 4946068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); 494736b9fea6SSasha Neftin bytes += _bytes; 494836b9fea6SSasha Neftin packets += _packets; 494936b9fea6SSasha Neftin } 495036b9fea6SSasha Neftin net_stats->tx_bytes = bytes; 495136b9fea6SSasha Neftin net_stats->tx_packets = packets; 495236b9fea6SSasha Neftin rcu_read_unlock(); 495336b9fea6SSasha Neftin 495436b9fea6SSasha Neftin /* read stats registers */ 495536b9fea6SSasha Neftin adapter->stats.crcerrs += rd32(IGC_CRCERRS); 495636b9fea6SSasha Neftin adapter->stats.gprc += rd32(IGC_GPRC); 495736b9fea6SSasha Neftin adapter->stats.gorc += rd32(IGC_GORCL); 495836b9fea6SSasha Neftin rd32(IGC_GORCH); /* clear GORCL */ 495936b9fea6SSasha Neftin adapter->stats.bprc += rd32(IGC_BPRC); 496036b9fea6SSasha Neftin adapter->stats.mprc += rd32(IGC_MPRC); 496136b9fea6SSasha Neftin adapter->stats.roc += rd32(IGC_ROC); 496236b9fea6SSasha Neftin 496336b9fea6SSasha Neftin adapter->stats.prc64 += rd32(IGC_PRC64); 496436b9fea6SSasha Neftin adapter->stats.prc127 += rd32(IGC_PRC127); 496536b9fea6SSasha Neftin adapter->stats.prc255 += rd32(IGC_PRC255); 496636b9fea6SSasha Neftin adapter->stats.prc511 += rd32(IGC_PRC511); 496736b9fea6SSasha Neftin adapter->stats.prc1023 += rd32(IGC_PRC1023); 496836b9fea6SSasha Neftin adapter->stats.prc1522 += rd32(IGC_PRC1522); 496940edc734SSasha Neftin adapter->stats.tlpic += rd32(IGC_TLPIC); 497040edc734SSasha Neftin adapter->stats.rlpic += rd32(IGC_RLPIC); 4971e6529944SSasha Neftin adapter->stats.hgptc += rd32(IGC_HGPTC); 497236b9fea6SSasha Neftin 497336b9fea6SSasha Neftin mpc = rd32(IGC_MPC); 497436b9fea6SSasha Neftin adapter->stats.mpc += mpc; 497536b9fea6SSasha Neftin net_stats->rx_fifo_errors += mpc; 497636b9fea6SSasha Neftin adapter->stats.scc += rd32(IGC_SCC); 497736b9fea6SSasha Neftin adapter->stats.ecol += rd32(IGC_ECOL); 497836b9fea6SSasha Neftin adapter->stats.mcc += rd32(IGC_MCC); 497936b9fea6SSasha Neftin adapter->stats.latecol += 
rd32(IGC_LATECOL); 498036b9fea6SSasha Neftin adapter->stats.dc += rd32(IGC_DC); 498136b9fea6SSasha Neftin adapter->stats.rlec += rd32(IGC_RLEC); 498236b9fea6SSasha Neftin adapter->stats.xonrxc += rd32(IGC_XONRXC); 498336b9fea6SSasha Neftin adapter->stats.xontxc += rd32(IGC_XONTXC); 498436b9fea6SSasha Neftin adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); 498536b9fea6SSasha Neftin adapter->stats.xofftxc += rd32(IGC_XOFFTXC); 498636b9fea6SSasha Neftin adapter->stats.fcruc += rd32(IGC_FCRUC); 498736b9fea6SSasha Neftin adapter->stats.gptc += rd32(IGC_GPTC); 498836b9fea6SSasha Neftin adapter->stats.gotc += rd32(IGC_GOTCL); 498936b9fea6SSasha Neftin rd32(IGC_GOTCH); /* clear GOTCL */ 499036b9fea6SSasha Neftin adapter->stats.rnbc += rd32(IGC_RNBC); 499136b9fea6SSasha Neftin adapter->stats.ruc += rd32(IGC_RUC); 499236b9fea6SSasha Neftin adapter->stats.rfc += rd32(IGC_RFC); 499336b9fea6SSasha Neftin adapter->stats.rjc += rd32(IGC_RJC); 499436b9fea6SSasha Neftin adapter->stats.tor += rd32(IGC_TORH); 499536b9fea6SSasha Neftin adapter->stats.tot += rd32(IGC_TOTH); 499636b9fea6SSasha Neftin adapter->stats.tpr += rd32(IGC_TPR); 499736b9fea6SSasha Neftin 499836b9fea6SSasha Neftin adapter->stats.ptc64 += rd32(IGC_PTC64); 499936b9fea6SSasha Neftin adapter->stats.ptc127 += rd32(IGC_PTC127); 500036b9fea6SSasha Neftin adapter->stats.ptc255 += rd32(IGC_PTC255); 500136b9fea6SSasha Neftin adapter->stats.ptc511 += rd32(IGC_PTC511); 500236b9fea6SSasha Neftin adapter->stats.ptc1023 += rd32(IGC_PTC1023); 500336b9fea6SSasha Neftin adapter->stats.ptc1522 += rd32(IGC_PTC1522); 500436b9fea6SSasha Neftin 500536b9fea6SSasha Neftin adapter->stats.mptc += rd32(IGC_MPTC); 500636b9fea6SSasha Neftin adapter->stats.bptc += rd32(IGC_BPTC); 500736b9fea6SSasha Neftin 500836b9fea6SSasha Neftin adapter->stats.tpt += rd32(IGC_TPT); 500936b9fea6SSasha Neftin adapter->stats.colc += rd32(IGC_COLC); 501051c657b4SSasha Neftin adapter->stats.colc += rd32(IGC_RERC); 501136b9fea6SSasha Neftin 501236b9fea6SSasha Neftin 
adapter->stats.algnerrc += rd32(IGC_ALGNERRC); 501336b9fea6SSasha Neftin 501436b9fea6SSasha Neftin adapter->stats.tsctc += rd32(IGC_TSCTC); 501536b9fea6SSasha Neftin 501636b9fea6SSasha Neftin adapter->stats.iac += rd32(IGC_IAC); 501736b9fea6SSasha Neftin 501836b9fea6SSasha Neftin /* Fill out the OS statistics structure */ 501936b9fea6SSasha Neftin net_stats->multicast = adapter->stats.mprc; 502036b9fea6SSasha Neftin net_stats->collisions = adapter->stats.colc; 502136b9fea6SSasha Neftin 502236b9fea6SSasha Neftin /* Rx Errors */ 502336b9fea6SSasha Neftin 502436b9fea6SSasha Neftin /* RLEC on some newer hardware can be incorrect so build 502536b9fea6SSasha Neftin * our own version based on RUC and ROC 502636b9fea6SSasha Neftin */ 502736b9fea6SSasha Neftin net_stats->rx_errors = adapter->stats.rxerrc + 502836b9fea6SSasha Neftin adapter->stats.crcerrs + adapter->stats.algnerrc + 502936b9fea6SSasha Neftin adapter->stats.ruc + adapter->stats.roc + 503036b9fea6SSasha Neftin adapter->stats.cexterr; 503136b9fea6SSasha Neftin net_stats->rx_length_errors = adapter->stats.ruc + 503236b9fea6SSasha Neftin adapter->stats.roc; 503336b9fea6SSasha Neftin net_stats->rx_crc_errors = adapter->stats.crcerrs; 503436b9fea6SSasha Neftin net_stats->rx_frame_errors = adapter->stats.algnerrc; 503536b9fea6SSasha Neftin net_stats->rx_missed_errors = adapter->stats.mpc; 503636b9fea6SSasha Neftin 503736b9fea6SSasha Neftin /* Tx Errors */ 503836b9fea6SSasha Neftin net_stats->tx_errors = adapter->stats.ecol + 503936b9fea6SSasha Neftin adapter->stats.latecol; 504036b9fea6SSasha Neftin net_stats->tx_aborted_errors = adapter->stats.ecol; 504136b9fea6SSasha Neftin net_stats->tx_window_errors = adapter->stats.latecol; 504236b9fea6SSasha Neftin net_stats->tx_carrier_errors = adapter->stats.tncrs; 504336b9fea6SSasha Neftin 504492a0dcb8STan Tee Min /* Tx Dropped */ 504592a0dcb8STan Tee Min net_stats->tx_dropped = adapter->stats.txdrop; 504636b9fea6SSasha Neftin 504736b9fea6SSasha Neftin /* Management Stats 
/**
 * igc_down - Close the interface
 * @adapter: board private structure
 *
 * Stops the interface: disables Rx then Tx in hardware, quiesces NAPI,
 * stops timers, snapshots statistics and resets the device.  The order
 * matters: receives are disabled first so no new work arrives while the
 * transmit path drains.
 */
void igc_down(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i = 0;

	set_bit(__IGC_DOWN, &adapter->state);

	igc_ptp_suspend(adapter);

	/* Only touch registers if the device is still on the bus
	 * (guards against surprise removal).
	 */
	if (pci_device_is_present(adapter->pdev)) {
		/* disable receives in the hardware */
		rctl = rd32(IGC_RCTL);
		wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
		/* flush and sleep below */
	}
	/* set trans_start so we don't get spurious watchdogs during reset */
	netif_trans_update(netdev);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	if (pci_device_is_present(adapter->pdev)) {
		/* disable transmits in the hardware */
		tctl = rd32(IGC_TCTL);
		tctl &= ~IGC_TCTL_EN;
		wr32(IGC_TCTL, tctl);
		/* flush both disables and wait for them to finish */
		wrfl();
		usleep_range(10000, 20000);

		igc_irq_disable(adapter);
	}

	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

	/* Wait for in-flight NAPI polls to finish before disabling them */
	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igc_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;

	igc_disable_all_tx_rings_hw(adapter);
	igc_clean_all_tx_rings(adapter);
	igc_clean_all_rx_rings(adapter);
}
50870507ef8aSSasha Neftin usleep_range(10000, 20000); 50880507ef8aSSasha Neftin 50890507ef8aSSasha Neftin igc_irq_disable(adapter); 50904b799595SAaron Ma } 50910507ef8aSSasha Neftin 50920507ef8aSSasha Neftin adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 50930507ef8aSSasha Neftin 50940507ef8aSSasha Neftin for (i = 0; i < adapter->num_q_vectors; i++) { 50950507ef8aSSasha Neftin if (adapter->q_vector[i]) { 50960507ef8aSSasha Neftin napi_synchronize(&adapter->q_vector[i]->napi); 5097c9a11c23SSasha Neftin napi_disable(&adapter->q_vector[i]->napi); 50980507ef8aSSasha Neftin } 50990507ef8aSSasha Neftin } 51000507ef8aSSasha Neftin 51010507ef8aSSasha Neftin del_timer_sync(&adapter->watchdog_timer); 51020507ef8aSSasha Neftin del_timer_sync(&adapter->phy_info_timer); 51030507ef8aSSasha Neftin 51040507ef8aSSasha Neftin /* record the stats before reset*/ 51050507ef8aSSasha Neftin spin_lock(&adapter->stats64_lock); 51060507ef8aSSasha Neftin igc_update_stats(adapter); 51070507ef8aSSasha Neftin spin_unlock(&adapter->stats64_lock); 5108c9a11c23SSasha Neftin 5109c9a11c23SSasha Neftin adapter->link_speed = 0; 5110c9a11c23SSasha Neftin adapter->link_duplex = 0; 51110507ef8aSSasha Neftin 51120507ef8aSSasha Neftin if (!pci_channel_offline(adapter->pdev)) 51130507ef8aSSasha Neftin igc_reset(adapter); 51140507ef8aSSasha Neftin 51150507ef8aSSasha Neftin /* clear VLAN promisc flag so VFTA will be updated if necessary */ 51160507ef8aSSasha Neftin adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; 51170507ef8aSSasha Neftin 5118d4a7ce64SMuhammad Husaini Zulkifli igc_disable_all_tx_rings_hw(adapter); 51190507ef8aSSasha Neftin igc_clean_all_tx_rings(adapter); 51200507ef8aSSasha Neftin igc_clean_all_rx_rings(adapter); 51210507ef8aSSasha Neftin } 51220507ef8aSSasha Neftin 51238c5ad0daSSasha Neftin void igc_reinit_locked(struct igc_adapter *adapter) 51240507ef8aSSasha Neftin { 51250507ef8aSSasha Neftin while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) 51260507ef8aSSasha Neftin usleep_range(1000, 
/* Worker for adapter->reset_task: dump state for diagnosis and reinit
 * the adapter.  Runs in process context so it may sleep and take rtnl.
 */
static void igc_reset_task(struct work_struct *work)
{
	struct igc_adapter *adapter;

	adapter = container_of(work, struct igc_adapter, reset_task);

	rtnl_lock();
	/* If we're already down or resetting, just bail */
	if (test_bit(__IGC_DOWN, &adapter->state) ||
	    test_bit(__IGC_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	/* Dump rings and registers before the reset wipes the evidence */
	igc_rings_dump(adapter);
	igc_regs_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igc_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * igc_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* XDP buffers are sized for standard frames only */
	if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(netdev, "Jumbo frames not supported with XDP");
		return -EINVAL;
	}

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	/* serialize against any concurrent reset */
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igc_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igc_down(adapter);

	netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igc_up(adapter);
	else
		igc_reset(adapter);

	clear_bit(__IGC_RESETTING, &adapter->state);

	return 0;
}
/**
 * igc_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that timed out
 **/
static void igc_tx_timeout(struct net_device *netdev,
			   unsigned int __always_unused txqueue)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
	/* Kick all queue interrupts (except the "other" vector) so any
	 * stalled cleanup gets one more chance to run before the reset.
	 */
	wr32(IGC_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
/**
 * igc_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 * Returns the address of the device statistics structure.
 * The statistics are updated here and also from the timer callback.
 */
static void igc_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	/* skip the HW read while a reset is in flight; registers may be
	 * mid-reinit.  The cached snapshot is still returned below.
	 */
	if (!test_bit(__IGC_RESETTING, &adapter->state))
		igc_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}

/* ndo_fix_features: sanitize a requested feature set before it is applied */
static netdev_features_t igc_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
/* ndo_set_features: apply a new feature set.  Returns 1 when the netdev
 * core should not update netdev->features itself (we did it here), 0 when
 * nothing further is needed.
 */
static int igc_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		igc_vlan_mode(netdev, features);

	/* Add VLAN support */
	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
		return 0;

	/* dropping ntuple means any installed flow-steering rules go too */
	if (!(features & NETIF_F_NTUPLE))
		igc_flush_nfc_rules(adapter);

	netdev->features = features;

	/* RXALL/NTUPLE changes require reprogramming the hardware */
	if (netif_running(netdev))
		igc_reinit_locked(adapter);
	else
		igc_reset(adapter);

	return 1;
}

/* ndo_features_check: strip offload features the hardware cannot apply to
 * this particular skb (header lengths beyond context-descriptor limits).
 */
static netdev_features_t
igc_features_check(struct sk_buff *skb, struct net_device *dev,
		   netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPv4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
can be described by a context descriptor */ 528165cd3a72SSasha Neftin mac_hdr_len = skb_network_header(skb) - skb->data; 528265cd3a72SSasha Neftin if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) 528365cd3a72SSasha Neftin return features & ~(NETIF_F_HW_CSUM | 528465cd3a72SSasha Neftin NETIF_F_SCTP_CRC | 528565cd3a72SSasha Neftin NETIF_F_HW_VLAN_CTAG_TX | 528665cd3a72SSasha Neftin NETIF_F_TSO | 528765cd3a72SSasha Neftin NETIF_F_TSO6); 528865cd3a72SSasha Neftin 528965cd3a72SSasha Neftin network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); 529065cd3a72SSasha Neftin if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) 529165cd3a72SSasha Neftin return features & ~(NETIF_F_HW_CSUM | 529265cd3a72SSasha Neftin NETIF_F_SCTP_CRC | 529365cd3a72SSasha Neftin NETIF_F_TSO | 529465cd3a72SSasha Neftin NETIF_F_TSO6); 529565cd3a72SSasha Neftin 529665cd3a72SSasha Neftin /* We can only support IPv4 TSO in tunnels if we can mangle the 529765cd3a72SSasha Neftin * inner IP ID field, so strip TSO if MANGLEID is not supported. 
/* Handle a time-sync (TSICR) interrupt: dispatch PPS, Tx timestamp,
 * periodic-output target-time and auxiliary-timestamp events, then
 * acknowledge exactly the causes that were serviced.
 */
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
	u32 ack, tsauxc, sec, nsec, tsicr;
	struct igc_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
	struct timespec64 ts;

	tsicr = rd32(IGC_TSICR);
	ack = 0;

	if (tsicr & IGC_TSICR_SYS_WRAP) {
		event.type = PTP_CLOCK_PPS;
		if (adapter->ptp_caps.pps)
			ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_SYS_WRAP;
	}

	if (tsicr & IGC_TSICR_TXTS) {
		/* retrieve hardware timestamp */
		igc_ptp_tx_tstamp_event(adapter);
		ack |= IGC_TSICR_TXTS;
	}

	if (tsicr & IGC_TSICR_TT0) {
		/* target time 0 fired: re-arm it one period ahead.
		 * tmreg_lock protects the perout state and TRGTTIM regs.
		 */
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[0].start,
				    adapter->perout[0].period);
		wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
		wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
		tsauxc = rd32(IGC_TSAUXC);
		tsauxc |= IGC_TSAUXC_EN_TT0;
		wr32(IGC_TSAUXC, tsauxc);
		adapter->perout[0].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= IGC_TSICR_TT0;
	}

	if (tsicr & IGC_TSICR_TT1) {
		/* target time 1: same re-arm dance as TT0 */
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[1].start,
				    adapter->perout[1].period);
		wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
		wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
		tsauxc = rd32(IGC_TSAUXC);
		tsauxc |= IGC_TSAUXC_EN_TT1;
		wr32(IGC_TSAUXC, tsauxc);
		adapter->perout[1].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= IGC_TSICR_TT1;
	}

	if (tsicr & IGC_TSICR_AUTT0) {
		/* auxiliary timestamp 0 latched: forward as an EXTTS event */
		nsec = rd32(IGC_AUXSTMPL0);
		sec = rd32(IGC_AUXSTMPH0);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = sec * NSEC_PER_SEC + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_AUTT0;
	}

	if (tsicr & IGC_TSICR_AUTT1) {
		nsec = rd32(IGC_AUXSTMPL1);
		sec = rd32(IGC_AUXSTMPH1);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = sec * NSEC_PER_SEC + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_AUTT1;
	}

	/* acknowledge the interrupts */
	wr32(IGC_TSICR, ack);
}
/**
 * igc_msix_other - msix other interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * Handles the non-queue causes: device reset assertion, DMA-out-of-sync,
 * link status change and time-sync events.
 */
static irqreturn_t igc_msix_other(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_hw *hw = &adapter->hw;
	u32 icr = rd32(IGC_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */
	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & IGC_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & IGC_ICR_TS)
		igc_tsync_interrupt(adapter);

	/* re-enable this vector's interrupt */
	wr32(IGC_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/* Write the pending ITR value for this q_vector to its EITR register,
 * if one was computed since the last interrupt.
 */
static void igc_write_itr(struct igc_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;

	if (!q_vector->set_itr)
		return;

	/* a zero interval would disable moderation entirely; clamp it */
	if (!itr_val)
		itr_val = IGC_ITR_VAL_MASK;

	itr_val |= IGC_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

/* Per-queue MSI-X handler: apply pending ITR and kick NAPI */
static irqreturn_t igc_msix_ring(int irq, void *data)
{
	struct igc_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igc_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igc_request_msix - Initialize MSI-X interrupts
 * @adapter: Pointer to adapter structure
 *
 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.  Vector 0 is the "other" (link/management) vector; queue vectors
 * follow.  On failure all already-assigned IRQs are released.
 */
static int igc_request_msix(struct igc_adapter *adapter)
{
	unsigned int num_q_vectors = adapter->num_q_vectors;
	int i = 0, err = 0, vector = 0, free_vector = 0;
	struct net_device *netdev = adapter->netdev;

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igc_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	if (num_q_vectors > MAX_Q_VECTORS) {
		num_q_vectors = MAX_Q_VECTORS;
		dev_warn(&adapter->pdev->dev,
			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
			 adapter->num_q_vectors, MAX_Q_VECTORS);
	}
	for (i = 0; i < num_q_vectors; i++) {
		struct igc_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);

		/* name the IRQ after what the vector services */
		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igc_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igc_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}
/**
 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: Pointer to adapter structure
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
{
	/* q_vectors first, then the MSI-X/MSI capability itself */
	igc_free_q_vectors(adapter);
	igc_reset_interrupt_capability(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void igc_update_phy_info(struct timer_list *t)
{
	struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);

	igc_get_phy_info(&adapter->hw);
}

/**
 * igc_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 *
 * Returns true if link is up.  Also maintains the NEED_LINK_UPDATE
 * debounce flag used by the watchdog on i225 parts.
 */
bool igc_has_link(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the igc_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	if (!hw->mac.get_link_status)
		return true;
	hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	if (hw->mac.type == igc_i225) {
		/* start the link-flap debounce window the first time link is
		 * reported while carrier is on; cleared once carrier drops.
		 */
		if (!netif_carrier_ok(adapter->netdev)) {
			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
		} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
			adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
			adapter->link_check_timeout = jiffies;
		}
	}

	return link_active;
}
/**
 * igc_watchdog - Timer Call-back
 * @t: timer for the watchdog
 *
 * Runs in softirq context; defers the real work to process context.
 */
static void igc_watchdog(struct timer_list *t)
{
	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
&adapter->hw; 5581208983f0SSasha Neftin struct igc_phy_info *phy = &hw->phy; 5582208983f0SSasha Neftin u16 phy_data, retry_count = 20; 5583208983f0SSasha Neftin u32 link; 5584208983f0SSasha Neftin int i; 5585208983f0SSasha Neftin 5586208983f0SSasha Neftin link = igc_has_link(adapter); 5587208983f0SSasha Neftin 5588208983f0SSasha Neftin if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { 5589208983f0SSasha Neftin if (time_after(jiffies, (adapter->link_check_timeout + HZ))) 5590208983f0SSasha Neftin adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 5591208983f0SSasha Neftin else 5592208983f0SSasha Neftin link = false; 5593208983f0SSasha Neftin } 5594208983f0SSasha Neftin 5595208983f0SSasha Neftin if (link) { 55968594a7f3SSasha Neftin /* Cancel scheduled suspend requests. */ 55978594a7f3SSasha Neftin pm_runtime_resume(netdev->dev.parent); 55988594a7f3SSasha Neftin 5599208983f0SSasha Neftin if (!netif_carrier_ok(netdev)) { 5600208983f0SSasha Neftin u32 ctrl; 5601208983f0SSasha Neftin 5602208983f0SSasha Neftin hw->mac.ops.get_speed_and_duplex(hw, 5603208983f0SSasha Neftin &adapter->link_speed, 5604208983f0SSasha Neftin &adapter->link_duplex); 5605208983f0SSasha Neftin 5606208983f0SSasha Neftin ctrl = rd32(IGC_CTRL); 5607208983f0SSasha Neftin /* Link status message must follow this format */ 5608208983f0SSasha Neftin netdev_info(netdev, 560925f06effSAndre Guedes "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", 5610208983f0SSasha Neftin adapter->link_speed, 5611208983f0SSasha Neftin adapter->link_duplex == FULL_DUPLEX ? 5612208983f0SSasha Neftin "Full" : "Half", 5613208983f0SSasha Neftin (ctrl & IGC_CTRL_TFCE) && 5614208983f0SSasha Neftin (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : 5615208983f0SSasha Neftin (ctrl & IGC_CTRL_RFCE) ? "RX" : 5616208983f0SSasha Neftin (ctrl & IGC_CTRL_TFCE) ? 
"TX" : "None"); 5617208983f0SSasha Neftin 561893ec439aSSasha Neftin /* disable EEE if enabled */ 561993ec439aSSasha Neftin if ((adapter->flags & IGC_FLAG_EEE) && 562093ec439aSSasha Neftin adapter->link_duplex == HALF_DUPLEX) { 562193ec439aSSasha Neftin netdev_info(netdev, 562293ec439aSSasha Neftin "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); 562393ec439aSSasha Neftin adapter->hw.dev_spec._base.eee_enable = false; 562493ec439aSSasha Neftin adapter->flags &= ~IGC_FLAG_EEE; 562593ec439aSSasha Neftin } 562693ec439aSSasha Neftin 5627208983f0SSasha Neftin /* check if SmartSpeed worked */ 5628208983f0SSasha Neftin igc_check_downshift(hw); 5629208983f0SSasha Neftin if (phy->speed_downgraded) 5630208983f0SSasha Neftin netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); 5631208983f0SSasha Neftin 5632208983f0SSasha Neftin /* adjust timeout factor according to speed/duplex */ 5633208983f0SSasha Neftin adapter->tx_timeout_factor = 1; 5634208983f0SSasha Neftin switch (adapter->link_speed) { 5635208983f0SSasha Neftin case SPEED_10: 5636208983f0SSasha Neftin adapter->tx_timeout_factor = 14; 5637208983f0SSasha Neftin break; 5638208983f0SSasha Neftin case SPEED_100: 5639b27b8dc7SMuhammad Husaini Zulkifli case SPEED_1000: 5640b27b8dc7SMuhammad Husaini Zulkifli case SPEED_2500: 56419b275176SSasha Neftin adapter->tx_timeout_factor = 1; 5642208983f0SSasha Neftin break; 5643208983f0SSasha Neftin } 5644208983f0SSasha Neftin 5645790835fcSMuhammad Husaini Zulkifli /* Once the launch time has been set on the wire, there 5646790835fcSMuhammad Husaini Zulkifli * is a delay before the link speed can be determined 5647790835fcSMuhammad Husaini Zulkifli * based on link-up activity. Write into the register 5648790835fcSMuhammad Husaini Zulkifli * as soon as we know the correct link speed. 
5649790835fcSMuhammad Husaini Zulkifli */ 5650790835fcSMuhammad Husaini Zulkifli igc_tsn_adjust_txtime_offset(adapter); 5651790835fcSMuhammad Husaini Zulkifli 5652208983f0SSasha Neftin if (adapter->link_speed != SPEED_1000) 5653208983f0SSasha Neftin goto no_wait; 5654208983f0SSasha Neftin 5655208983f0SSasha Neftin /* wait for Remote receiver status OK */ 5656208983f0SSasha Neftin retry_read_status: 5657208983f0SSasha Neftin if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, 5658208983f0SSasha Neftin &phy_data)) { 5659208983f0SSasha Neftin if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && 5660208983f0SSasha Neftin retry_count) { 5661208983f0SSasha Neftin msleep(100); 5662208983f0SSasha Neftin retry_count--; 5663208983f0SSasha Neftin goto retry_read_status; 5664208983f0SSasha Neftin } else if (!retry_count) { 566525f06effSAndre Guedes netdev_err(netdev, "exceed max 2 second\n"); 5666208983f0SSasha Neftin } 5667208983f0SSasha Neftin } else { 566825f06effSAndre Guedes netdev_err(netdev, "read 1000Base-T Status Reg\n"); 5669208983f0SSasha Neftin } 5670208983f0SSasha Neftin no_wait: 5671208983f0SSasha Neftin netif_carrier_on(netdev); 5672208983f0SSasha Neftin 5673208983f0SSasha Neftin /* link state has changed, schedule phy info update */ 5674208983f0SSasha Neftin if (!test_bit(__IGC_DOWN, &adapter->state)) 5675208983f0SSasha Neftin mod_timer(&adapter->phy_info_timer, 5676208983f0SSasha Neftin round_jiffies(jiffies + 2 * HZ)); 5677208983f0SSasha Neftin } 5678208983f0SSasha Neftin } else { 5679208983f0SSasha Neftin if (netif_carrier_ok(netdev)) { 5680208983f0SSasha Neftin adapter->link_speed = 0; 5681208983f0SSasha Neftin adapter->link_duplex = 0; 5682208983f0SSasha Neftin 5683208983f0SSasha Neftin /* Links status message must follow this format */ 568425f06effSAndre Guedes netdev_info(netdev, "NIC Link is Down\n"); 5685208983f0SSasha Neftin netif_carrier_off(netdev); 5686208983f0SSasha Neftin 5687208983f0SSasha Neftin /* link state has changed, schedule phy info update */ 
5688208983f0SSasha Neftin if (!test_bit(__IGC_DOWN, &adapter->state)) 5689208983f0SSasha Neftin mod_timer(&adapter->phy_info_timer, 5690208983f0SSasha Neftin round_jiffies(jiffies + 2 * HZ)); 5691208983f0SSasha Neftin 56928594a7f3SSasha Neftin pm_schedule_suspend(netdev->dev.parent, 56938594a7f3SSasha Neftin MSEC_PER_SEC * 5); 5694208983f0SSasha Neftin } 5695208983f0SSasha Neftin } 5696208983f0SSasha Neftin 5697208983f0SSasha Neftin spin_lock(&adapter->stats64_lock); 5698208983f0SSasha Neftin igc_update_stats(adapter); 5699208983f0SSasha Neftin spin_unlock(&adapter->stats64_lock); 5700208983f0SSasha Neftin 5701208983f0SSasha Neftin for (i = 0; i < adapter->num_tx_queues; i++) { 5702208983f0SSasha Neftin struct igc_ring *tx_ring = adapter->tx_ring[i]; 5703208983f0SSasha Neftin 5704208983f0SSasha Neftin if (!netif_carrier_ok(netdev)) { 5705208983f0SSasha Neftin /* We've lost link, so the controller stops DMA, 5706208983f0SSasha Neftin * but we've got queued Tx work that's never going 5707208983f0SSasha Neftin * to get done, so reset controller to flush Tx. 5708208983f0SSasha Neftin * (Do the reset outside of interrupt context). 
5709208983f0SSasha Neftin */ 5710208983f0SSasha Neftin if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { 5711208983f0SSasha Neftin adapter->tx_timeout_count++; 5712208983f0SSasha Neftin schedule_work(&adapter->reset_task); 5713208983f0SSasha Neftin /* return immediately since reset is imminent */ 5714208983f0SSasha Neftin return; 5715208983f0SSasha Neftin } 5716208983f0SSasha Neftin } 5717208983f0SSasha Neftin 5718208983f0SSasha Neftin /* Force detection of hung controller every watchdog period */ 5719208983f0SSasha Neftin set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5720208983f0SSasha Neftin } 5721208983f0SSasha Neftin 5722208983f0SSasha Neftin /* Cause software interrupt to ensure Rx ring is cleaned */ 5723208983f0SSasha Neftin if (adapter->flags & IGC_FLAG_HAS_MSIX) { 5724208983f0SSasha Neftin u32 eics = 0; 5725208983f0SSasha Neftin 5726208983f0SSasha Neftin for (i = 0; i < adapter->num_q_vectors; i++) 5727208983f0SSasha Neftin eics |= adapter->q_vector[i]->eims_value; 5728208983f0SSasha Neftin wr32(IGC_EICS, eics); 5729208983f0SSasha Neftin } else { 5730208983f0SSasha Neftin wr32(IGC_ICS, IGC_ICS_RXDMT0); 5731208983f0SSasha Neftin } 5732208983f0SSasha Neftin 57332c344ae2SVinicius Costa Gomes igc_ptp_tx_hang(adapter); 57342c344ae2SVinicius Costa Gomes 5735208983f0SSasha Neftin /* Reset the timer */ 5736208983f0SSasha Neftin if (!test_bit(__IGC_DOWN, &adapter->state)) { 5737208983f0SSasha Neftin if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) 5738208983f0SSasha Neftin mod_timer(&adapter->watchdog_timer, 5739208983f0SSasha Neftin round_jiffies(jiffies + HZ)); 5740208983f0SSasha Neftin else 5741208983f0SSasha Neftin mod_timer(&adapter->watchdog_timer, 5742208983f0SSasha Neftin round_jiffies(jiffies + 2 * HZ)); 5743208983f0SSasha Neftin } 57440507ef8aSSasha Neftin } 57450507ef8aSSasha Neftin 57460507ef8aSSasha Neftin /** 574713b5b7fdSSasha Neftin * igc_intr_msi - Interrupt Handler 574813b5b7fdSSasha Neftin * @irq: interrupt number 574913b5b7fdSSasha 
/**
 * igc_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * MSI interrupt handler: reads and acks ICR, kicks the watchdog on a
 * link-status change, and schedules NAPI for queue work.
 */
static irqreturn_t igc_intr_msi(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_q_vector *q_vector = adapter->q_vector[0];
	struct igc_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(IGC_ICR);

	igc_write_itr(q_vector);

	/* Device reset asserted by hardware: recover via the reset task */
	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		hw->mac.get_link_status = true;
		/* Re-arm the watchdog to pick up the link change promptly,
		 * unless the interface is already going down.
		 */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Time-sync event (PTP) */
	if (icr & IGC_ICR_TS)
		igc_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igc_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Legacy (INTx) handler. Because the line may be shared, IGC_ICR_INT_ASSERTED
 * is checked first to see whether this device actually raised the interrupt.
 */
static irqreturn_t igc_intr(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_q_vector *q_vector = adapter->q_vector[0];
	struct igc_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write
	 */
	u32 icr = rd32(IGC_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & IGC_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igc_write_itr(q_vector);

	/* Device reset asserted by hardware: recover via the reset task */
	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Time-sync event (PTP) */
	if (icr & IGC_ICR_TS)
		igc_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
58293df25e4cSSasha Neftin static void igc_free_irq(struct igc_adapter *adapter) 58303df25e4cSSasha Neftin { 58313df25e4cSSasha Neftin if (adapter->msix_entries) { 58323df25e4cSSasha Neftin int vector = 0, i; 58333df25e4cSSasha Neftin 58343df25e4cSSasha Neftin free_irq(adapter->msix_entries[vector++].vector, adapter); 58353df25e4cSSasha Neftin 58363df25e4cSSasha Neftin for (i = 0; i < adapter->num_q_vectors; i++) 58373df25e4cSSasha Neftin free_irq(adapter->msix_entries[vector++].vector, 58383df25e4cSSasha Neftin adapter->q_vector[i]); 58393df25e4cSSasha Neftin } else { 58403df25e4cSSasha Neftin free_irq(adapter->pdev->irq, adapter); 58413df25e4cSSasha Neftin } 58423df25e4cSSasha Neftin } 58433df25e4cSSasha Neftin 58443df25e4cSSasha Neftin /** 58453df25e4cSSasha Neftin * igc_request_irq - initialize interrupts 58463df25e4cSSasha Neftin * @adapter: Pointer to adapter structure 58473df25e4cSSasha Neftin * 58483df25e4cSSasha Neftin * Attempts to configure interrupts using the best available 58493df25e4cSSasha Neftin * capabilities of the hardware and kernel. 
/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 *
 * Tries MSI-X first; on failure falls back to MSI, and finally to a
 * shared legacy interrupt. Returns 0 on success, negative errno otherwise.
 */
static int igc_request_irq(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		err = igc_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		/* Ring resources are torn down and rebuilt here, presumably
		 * because re-initializing the interrupt scheme can change the
		 * queue layout — TODO confirm against igc_init_interrupt_scheme.
		 */
		igc_free_all_tx_resources(adapter);
		igc_free_all_rx_resources(adapter);

		igc_clear_interrupt_scheme(adapter);
		err = igc_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;
		igc_setup_all_tx_resources(adapter);
		igc_setup_all_rx_resources(adapter);
		igc_configure(adapter);
	}

	/* Non-MSI-X modes use a single vector for queue 0 */
	igc_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGC_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, &igc_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igc_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGC_FLAG_HAS_MSI;
	}

	/* Legacy INTx: the line may be shared with other devices */
	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		netdev_err(netdev, "Error %d getting interrupt\n", err);

request_done:
	return err;
}
/**
 * __igc_open - Called when a network interface is made active
 * @netdev: network interface device structure
 * @resuming: boolean indicating if the device is resuming
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 *
 * Failures unwind through the goto ladder below in reverse order of
 * acquisition: queues -> IRQ -> link/HW control -> Rx rings -> Tx rings.
 */
static int __igc_open(struct net_device *netdev, bool resuming)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	int err = 0;
	int i = 0;

	/* disallow open during test */

	if (test_bit(__IGC_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	/* Keep the device powered during open; balanced by the
	 * pm_runtime_put() calls below. Skipped on resume, where runtime
	 * PM already holds a reference.
	 */
	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igc_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igc_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igc_power_up_link(adapter);

	igc_configure(adapter);

	err = igc_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = true;
	schedule_work(&adapter->watchdog_task);

	return IGC_SUCCESS;

err_set_queues:
	igc_free_irq(adapter);
err_req_irq:
	igc_release_hw_control(adapter);
	igc_power_down_phy_copper_base(&adapter->hw);
	igc_free_all_rx_resources(adapter);
err_setup_rx:
	igc_free_all_tx_resources(adapter);
err_setup_tx:
	igc_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}

/* ndo_open callback: regular (non-resume) open path. */
int igc_open(struct net_device *netdev)
{
	return __igc_open(netdev, false);
}
/**
 * __igc_close - Disables a network interface
 * @netdev: network interface device structure
 * @suspending: boolean indicating the device is suspending
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 *
 * Teardown order mirrors open in reverse: stop the device, release HW
 * control, free the IRQ(s), then free the descriptor rings.
 */
static int __igc_close(struct net_device *netdev, bool suspending)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));

	/* Keep the device powered for the teardown; balanced by the
	 * pm_runtime_put_sync() below. On suspend, runtime PM is already
	 * managing the reference.
	 */
	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igc_down(adapter);

	igc_release_hw_control(adapter);

	igc_free_irq(adapter);

	igc_free_all_tx_resources(adapter);
	igc_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);

	return 0;
}

/* ndo_stop callback. Skips teardown if the device was already detached,
 * except when the netdev is being dismantled and must be closed anyway.
 */
int igc_close(struct net_device *netdev)
{
	if (netif_device_present(netdev) || netdev->dismantle)
		return __igc_close(netdev, false);
	return 0;
}
Costa Gomes switch (cmd) { 60515f295805SVinicius Costa Gomes case SIOCGHWTSTAMP: 60525f295805SVinicius Costa Gomes return igc_ptp_get_ts_config(netdev, ifr); 60535f295805SVinicius Costa Gomes case SIOCSHWTSTAMP: 60545f295805SVinicius Costa Gomes return igc_ptp_set_ts_config(netdev, ifr); 60555f295805SVinicius Costa Gomes default: 60565f295805SVinicius Costa Gomes return -EOPNOTSUPP; 60575f295805SVinicius Costa Gomes } 60585f295805SVinicius Costa Gomes } 60595f295805SVinicius Costa Gomes 606082faa9b7SVinicius Costa Gomes static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, 606182faa9b7SVinicius Costa Gomes bool enable) 606282faa9b7SVinicius Costa Gomes { 606382faa9b7SVinicius Costa Gomes struct igc_ring *ring; 606482faa9b7SVinicius Costa Gomes 606582faa9b7SVinicius Costa Gomes if (queue < 0 || queue >= adapter->num_tx_queues) 606682faa9b7SVinicius Costa Gomes return -EINVAL; 606782faa9b7SVinicius Costa Gomes 606882faa9b7SVinicius Costa Gomes ring = adapter->tx_ring[queue]; 606982faa9b7SVinicius Costa Gomes ring->launchtime_enable = enable; 607082faa9b7SVinicius Costa Gomes 607182faa9b7SVinicius Costa Gomes return 0; 607282faa9b7SVinicius Costa Gomes } 607382faa9b7SVinicius Costa Gomes 607458c4ee0eSVinicius Costa Gomes static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) 607558c4ee0eSVinicius Costa Gomes { 607658c4ee0eSVinicius Costa Gomes struct timespec64 b; 607758c4ee0eSVinicius Costa Gomes 607858c4ee0eSVinicius Costa Gomes b = ktime_to_timespec64(base_time); 607958c4ee0eSVinicius Costa Gomes 608058c4ee0eSVinicius Costa Gomes return timespec64_compare(now, &b) > 0; 608158c4ee0eSVinicius Costa Gomes } 608258c4ee0eSVinicius Costa Gomes 608358c4ee0eSVinicius Costa Gomes static bool validate_schedule(struct igc_adapter *adapter, 608458c4ee0eSVinicius Costa Gomes const struct tc_taprio_qopt_offload *qopt) 6085ec50a9d4SVinicius Costa Gomes { 6086ec50a9d4SVinicius Costa Gomes int queue_uses[IGC_MAX_TX_QUEUES] = { }; 
/* validate_schedule - check that a taprio schedule can be offloaded.
 *
 * Rejects cycle-time extension (unsupported), future base times on i225
 * (hardware limitation), non-gate commands, and re-opening a queue gate
 * within one cycle. Returns true when the schedule is acceptable.
 */
static bool validate_schedule(struct igc_adapter *adapter,
			      const struct tc_taprio_qopt_offload *qopt)
{
	int queue_uses[IGC_MAX_TX_QUEUES] = { };
	struct igc_hw *hw = &adapter->hw;
	struct timespec64 now;
	size_t n;

	if (qopt->cycle_time_extension)
		return false;

	igc_ptp_read(adapter, &now);

	/* If we program the controller's BASET registers with a time
	 * in the future, it will hold all the packets until that
	 * time, causing a lot of TX Hangs, so to avoid that, we
	 * reject schedules that would start in the future.
	 * Note: Limitation above is no longer in i226.
	 */
	if (!is_base_time_past(qopt->base_time, &now) &&
	    igc_is_device_id_i225(hw))
		return false;

	for (n = 0; n < qopt->num_entries; n++) {
		const struct tc_taprio_sched_entry *e, *prev;
		int i;

		/* prev is NULL only for the first entry, where
		 * queue_uses[i] > 1 can never hold, so the dereference
		 * below is safe.
		 */
		prev = n ? &qopt->entries[n - 1] : NULL;
		e = &qopt->entries[n];

		/* i225 only supports "global" frame preemption
		 * settings.
		 */
		if (e->command != TC_TAPRIO_CMD_SET_GATES)
			return false;

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (e->gate_mask & BIT(i)) {
				queue_uses[i]++;

				/* There are limitations: A single queue cannot
				 * be opened and closed multiple times per cycle
				 * unless the gate stays open. Check for it.
				 */
				if (queue_uses[i] > 1 &&
				    !(prev->gate_mask & BIT(i)))
					return false;
			}
	}

	return true;
}

/* igc_tsn_enable_launchtime - handle an ETF qdisc offload request.
 *
 * Launchtime offload is only implemented for i225-family MACs; stores the
 * per-queue enable flag and applies the TSN configuration to hardware.
 */
static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
				     struct tc_etf_qopt_offload *qopt)
{
	struct igc_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != igc_i225)
		return -EOPNOTSUPP;

	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
	if (err)
		return err;

	return igc_tsn_offload_apply(adapter);
}
/* igc_qbv_clear_schedule - reset all Qbv (taprio) state to defaults.
 *
 * Clears the adapter-level schedule parameters and per-ring windows, then
 * — under qbv_tx_lock, since these fields are also touched from the Tx
 * path — clears the transition flag and per-ring gate-closed state.
 * Always returns 0.
 */
static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
{
	unsigned long flags;
	int i;

	adapter->base_time = 0;
	adapter->cycle_time = NSEC_PER_SEC;
	adapter->taprio_offload_enable = false;
	adapter->qbv_config_change_errors = 0;
	adapter->qbv_count = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		ring->start_time = 0;
		ring->end_time = NSEC_PER_SEC;
		ring->max_sdu = 0;
	}

	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);

	adapter->qbv_transition = false;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		ring->oper_gate_closed = false;
		ring->admin_gate_closed = false;
	}

	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);

	return 0;
}
int igc_tsn_clear_schedule(struct igc_adapter *adapter) 618806b41258SMuhammad Husaini Zulkifli { 618906b41258SMuhammad Husaini Zulkifli igc_qbv_clear_schedule(adapter); 619006b41258SMuhammad Husaini Zulkifli 6191c814a2d2SVinicius Costa Gomes return 0; 6192c814a2d2SVinicius Costa Gomes } 6193c814a2d2SVinicius Costa Gomes 6194d3750076SMuhammad Husaini Zulkifli static void igc_taprio_stats(struct net_device *dev, 6195d3750076SMuhammad Husaini Zulkifli struct tc_taprio_qopt_stats *stats) 6196d3750076SMuhammad Husaini Zulkifli { 6197d3750076SMuhammad Husaini Zulkifli /* When Strict_End is enabled, the tx_overruns counter 6198d3750076SMuhammad Husaini Zulkifli * will always be zero. 6199d3750076SMuhammad Husaini Zulkifli */ 6200d3750076SMuhammad Husaini Zulkifli stats->tx_overruns = 0; 6201d3750076SMuhammad Husaini Zulkifli } 6202d3750076SMuhammad Husaini Zulkifli 6203d3750076SMuhammad Husaini Zulkifli static void igc_taprio_queue_stats(struct net_device *dev, 6204d3750076SMuhammad Husaini Zulkifli struct tc_taprio_qopt_queue_stats *queue_stats) 6205d3750076SMuhammad Husaini Zulkifli { 6206d3750076SMuhammad Husaini Zulkifli struct tc_taprio_qopt_stats *stats = &queue_stats->stats; 6207d3750076SMuhammad Husaini Zulkifli 6208d3750076SMuhammad Husaini Zulkifli /* When Strict_End is enabled, the tx_overruns counter 6209d3750076SMuhammad Husaini Zulkifli * will always be zero. 
6210d3750076SMuhammad Husaini Zulkifli */ 6211d3750076SMuhammad Husaini Zulkifli stats->tx_overruns = 0; 6212d3750076SMuhammad Husaini Zulkifli } 6213d3750076SMuhammad Husaini Zulkifli 6214ec50a9d4SVinicius Costa Gomes static int igc_save_qbv_schedule(struct igc_adapter *adapter, 6215ec50a9d4SVinicius Costa Gomes struct tc_taprio_qopt_offload *qopt) 6216ec50a9d4SVinicius Costa Gomes { 6217a5fd3946SKurt Kanzenbach bool queue_configured[IGC_MAX_TX_QUEUES] = { }; 62185ac1231aSTan Tee Min struct igc_hw *hw = &adapter->hw; 6219ec50a9d4SVinicius Costa Gomes u32 start_time = 0, end_time = 0; 6220175c2412SMuhammad Husaini Zulkifli struct timespec64 now; 622106b41258SMuhammad Husaini Zulkifli unsigned long flags; 6222ec50a9d4SVinicius Costa Gomes size_t n; 622372abeeddSTan Tee Min int i; 6224ec50a9d4SVinicius Costa Gomes 6225d3750076SMuhammad Husaini Zulkifli switch (qopt->cmd) { 6226d3750076SMuhammad Husaini Zulkifli case TAPRIO_CMD_REPLACE: 6227d3750076SMuhammad Husaini Zulkifli break; 6228d3750076SMuhammad Husaini Zulkifli case TAPRIO_CMD_DESTROY: 6229c814a2d2SVinicius Costa Gomes return igc_tsn_clear_schedule(adapter); 6230d3750076SMuhammad Husaini Zulkifli case TAPRIO_CMD_STATS: 6231d3750076SMuhammad Husaini Zulkifli igc_taprio_stats(adapter->netdev, &qopt->stats); 6232d3750076SMuhammad Husaini Zulkifli return 0; 6233d3750076SMuhammad Husaini Zulkifli case TAPRIO_CMD_QUEUE_STATS: 6234d3750076SMuhammad Husaini Zulkifli igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); 6235d3750076SMuhammad Husaini Zulkifli return 0; 6236d3750076SMuhammad Husaini Zulkifli default: 623782ff5f29SFlorian Kauer return -EOPNOTSUPP; 6238d3750076SMuhammad Husaini Zulkifli } 623982ff5f29SFlorian Kauer 62403b61764fSMuhammad Husaini Zulkifli if (qopt->base_time < 0) 62413b61764fSMuhammad Husaini Zulkifli return -ERANGE; 62423b61764fSMuhammad Husaini Zulkifli 6243e5d88c53SFlorian Kauer if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) 6244ec50a9d4SVinicius Costa Gomes 
return -EALREADY; 6245ec50a9d4SVinicius Costa Gomes 624658c4ee0eSVinicius Costa Gomes if (!validate_schedule(adapter, qopt)) 6247ec50a9d4SVinicius Costa Gomes return -EINVAL; 6248ec50a9d4SVinicius Costa Gomes 6249ec50a9d4SVinicius Costa Gomes adapter->cycle_time = qopt->cycle_time; 6250ec50a9d4SVinicius Costa Gomes adapter->base_time = qopt->base_time; 625182ff5f29SFlorian Kauer adapter->taprio_offload_enable = true; 6252ec50a9d4SVinicius Costa Gomes 6253175c2412SMuhammad Husaini Zulkifli igc_ptp_read(adapter, &now); 6254175c2412SMuhammad Husaini Zulkifli 6255ec50a9d4SVinicius Costa Gomes for (n = 0; n < qopt->num_entries; n++) { 6256ec50a9d4SVinicius Costa Gomes struct tc_taprio_sched_entry *e = &qopt->entries[n]; 6257ec50a9d4SVinicius Costa Gomes 6258ec50a9d4SVinicius Costa Gomes end_time += e->interval; 6259ec50a9d4SVinicius Costa Gomes 62606d05251dSTan Tee Min /* If any of the conditions below are true, we need to manually 62616d05251dSTan Tee Min * control the end time of the cycle. 62626d05251dSTan Tee Min * 1. Qbv users can specify a cycle time that is not equal 62636d05251dSTan Tee Min * to the total GCL intervals. Hence, recalculation is 62646d05251dSTan Tee Min * necessary here to exclude the time interval that 62656d05251dSTan Tee Min * exceeds the cycle time. 62666d05251dSTan Tee Min * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, 62676d05251dSTan Tee Min * once the end of the list is reached, it will switch 62686d05251dSTan Tee Min * to the END_OF_CYCLE state and leave the gates in the 62696d05251dSTan Tee Min * same state until the next cycle is started. 
62706d05251dSTan Tee Min */ 62716d05251dSTan Tee Min if (end_time > adapter->cycle_time || 62726d05251dSTan Tee Min n + 1 == qopt->num_entries) 62736d05251dSTan Tee Min end_time = adapter->cycle_time; 62746d05251dSTan Tee Min 6275691bd4d7SToshiki Nishioka for (i = 0; i < adapter->num_tx_queues; i++) { 6276ec50a9d4SVinicius Costa Gomes struct igc_ring *ring = adapter->tx_ring[i]; 6277ec50a9d4SVinicius Costa Gomes 6278ec50a9d4SVinicius Costa Gomes if (!(e->gate_mask & BIT(i))) 6279ec50a9d4SVinicius Costa Gomes continue; 6280ec50a9d4SVinicius Costa Gomes 6281a5fd3946SKurt Kanzenbach /* Check whether a queue stays open for more than one 6282a5fd3946SKurt Kanzenbach * entry. If so, keep the start and advance the end 6283a5fd3946SKurt Kanzenbach * time. 6284a5fd3946SKurt Kanzenbach */ 6285a5fd3946SKurt Kanzenbach if (!queue_configured[i]) 6286ec50a9d4SVinicius Costa Gomes ring->start_time = start_time; 6287ec50a9d4SVinicius Costa Gomes ring->end_time = end_time; 6288a5fd3946SKurt Kanzenbach 6289175c2412SMuhammad Husaini Zulkifli if (ring->start_time >= adapter->cycle_time) 6290175c2412SMuhammad Husaini Zulkifli queue_configured[i] = false; 6291175c2412SMuhammad Husaini Zulkifli else 6292a5fd3946SKurt Kanzenbach queue_configured[i] = true; 6293ec50a9d4SVinicius Costa Gomes } 6294ec50a9d4SVinicius Costa Gomes 6295ec50a9d4SVinicius Costa Gomes start_time += e->interval; 6296ec50a9d4SVinicius Costa Gomes } 6297ec50a9d4SVinicius Costa Gomes 629806b41258SMuhammad Husaini Zulkifli spin_lock_irqsave(&adapter->qbv_tx_lock, flags); 629906b41258SMuhammad Husaini Zulkifli 630072abeeddSTan Tee Min /* Check whether a queue gets configured. 630172abeeddSTan Tee Min * If not, set the start and end time to be end time. 
630272abeeddSTan Tee Min */ 630372abeeddSTan Tee Min for (i = 0; i < adapter->num_tx_queues; i++) { 630472abeeddSTan Tee Min struct igc_ring *ring = adapter->tx_ring[i]; 630572abeeddSTan Tee Min 6306175c2412SMuhammad Husaini Zulkifli if (!is_base_time_past(qopt->base_time, &now)) { 6307175c2412SMuhammad Husaini Zulkifli ring->admin_gate_closed = false; 6308175c2412SMuhammad Husaini Zulkifli } else { 6309175c2412SMuhammad Husaini Zulkifli ring->oper_gate_closed = false; 6310175c2412SMuhammad Husaini Zulkifli ring->admin_gate_closed = false; 6311175c2412SMuhammad Husaini Zulkifli } 6312175c2412SMuhammad Husaini Zulkifli 6313175c2412SMuhammad Husaini Zulkifli if (!queue_configured[i]) { 6314175c2412SMuhammad Husaini Zulkifli if (!is_base_time_past(qopt->base_time, &now)) 6315175c2412SMuhammad Husaini Zulkifli ring->admin_gate_closed = true; 6316175c2412SMuhammad Husaini Zulkifli else 6317175c2412SMuhammad Husaini Zulkifli ring->oper_gate_closed = true; 6318175c2412SMuhammad Husaini Zulkifli 631972abeeddSTan Tee Min ring->start_time = end_time; 632072abeeddSTan Tee Min ring->end_time = end_time; 632172abeeddSTan Tee Min } 632272abeeddSTan Tee Min } 632372abeeddSTan Tee Min 632406b41258SMuhammad Husaini Zulkifli spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); 632506b41258SMuhammad Husaini Zulkifli 632692a0dcb8STan Tee Min for (i = 0; i < adapter->num_tx_queues; i++) { 632792a0dcb8STan Tee Min struct igc_ring *ring = adapter->tx_ring[i]; 632892a0dcb8STan Tee Min struct net_device *dev = adapter->netdev; 632992a0dcb8STan Tee Min 633092a0dcb8STan Tee Min if (qopt->max_sdu[i]) 633125102893STan Tee Min ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; 633292a0dcb8STan Tee Min else 633392a0dcb8STan Tee Min ring->max_sdu = 0; 633492a0dcb8STan Tee Min } 633592a0dcb8STan Tee Min 6336ec50a9d4SVinicius Costa Gomes return 0; 6337ec50a9d4SVinicius Costa Gomes } 6338ec50a9d4SVinicius Costa Gomes 6339ec50a9d4SVinicius Costa Gomes static int 
igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, 6340ec50a9d4SVinicius Costa Gomes struct tc_taprio_qopt_offload *qopt) 6341ec50a9d4SVinicius Costa Gomes { 6342ec50a9d4SVinicius Costa Gomes struct igc_hw *hw = &adapter->hw; 6343ec50a9d4SVinicius Costa Gomes int err; 6344ec50a9d4SVinicius Costa Gomes 6345ec50a9d4SVinicius Costa Gomes if (hw->mac.type != igc_i225) 6346ec50a9d4SVinicius Costa Gomes return -EOPNOTSUPP; 6347ec50a9d4SVinicius Costa Gomes 6348ec50a9d4SVinicius Costa Gomes err = igc_save_qbv_schedule(adapter, qopt); 6349ec50a9d4SVinicius Costa Gomes if (err) 6350ec50a9d4SVinicius Costa Gomes return err; 6351ec50a9d4SVinicius Costa Gomes 63521d1b4c63SMuhammad Husaini Zulkifli return igc_tsn_offload_apply(adapter); 6353ec50a9d4SVinicius Costa Gomes } 6354ec50a9d4SVinicius Costa Gomes 63551ab011b0SAravindhan Gunasekaran static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, 63561ab011b0SAravindhan Gunasekaran bool enable, int idleslope, int sendslope, 63571ab011b0SAravindhan Gunasekaran int hicredit, int locredit) 63581ab011b0SAravindhan Gunasekaran { 63591ab011b0SAravindhan Gunasekaran bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; 63601ab011b0SAravindhan Gunasekaran struct net_device *netdev = adapter->netdev; 63611ab011b0SAravindhan Gunasekaran struct igc_ring *ring; 63621ab011b0SAravindhan Gunasekaran int i; 63631ab011b0SAravindhan Gunasekaran 63641ab011b0SAravindhan Gunasekaran /* i225 has two sets of credit-based shaper logic. 
63651ab011b0SAravindhan Gunasekaran * Supporting it only on the top two priority queues 63661ab011b0SAravindhan Gunasekaran */ 63671ab011b0SAravindhan Gunasekaran if (queue < 0 || queue > 1) 63681ab011b0SAravindhan Gunasekaran return -EINVAL; 63691ab011b0SAravindhan Gunasekaran 63701ab011b0SAravindhan Gunasekaran ring = adapter->tx_ring[queue]; 63711ab011b0SAravindhan Gunasekaran 63721ab011b0SAravindhan Gunasekaran for (i = 0; i < IGC_MAX_SR_QUEUES; i++) 63731ab011b0SAravindhan Gunasekaran if (adapter->tx_ring[i]) 63741ab011b0SAravindhan Gunasekaran cbs_status[i] = adapter->tx_ring[i]->cbs_enable; 63751ab011b0SAravindhan Gunasekaran 63761ab011b0SAravindhan Gunasekaran /* CBS should be enabled on the highest priority queue first in order 63771ab011b0SAravindhan Gunasekaran * for the CBS algorithm to operate as intended. 63781ab011b0SAravindhan Gunasekaran */ 63791ab011b0SAravindhan Gunasekaran if (enable) { 63801ab011b0SAravindhan Gunasekaran if (queue == 1 && !cbs_status[0]) { 63811ab011b0SAravindhan Gunasekaran netdev_err(netdev, 63821ab011b0SAravindhan Gunasekaran "Enabling CBS on queue1 before queue0\n"); 63831ab011b0SAravindhan Gunasekaran return -EINVAL; 63841ab011b0SAravindhan Gunasekaran } 63851ab011b0SAravindhan Gunasekaran } else { 63861ab011b0SAravindhan Gunasekaran if (queue == 0 && cbs_status[1]) { 63871ab011b0SAravindhan Gunasekaran netdev_err(netdev, 63881ab011b0SAravindhan Gunasekaran "Disabling CBS on queue0 before queue1\n"); 63891ab011b0SAravindhan Gunasekaran return -EINVAL; 63901ab011b0SAravindhan Gunasekaran } 63911ab011b0SAravindhan Gunasekaran } 63921ab011b0SAravindhan Gunasekaran 63931ab011b0SAravindhan Gunasekaran ring->cbs_enable = enable; 63941ab011b0SAravindhan Gunasekaran ring->idleslope = idleslope; 63951ab011b0SAravindhan Gunasekaran ring->sendslope = sendslope; 63961ab011b0SAravindhan Gunasekaran ring->hicredit = hicredit; 63971ab011b0SAravindhan Gunasekaran ring->locredit = locredit; 63981ab011b0SAravindhan Gunasekaran 
63991ab011b0SAravindhan Gunasekaran return 0; 64001ab011b0SAravindhan Gunasekaran } 64011ab011b0SAravindhan Gunasekaran 64021ab011b0SAravindhan Gunasekaran static int igc_tsn_enable_cbs(struct igc_adapter *adapter, 64031ab011b0SAravindhan Gunasekaran struct tc_cbs_qopt_offload *qopt) 64041ab011b0SAravindhan Gunasekaran { 64051ab011b0SAravindhan Gunasekaran struct igc_hw *hw = &adapter->hw; 64061ab011b0SAravindhan Gunasekaran int err; 64071ab011b0SAravindhan Gunasekaran 64081ab011b0SAravindhan Gunasekaran if (hw->mac.type != igc_i225) 64091ab011b0SAravindhan Gunasekaran return -EOPNOTSUPP; 64101ab011b0SAravindhan Gunasekaran 64111ab011b0SAravindhan Gunasekaran if (qopt->queue < 0 || qopt->queue > 1) 64121ab011b0SAravindhan Gunasekaran return -EINVAL; 64131ab011b0SAravindhan Gunasekaran 64141ab011b0SAravindhan Gunasekaran err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, 64151ab011b0SAravindhan Gunasekaran qopt->idleslope, qopt->sendslope, 64161ab011b0SAravindhan Gunasekaran qopt->hicredit, qopt->locredit); 64171ab011b0SAravindhan Gunasekaran if (err) 64181ab011b0SAravindhan Gunasekaran return err; 64191ab011b0SAravindhan Gunasekaran 64201d1b4c63SMuhammad Husaini Zulkifli return igc_tsn_offload_apply(adapter); 64211ab011b0SAravindhan Gunasekaran } 64221ab011b0SAravindhan Gunasekaran 6423522d15eaSVladimir Oltean static int igc_tc_query_caps(struct igc_adapter *adapter, 6424522d15eaSVladimir Oltean struct tc_query_caps_base *base) 6425522d15eaSVladimir Oltean { 6426522d15eaSVladimir Oltean struct igc_hw *hw = &adapter->hw; 6427522d15eaSVladimir Oltean 6428522d15eaSVladimir Oltean switch (base->type) { 6429522d15eaSVladimir Oltean case TC_SETUP_QDISC_TAPRIO: { 6430522d15eaSVladimir Oltean struct tc_taprio_caps *caps = base->caps; 6431522d15eaSVladimir Oltean 64322f530df7SVladimir Oltean caps->broken_mqprio = true; 6433522d15eaSVladimir Oltean 643492a0dcb8STan Tee Min if (hw->mac.type == igc_i225) { 643592a0dcb8STan Tee Min caps->supports_queue_max_sdu = 
true; 6436522d15eaSVladimir Oltean caps->gate_mask_per_txq = true; 643792a0dcb8STan Tee Min } 6438522d15eaSVladimir Oltean 6439522d15eaSVladimir Oltean return 0; 6440522d15eaSVladimir Oltean } 6441522d15eaSVladimir Oltean default: 6442522d15eaSVladimir Oltean return -EOPNOTSUPP; 6443522d15eaSVladimir Oltean } 6444522d15eaSVladimir Oltean } 6445522d15eaSVladimir Oltean 6446ec50a9d4SVinicius Costa Gomes static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, 6447ec50a9d4SVinicius Costa Gomes void *type_data) 6448ec50a9d4SVinicius Costa Gomes { 6449ec50a9d4SVinicius Costa Gomes struct igc_adapter *adapter = netdev_priv(dev); 6450ec50a9d4SVinicius Costa Gomes 6451ed89b74dSMuhammad Husaini Zulkifli adapter->tc_setup_type = type; 6452ed89b74dSMuhammad Husaini Zulkifli 6453ec50a9d4SVinicius Costa Gomes switch (type) { 6454522d15eaSVladimir Oltean case TC_QUERY_CAPS: 6455522d15eaSVladimir Oltean return igc_tc_query_caps(adapter, type_data); 6456ec50a9d4SVinicius Costa Gomes case TC_SETUP_QDISC_TAPRIO: 6457ec50a9d4SVinicius Costa Gomes return igc_tsn_enable_qbv_scheduling(adapter, type_data); 6458ec50a9d4SVinicius Costa Gomes 645982faa9b7SVinicius Costa Gomes case TC_SETUP_QDISC_ETF: 646082faa9b7SVinicius Costa Gomes return igc_tsn_enable_launchtime(adapter, type_data); 646182faa9b7SVinicius Costa Gomes 64621ab011b0SAravindhan Gunasekaran case TC_SETUP_QDISC_CBS: 64631ab011b0SAravindhan Gunasekaran return igc_tsn_enable_cbs(adapter, type_data); 64641ab011b0SAravindhan Gunasekaran 6465ec50a9d4SVinicius Costa Gomes default: 6466ec50a9d4SVinicius Costa Gomes return -EOPNOTSUPP; 6467ec50a9d4SVinicius Costa Gomes } 6468ec50a9d4SVinicius Costa Gomes } 6469ec50a9d4SVinicius Costa Gomes 647026575105SAndre Guedes static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) 647126575105SAndre Guedes { 647226575105SAndre Guedes struct igc_adapter *adapter = netdev_priv(dev); 647326575105SAndre Guedes 647426575105SAndre Guedes switch (bpf->command) { 
647526575105SAndre Guedes case XDP_SETUP_PROG: 647626575105SAndre Guedes return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); 6477fc9df2a0SAndre Guedes case XDP_SETUP_XSK_POOL: 6478fc9df2a0SAndre Guedes return igc_xdp_setup_pool(adapter, bpf->xsk.pool, 6479fc9df2a0SAndre Guedes bpf->xsk.queue_id); 648026575105SAndre Guedes default: 648126575105SAndre Guedes return -EOPNOTSUPP; 648226575105SAndre Guedes } 648326575105SAndre Guedes } 648426575105SAndre Guedes 64854ff32036SAndre Guedes static int igc_xdp_xmit(struct net_device *dev, int num_frames, 64864ff32036SAndre Guedes struct xdp_frame **frames, u32 flags) 64874ff32036SAndre Guedes { 64884ff32036SAndre Guedes struct igc_adapter *adapter = netdev_priv(dev); 64894ff32036SAndre Guedes int cpu = smp_processor_id(); 64904ff32036SAndre Guedes struct netdev_queue *nq; 64914ff32036SAndre Guedes struct igc_ring *ring; 6492*8df393afSFlorian Kauer int i, nxmit; 64934ff32036SAndre Guedes 6494cb47b1f6SVinicius Costa Gomes if (unlikely(!netif_carrier_ok(dev))) 64954ff32036SAndre Guedes return -ENETDOWN; 64964ff32036SAndre Guedes 64974ff32036SAndre Guedes if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 64984ff32036SAndre Guedes return -EINVAL; 64994ff32036SAndre Guedes 65004ff32036SAndre Guedes ring = igc_xdp_get_tx_ring(adapter, cpu); 65014ff32036SAndre Guedes nq = txring_txq(ring); 65024ff32036SAndre Guedes 65034ff32036SAndre Guedes __netif_tx_lock(nq, cpu); 65044ff32036SAndre Guedes 650595b68148SKurt Kanzenbach /* Avoid transmit queue timeout since we share it with the slow path */ 650695b68148SKurt Kanzenbach txq_trans_cond_update(nq); 650795b68148SKurt Kanzenbach 6508*8df393afSFlorian Kauer nxmit = 0; 65094ff32036SAndre Guedes for (i = 0; i < num_frames; i++) { 65104ff32036SAndre Guedes int err; 65114ff32036SAndre Guedes struct xdp_frame *xdpf = frames[i]; 65124ff32036SAndre Guedes 65134ff32036SAndre Guedes err = igc_xdp_init_tx_descriptor(ring, xdpf); 6514*8df393afSFlorian Kauer if (err) 6515*8df393afSFlorian Kauer 
break; 6516*8df393afSFlorian Kauer nxmit++; 65174ff32036SAndre Guedes } 65184ff32036SAndre Guedes 65194ff32036SAndre Guedes if (flags & XDP_XMIT_FLUSH) 65204ff32036SAndre Guedes igc_flush_tx_descriptors(ring); 65214ff32036SAndre Guedes 65224ff32036SAndre Guedes __netif_tx_unlock(nq); 65234ff32036SAndre Guedes 6524*8df393afSFlorian Kauer return nxmit; 65254ff32036SAndre Guedes } 65264ff32036SAndre Guedes 6527fc9df2a0SAndre Guedes static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, 6528fc9df2a0SAndre Guedes struct igc_q_vector *q_vector) 6529fc9df2a0SAndre Guedes { 6530fc9df2a0SAndre Guedes struct igc_hw *hw = &adapter->hw; 6531fc9df2a0SAndre Guedes u32 eics = 0; 6532fc9df2a0SAndre Guedes 6533fc9df2a0SAndre Guedes eics |= q_vector->eims_value; 6534fc9df2a0SAndre Guedes wr32(IGC_EICS, eics); 6535fc9df2a0SAndre Guedes } 6536fc9df2a0SAndre Guedes 6537fc9df2a0SAndre Guedes int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 6538fc9df2a0SAndre Guedes { 6539fc9df2a0SAndre Guedes struct igc_adapter *adapter = netdev_priv(dev); 6540fc9df2a0SAndre Guedes struct igc_q_vector *q_vector; 6541fc9df2a0SAndre Guedes struct igc_ring *ring; 6542fc9df2a0SAndre Guedes 6543fc9df2a0SAndre Guedes if (test_bit(__IGC_DOWN, &adapter->state)) 6544fc9df2a0SAndre Guedes return -ENETDOWN; 6545fc9df2a0SAndre Guedes 6546fc9df2a0SAndre Guedes if (!igc_xdp_is_enabled(adapter)) 6547fc9df2a0SAndre Guedes return -ENXIO; 6548fc9df2a0SAndre Guedes 6549fc9df2a0SAndre Guedes if (queue_id >= adapter->num_rx_queues) 6550fc9df2a0SAndre Guedes return -EINVAL; 6551fc9df2a0SAndre Guedes 6552fc9df2a0SAndre Guedes ring = adapter->rx_ring[queue_id]; 6553fc9df2a0SAndre Guedes 6554fc9df2a0SAndre Guedes if (!ring->xsk_pool) 6555fc9df2a0SAndre Guedes return -ENXIO; 6556fc9df2a0SAndre Guedes 6557fc9df2a0SAndre Guedes q_vector = adapter->q_vector[queue_id]; 6558fc9df2a0SAndre Guedes if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 6559fc9df2a0SAndre Guedes 
igc_trigger_rxtxq_interrupt(adapter, q_vector); 6560fc9df2a0SAndre Guedes 6561fc9df2a0SAndre Guedes return 0; 6562fc9df2a0SAndre Guedes } 6563fc9df2a0SAndre Guedes 6564c9a11c23SSasha Neftin static const struct net_device_ops igc_netdev_ops = { 6565c9a11c23SSasha Neftin .ndo_open = igc_open, 6566c9a11c23SSasha Neftin .ndo_stop = igc_close, 6567c9a11c23SSasha Neftin .ndo_start_xmit = igc_xmit_frame, 65687f839684SSasha Neftin .ndo_set_rx_mode = igc_set_rx_mode, 6569c9a11c23SSasha Neftin .ndo_set_mac_address = igc_set_mac, 6570c9a11c23SSasha Neftin .ndo_change_mtu = igc_change_mtu, 65719b275176SSasha Neftin .ndo_tx_timeout = igc_tx_timeout, 65726b7ed22aSVinicius Costa Gomes .ndo_get_stats64 = igc_get_stats64, 657365cd3a72SSasha Neftin .ndo_fix_features = igc_fix_features, 657465cd3a72SSasha Neftin .ndo_set_features = igc_set_features, 657565cd3a72SSasha Neftin .ndo_features_check = igc_features_check, 6576a7605370SArnd Bergmann .ndo_eth_ioctl = igc_ioctl, 6577ec50a9d4SVinicius Costa Gomes .ndo_setup_tc = igc_setup_tc, 657826575105SAndre Guedes .ndo_bpf = igc_bpf, 65794ff32036SAndre Guedes .ndo_xdp_xmit = igc_xdp_xmit, 6580fc9df2a0SAndre Guedes .ndo_xsk_wakeup = igc_xsk_wakeup, 6581c9a11c23SSasha Neftin }; 6582146740f9SSasha Neftin 6583146740f9SSasha Neftin /* PCIe configuration access */ 6584146740f9SSasha Neftin void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6585146740f9SSasha Neftin { 6586146740f9SSasha Neftin struct igc_adapter *adapter = hw->back; 6587146740f9SSasha Neftin 6588146740f9SSasha Neftin pci_read_config_word(adapter->pdev, reg, value); 6589146740f9SSasha Neftin } 6590146740f9SSasha Neftin 6591146740f9SSasha Neftin void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) 6592146740f9SSasha Neftin { 6593146740f9SSasha Neftin struct igc_adapter *adapter = hw->back; 6594146740f9SSasha Neftin 6595146740f9SSasha Neftin pci_write_config_word(adapter->pdev, reg, *value); 6596146740f9SSasha Neftin } 6597146740f9SSasha Neftin 
6598146740f9SSasha Neftin s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6599146740f9SSasha Neftin { 6600146740f9SSasha Neftin struct igc_adapter *adapter = hw->back; 6601146740f9SSasha Neftin 6602a16f6d3aSFrederick Lawler if (!pci_is_pcie(adapter->pdev)) 6603146740f9SSasha Neftin return -IGC_ERR_CONFIG; 6604146740f9SSasha Neftin 6605a16f6d3aSFrederick Lawler pcie_capability_read_word(adapter->pdev, reg, value); 6606146740f9SSasha Neftin 6607146740f9SSasha Neftin return IGC_SUCCESS; 6608146740f9SSasha Neftin } 6609146740f9SSasha Neftin 6610146740f9SSasha Neftin s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) 6611146740f9SSasha Neftin { 6612146740f9SSasha Neftin struct igc_adapter *adapter = hw->back; 6613146740f9SSasha Neftin 6614a16f6d3aSFrederick Lawler if (!pci_is_pcie(adapter->pdev)) 6615146740f9SSasha Neftin return -IGC_ERR_CONFIG; 6616146740f9SSasha Neftin 6617a16f6d3aSFrederick Lawler pcie_capability_write_word(adapter->pdev, reg, *value); 6618146740f9SSasha Neftin 6619146740f9SSasha Neftin return IGC_SUCCESS; 6620146740f9SSasha Neftin } 6621146740f9SSasha Neftin 6622146740f9SSasha Neftin u32 igc_rd32(struct igc_hw *hw, u32 reg) 6623146740f9SSasha Neftin { 6624c9a11c23SSasha Neftin struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); 6625146740f9SSasha Neftin u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); 6626146740f9SSasha Neftin u32 value = 0; 6627146740f9SSasha Neftin 66287c1ddceeSLennert Buytenhek if (IGC_REMOVED(hw_addr)) 66297c1ddceeSLennert Buytenhek return ~value; 66307c1ddceeSLennert Buytenhek 6631146740f9SSasha Neftin value = readl(&hw_addr[reg]); 6632146740f9SSasha Neftin 6633146740f9SSasha Neftin /* reads should not return all F's */ 6634c9a11c23SSasha Neftin if (!(~value) && (!reg || !(~readl(hw_addr)))) { 6635c9a11c23SSasha Neftin struct net_device *netdev = igc->netdev; 6636c9a11c23SSasha Neftin 6637146740f9SSasha Neftin hw->hw_addr = NULL; 6638c9a11c23SSasha Neftin 
		/* NOTE(review): tail of a register-read helper whose head lies
		 * above this hunk — on a failed read it detaches the netdev
		 * and warns if the PCI device still appears present
		 * (i.e. the failure was not a surprise removal).
		 */
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
		WARN(pci_device_is_present(igc->pdev),
		     "igc: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}

/* Mapping HW RSS Type to enum xdp_rss_hash_type */
static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
	[IGC_RSS_TYPE_NO_HASH]		= XDP_RSS_TYPE_L2,
	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_TCP,
	[IGC_RSS_TYPE_HASH_IPV4]	= XDP_RSS_TYPE_L3_IPV4,
	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_TCP,
	[IGC_RSS_TYPE_HASH_IPV6_EX]	= XDP_RSS_TYPE_L3_IPV6_EX,
	[IGC_RSS_TYPE_HASH_IPV6]	= XDP_RSS_TYPE_L3_IPV6,
	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= XDP_RSS_TYPE_L4_IPV4_UDP,
	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= XDP_RSS_TYPE_L4_IPV6_UDP,
	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
	[10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
	[11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask   */
	[12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions      */
	[13] = XDP_RSS_TYPE_NONE,
	[14] = XDP_RSS_TYPE_NONE,
	[15] = XDP_RSS_TYPE_NONE,
};

/* XDP metadata kfunc: report the RX hash from the descriptor write-back,
 * translated to the generic XDP RSS hash type. Returns -ENODATA when the
 * netdev does not advertise NETIF_F_RXHASH.
 */
static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
			   enum xdp_rss_hash_type *rss_type)
{
	const struct igc_xdp_buff *ctx = (void *)_ctx;

	if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
		return -ENODATA;

	*hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
	*rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];

	return 0;
}

/* XDP metadata kfunc: report the RX timestamp previously captured into the
 * context, but only when the descriptor flags it (IGC_RXDADV_STAT_TSIP).
 */
static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct igc_xdp_buff *ctx = (void *)_ctx;

	if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
		*timestamp = ctx->rx_ts;

		return 0;
	}

	return -ENODATA;
}

static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
	.xmo_rx_hash			= igc_xdp_rx_hash,
	.xmo_rx_timestamp		= igc_xdp_rx_timestamp,
};

/* hrtimer callback for Qbv (time-aware shaper) schedule transitions:
 * promote any admin-closed gate to oper-closed and open the rest, with
 * qbv_transition flagged while the per-ring gate state is updated.
 * Runs under qbv_tx_lock to serialize against the TX path.
 */
static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
{
	struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
						   hrtimer);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&adapter->qbv_tx_lock, flags);

	adapter->qbv_transition = true;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *tx_ring = adapter->tx_ring[i];

		if (tx_ring->admin_gate_closed) {
			tx_ring->admin_gate_closed = false;
			tx_ring->oper_gate_closed = true;
		} else {
			tx_ring->oper_gate_closed = false;
		}
	}
	adapter->qbv_transition = false;

	spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * igc_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igc_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring the adapter private structure,
 * and a hardware reset occur.
 */
static int igc_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct igc_adapter *adapter;
	struct net_device *netdev;
	struct igc_hw *hw;
	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, igc_driver_name);
	if (err)
		goto err_pci_reg;

	/* PTM (Precision Time Measurement) is optional; failure is only
	 * informational and probe continues without it.
	 */
	err = pci_enable_ptm(pdev, NULL);
	if (err < 0)
		dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
				   IGC_MAX_TX_QUEUES);

	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->port_num = hw->bus.func;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = pci_save_state(pdev);
	if (err)
		goto err_ioremap;

	err = -EIO;
	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!adapter->io_addr)
		goto err_ioremap;

	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;

	netdev->netdev_ops = &igc_netdev_ops;
	netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
	igc_ethtool_set_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC and PHY function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* Add supported features to the features list */
	netdev->features |= NETIF_F_SG;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_TSO_ECN;
	netdev->features |= NETIF_F_RXHASH;
	netdev->features |= NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->features |= NETIF_F_HW_TC;

#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;

	/* setup the private structure */
	err = igc_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= netdev->features;

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_XSK_ZEROCOPY;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	if (igc_get_flash_presence_i225(hw)) {
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	/* a platform-provided MAC address (e.g. device tree) takes precedence
	 * over the one stored in the NVM
	 */
	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	eth_hw_addr_set(netdev, hw->mac.addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);

	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	/* hrtimer used to drive Qbv gate-state transitions */
	hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	adapter->hrtimer.function = &igc_qbv_scheduling_timer;

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0xaf;

	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);

	igc_ptp_init(adapter);

	igc_tsn_clear_schedule(adapter);

	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", IFNAMSIZ);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

	/* drop the usage count re-taken by pm_runtime_get_noresume() in
	 * igc_remove()
	 */
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* balance the pm_runtime_put_noidle() done at the end of igc_probe() */
	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	pci_disable_ptm(pdev);
	pci_clear_master(pdev);

	/* stop periodic work before tearing anything down */
	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	hrtimer_cancel(&adapter->hrtimer);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* Common suspend/poweroff path. Detaches the netdev, tears down interrupts
 * and either arms the Wake-on-LAN filters (WUFC) or clears them. @runtime
 * selects runtime-suspend behaviour: wake on link-change only instead of the
 * user-configured WoL set. When @enable_wake is non-NULL it reports whether
 * the device was left armed to wake the system.
 */
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	/* with link already up there is no point waking on link-change */
	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
/* Runtime-PM suspend: wake only on link change (runtime = 1) */
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}

/* Re-inject the packet that woke the system, read back from the WUPM
 * (wake-up packet memory) registers, into the network stack.
 */
static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}

/* System/runtime resume: restore PCI state, rebuild the interrupt scheme,
 * reset the MAC, deliver a pending wake packet (if the WUS register flags
 * one) and reopen the interface if it was running.
 */
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	/* NOTE(review): err is declared u32 yet stores negative errnos and is
	 * returned as int — value-preserving, but int would be clearer.
	 */
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	/* clear all wake-up status bits (write-1-to-clear) */
	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}

static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}

static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}

/* Runtime-PM idle callback: always returns -EBUSY so the PM core does not
 * suspend immediately; instead a delayed suspend is scheduled when there is
 * no link.
 */
static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */

/* Shutdown/reboot hook: run the common shutdown path and, on poweroff,
 * arm D3 wake according to the WoL configuration.
 */
static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* permanent failure: no recovery possible, tell the core to give up */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the igc_resume routine.
 **/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		/* clear all wake-up status bits (write-1-to-clear) */
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}

/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
Implementation resembles the 7290bc23aa94SSasha Neftin * second-half of the igc_resume routine. 7291bc23aa94SSasha Neftin */ 7292bc23aa94SSasha Neftin static void igc_io_resume(struct pci_dev *pdev) 7293bc23aa94SSasha Neftin { 7294bc23aa94SSasha Neftin struct net_device *netdev = pci_get_drvdata(pdev); 7295bc23aa94SSasha Neftin struct igc_adapter *adapter = netdev_priv(netdev); 7296bc23aa94SSasha Neftin 7297bc23aa94SSasha Neftin rtnl_lock(); 7298bc23aa94SSasha Neftin if (netif_running(netdev)) { 7299bc23aa94SSasha Neftin if (igc_open(netdev)) { 730025f06effSAndre Guedes netdev_err(netdev, "igc_open failed after reset\n"); 7301bc23aa94SSasha Neftin return; 7302bc23aa94SSasha Neftin } 7303bc23aa94SSasha Neftin } 7304bc23aa94SSasha Neftin 7305bc23aa94SSasha Neftin netif_device_attach(netdev); 7306bc23aa94SSasha Neftin 7307bc23aa94SSasha Neftin /* let the f/w know that the h/w is now under the control of the 7308bc23aa94SSasha Neftin * driver. 7309bc23aa94SSasha Neftin */ 7310bc23aa94SSasha Neftin igc_get_hw_control(adapter); 7311bc23aa94SSasha Neftin rtnl_unlock(); 7312bc23aa94SSasha Neftin } 7313bc23aa94SSasha Neftin 7314bc23aa94SSasha Neftin static const struct pci_error_handlers igc_err_handler = { 7315bc23aa94SSasha Neftin .error_detected = igc_io_error_detected, 7316bc23aa94SSasha Neftin .slot_reset = igc_io_slot_reset, 7317bc23aa94SSasha Neftin .resume = igc_io_resume, 7318bc23aa94SSasha Neftin }; 7319bc23aa94SSasha Neftin 73209513d2a5SSasha Neftin #ifdef CONFIG_PM 73219513d2a5SSasha Neftin static const struct dev_pm_ops igc_pm_ops = { 73229513d2a5SSasha Neftin SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) 73239513d2a5SSasha Neftin SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, 73249513d2a5SSasha Neftin igc_runtime_idle) 73259513d2a5SSasha Neftin }; 73269513d2a5SSasha Neftin #endif 73279513d2a5SSasha Neftin 7328d89f8841SSasha Neftin static struct pci_driver igc_driver = { 7329d89f8841SSasha Neftin .name = igc_driver_name, 7330d89f8841SSasha 
Neftin .id_table = igc_pci_tbl, 7331d89f8841SSasha Neftin .probe = igc_probe, 7332d89f8841SSasha Neftin .remove = igc_remove, 73339513d2a5SSasha Neftin #ifdef CONFIG_PM 73349513d2a5SSasha Neftin .driver.pm = &igc_pm_ops, 73359513d2a5SSasha Neftin #endif 73369513d2a5SSasha Neftin .shutdown = igc_shutdown, 7337bc23aa94SSasha Neftin .err_handler = &igc_err_handler, 7338d89f8841SSasha Neftin }; 7339d89f8841SSasha Neftin 7340146740f9SSasha Neftin /** 73418c5ad0daSSasha Neftin * igc_reinit_queues - return error 73428c5ad0daSSasha Neftin * @adapter: pointer to adapter structure 73438c5ad0daSSasha Neftin */ 73448c5ad0daSSasha Neftin int igc_reinit_queues(struct igc_adapter *adapter) 73458c5ad0daSSasha Neftin { 73468c5ad0daSSasha Neftin struct net_device *netdev = adapter->netdev; 73478c5ad0daSSasha Neftin int err = 0; 73488c5ad0daSSasha Neftin 73498c5ad0daSSasha Neftin if (netif_running(netdev)) 73508c5ad0daSSasha Neftin igc_close(netdev); 73518c5ad0daSSasha Neftin 73528c5ad0daSSasha Neftin igc_reset_interrupt_capability(adapter); 73538c5ad0daSSasha Neftin 73548c5ad0daSSasha Neftin if (igc_init_interrupt_scheme(adapter, true)) { 735525f06effSAndre Guedes netdev_err(netdev, "Unable to allocate memory for queues\n"); 73568c5ad0daSSasha Neftin return -ENOMEM; 73578c5ad0daSSasha Neftin } 73588c5ad0daSSasha Neftin 73598c5ad0daSSasha Neftin if (netif_running(netdev)) 73608c5ad0daSSasha Neftin err = igc_open(netdev); 73618c5ad0daSSasha Neftin 73628c5ad0daSSasha Neftin return err; 73638c5ad0daSSasha Neftin } 73648c5ad0daSSasha Neftin 73658c5ad0daSSasha Neftin /** 7366c0071c7aSSasha Neftin * igc_get_hw_dev - return device 7367c0071c7aSSasha Neftin * @hw: pointer to hardware structure 7368c0071c7aSSasha Neftin * 7369c0071c7aSSasha Neftin * used by hardware layer to print debugging information 7370c0071c7aSSasha Neftin */ 7371c0071c7aSSasha Neftin struct net_device *igc_get_hw_dev(struct igc_hw *hw) 7372c0071c7aSSasha Neftin { 7373c0071c7aSSasha Neftin struct igc_adapter *adapter = 
hw->back; 7374c0071c7aSSasha Neftin 7375c0071c7aSSasha Neftin return adapter->netdev; 7376c0071c7aSSasha Neftin } 7377c0071c7aSSasha Neftin 7378fc9df2a0SAndre Guedes static void igc_disable_rx_ring_hw(struct igc_ring *ring) 7379fc9df2a0SAndre Guedes { 7380fc9df2a0SAndre Guedes struct igc_hw *hw = &ring->q_vector->adapter->hw; 7381fc9df2a0SAndre Guedes u8 idx = ring->reg_idx; 7382fc9df2a0SAndre Guedes u32 rxdctl; 7383fc9df2a0SAndre Guedes 7384fc9df2a0SAndre Guedes rxdctl = rd32(IGC_RXDCTL(idx)); 7385fc9df2a0SAndre Guedes rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; 7386fc9df2a0SAndre Guedes rxdctl |= IGC_RXDCTL_SWFLUSH; 7387fc9df2a0SAndre Guedes wr32(IGC_RXDCTL(idx), rxdctl); 7388fc9df2a0SAndre Guedes } 7389fc9df2a0SAndre Guedes 7390fc9df2a0SAndre Guedes void igc_disable_rx_ring(struct igc_ring *ring) 7391fc9df2a0SAndre Guedes { 7392fc9df2a0SAndre Guedes igc_disable_rx_ring_hw(ring); 7393fc9df2a0SAndre Guedes igc_clean_rx_ring(ring); 7394fc9df2a0SAndre Guedes } 7395fc9df2a0SAndre Guedes 7396fc9df2a0SAndre Guedes void igc_enable_rx_ring(struct igc_ring *ring) 7397fc9df2a0SAndre Guedes { 7398fc9df2a0SAndre Guedes struct igc_adapter *adapter = ring->q_vector->adapter; 7399fc9df2a0SAndre Guedes 7400fc9df2a0SAndre Guedes igc_configure_rx_ring(adapter, ring); 7401fc9df2a0SAndre Guedes 7402fc9df2a0SAndre Guedes if (ring->xsk_pool) 7403fc9df2a0SAndre Guedes igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); 7404fc9df2a0SAndre Guedes else 7405fc9df2a0SAndre Guedes igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); 7406fc9df2a0SAndre Guedes } 7407fc9df2a0SAndre Guedes 74089acf59a7SAndre Guedes void igc_disable_tx_ring(struct igc_ring *ring) 74099acf59a7SAndre Guedes { 74109acf59a7SAndre Guedes igc_disable_tx_ring_hw(ring); 74119acf59a7SAndre Guedes igc_clean_tx_ring(ring); 74129acf59a7SAndre Guedes } 74139acf59a7SAndre Guedes 74149acf59a7SAndre Guedes void igc_enable_tx_ring(struct igc_ring *ring) 74159acf59a7SAndre Guedes { 74169acf59a7SAndre Guedes struct igc_adapter *adapter = 
ring->q_vector->adapter; 74179acf59a7SAndre Guedes 74189acf59a7SAndre Guedes igc_configure_tx_ring(adapter, ring); 74199acf59a7SAndre Guedes } 74209acf59a7SAndre Guedes 7421c0071c7aSSasha Neftin /** 7422d89f8841SSasha Neftin * igc_init_module - Driver Registration Routine 7423d89f8841SSasha Neftin * 7424d89f8841SSasha Neftin * igc_init_module is the first routine called when the driver is 7425d89f8841SSasha Neftin * loaded. All it does is register with the PCI subsystem. 7426d89f8841SSasha Neftin */ 7427d89f8841SSasha Neftin static int __init igc_init_module(void) 7428d89f8841SSasha Neftin { 7429d89f8841SSasha Neftin int ret; 7430d89f8841SSasha Neftin 743134a2a3b8SJeff Kirsher pr_info("%s\n", igc_driver_string); 7432d89f8841SSasha Neftin pr_info("%s\n", igc_copyright); 7433d89f8841SSasha Neftin 7434d89f8841SSasha Neftin ret = pci_register_driver(&igc_driver); 7435d89f8841SSasha Neftin return ret; 7436d89f8841SSasha Neftin } 7437d89f8841SSasha Neftin 7438d89f8841SSasha Neftin module_init(igc_init_module); 7439d89f8841SSasha Neftin 7440d89f8841SSasha Neftin /** 7441d89f8841SSasha Neftin * igc_exit_module - Driver Exit Cleanup Routine 7442d89f8841SSasha Neftin * 7443d89f8841SSasha Neftin * igc_exit_module is called just before the driver is removed 7444d89f8841SSasha Neftin * from memory. 7445d89f8841SSasha Neftin */ 7446d89f8841SSasha Neftin static void __exit igc_exit_module(void) 7447d89f8841SSasha Neftin { 7448d89f8841SSasha Neftin pci_unregister_driver(&igc_driver); 7449d89f8841SSasha Neftin } 7450d89f8841SSasha Neftin 7451d89f8841SSasha Neftin module_exit(igc_exit_module); 7452d89f8841SSasha Neftin /* igc_main.c */ 7453