15ec8b7d1SJesse Brandeburg // SPDX-License-Identifier: GPL-2.0
25ec8b7d1SJesse Brandeburg /* Copyright(c) 2013 - 2018 Intel Corporation. */
35ec8b7d1SJesse Brandeburg
45ec8b7d1SJesse Brandeburg #include "iavf.h"
566bc8e0fSJesse Brandeburg #include "iavf_prototype.h"
65ec8b7d1SJesse Brandeburg #include "iavf_client.h"
75ec8b7d1SJesse Brandeburg /* All iavf tracepoints are defined by the include below, which must
85ec8b7d1SJesse Brandeburg * be included exactly once across the whole kernel with
95ec8b7d1SJesse Brandeburg * CREATE_TRACE_POINTS defined
105ec8b7d1SJesse Brandeburg */
115ec8b7d1SJesse Brandeburg #define CREATE_TRACE_POINTS
12ad64ed8bSJesse Brandeburg #include "iavf_trace.h"
135ec8b7d1SJesse Brandeburg
145ec8b7d1SJesse Brandeburg static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
155ec8b7d1SJesse Brandeburg static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
165ec8b7d1SJesse Brandeburg static int iavf_close(struct net_device *netdev);
1759756ad6SMateusz Palczewski static void iavf_init_get_resources(struct iavf_adapter *adapter);
18b66c7bc1SJakub Pawlak static int iavf_check_reset_complete(struct iavf_hw *hw);
195ec8b7d1SJesse Brandeburg
205ec8b7d1SJesse Brandeburg char iavf_driver_name[] = "iavf";
215ec8b7d1SJesse Brandeburg static const char iavf_driver_string[] =
225ec8b7d1SJesse Brandeburg "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
235ec8b7d1SJesse Brandeburg
245ec8b7d1SJesse Brandeburg static const char iavf_copyright[] =
255ec8b7d1SJesse Brandeburg "Copyright (c) 2013 - 2018 Intel Corporation.";
265ec8b7d1SJesse Brandeburg
275ec8b7d1SJesse Brandeburg /* iavf_pci_tbl - PCI Device ID Table
285ec8b7d1SJesse Brandeburg *
295ec8b7d1SJesse Brandeburg * Wildcard entries (PCI_ANY_ID) should come last
305ec8b7d1SJesse Brandeburg * Last entry must be all 0s
315ec8b7d1SJesse Brandeburg *
325ec8b7d1SJesse Brandeburg * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
335ec8b7d1SJesse Brandeburg * Class, Class Mask, private data (not used) }
345ec8b7d1SJesse Brandeburg */
355ec8b7d1SJesse Brandeburg static const struct pci_device_id iavf_pci_tbl[] = {
364dbc76e0SJesse Brandeburg {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
374dbc76e0SJesse Brandeburg {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
384dbc76e0SJesse Brandeburg {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
394dbc76e0SJesse Brandeburg {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
405ec8b7d1SJesse Brandeburg /* required last entry */
415ec8b7d1SJesse Brandeburg {0, }
425ec8b7d1SJesse Brandeburg };
435ec8b7d1SJesse Brandeburg
445ec8b7d1SJesse Brandeburg MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
455ec8b7d1SJesse Brandeburg
465ec8b7d1SJesse Brandeburg MODULE_ALIAS("i40evf");
475ec8b7d1SJesse Brandeburg MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
4898674ebeSJesse Brandeburg MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
4998674ebeSJesse Brandeburg MODULE_LICENSE("GPL v2");
505ec8b7d1SJesse Brandeburg
51b66c7bc1SJakub Pawlak static const struct net_device_ops iavf_netdev_ops;
525ec8b7d1SJesse Brandeburg
/**
 * iavf_status_to_errno - convert an iavf_status code to a standard errno
 * @status: iavf-specific status code returned by the shared/admin-queue code
 *
 * Returns 0 for IAVF_SUCCESS, otherwise the negative errno that best
 * matches the device status.  Any code not listed below maps to -EIO.
 */
int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	/* caller supplied a bad argument or identifier */
	case IAVF_ERR_PARAM:
	case IAVF_ERR_MAC_TYPE:
	case IAVF_ERR_INVALID_MAC_ADDR:
	case IAVF_ERR_INVALID_LINK_SETTINGS:
	case IAVF_ERR_INVALID_PD_ID:
	case IAVF_ERR_INVALID_QP_ID:
	case IAVF_ERR_INVALID_CQ_ID:
	case IAVF_ERR_INVALID_CEQ_ID:
	case IAVF_ERR_INVALID_AEQ_ID:
	case IAVF_ERR_INVALID_SIZE:
	case IAVF_ERR_INVALID_ARP_INDEX:
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
	case IAVF_ERR_INVALID_FRAG_COUNT:
	case IAVF_ERR_INVALID_ALIGNMENT:
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
	case IAVF_ERR_INVALID_VF_ID:
	case IAVF_ERR_INVALID_HMCFN_ID:
	case IAVF_ERR_INVALID_PBLE_INDEX:
	case IAVF_ERR_INVALID_SD_INDEX:
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
	case IAVF_ERR_INVALID_SD_TYPE:
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return -EINVAL;
	/* generic device/firmware failures */
	case IAVF_ERR_NVM:
	case IAVF_ERR_NVM_CHECKSUM:
	case IAVF_ERR_PHY:
	case IAVF_ERR_CONFIG:
	case IAVF_ERR_UNKNOWN_PHY:
	case IAVF_ERR_LINK_SETUP:
	case IAVF_ERR_ADAPTER_STOPPED:
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
	case IAVF_ERR_RESET_FAILED:
	case IAVF_ERR_BAD_PTR:
	case IAVF_ERR_SWFW_SYNC:
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
	case IAVF_ERR_QUEUE_EMPTY:
	case IAVF_ERR_FLUSHED_QUEUE:
	case IAVF_ERR_OPCODE_MISMATCH:
	case IAVF_ERR_CQP_COMPL_ERROR:
	case IAVF_ERR_BACKING_PAGE_ERROR:
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
	case IAVF_ERR_MEMCPY_FAILED:
	case IAVF_ERR_SRQ_ENABLED:
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
	case IAVF_ERR_ADMIN_QUEUE_FULL:
	case IAVF_ERR_BAD_RDMA_CQE:
	case IAVF_ERR_NVM_BLANK_MODE:
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
	case IAVF_ERR_DIAG_TEST_FAILED:
	case IAVF_ERR_FIRMWARE_API_VERSION:
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return -EIO;
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return -ENODEV;
	/* out of VSIs or descriptor ring space */
	case IAVF_ERR_NO_AVAILABLE_VSI:
	case IAVF_ERR_RING_FULL:
		return -ENOSPC;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_ERR_NOT_IMPLEMENTED:
	case IAVF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return -EALREADY;
	case IAVF_ERR_NOT_READY:
		return -EBUSY;
	case IAVF_ERR_BUF_TOO_SHORT:
		return -EMSGSIZE;
	}

	/* no default: above so -Wswitch flags new enum values; anything
	 * that falls through is treated as a generic I/O error
	 */
	return -EIO;
}
138bae569d0SMateusz Palczewski
/**
 * virtchnl_status_to_errno - convert a virtchnl status code to a standard errno
 * @v_status: status code from a PF->VF virtchnl message
 *
 * Returns 0 for VIRTCHNL_STATUS_SUCCESS, otherwise the closest negative
 * errno.  Any code not listed below maps to -EIO.
 */
int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return 0;
	case VIRTCHNL_STATUS_ERR_PARAM:
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return -EINVAL;
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return -ENOMEM;
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
		return -EIO;
	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	}

	/* no default: above so -Wswitch flags new enum values */
	return -EIO;
}
159bae569d0SMateusz Palczewski
/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 *
 * The PCI driver data holds the netdev; the adapter is its private area.
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	return netdev_priv(netdev);
}
168247aa001SKaren Sornek
169247aa001SKaren Sornek /**
170c2ed2403SMarcin Szycik * iavf_is_reset_in_progress - Check if a reset is in progress
171c2ed2403SMarcin Szycik * @adapter: board private structure
172c2ed2403SMarcin Szycik */
iavf_is_reset_in_progress(struct iavf_adapter * adapter)173c2ed2403SMarcin Szycik static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
174c2ed2403SMarcin Szycik {
175c2ed2403SMarcin Szycik if (adapter->state == __IAVF_RESETTING ||
176c2ed2403SMarcin Szycik adapter->flags & (IAVF_FLAG_RESET_PENDING |
177c2ed2403SMarcin Szycik IAVF_FLAG_RESET_NEEDED))
178c2ed2403SMarcin Szycik return true;
179c2ed2403SMarcin Szycik
180c2ed2403SMarcin Szycik return false;
181c2ed2403SMarcin Szycik }
182c2ed2403SMarcin Szycik
183c2ed2403SMarcin Szycik /**
184c2ed2403SMarcin Szycik * iavf_wait_for_reset - Wait for reset to finish.
185c2ed2403SMarcin Szycik * @adapter: board private structure
186c2ed2403SMarcin Szycik *
187c2ed2403SMarcin Szycik * Returns 0 if reset finished successfully, negative on timeout or interrupt.
188c2ed2403SMarcin Szycik */
iavf_wait_for_reset(struct iavf_adapter * adapter)189c2ed2403SMarcin Szycik int iavf_wait_for_reset(struct iavf_adapter *adapter)
190c2ed2403SMarcin Szycik {
191c2ed2403SMarcin Szycik int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
192c2ed2403SMarcin Szycik !iavf_is_reset_in_progress(adapter),
193c2ed2403SMarcin Szycik msecs_to_jiffies(5000));
194c2ed2403SMarcin Szycik
195c2ed2403SMarcin Szycik /* If ret < 0 then it means wait was interrupted.
196c2ed2403SMarcin Szycik * If ret == 0 then it means we got a timeout while waiting
197c2ed2403SMarcin Szycik * for reset to finish.
198c2ed2403SMarcin Szycik * If ret > 0 it means reset has finished.
199c2ed2403SMarcin Szycik */
200c2ed2403SMarcin Szycik if (ret > 0)
201c2ed2403SMarcin Szycik return 0;
202c2ed2403SMarcin Szycik else if (ret < 0)
203c2ed2403SMarcin Szycik return -EINTR;
204c2ed2403SMarcin Szycik else
205c2ed2403SMarcin Szycik return -EBUSY;
206c2ed2403SMarcin Szycik }
207c2ed2403SMarcin Szycik
208c2ed2403SMarcin Szycik /**
2095ec8b7d1SJesse Brandeburg * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
2105ec8b7d1SJesse Brandeburg * @hw: pointer to the HW structure
2115ec8b7d1SJesse Brandeburg * @mem: ptr to mem struct to fill out
2125ec8b7d1SJesse Brandeburg * @size: size of memory requested
2135ec8b7d1SJesse Brandeburg * @alignment: what to align the allocation to
2145ec8b7d1SJesse Brandeburg **/
iavf_allocate_dma_mem_d(struct iavf_hw * hw,struct iavf_dma_mem * mem,u64 size,u32 alignment)21580754bbcSSergey Nemov enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
21656184e01SJesse Brandeburg struct iavf_dma_mem *mem,
2175ec8b7d1SJesse Brandeburg u64 size, u32 alignment)
2185ec8b7d1SJesse Brandeburg {
2195ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
2205ec8b7d1SJesse Brandeburg
2215ec8b7d1SJesse Brandeburg if (!mem)
2228821b3faSAlice Michael return IAVF_ERR_PARAM;
2235ec8b7d1SJesse Brandeburg
2245ec8b7d1SJesse Brandeburg mem->size = ALIGN(size, alignment);
2255ec8b7d1SJesse Brandeburg mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
2265ec8b7d1SJesse Brandeburg (dma_addr_t *)&mem->pa, GFP_KERNEL);
2275ec8b7d1SJesse Brandeburg if (mem->va)
2285ec8b7d1SJesse Brandeburg return 0;
2295ec8b7d1SJesse Brandeburg else
2308821b3faSAlice Michael return IAVF_ERR_NO_MEMORY;
2315ec8b7d1SJesse Brandeburg }
2325ec8b7d1SJesse Brandeburg
2335ec8b7d1SJesse Brandeburg /**
234b855bcdeSPrzemek Kitszel * iavf_free_dma_mem - wrapper for DMA memory freeing
2355ec8b7d1SJesse Brandeburg * @hw: pointer to the HW structure
2365ec8b7d1SJesse Brandeburg * @mem: ptr to mem struct to free
2375ec8b7d1SJesse Brandeburg **/
iavf_free_dma_mem(struct iavf_hw * hw,struct iavf_dma_mem * mem)238b855bcdeSPrzemek Kitszel enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
2395ec8b7d1SJesse Brandeburg {
2405ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
2415ec8b7d1SJesse Brandeburg
2425ec8b7d1SJesse Brandeburg if (!mem || !mem->va)
2438821b3faSAlice Michael return IAVF_ERR_PARAM;
2445ec8b7d1SJesse Brandeburg dma_free_coherent(&adapter->pdev->dev, mem->size,
2455ec8b7d1SJesse Brandeburg mem->va, (dma_addr_t)mem->pa);
2465ec8b7d1SJesse Brandeburg return 0;
2475ec8b7d1SJesse Brandeburg }
2485ec8b7d1SJesse Brandeburg
2495ec8b7d1SJesse Brandeburg /**
250b855bcdeSPrzemek Kitszel * iavf_allocate_virt_mem - virt memory alloc wrapper
2515ec8b7d1SJesse Brandeburg * @hw: pointer to the HW structure
2525ec8b7d1SJesse Brandeburg * @mem: ptr to mem struct to fill out
2535ec8b7d1SJesse Brandeburg * @size: size of memory requested
2545ec8b7d1SJesse Brandeburg **/
iavf_allocate_virt_mem(struct iavf_hw * hw,struct iavf_virt_mem * mem,u32 size)255b855bcdeSPrzemek Kitszel enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
25656184e01SJesse Brandeburg struct iavf_virt_mem *mem, u32 size)
2575ec8b7d1SJesse Brandeburg {
2585ec8b7d1SJesse Brandeburg if (!mem)
2598821b3faSAlice Michael return IAVF_ERR_PARAM;
2605ec8b7d1SJesse Brandeburg
2615ec8b7d1SJesse Brandeburg mem->size = size;
2625ec8b7d1SJesse Brandeburg mem->va = kzalloc(size, GFP_KERNEL);
2635ec8b7d1SJesse Brandeburg
2645ec8b7d1SJesse Brandeburg if (mem->va)
2655ec8b7d1SJesse Brandeburg return 0;
2665ec8b7d1SJesse Brandeburg else
2678821b3faSAlice Michael return IAVF_ERR_NO_MEMORY;
2685ec8b7d1SJesse Brandeburg }
2695ec8b7d1SJesse Brandeburg
/**
 * iavf_free_virt_mem - virt memory free wrapper
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 *
 * kfree() tolerates NULL, so no guard is needed on mem->va.
 **/
void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
	kfree(mem->va);
}
2795ec8b7d1SJesse Brandeburg
2805ec8b7d1SJesse Brandeburg /**
2815ec8b7d1SJesse Brandeburg * iavf_schedule_reset - Set the flags and schedule a reset event
2825ec8b7d1SJesse Brandeburg * @adapter: board private structure
283c34743daSAhmed Zaki * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
2845ec8b7d1SJesse Brandeburg **/
iavf_schedule_reset(struct iavf_adapter * adapter,u64 flags)285c34743daSAhmed Zaki void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
2865ec8b7d1SJesse Brandeburg {
287c34743daSAhmed Zaki if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
288c34743daSAhmed Zaki !(adapter->flags &
2895ec8b7d1SJesse Brandeburg (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
290c34743daSAhmed Zaki adapter->flags |= flags;
2914411a608SMichal Schmidt queue_work(adapter->wq, &adapter->reset_task);
2925ec8b7d1SJesse Brandeburg }
2935ec8b7d1SJesse Brandeburg }
2945ec8b7d1SJesse Brandeburg
/**
 * iavf_schedule_aq_request - Set the flags and schedule aq request
 * @adapter: board private structure
 * @flags: requested aq flags
 *
 * Records the requested admin-queue operations in aq_required and kicks
 * the watchdog immediately so they are processed without delay.
 **/
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
	adapter->aq_required |= flags;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
3053b5bdd18SJedrzej Jagielski
3063b5bdd18SJedrzej Jagielski /**
3075ec8b7d1SJesse Brandeburg * iavf_tx_timeout - Respond to a Tx Hang
3085ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
309b50f7bcaSJesse Brandeburg * @txqueue: queue number that is timing out
3105ec8b7d1SJesse Brandeburg **/
iavf_tx_timeout(struct net_device * netdev,unsigned int txqueue)3110290bd29SMichael S. Tsirkin static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3125ec8b7d1SJesse Brandeburg {
3135ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
3145ec8b7d1SJesse Brandeburg
3155ec8b7d1SJesse Brandeburg adapter->tx_timeout_count++;
316c34743daSAhmed Zaki iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
3175ec8b7d1SJesse Brandeburg }
3185ec8b7d1SJesse Brandeburg
/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks the misc/admin-queue vector (vector 0) and waits for any
 * in-flight handler on that vector to complete.
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	/* nothing to mask if MSI-X was never set up */
	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	/* flush the register write before synchronizing on the vector */
	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}
3365ec8b7d1SJesse Brandeburg
/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Unmasks the misc vector and enables the admin-queue interrupt cause,
 * then flushes the writes to hardware.
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}
3515ec8b7d1SJesse Brandeburg
/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks every traffic (queue) vector and waits for in-flight handlers;
 * vector 0 (misc/adminq) is handled separately by iavf_misc_irq_disable().
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	/* nothing to mask if MSI-X was never set up */
	if (!adapter->msix_entries)
		return;

	/* traffic vectors start at index 1; 0 is the misc vector */
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}
3705ec8b7d1SJesse Brandeburg
/**
 * iavf_irq_enable_queues - Enable interrupt for all queues
 * @adapter: board private structure
 *
 * Unmasks every traffic (queue) vector; vector 0 (misc/adminq) is
 * handled by iavf_misc_irq_enable().
 **/
static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* traffic vectors start at index 1; register index is i - 1 */
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	}
}
3865ec8b7d1SJesse Brandeburg
3875ec8b7d1SJesse Brandeburg /**
3885ec8b7d1SJesse Brandeburg * iavf_irq_enable - Enable default interrupt generation settings
3895ec8b7d1SJesse Brandeburg * @adapter: board private structure
3905ec8b7d1SJesse Brandeburg * @flush: boolean value whether to run rd32()
3915ec8b7d1SJesse Brandeburg **/
iavf_irq_enable(struct iavf_adapter * adapter,bool flush)3925ec8b7d1SJesse Brandeburg void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
3935ec8b7d1SJesse Brandeburg {
394f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
3955ec8b7d1SJesse Brandeburg
3965ec8b7d1SJesse Brandeburg iavf_misc_irq_enable(adapter);
397c37cf54cSAhmed Zaki iavf_irq_enable_queues(adapter);
3985ec8b7d1SJesse Brandeburg
3995ec8b7d1SJesse Brandeburg if (flush)
400f1cad2ceSJesse Brandeburg iavf_flush(hw);
4015ec8b7d1SJesse Brandeburg }
4025ec8b7d1SJesse Brandeburg
/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 *
 * Acknowledges the misc/adminq interrupt causes and defers the real
 * work to the adminq task.
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* don't queue work once teardown has begun */
	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(adapter->wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}
4245ec8b7d1SJesse Brandeburg
4255ec8b7d1SJesse Brandeburg /**
4265ec8b7d1SJesse Brandeburg * iavf_msix_clean_rings - MSIX mode Interrupt Handler
4275ec8b7d1SJesse Brandeburg * @irq: interrupt number
4285ec8b7d1SJesse Brandeburg * @data: pointer to a q_vector
4295ec8b7d1SJesse Brandeburg **/
iavf_msix_clean_rings(int irq,void * data)4305ec8b7d1SJesse Brandeburg static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
4315ec8b7d1SJesse Brandeburg {
43256184e01SJesse Brandeburg struct iavf_q_vector *q_vector = data;
4335ec8b7d1SJesse Brandeburg
4345ec8b7d1SJesse Brandeburg if (!q_vector->tx.ring && !q_vector->rx.ring)
4355ec8b7d1SJesse Brandeburg return IRQ_HANDLED;
4365ec8b7d1SJesse Brandeburg
4375ec8b7d1SJesse Brandeburg napi_schedule_irqoff(&q_vector->napi);
4385ec8b7d1SJesse Brandeburg
4395ec8b7d1SJesse Brandeburg return IRQ_HANDLED;
4405ec8b7d1SJesse Brandeburg }
4415ec8b7d1SJesse Brandeburg
4425ec8b7d1SJesse Brandeburg /**
4435ec8b7d1SJesse Brandeburg * iavf_map_vector_to_rxq - associate irqs with rx queues
4445ec8b7d1SJesse Brandeburg * @adapter: board private structure
4455ec8b7d1SJesse Brandeburg * @v_idx: interrupt number
4465ec8b7d1SJesse Brandeburg * @r_idx: queue number
4475ec8b7d1SJesse Brandeburg **/
4485ec8b7d1SJesse Brandeburg static void
iavf_map_vector_to_rxq(struct iavf_adapter * adapter,int v_idx,int r_idx)4495ec8b7d1SJesse Brandeburg iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
4505ec8b7d1SJesse Brandeburg {
45156184e01SJesse Brandeburg struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
45256184e01SJesse Brandeburg struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
453f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
4545ec8b7d1SJesse Brandeburg
4555ec8b7d1SJesse Brandeburg rx_ring->q_vector = q_vector;
4565ec8b7d1SJesse Brandeburg rx_ring->next = q_vector->rx.ring;
4575ec8b7d1SJesse Brandeburg rx_ring->vsi = &adapter->vsi;
4585ec8b7d1SJesse Brandeburg q_vector->rx.ring = rx_ring;
4595ec8b7d1SJesse Brandeburg q_vector->rx.count++;
4605ec8b7d1SJesse Brandeburg q_vector->rx.next_update = jiffies + 1;
4615ec8b7d1SJesse Brandeburg q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
4625ec8b7d1SJesse Brandeburg q_vector->ring_mask |= BIT(r_idx);
46356184e01SJesse Brandeburg wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
4644eda4e00SNicholas Nunley q_vector->rx.current_itr >> 1);
4655ec8b7d1SJesse Brandeburg q_vector->rx.current_itr = q_vector->rx.target_itr;
4665ec8b7d1SJesse Brandeburg }
4675ec8b7d1SJesse Brandeburg
/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 *
 * Links the tx ring into the vector's ring list and programs the
 * initial tx ITR for the vector.
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	/* push the ring onto the vector's singly-linked tx ring list */
	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	/* program the initial ITR; register holds the value in 2us units */
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
4935ec8b7d1SJesse Brandeburg
4945ec8b7d1SJesse Brandeburg /**
4955ec8b7d1SJesse Brandeburg * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
4965ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
4975ec8b7d1SJesse Brandeburg *
4985ec8b7d1SJesse Brandeburg * This function maps descriptor rings to the queue-specific vectors
4995ec8b7d1SJesse Brandeburg * we were allotted through the MSI-X enabling code. Ideally, we'd have
5005ec8b7d1SJesse Brandeburg * one vector per ring/queue, but on a constrained vector budget, we
5015ec8b7d1SJesse Brandeburg * group the rings as "efficiently" as possible. You would add new
5025ec8b7d1SJesse Brandeburg * mapping configurations in here.
5035ec8b7d1SJesse Brandeburg **/
iavf_map_rings_to_vectors(struct iavf_adapter * adapter)5045ec8b7d1SJesse Brandeburg static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
5055ec8b7d1SJesse Brandeburg {
5065ec8b7d1SJesse Brandeburg int rings_remaining = adapter->num_active_queues;
5075ec8b7d1SJesse Brandeburg int ridx = 0, vidx = 0;
5085ec8b7d1SJesse Brandeburg int q_vectors;
5095ec8b7d1SJesse Brandeburg
5105ec8b7d1SJesse Brandeburg q_vectors = adapter->num_msix_vectors - NONQ_VECS;
5115ec8b7d1SJesse Brandeburg
5125ec8b7d1SJesse Brandeburg for (; ridx < rings_remaining; ridx++) {
5135ec8b7d1SJesse Brandeburg iavf_map_vector_to_rxq(adapter, vidx, ridx);
5145ec8b7d1SJesse Brandeburg iavf_map_vector_to_txq(adapter, vidx, ridx);
5155ec8b7d1SJesse Brandeburg
5165ec8b7d1SJesse Brandeburg /* In the case where we have more queues than vectors, continue
5175ec8b7d1SJesse Brandeburg * round-robin on vectors until all queues are mapped.
5185ec8b7d1SJesse Brandeburg */
5195ec8b7d1SJesse Brandeburg if (++vidx >= q_vectors)
5205ec8b7d1SJesse Brandeburg vidx = 0;
5215ec8b7d1SJesse Brandeburg }
5225ec8b7d1SJesse Brandeburg
5235ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
5245ec8b7d1SJesse Brandeburg }
5255ec8b7d1SJesse Brandeburg
/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	/* recover the owning q_vector from the embedded notifier */
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
5425ec8b7d1SJesse Brandeburg
/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.  Intentionally empty: the notifier is embedded in
 * the q_vector, so there is nothing separate to free.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}
5525ec8b7d1SJesse Brandeburg
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel. Vector 0 (admin queue) is not touched
 * here; it is handled separately by iavf_request_misc_irq().
 *
 * Returns 0 on success, or the request_irq() error after unwinding
 * (notifier, hint and IRQ released) every vector requested so far.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	/* Mask everything while handlers are being installed */
	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		/* +NONQ_VECS: entry 0 is reserved for the misc/AQ vector */
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		/* Name the IRQ after what the vector actually services */
		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_update_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	/* Unwind in reverse: only vectors < current one were requested.
	 * Skipped (unused) vectors are harmless to pass through here since
	 * the kernel tolerates clearing a notifier/hint that was never set.
	 */
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
6275ec8b7d1SJesse Brandeburg
6285ec8b7d1SJesse Brandeburg /**
6295ec8b7d1SJesse Brandeburg * iavf_request_misc_irq - Initialize MSI-X interrupts
6305ec8b7d1SJesse Brandeburg * @adapter: board private structure
6315ec8b7d1SJesse Brandeburg *
6325ec8b7d1SJesse Brandeburg * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
6335ec8b7d1SJesse Brandeburg * vector is only for the admin queue, and stays active even when the netdev
6345ec8b7d1SJesse Brandeburg * is closed.
6355ec8b7d1SJesse Brandeburg **/
iavf_request_misc_irq(struct iavf_adapter * adapter)6365ec8b7d1SJesse Brandeburg static int iavf_request_misc_irq(struct iavf_adapter *adapter)
6375ec8b7d1SJesse Brandeburg {
6385ec8b7d1SJesse Brandeburg struct net_device *netdev = adapter->netdev;
6395ec8b7d1SJesse Brandeburg int err;
6405ec8b7d1SJesse Brandeburg
6415ec8b7d1SJesse Brandeburg snprintf(adapter->misc_vector_name,
6425ec8b7d1SJesse Brandeburg sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
6435ec8b7d1SJesse Brandeburg dev_name(&adapter->pdev->dev));
6445ec8b7d1SJesse Brandeburg err = request_irq(adapter->msix_entries[0].vector,
6455ec8b7d1SJesse Brandeburg &iavf_msix_aq, 0,
6465ec8b7d1SJesse Brandeburg adapter->misc_vector_name, netdev);
6475ec8b7d1SJesse Brandeburg if (err) {
6485ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
6495ec8b7d1SJesse Brandeburg "request_irq for %s failed: %d\n",
6505ec8b7d1SJesse Brandeburg adapter->misc_vector_name, err);
6515ec8b7d1SJesse Brandeburg free_irq(adapter->msix_entries[0].vector, netdev);
6525ec8b7d1SJesse Brandeburg }
6535ec8b7d1SJesse Brandeburg return err;
6545ec8b7d1SJesse Brandeburg }
6555ec8b7d1SJesse Brandeburg
6565ec8b7d1SJesse Brandeburg /**
6575ec8b7d1SJesse Brandeburg * iavf_free_traffic_irqs - Free MSI-X interrupts
6585ec8b7d1SJesse Brandeburg * @adapter: board private structure
6595ec8b7d1SJesse Brandeburg *
6605ec8b7d1SJesse Brandeburg * Frees all MSI-X vectors other than 0.
6615ec8b7d1SJesse Brandeburg **/
iavf_free_traffic_irqs(struct iavf_adapter * adapter)6625ec8b7d1SJesse Brandeburg static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
6635ec8b7d1SJesse Brandeburg {
6645ec8b7d1SJesse Brandeburg int vector, irq_num, q_vectors;
6655ec8b7d1SJesse Brandeburg
6665ec8b7d1SJesse Brandeburg if (!adapter->msix_entries)
6675ec8b7d1SJesse Brandeburg return;
6685ec8b7d1SJesse Brandeburg
6695ec8b7d1SJesse Brandeburg q_vectors = adapter->num_msix_vectors - NONQ_VECS;
6705ec8b7d1SJesse Brandeburg
6715ec8b7d1SJesse Brandeburg for (vector = 0; vector < q_vectors; vector++) {
6725ec8b7d1SJesse Brandeburg irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
6735ec8b7d1SJesse Brandeburg irq_set_affinity_notifier(irq_num, NULL);
6740f9744f4SNitesh Narayan Lal irq_update_affinity_hint(irq_num, NULL);
6755ec8b7d1SJesse Brandeburg free_irq(irq_num, &adapter->q_vectors[vector]);
6765ec8b7d1SJesse Brandeburg }
6775ec8b7d1SJesse Brandeburg }
6785ec8b7d1SJesse Brandeburg
6795ec8b7d1SJesse Brandeburg /**
6805ec8b7d1SJesse Brandeburg * iavf_free_misc_irq - Free MSI-X miscellaneous vector
6815ec8b7d1SJesse Brandeburg * @adapter: board private structure
6825ec8b7d1SJesse Brandeburg *
6835ec8b7d1SJesse Brandeburg * Frees MSI-X vector 0.
6845ec8b7d1SJesse Brandeburg **/
iavf_free_misc_irq(struct iavf_adapter * adapter)6855ec8b7d1SJesse Brandeburg static void iavf_free_misc_irq(struct iavf_adapter *adapter)
6865ec8b7d1SJesse Brandeburg {
6875ec8b7d1SJesse Brandeburg struct net_device *netdev = adapter->netdev;
6885ec8b7d1SJesse Brandeburg
6895ec8b7d1SJesse Brandeburg if (!adapter->msix_entries)
6905ec8b7d1SJesse Brandeburg return;
6915ec8b7d1SJesse Brandeburg
6925ec8b7d1SJesse Brandeburg free_irq(adapter->msix_entries[0].vector, netdev);
6935ec8b7d1SJesse Brandeburg }
6945ec8b7d1SJesse Brandeburg
6955ec8b7d1SJesse Brandeburg /**
6965ec8b7d1SJesse Brandeburg * iavf_configure_tx - Configure Transmit Unit after Reset
6975ec8b7d1SJesse Brandeburg * @adapter: board private structure
6985ec8b7d1SJesse Brandeburg *
6995ec8b7d1SJesse Brandeburg * Configure the Tx unit of the MAC after a reset.
7005ec8b7d1SJesse Brandeburg **/
iavf_configure_tx(struct iavf_adapter * adapter)7015ec8b7d1SJesse Brandeburg static void iavf_configure_tx(struct iavf_adapter *adapter)
7025ec8b7d1SJesse Brandeburg {
703f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
7045ec8b7d1SJesse Brandeburg int i;
7055ec8b7d1SJesse Brandeburg
7065ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++)
707f1cad2ceSJesse Brandeburg adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
7085ec8b7d1SJesse Brandeburg }
7095ec8b7d1SJesse Brandeburg
/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset: choose the Rx buffer
 * size, point every active Rx ring at its tail register, and set the
 * build_skb mode to match the legacy-Rx flag.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		/* build_skb is only used in the non-legacy Rx path */
		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
7535ec8b7d1SJesse Brandeburg
7545ec8b7d1SJesse Brandeburg /**
7555ec8b7d1SJesse Brandeburg * iavf_find_vlan - Search filter list for specific vlan filter
7565ec8b7d1SJesse Brandeburg * @adapter: board private structure
7575ec8b7d1SJesse Brandeburg * @vlan: vlan tag
7585ec8b7d1SJesse Brandeburg *
7595ec8b7d1SJesse Brandeburg * Returns ptr to the filter object or NULL. Must be called while holding the
7605ec8b7d1SJesse Brandeburg * mac_vlan_list_lock.
7615ec8b7d1SJesse Brandeburg **/
7625ec8b7d1SJesse Brandeburg static struct
iavf_find_vlan(struct iavf_adapter * adapter,struct iavf_vlan vlan)76348ccc43eSBrett Creeley iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
76448ccc43eSBrett Creeley struct iavf_vlan vlan)
7655ec8b7d1SJesse Brandeburg {
7665ec8b7d1SJesse Brandeburg struct iavf_vlan_filter *f;
7675ec8b7d1SJesse Brandeburg
7685ec8b7d1SJesse Brandeburg list_for_each_entry(f, &adapter->vlan_filter_list, list) {
76948ccc43eSBrett Creeley if (f->vlan.vid == vlan.vid &&
77048ccc43eSBrett Creeley f->vlan.tpid == vlan.tpid)
7715ec8b7d1SJesse Brandeburg return f;
7725ec8b7d1SJesse Brandeburg }
77348ccc43eSBrett Creeley
7745ec8b7d1SJesse Brandeburg return NULL;
7755ec8b7d1SJesse Brandeburg }
7765ec8b7d1SJesse Brandeburg
7775ec8b7d1SJesse Brandeburg /**
7785ec8b7d1SJesse Brandeburg * iavf_add_vlan - Add a vlan filter to the list
7795ec8b7d1SJesse Brandeburg * @adapter: board private structure
7805ec8b7d1SJesse Brandeburg * @vlan: VLAN tag
7815ec8b7d1SJesse Brandeburg *
7825ec8b7d1SJesse Brandeburg * Returns ptr to the filter object or NULL when no memory available.
7835ec8b7d1SJesse Brandeburg **/
7845ec8b7d1SJesse Brandeburg static struct
iavf_add_vlan(struct iavf_adapter * adapter,struct iavf_vlan vlan)78548ccc43eSBrett Creeley iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
78648ccc43eSBrett Creeley struct iavf_vlan vlan)
7875ec8b7d1SJesse Brandeburg {
7885ec8b7d1SJesse Brandeburg struct iavf_vlan_filter *f = NULL;
7895ec8b7d1SJesse Brandeburg
7905ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
7915ec8b7d1SJesse Brandeburg
7925ec8b7d1SJesse Brandeburg f = iavf_find_vlan(adapter, vlan);
7935ec8b7d1SJesse Brandeburg if (!f) {
794f0a48fb4SAleksandr Loktionov f = kzalloc(sizeof(*f), GFP_ATOMIC);
7955ec8b7d1SJesse Brandeburg if (!f)
7965ec8b7d1SJesse Brandeburg goto clearout;
7975ec8b7d1SJesse Brandeburg
7985ec8b7d1SJesse Brandeburg f->vlan = vlan;
7995ec8b7d1SJesse Brandeburg
800c2417a7bSAkeem G Abodunrin list_add_tail(&f->list, &adapter->vlan_filter_list);
8010c0da0e9SAhmed Zaki f->state = IAVF_VLAN_ADD;
8029c85b7faSAhmed Zaki adapter->num_vlan_filters++;
8035f3d319aSPetr Oros iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
8045ec8b7d1SJesse Brandeburg }
8055ec8b7d1SJesse Brandeburg
8065ec8b7d1SJesse Brandeburg clearout:
8075ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
8085ec8b7d1SJesse Brandeburg return f;
8095ec8b7d1SJesse Brandeburg }
8105ec8b7d1SJesse Brandeburg
8115ec8b7d1SJesse Brandeburg /**
8125ec8b7d1SJesse Brandeburg * iavf_del_vlan - Remove a vlan filter from the list
8135ec8b7d1SJesse Brandeburg * @adapter: board private structure
8145ec8b7d1SJesse Brandeburg * @vlan: VLAN tag
8155ec8b7d1SJesse Brandeburg **/
iavf_del_vlan(struct iavf_adapter * adapter,struct iavf_vlan vlan)81648ccc43eSBrett Creeley static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
8175ec8b7d1SJesse Brandeburg {
8185ec8b7d1SJesse Brandeburg struct iavf_vlan_filter *f;
8195ec8b7d1SJesse Brandeburg
8205ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
8215ec8b7d1SJesse Brandeburg
8225ec8b7d1SJesse Brandeburg f = iavf_find_vlan(adapter, vlan);
8235ec8b7d1SJesse Brandeburg if (f) {
8240c0da0e9SAhmed Zaki f->state = IAVF_VLAN_REMOVE;
8255f3d319aSPetr Oros iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
8265ec8b7d1SJesse Brandeburg }
8275ec8b7d1SJesse Brandeburg
8285ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
8295ec8b7d1SJesse Brandeburg }
8305ec8b7d1SJesse Brandeburg
8315ec8b7d1SJesse Brandeburg /**
83242930142SAkeem G Abodunrin * iavf_restore_filters
83342930142SAkeem G Abodunrin * @adapter: board private structure
83442930142SAkeem G Abodunrin *
83542930142SAkeem G Abodunrin * Restore existing non MAC filters when VF netdev comes back up
83642930142SAkeem G Abodunrin **/
iavf_restore_filters(struct iavf_adapter * adapter)83742930142SAkeem G Abodunrin static void iavf_restore_filters(struct iavf_adapter *adapter)
83842930142SAkeem G Abodunrin {
8399c85b7faSAhmed Zaki struct iavf_vlan_filter *f;
84042930142SAkeem G Abodunrin
8415951a2b9SBrett Creeley /* re-add all VLAN filters */
8429c85b7faSAhmed Zaki spin_lock_bh(&adapter->mac_vlan_list_lock);
84348ccc43eSBrett Creeley
8449c85b7faSAhmed Zaki list_for_each_entry(f, &adapter->vlan_filter_list, list) {
8459c85b7faSAhmed Zaki if (f->state == IAVF_VLAN_INACTIVE)
8469c85b7faSAhmed Zaki f->state = IAVF_VLAN_ADD;
8479c85b7faSAhmed Zaki }
8489c85b7faSAhmed Zaki
8499c85b7faSAhmed Zaki spin_unlock_bh(&adapter->mac_vlan_list_lock);
8509c85b7faSAhmed Zaki adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
85142930142SAkeem G Abodunrin }
85242930142SAkeem G Abodunrin
/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 *
 * Returns the cached count of VLAN filters currently tracked by the
 * driver (maintained by iavf_add_vlan()/filter teardown).
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return adapter->num_vlan_filters;
}
86192fc5085SBrett Creeley
86292fc5085SBrett Creeley /**
86392fc5085SBrett Creeley * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
86492fc5085SBrett Creeley * @adapter: board private structure
86592fc5085SBrett Creeley *
86692fc5085SBrett Creeley * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
86792fc5085SBrett Creeley * do not impose a limit as that maintains current behavior and for
86892fc5085SBrett Creeley * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
86992fc5085SBrett Creeley **/
iavf_get_max_vlans_allowed(struct iavf_adapter * adapter)87092fc5085SBrett Creeley static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
87192fc5085SBrett Creeley {
87292fc5085SBrett Creeley /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
87392fc5085SBrett Creeley * never been a limit on the VF driver side
87492fc5085SBrett Creeley */
87592fc5085SBrett Creeley if (VLAN_ALLOWED(adapter))
87692fc5085SBrett Creeley return VLAN_N_VID;
87792fc5085SBrett Creeley else if (VLAN_V2_ALLOWED(adapter))
87892fc5085SBrett Creeley return adapter->vlan_v2_caps.filtering.max_filters;
87992fc5085SBrett Creeley
88092fc5085SBrett Creeley return 0;
88192fc5085SBrett Creeley }
88292fc5085SBrett Creeley
88392fc5085SBrett Creeley /**
88492fc5085SBrett Creeley * iavf_max_vlans_added - check if maximum VLANs allowed already exist
88592fc5085SBrett Creeley * @adapter: board private structure
88692fc5085SBrett Creeley **/
iavf_max_vlans_added(struct iavf_adapter * adapter)88792fc5085SBrett Creeley static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
88892fc5085SBrett Creeley {
88992fc5085SBrett Creeley if (iavf_get_num_vlans_added(adapter) <
89092fc5085SBrett Creeley iavf_get_max_vlans_allowed(adapter))
89192fc5085SBrett Creeley return false;
89292fc5085SBrett Creeley
89392fc5085SBrett Creeley return true;
89492fc5085SBrett Creeley }
89592fc5085SBrett Creeley
89692fc5085SBrett Creeley /**
8975ec8b7d1SJesse Brandeburg * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
8985ec8b7d1SJesse Brandeburg * @netdev: network device struct
8995ec8b7d1SJesse Brandeburg * @proto: unused protocol data
9005ec8b7d1SJesse Brandeburg * @vid: VLAN tag
9015ec8b7d1SJesse Brandeburg **/
iavf_vlan_rx_add_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)9025ec8b7d1SJesse Brandeburg static int iavf_vlan_rx_add_vid(struct net_device *netdev,
9035ec8b7d1SJesse Brandeburg __always_unused __be16 proto, u16 vid)
9045ec8b7d1SJesse Brandeburg {
9055ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
9065ec8b7d1SJesse Brandeburg
907964290ffSAhmed Zaki /* Do not track VLAN 0 filter, always added by the PF on VF init */
908964290ffSAhmed Zaki if (!vid)
909964290ffSAhmed Zaki return 0;
910964290ffSAhmed Zaki
91148ccc43eSBrett Creeley if (!VLAN_FILTERING_ALLOWED(adapter))
9125ec8b7d1SJesse Brandeburg return -EIO;
91342930142SAkeem G Abodunrin
91492fc5085SBrett Creeley if (iavf_max_vlans_added(adapter)) {
91592fc5085SBrett Creeley netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
91692fc5085SBrett Creeley iavf_get_max_vlans_allowed(adapter));
91792fc5085SBrett Creeley return -EIO;
91892fc5085SBrett Creeley }
91992fc5085SBrett Creeley
92048ccc43eSBrett Creeley if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
9215ec8b7d1SJesse Brandeburg return -ENOMEM;
92242930142SAkeem G Abodunrin
9235ec8b7d1SJesse Brandeburg return 0;
9245ec8b7d1SJesse Brandeburg }
9255ec8b7d1SJesse Brandeburg
9265ec8b7d1SJesse Brandeburg /**
9275ec8b7d1SJesse Brandeburg * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
9285ec8b7d1SJesse Brandeburg * @netdev: network device struct
9295ec8b7d1SJesse Brandeburg * @proto: unused protocol data
9305ec8b7d1SJesse Brandeburg * @vid: VLAN tag
9315ec8b7d1SJesse Brandeburg **/
iavf_vlan_rx_kill_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)9325ec8b7d1SJesse Brandeburg static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
9335ec8b7d1SJesse Brandeburg __always_unused __be16 proto, u16 vid)
9345ec8b7d1SJesse Brandeburg {
9355ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
9365ec8b7d1SJesse Brandeburg
937964290ffSAhmed Zaki /* We do not track VLAN 0 filter */
938964290ffSAhmed Zaki if (!vid)
939964290ffSAhmed Zaki return 0;
940964290ffSAhmed Zaki
94148ccc43eSBrett Creeley iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
94242930142SAkeem G Abodunrin return 0;
9435ec8b7d1SJesse Brandeburg }
9445ec8b7d1SJesse Brandeburg
9455ec8b7d1SJesse Brandeburg /**
9465ec8b7d1SJesse Brandeburg * iavf_find_filter - Search filter list for specific mac filter
9475ec8b7d1SJesse Brandeburg * @adapter: board private structure
9485ec8b7d1SJesse Brandeburg * @macaddr: the MAC address
9495ec8b7d1SJesse Brandeburg *
9505ec8b7d1SJesse Brandeburg * Returns ptr to the filter object or NULL. Must be called while holding the
9515ec8b7d1SJesse Brandeburg * mac_vlan_list_lock.
9525ec8b7d1SJesse Brandeburg **/
9535ec8b7d1SJesse Brandeburg static struct
iavf_find_filter(struct iavf_adapter * adapter,const u8 * macaddr)9545ec8b7d1SJesse Brandeburg iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
9555ec8b7d1SJesse Brandeburg const u8 *macaddr)
9565ec8b7d1SJesse Brandeburg {
9575ec8b7d1SJesse Brandeburg struct iavf_mac_filter *f;
9585ec8b7d1SJesse Brandeburg
9595ec8b7d1SJesse Brandeburg if (!macaddr)
9605ec8b7d1SJesse Brandeburg return NULL;
9615ec8b7d1SJesse Brandeburg
9625ec8b7d1SJesse Brandeburg list_for_each_entry(f, &adapter->mac_filter_list, list) {
9635ec8b7d1SJesse Brandeburg if (ether_addr_equal(macaddr, f->macaddr))
9645ec8b7d1SJesse Brandeburg return f;
9655ec8b7d1SJesse Brandeburg }
9665ec8b7d1SJesse Brandeburg return NULL;
9675ec8b7d1SJesse Brandeburg }
9685ec8b7d1SJesse Brandeburg
9695ec8b7d1SJesse Brandeburg /**
97056184e01SJesse Brandeburg * iavf_add_filter - Add a mac filter to the filter list
9715ec8b7d1SJesse Brandeburg * @adapter: board private structure
9725ec8b7d1SJesse Brandeburg * @macaddr: the MAC address
9735ec8b7d1SJesse Brandeburg *
9745ec8b7d1SJesse Brandeburg * Returns ptr to the filter object or NULL when no memory available.
9755ec8b7d1SJesse Brandeburg **/
iavf_add_filter(struct iavf_adapter * adapter,const u8 * macaddr)9769e052291SStefan Assmann struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
9775ec8b7d1SJesse Brandeburg const u8 *macaddr)
9785ec8b7d1SJesse Brandeburg {
9795ec8b7d1SJesse Brandeburg struct iavf_mac_filter *f;
9805ec8b7d1SJesse Brandeburg
9815ec8b7d1SJesse Brandeburg if (!macaddr)
9825ec8b7d1SJesse Brandeburg return NULL;
9835ec8b7d1SJesse Brandeburg
9845ec8b7d1SJesse Brandeburg f = iavf_find_filter(adapter, macaddr);
9855ec8b7d1SJesse Brandeburg if (!f) {
9865ec8b7d1SJesse Brandeburg f = kzalloc(sizeof(*f), GFP_ATOMIC);
9875ec8b7d1SJesse Brandeburg if (!f)
9885ec8b7d1SJesse Brandeburg return f;
9895ec8b7d1SJesse Brandeburg
9905ec8b7d1SJesse Brandeburg ether_addr_copy(f->macaddr, macaddr);
9915ec8b7d1SJesse Brandeburg
9925ec8b7d1SJesse Brandeburg list_add_tail(&f->list, &adapter->mac_filter_list);
9935ec8b7d1SJesse Brandeburg f->add = true;
99435a2443dSMateusz Palczewski f->add_handled = false;
9958da80c9dSSylwester Dziedziuch f->is_new_mac = true;
99664560384SMichal Wilczynski f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
9975ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
9985ec8b7d1SJesse Brandeburg } else {
9995ec8b7d1SJesse Brandeburg f->remove = false;
10005ec8b7d1SJesse Brandeburg }
10015ec8b7d1SJesse Brandeburg
10025ec8b7d1SJesse Brandeburg return f;
10035ec8b7d1SJesse Brandeburg }
10045ec8b7d1SJesse Brandeburg
/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 *
 * Note the ordering: the new filter is allocated first so that on
 * allocation failure the old primary is left completely untouched.
 **/
static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
				    const u8 *new_mac)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *new_f;
	struct iavf_mac_filter *old_f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	new_f = iavf_add_filter(adapter, new_mac);
	if (!new_f) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return -ENOMEM;
	}

	/* schedule removal of the previous primary, if still listed */
	old_f = iavf_find_filter(adapter, hw->mac.addr);
	if (old_f) {
		old_f->is_primary = false;
		old_f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	/* Always send the request to add if changing primary MAC,
	 * even if filter is already present on the list
	 */
	new_f->is_primary = true;
	new_f->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	ether_addr_copy(hw->mac.addr, new_mac);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* schedule the watchdog task to immediately process the request */
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
	return 0;
}
10515ec8b7d1SJesse Brandeburg
105235a2443dSMateusz Palczewski /**
105335a2443dSMateusz Palczewski * iavf_is_mac_set_handled - wait for a response to set MAC from PF
105435a2443dSMateusz Palczewski * @netdev: network interface device structure
105535a2443dSMateusz Palczewski * @macaddr: MAC address to set
105635a2443dSMateusz Palczewski *
105735a2443dSMateusz Palczewski * Returns true on success, false on failure
105835a2443dSMateusz Palczewski */
iavf_is_mac_set_handled(struct net_device * netdev,const u8 * macaddr)105935a2443dSMateusz Palczewski static bool iavf_is_mac_set_handled(struct net_device *netdev,
106035a2443dSMateusz Palczewski const u8 *macaddr)
106135a2443dSMateusz Palczewski {
106235a2443dSMateusz Palczewski struct iavf_adapter *adapter = netdev_priv(netdev);
106335a2443dSMateusz Palczewski struct iavf_mac_filter *f;
106435a2443dSMateusz Palczewski bool ret = false;
106535a2443dSMateusz Palczewski
106635a2443dSMateusz Palczewski spin_lock_bh(&adapter->mac_vlan_list_lock);
106735a2443dSMateusz Palczewski
106835a2443dSMateusz Palczewski f = iavf_find_filter(adapter, macaddr);
106935a2443dSMateusz Palczewski
107035a2443dSMateusz Palczewski if (!f || (!f->add && f->add_handled))
107135a2443dSMateusz Palczewski ret = true;
107235a2443dSMateusz Palczewski
107335a2443dSMateusz Palczewski spin_unlock_bh(&adapter->mac_vlan_list_lock);
107435a2443dSMateusz Palczewski
107535a2443dSMateusz Palczewski return ret;
107635a2443dSMateusz Palczewski }
107735a2443dSMateusz Palczewski
/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure (struct sockaddr)
 *
 * Returns 0 on success, negative on failure:
 *  -EADDRNOTAVAIL for an invalid address, -ENOMEM if the filter cannot
 *  be queued, -ERESTARTSYS-style negative if the wait is interrupted,
 *  -EAGAIN on PF-response timeout, -EACCES if the PF rejected the MAC.
 */
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* queue del-old/add-new filter requests toward the PF */
	ret = iavf_replace_primary_mac(adapter, addr->sa_data);

	if (ret)
		return ret;

	/* block until the PF acknowledges (or 2.5 s elapse) */
	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
					       iavf_is_mac_set_handled(netdev, addr->sa_data),
					       msecs_to_jiffies(2500));

	/* If ret < 0 then it means wait was interrupted.
	 * If ret == 0 then it means we got a timeout.
	 * else it means we got response for set MAC from PF,
	 * check if netdev MAC was updated to requested MAC,
	 * if yes then set MAC succeeded otherwise it failed return -EACCES
	 */
	if (ret < 0)
		return ret;

	if (!ret)
		return -EAGAIN;

	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return -EACCES;

	return 0;
}
11205ec8b7d1SJesse Brandeburg
11215ec8b7d1SJesse Brandeburg /**
11225ec8b7d1SJesse Brandeburg * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
11235ec8b7d1SJesse Brandeburg * @netdev: the netdevice
11245ec8b7d1SJesse Brandeburg * @addr: address to add
11255ec8b7d1SJesse Brandeburg *
11265ec8b7d1SJesse Brandeburg * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
11275ec8b7d1SJesse Brandeburg * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
11285ec8b7d1SJesse Brandeburg */
iavf_addr_sync(struct net_device * netdev,const u8 * addr)11295ec8b7d1SJesse Brandeburg static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
11305ec8b7d1SJesse Brandeburg {
11315ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
11325ec8b7d1SJesse Brandeburg
11335ec8b7d1SJesse Brandeburg if (iavf_add_filter(adapter, addr))
11345ec8b7d1SJesse Brandeburg return 0;
11355ec8b7d1SJesse Brandeburg else
11365ec8b7d1SJesse Brandeburg return -ENOMEM;
11375ec8b7d1SJesse Brandeburg }
11385ec8b7d1SJesse Brandeburg
11395ec8b7d1SJesse Brandeburg /**
11405ec8b7d1SJesse Brandeburg * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
11415ec8b7d1SJesse Brandeburg * @netdev: the netdevice
11425ec8b7d1SJesse Brandeburg * @addr: address to add
11435ec8b7d1SJesse Brandeburg *
11445ec8b7d1SJesse Brandeburg * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
11455ec8b7d1SJesse Brandeburg * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
11465ec8b7d1SJesse Brandeburg */
iavf_addr_unsync(struct net_device * netdev,const u8 * addr)11475ec8b7d1SJesse Brandeburg static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
11485ec8b7d1SJesse Brandeburg {
11495ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
11505ec8b7d1SJesse Brandeburg struct iavf_mac_filter *f;
11515ec8b7d1SJesse Brandeburg
11525ec8b7d1SJesse Brandeburg /* Under some circumstances, we might receive a request to delete
11535ec8b7d1SJesse Brandeburg * our own device address from our uc list. Because we store the
11545ec8b7d1SJesse Brandeburg * device address in the VSI's MAC/VLAN filter list, we need to ignore
11555ec8b7d1SJesse Brandeburg * such requests and not delete our device address from this list.
11565ec8b7d1SJesse Brandeburg */
11575ec8b7d1SJesse Brandeburg if (ether_addr_equal(addr, netdev->dev_addr))
11585ec8b7d1SJesse Brandeburg return 0;
11595ec8b7d1SJesse Brandeburg
11605ec8b7d1SJesse Brandeburg f = iavf_find_filter(adapter, addr);
11615ec8b7d1SJesse Brandeburg if (f) {
11625ec8b7d1SJesse Brandeburg f->remove = true;
11635ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
11645ec8b7d1SJesse Brandeburg }
11655ec8b7d1SJesse Brandeburg return 0;
11665ec8b7d1SJesse Brandeburg }
11675ec8b7d1SJesse Brandeburg
11685ec8b7d1SJesse Brandeburg /**
11697e85cf09SBrett Creeley * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
11707e85cf09SBrett Creeley * @adapter: device specific adapter
11717e85cf09SBrett Creeley */
iavf_promiscuous_mode_changed(struct iavf_adapter * adapter)11727e85cf09SBrett Creeley bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
11737e85cf09SBrett Creeley {
11747e85cf09SBrett Creeley return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
11757e85cf09SBrett Creeley (IFF_PROMISC | IFF_ALLMULTI);
11767e85cf09SBrett Creeley }
11777e85cf09SBrett Creeley
/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Synchronizes the kernel's unicast/multicast address lists into the
 * driver's MAC filter list, and flags a promiscuous-mode update when the
 * IFF_PROMISC/IFF_ALLMULTI bits changed.  The resulting requests are sent
 * to the PF later via the aq_required flags, not from this context.
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* mac_vlan_list_lock serves as the address-list hash lock that
	 * __dev_(uc|mc)_sync requires; iavf_addr_(un)sync run under it.
	 */
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* only queue a promisc reconfiguration if the relevant bits moved */
	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
	if (iavf_promiscuous_mode_changed(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
}
11965ec8b7d1SJesse Brandeburg
11975ec8b7d1SJesse Brandeburg /**
11985ec8b7d1SJesse Brandeburg * iavf_napi_enable_all - enable NAPI on all queue vectors
11995ec8b7d1SJesse Brandeburg * @adapter: board private structure
12005ec8b7d1SJesse Brandeburg **/
iavf_napi_enable_all(struct iavf_adapter * adapter)12015ec8b7d1SJesse Brandeburg static void iavf_napi_enable_all(struct iavf_adapter *adapter)
12025ec8b7d1SJesse Brandeburg {
12035ec8b7d1SJesse Brandeburg int q_idx;
120456184e01SJesse Brandeburg struct iavf_q_vector *q_vector;
12055ec8b7d1SJesse Brandeburg int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
12065ec8b7d1SJesse Brandeburg
12075ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < q_vectors; q_idx++) {
12085ec8b7d1SJesse Brandeburg struct napi_struct *napi;
12095ec8b7d1SJesse Brandeburg
12105ec8b7d1SJesse Brandeburg q_vector = &adapter->q_vectors[q_idx];
12115ec8b7d1SJesse Brandeburg napi = &q_vector->napi;
12125ec8b7d1SJesse Brandeburg napi_enable(napi);
12135ec8b7d1SJesse Brandeburg }
12145ec8b7d1SJesse Brandeburg }
12155ec8b7d1SJesse Brandeburg
12165ec8b7d1SJesse Brandeburg /**
12175ec8b7d1SJesse Brandeburg * iavf_napi_disable_all - disable NAPI on all queue vectors
12185ec8b7d1SJesse Brandeburg * @adapter: board private structure
12195ec8b7d1SJesse Brandeburg **/
iavf_napi_disable_all(struct iavf_adapter * adapter)12205ec8b7d1SJesse Brandeburg static void iavf_napi_disable_all(struct iavf_adapter *adapter)
12215ec8b7d1SJesse Brandeburg {
12225ec8b7d1SJesse Brandeburg int q_idx;
122356184e01SJesse Brandeburg struct iavf_q_vector *q_vector;
12245ec8b7d1SJesse Brandeburg int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
12255ec8b7d1SJesse Brandeburg
12265ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < q_vectors; q_idx++) {
12275ec8b7d1SJesse Brandeburg q_vector = &adapter->q_vectors[q_idx];
12285ec8b7d1SJesse Brandeburg napi_disable(&q_vector->napi);
12295ec8b7d1SJesse Brandeburg }
12305ec8b7d1SJesse Brandeburg }
12315ec8b7d1SJesse Brandeburg
/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 *
 * Pushes the current Rx filter state, programs the Tx/Rx rings, queues a
 * CONFIGURE_QUEUES request for the PF, and pre-fills every Rx ring with
 * receive buffers.
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	/* sync uc/mc address lists and promiscuous state first */
	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	/* the queue configuration message is sent to the PF asynchronously */
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	/* populate each Rx ring with as many buffers as it has free slots */
	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}
12535ec8b7d1SJesse Brandeburg
/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Transitions the adapter to __IAVF_RUNNING, enables NAPI, and queues an
 * ENABLE_QUEUES request which the watchdog task delivers to the PF.
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	/* kick the watchdog now (delay 0) so the request goes out promptly */
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
12725ec8b7d1SJesse Brandeburg
12735ec8b7d1SJesse Brandeburg /**
127411c12adcSMichal Jaron * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
127511c12adcSMichal Jaron * yet and mark other to be removed.
127611c12adcSMichal Jaron * @adapter: board private structure
127711c12adcSMichal Jaron **/
iavf_clear_mac_vlan_filters(struct iavf_adapter * adapter)127811c12adcSMichal Jaron static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
127911c12adcSMichal Jaron {
128011c12adcSMichal Jaron struct iavf_vlan_filter *vlf, *vlftmp;
128111c12adcSMichal Jaron struct iavf_mac_filter *f, *ftmp;
128211c12adcSMichal Jaron
128311c12adcSMichal Jaron spin_lock_bh(&adapter->mac_vlan_list_lock);
128411c12adcSMichal Jaron /* clear the sync flag on all filters */
128511c12adcSMichal Jaron __dev_uc_unsync(adapter->netdev, NULL);
128611c12adcSMichal Jaron __dev_mc_unsync(adapter->netdev, NULL);
128711c12adcSMichal Jaron
128811c12adcSMichal Jaron /* remove all MAC filters */
128911c12adcSMichal Jaron list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
129011c12adcSMichal Jaron list) {
129111c12adcSMichal Jaron if (f->add) {
129211c12adcSMichal Jaron list_del(&f->list);
129311c12adcSMichal Jaron kfree(f);
129411c12adcSMichal Jaron } else {
129511c12adcSMichal Jaron f->remove = true;
129611c12adcSMichal Jaron }
129711c12adcSMichal Jaron }
129811c12adcSMichal Jaron
12999c85b7faSAhmed Zaki /* disable all VLAN filters */
130011c12adcSMichal Jaron list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
13019c85b7faSAhmed Zaki list)
13029c85b7faSAhmed Zaki vlf->state = IAVF_VLAN_DISABLE;
13039c85b7faSAhmed Zaki
130411c12adcSMichal Jaron spin_unlock_bh(&adapter->mac_vlan_list_lock);
130511c12adcSMichal Jaron }
130611c12adcSMichal Jaron
130711c12adcSMichal Jaron /**
130811c12adcSMichal Jaron * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
130911c12adcSMichal Jaron * mark other to be removed.
131011c12adcSMichal Jaron * @adapter: board private structure
131111c12adcSMichal Jaron **/
iavf_clear_cloud_filters(struct iavf_adapter * adapter)131211c12adcSMichal Jaron static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
131311c12adcSMichal Jaron {
131411c12adcSMichal Jaron struct iavf_cloud_filter *cf, *cftmp;
131511c12adcSMichal Jaron
131611c12adcSMichal Jaron /* remove all cloud filters */
131711c12adcSMichal Jaron spin_lock_bh(&adapter->cloud_filter_list_lock);
131811c12adcSMichal Jaron list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
131911c12adcSMichal Jaron list) {
132011c12adcSMichal Jaron if (cf->add) {
132111c12adcSMichal Jaron list_del(&cf->list);
132211c12adcSMichal Jaron kfree(cf);
132311c12adcSMichal Jaron adapter->num_cloud_filters--;
132411c12adcSMichal Jaron } else {
132511c12adcSMichal Jaron cf->del = true;
132611c12adcSMichal Jaron }
132711c12adcSMichal Jaron }
132811c12adcSMichal Jaron spin_unlock_bh(&adapter->cloud_filter_list_lock);
132911c12adcSMichal Jaron }
133011c12adcSMichal Jaron
/**
 * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 *
 * Flow Director entries stay on the list; only their state machine is
 * advanced: not-yet-sent adds become INACTIVE, while active or pending
 * adds are flagged for a disable request to the PF.
 **/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			/* Cancel a request, keep filter as inactive */
			fdir->state = IAVF_FDIR_FLTR_INACTIVE;
		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
			   fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
			/* Disable filters which are active or have a pending
			 * request to PF to be added
			 */
			fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);
}
135611c12adcSMichal Jaron
135711c12adcSMichal Jaron /**
135811c12adcSMichal Jaron * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
135911c12adcSMichal Jaron * other to be removed.
136011c12adcSMichal Jaron * @adapter: board private structure
136111c12adcSMichal Jaron **/
iavf_clear_adv_rss_conf(struct iavf_adapter * adapter)136211c12adcSMichal Jaron static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
136311c12adcSMichal Jaron {
136411c12adcSMichal Jaron struct iavf_adv_rss *rss, *rsstmp;
136511c12adcSMichal Jaron
136611c12adcSMichal Jaron /* remove all advance RSS configuration */
136711c12adcSMichal Jaron spin_lock_bh(&adapter->adv_rss_lock);
136811c12adcSMichal Jaron list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
136911c12adcSMichal Jaron list) {
137011c12adcSMichal Jaron if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
137111c12adcSMichal Jaron list_del(&rss->list);
137211c12adcSMichal Jaron kfree(rss);
137311c12adcSMichal Jaron } else {
137411c12adcSMichal Jaron rss->state = IAVF_ADV_RSS_DEL_REQUEST;
137511c12adcSMichal Jaron }
137611c12adcSMichal Jaron }
137711c12adcSMichal Jaron spin_unlock_bh(&adapter->adv_rss_lock);
137811c12adcSMichal Jaron }
137911c12adcSMichal Jaron
/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Stops the data path (carrier, Tx queues, NAPI, IRQs), clears all filter
 * classes, and — unless communication with the PF has failed or the driver
 * is being removed — queues DEL requests plus a DISABLE_QUEUES request for
 * the watchdog task to deliver.
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* already down (or on the way down): nothing to do */
	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	iavf_clear_mac_vlan_filters(adapter);
	iavf_clear_cloud_filters(adapter);
	iavf_clear_fdir_filters(adapter);
	iavf_clear_adv_rss_conf(adapter);

	/* skip PF messaging when the PF is unreachable or we are mid-remove */
	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		if (!list_empty(&adapter->mac_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
		if (!list_empty(&adapter->vlan_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		if (!list_empty(&adapter->cloud_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		if (!list_empty(&adapter->fdir_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		if (!list_empty(&adapter->adv_rss_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	/* kick the watchdog now so the teardown messages go out promptly */
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
14275ec8b7d1SJesse Brandeburg
14285ec8b7d1SJesse Brandeburg /**
14295ec8b7d1SJesse Brandeburg * iavf_acquire_msix_vectors - Setup the MSIX capability
14305ec8b7d1SJesse Brandeburg * @adapter: board private structure
14315ec8b7d1SJesse Brandeburg * @vectors: number of vectors to request
14325ec8b7d1SJesse Brandeburg *
14335ec8b7d1SJesse Brandeburg * Work with the OS to set up the MSIX vectors needed.
14345ec8b7d1SJesse Brandeburg *
14355ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
14365ec8b7d1SJesse Brandeburg **/
14375ec8b7d1SJesse Brandeburg static int
iavf_acquire_msix_vectors(struct iavf_adapter * adapter,int vectors)14385ec8b7d1SJesse Brandeburg iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
14395ec8b7d1SJesse Brandeburg {
14405ec8b7d1SJesse Brandeburg int err, vector_threshold;
14415ec8b7d1SJesse Brandeburg
14425ec8b7d1SJesse Brandeburg /* We'll want at least 3 (vector_threshold):
14435ec8b7d1SJesse Brandeburg * 0) Other (Admin Queue and link, mostly)
14445ec8b7d1SJesse Brandeburg * 1) TxQ[0] Cleanup
14455ec8b7d1SJesse Brandeburg * 2) RxQ[0] Cleanup
14465ec8b7d1SJesse Brandeburg */
14475ec8b7d1SJesse Brandeburg vector_threshold = MIN_MSIX_COUNT;
14485ec8b7d1SJesse Brandeburg
14495ec8b7d1SJesse Brandeburg /* The more we get, the more we will assign to Tx/Rx Cleanup
14505ec8b7d1SJesse Brandeburg * for the separate queues...where Rx Cleanup >= Tx Cleanup.
14515ec8b7d1SJesse Brandeburg * Right now, we simply care about how many we'll get; we'll
14525ec8b7d1SJesse Brandeburg * set them up later while requesting irq's.
14535ec8b7d1SJesse Brandeburg */
14545ec8b7d1SJesse Brandeburg err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
14555ec8b7d1SJesse Brandeburg vector_threshold, vectors);
14565ec8b7d1SJesse Brandeburg if (err < 0) {
14575ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
14585ec8b7d1SJesse Brandeburg kfree(adapter->msix_entries);
14595ec8b7d1SJesse Brandeburg adapter->msix_entries = NULL;
14605ec8b7d1SJesse Brandeburg return err;
14615ec8b7d1SJesse Brandeburg }
14625ec8b7d1SJesse Brandeburg
14635ec8b7d1SJesse Brandeburg /* Adjust for only the vectors we'll use, which is minimum
14645ec8b7d1SJesse Brandeburg * of max_msix_q_vectors + NONQ_VECS, or the number of
14655ec8b7d1SJesse Brandeburg * vectors we were allocated.
14665ec8b7d1SJesse Brandeburg */
14675ec8b7d1SJesse Brandeburg adapter->num_msix_vectors = err;
14685ec8b7d1SJesse Brandeburg return 0;
14695ec8b7d1SJesse Brandeburg }
14705ec8b7d1SJesse Brandeburg
14715ec8b7d1SJesse Brandeburg /**
14725ec8b7d1SJesse Brandeburg * iavf_free_queues - Free memory for all rings
14735ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
14745ec8b7d1SJesse Brandeburg *
14755ec8b7d1SJesse Brandeburg * Free all of the memory associated with queue pairs.
14765ec8b7d1SJesse Brandeburg **/
iavf_free_queues(struct iavf_adapter * adapter)14775ec8b7d1SJesse Brandeburg static void iavf_free_queues(struct iavf_adapter *adapter)
14785ec8b7d1SJesse Brandeburg {
14795ec8b7d1SJesse Brandeburg if (!adapter->vsi_res)
14805ec8b7d1SJesse Brandeburg return;
14815ec8b7d1SJesse Brandeburg adapter->num_active_queues = 0;
14825ec8b7d1SJesse Brandeburg kfree(adapter->tx_rings);
14835ec8b7d1SJesse Brandeburg adapter->tx_rings = NULL;
14845ec8b7d1SJesse Brandeburg kfree(adapter->rx_rings);
14855ec8b7d1SJesse Brandeburg adapter->rx_rings = NULL;
14865ec8b7d1SJesse Brandeburg }
14875ec8b7d1SJesse Brandeburg
14885ec8b7d1SJesse Brandeburg /**
1489ccd219d2SBrett Creeley * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1490ccd219d2SBrett Creeley * @adapter: board private structure
1491ccd219d2SBrett Creeley *
1492ccd219d2SBrett Creeley * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1493ccd219d2SBrett Creeley * stripped in certain descriptor fields. Instead of checking the offload
1494ccd219d2SBrett Creeley * capability bits in the hot path, cache the location the ring specific
1495ccd219d2SBrett Creeley * flags.
1496ccd219d2SBrett Creeley */
iavf_set_queue_vlan_tag_loc(struct iavf_adapter * adapter)1497ccd219d2SBrett Creeley void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1498ccd219d2SBrett Creeley {
1499ccd219d2SBrett Creeley int i;
1500ccd219d2SBrett Creeley
1501ccd219d2SBrett Creeley for (i = 0; i < adapter->num_active_queues; i++) {
1502ccd219d2SBrett Creeley struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1503ccd219d2SBrett Creeley struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1504ccd219d2SBrett Creeley
1505ccd219d2SBrett Creeley /* prevent multiple L2TAG bits being set after VFR */
1506ccd219d2SBrett Creeley tx_ring->flags &=
1507ccd219d2SBrett Creeley ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1508ccd219d2SBrett Creeley IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1509ccd219d2SBrett Creeley rx_ring->flags &=
1510ccd219d2SBrett Creeley ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1511ccd219d2SBrett Creeley IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1512ccd219d2SBrett Creeley
1513ccd219d2SBrett Creeley if (VLAN_ALLOWED(adapter)) {
1514ccd219d2SBrett Creeley tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1515ccd219d2SBrett Creeley rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1516ccd219d2SBrett Creeley } else if (VLAN_V2_ALLOWED(adapter)) {
1517ccd219d2SBrett Creeley struct virtchnl_vlan_supported_caps *stripping_support;
1518ccd219d2SBrett Creeley struct virtchnl_vlan_supported_caps *insertion_support;
1519ccd219d2SBrett Creeley
1520ccd219d2SBrett Creeley stripping_support =
1521ccd219d2SBrett Creeley &adapter->vlan_v2_caps.offloads.stripping_support;
1522ccd219d2SBrett Creeley insertion_support =
1523ccd219d2SBrett Creeley &adapter->vlan_v2_caps.offloads.insertion_support;
1524ccd219d2SBrett Creeley
1525ccd219d2SBrett Creeley if (stripping_support->outer) {
1526ccd219d2SBrett Creeley if (stripping_support->outer &
1527ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1528ccd219d2SBrett Creeley rx_ring->flags |=
1529ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1530ccd219d2SBrett Creeley else if (stripping_support->outer &
1531ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1532ccd219d2SBrett Creeley rx_ring->flags |=
1533ccd219d2SBrett Creeley IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1534ccd219d2SBrett Creeley } else if (stripping_support->inner) {
1535ccd219d2SBrett Creeley if (stripping_support->inner &
1536ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1537ccd219d2SBrett Creeley rx_ring->flags |=
1538ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1539ccd219d2SBrett Creeley else if (stripping_support->inner &
1540ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1541ccd219d2SBrett Creeley rx_ring->flags |=
1542ccd219d2SBrett Creeley IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1543ccd219d2SBrett Creeley }
1544ccd219d2SBrett Creeley
1545ccd219d2SBrett Creeley if (insertion_support->outer) {
1546ccd219d2SBrett Creeley if (insertion_support->outer &
1547ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1548ccd219d2SBrett Creeley tx_ring->flags |=
1549ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1550ccd219d2SBrett Creeley else if (insertion_support->outer &
1551ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1552ccd219d2SBrett Creeley tx_ring->flags |=
1553ccd219d2SBrett Creeley IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1554ccd219d2SBrett Creeley } else if (insertion_support->inner) {
1555ccd219d2SBrett Creeley if (insertion_support->inner &
1556ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1557ccd219d2SBrett Creeley tx_ring->flags |=
1558ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1559ccd219d2SBrett Creeley else if (insertion_support->inner &
1560ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1561ccd219d2SBrett Creeley tx_ring->flags |=
1562ccd219d2SBrett Creeley IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1563ccd219d2SBrett Creeley }
1564ccd219d2SBrett Creeley }
1565ccd219d2SBrett Creeley }
1566ccd219d2SBrett Creeley }
1567ccd219d2SBrett Creeley
1568ccd219d2SBrett Creeley /**
15695ec8b7d1SJesse Brandeburg * iavf_alloc_queues - Allocate memory for all rings
15705ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
15715ec8b7d1SJesse Brandeburg *
15725ec8b7d1SJesse Brandeburg * We allocate one ring per queue at run-time since we don't know the
15735ec8b7d1SJesse Brandeburg * number of queues at compile-time. The polling_netdev array is
15745ec8b7d1SJesse Brandeburg * intended for Multiqueue, but should work fine with a single queue.
15755ec8b7d1SJesse Brandeburg **/
iavf_alloc_queues(struct iavf_adapter * adapter)15765ec8b7d1SJesse Brandeburg static int iavf_alloc_queues(struct iavf_adapter *adapter)
15775ec8b7d1SJesse Brandeburg {
15785ec8b7d1SJesse Brandeburg int i, num_active_queues;
15795ec8b7d1SJesse Brandeburg
15805ec8b7d1SJesse Brandeburg /* If we're in reset reallocating queues we don't actually know yet for
15815ec8b7d1SJesse Brandeburg * certain the PF gave us the number of queues we asked for but we'll
15825ec8b7d1SJesse Brandeburg * assume it did. Once basic reset is finished we'll confirm once we
15835ec8b7d1SJesse Brandeburg * start negotiating config with PF.
15845ec8b7d1SJesse Brandeburg */
15855ec8b7d1SJesse Brandeburg if (adapter->num_req_queues)
15865ec8b7d1SJesse Brandeburg num_active_queues = adapter->num_req_queues;
15875ec8b7d1SJesse Brandeburg else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
15885ec8b7d1SJesse Brandeburg adapter->num_tc)
15895ec8b7d1SJesse Brandeburg num_active_queues = adapter->ch_config.total_qps;
15905ec8b7d1SJesse Brandeburg else
15915ec8b7d1SJesse Brandeburg num_active_queues = min_t(int,
15925ec8b7d1SJesse Brandeburg adapter->vsi_res->num_queue_pairs,
15935ec8b7d1SJesse Brandeburg (int)(num_online_cpus()));
15945ec8b7d1SJesse Brandeburg
15955ec8b7d1SJesse Brandeburg
15965ec8b7d1SJesse Brandeburg adapter->tx_rings = kcalloc(num_active_queues,
159756184e01SJesse Brandeburg sizeof(struct iavf_ring), GFP_KERNEL);
15985ec8b7d1SJesse Brandeburg if (!adapter->tx_rings)
15995ec8b7d1SJesse Brandeburg goto err_out;
16005ec8b7d1SJesse Brandeburg adapter->rx_rings = kcalloc(num_active_queues,
160156184e01SJesse Brandeburg sizeof(struct iavf_ring), GFP_KERNEL);
16025ec8b7d1SJesse Brandeburg if (!adapter->rx_rings)
16035ec8b7d1SJesse Brandeburg goto err_out;
16045ec8b7d1SJesse Brandeburg
16055ec8b7d1SJesse Brandeburg for (i = 0; i < num_active_queues; i++) {
160656184e01SJesse Brandeburg struct iavf_ring *tx_ring;
160756184e01SJesse Brandeburg struct iavf_ring *rx_ring;
16085ec8b7d1SJesse Brandeburg
16095ec8b7d1SJesse Brandeburg tx_ring = &adapter->tx_rings[i];
16105ec8b7d1SJesse Brandeburg
16115ec8b7d1SJesse Brandeburg tx_ring->queue_index = i;
16125ec8b7d1SJesse Brandeburg tx_ring->netdev = adapter->netdev;
16135ec8b7d1SJesse Brandeburg tx_ring->dev = &adapter->pdev->dev;
16145ec8b7d1SJesse Brandeburg tx_ring->count = adapter->tx_desc_count;
161556184e01SJesse Brandeburg tx_ring->itr_setting = IAVF_ITR_TX_DEF;
16165ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
161756184e01SJesse Brandeburg tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
16185ec8b7d1SJesse Brandeburg
16195ec8b7d1SJesse Brandeburg rx_ring = &adapter->rx_rings[i];
16205ec8b7d1SJesse Brandeburg rx_ring->queue_index = i;
16215ec8b7d1SJesse Brandeburg rx_ring->netdev = adapter->netdev;
16225ec8b7d1SJesse Brandeburg rx_ring->dev = &adapter->pdev->dev;
16235ec8b7d1SJesse Brandeburg rx_ring->count = adapter->rx_desc_count;
162456184e01SJesse Brandeburg rx_ring->itr_setting = IAVF_ITR_RX_DEF;
16255ec8b7d1SJesse Brandeburg }
16265ec8b7d1SJesse Brandeburg
16275ec8b7d1SJesse Brandeburg adapter->num_active_queues = num_active_queues;
16285ec8b7d1SJesse Brandeburg
1629ccd219d2SBrett Creeley iavf_set_queue_vlan_tag_loc(adapter);
1630ccd219d2SBrett Creeley
16315ec8b7d1SJesse Brandeburg return 0;
16325ec8b7d1SJesse Brandeburg
16335ec8b7d1SJesse Brandeburg err_out:
16345ec8b7d1SJesse Brandeburg iavf_free_queues(adapter);
16355ec8b7d1SJesse Brandeburg return -ENOMEM;
16365ec8b7d1SJesse Brandeburg }
16375ec8b7d1SJesse Brandeburg
/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 *
 * Returns 0 on success; -EIO without VSI resources, -ENOMEM if the
 * msix_entries table cannot be allocated, or the error from
 * iavf_acquire_msix_vectors().
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	/* pci_enable_msix_range() expects pre-initialized entry indices */
	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);
	if (!err)
		iavf_schedule_finish_config(adapter);

out:
	return err;
}
16825ec8b7d1SJesse Brandeburg
16835ec8b7d1SJesse Brandeburg /**
168456184e01SJesse Brandeburg * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
16855ec8b7d1SJesse Brandeburg * @adapter: board private structure
16865ec8b7d1SJesse Brandeburg *
16875ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
16885ec8b7d1SJesse Brandeburg **/
iavf_config_rss_aq(struct iavf_adapter * adapter)16895ec8b7d1SJesse Brandeburg static int iavf_config_rss_aq(struct iavf_adapter *adapter)
16905ec8b7d1SJesse Brandeburg {
16917af36e32SAlice Michael struct iavf_aqc_get_set_rss_key_data *rss_key =
16927af36e32SAlice Michael (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1693f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
1694bae569d0SMateusz Palczewski enum iavf_status status;
16955ec8b7d1SJesse Brandeburg
16965ec8b7d1SJesse Brandeburg if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
16975ec8b7d1SJesse Brandeburg /* bail because we already have a command pending */
16985ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
16995ec8b7d1SJesse Brandeburg adapter->current_op);
17005ec8b7d1SJesse Brandeburg return -EBUSY;
17015ec8b7d1SJesse Brandeburg }
17025ec8b7d1SJesse Brandeburg
1703bae569d0SMateusz Palczewski status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1704bae569d0SMateusz Palczewski if (status) {
17055ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1706bae569d0SMateusz Palczewski iavf_stat_str(hw, status),
17075ec8b7d1SJesse Brandeburg iavf_aq_str(hw, hw->aq.asq_last_status));
1708bae569d0SMateusz Palczewski return iavf_status_to_errno(status);
17095ec8b7d1SJesse Brandeburg
17105ec8b7d1SJesse Brandeburg }
17115ec8b7d1SJesse Brandeburg
1712bae569d0SMateusz Palczewski status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
17135ec8b7d1SJesse Brandeburg adapter->rss_lut, adapter->rss_lut_size);
1714bae569d0SMateusz Palczewski if (status) {
17155ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1716bae569d0SMateusz Palczewski iavf_stat_str(hw, status),
17175ec8b7d1SJesse Brandeburg iavf_aq_str(hw, hw->aq.asq_last_status));
1718bae569d0SMateusz Palczewski return iavf_status_to_errno(status);
17195ec8b7d1SJesse Brandeburg }
17205ec8b7d1SJesse Brandeburg
1721bae569d0SMateusz Palczewski return 0;
17225ec8b7d1SJesse Brandeburg
17235ec8b7d1SJesse Brandeburg }
17245ec8b7d1SJesse Brandeburg
17255ec8b7d1SJesse Brandeburg /**
17265ec8b7d1SJesse Brandeburg * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
17275ec8b7d1SJesse Brandeburg * @adapter: board private structure
17285ec8b7d1SJesse Brandeburg *
17295ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
17305ec8b7d1SJesse Brandeburg **/
iavf_config_rss_reg(struct iavf_adapter * adapter)17315ec8b7d1SJesse Brandeburg static int iavf_config_rss_reg(struct iavf_adapter *adapter)
17325ec8b7d1SJesse Brandeburg {
1733f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
17345ec8b7d1SJesse Brandeburg u32 *dw;
17355ec8b7d1SJesse Brandeburg u16 i;
17365ec8b7d1SJesse Brandeburg
17375ec8b7d1SJesse Brandeburg dw = (u32 *)adapter->rss_key;
17385ec8b7d1SJesse Brandeburg for (i = 0; i <= adapter->rss_key_size / 4; i++)
1739f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
17405ec8b7d1SJesse Brandeburg
17415ec8b7d1SJesse Brandeburg dw = (u32 *)adapter->rss_lut;
17425ec8b7d1SJesse Brandeburg for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1743f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
17445ec8b7d1SJesse Brandeburg
1745f1cad2ceSJesse Brandeburg iavf_flush(hw);
17465ec8b7d1SJesse Brandeburg
17475ec8b7d1SJesse Brandeburg return 0;
17485ec8b7d1SJesse Brandeburg }
17495ec8b7d1SJesse Brandeburg
17505ec8b7d1SJesse Brandeburg /**
17515ec8b7d1SJesse Brandeburg * iavf_config_rss - Configure RSS keys and lut
17525ec8b7d1SJesse Brandeburg * @adapter: board private structure
17535ec8b7d1SJesse Brandeburg *
17545ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
17555ec8b7d1SJesse Brandeburg **/
iavf_config_rss(struct iavf_adapter * adapter)17565ec8b7d1SJesse Brandeburg int iavf_config_rss(struct iavf_adapter *adapter)
17575ec8b7d1SJesse Brandeburg {
17585ec8b7d1SJesse Brandeburg
17595ec8b7d1SJesse Brandeburg if (RSS_PF(adapter)) {
17605ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
17615ec8b7d1SJesse Brandeburg IAVF_FLAG_AQ_SET_RSS_KEY;
17625ec8b7d1SJesse Brandeburg return 0;
17635ec8b7d1SJesse Brandeburg } else if (RSS_AQ(adapter)) {
17645ec8b7d1SJesse Brandeburg return iavf_config_rss_aq(adapter);
17655ec8b7d1SJesse Brandeburg } else {
17665ec8b7d1SJesse Brandeburg return iavf_config_rss_reg(adapter);
17675ec8b7d1SJesse Brandeburg }
17685ec8b7d1SJesse Brandeburg }
17695ec8b7d1SJesse Brandeburg
17705ec8b7d1SJesse Brandeburg /**
17715ec8b7d1SJesse Brandeburg * iavf_fill_rss_lut - Fill the lut with default values
17725ec8b7d1SJesse Brandeburg * @adapter: board private structure
17735ec8b7d1SJesse Brandeburg **/
iavf_fill_rss_lut(struct iavf_adapter * adapter)17745ec8b7d1SJesse Brandeburg static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
17755ec8b7d1SJesse Brandeburg {
17765ec8b7d1SJesse Brandeburg u16 i;
17775ec8b7d1SJesse Brandeburg
17785ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->rss_lut_size; i++)
17795ec8b7d1SJesse Brandeburg adapter->rss_lut[i] = i % adapter->num_active_queues;
17805ec8b7d1SJesse Brandeburg }
17815ec8b7d1SJesse Brandeburg
17825ec8b7d1SJesse Brandeburg /**
17835ec8b7d1SJesse Brandeburg * iavf_init_rss - Prepare for RSS
17845ec8b7d1SJesse Brandeburg * @adapter: board private structure
17855ec8b7d1SJesse Brandeburg *
17865ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
17875ec8b7d1SJesse Brandeburg **/
iavf_init_rss(struct iavf_adapter * adapter)17885ec8b7d1SJesse Brandeburg static int iavf_init_rss(struct iavf_adapter *adapter)
17895ec8b7d1SJesse Brandeburg {
1790f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
17915ec8b7d1SJesse Brandeburg
17925ec8b7d1SJesse Brandeburg if (!RSS_PF(adapter)) {
17935ec8b7d1SJesse Brandeburg /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
17945ec8b7d1SJesse Brandeburg if (adapter->vf_res->vf_cap_flags &
17955ec8b7d1SJesse Brandeburg VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
179656184e01SJesse Brandeburg adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
17975ec8b7d1SJesse Brandeburg else
179856184e01SJesse Brandeburg adapter->hena = IAVF_DEFAULT_RSS_HENA;
17995ec8b7d1SJesse Brandeburg
1800f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1801f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
18025ec8b7d1SJesse Brandeburg }
18035ec8b7d1SJesse Brandeburg
18045ec8b7d1SJesse Brandeburg iavf_fill_rss_lut(adapter);
18055ec8b7d1SJesse Brandeburg netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
18065ec8b7d1SJesse Brandeburg
1807c3fec56eSMinghao Chi return iavf_config_rss(adapter);
18085ec8b7d1SJesse Brandeburg }
18095ec8b7d1SJesse Brandeburg
18105ec8b7d1SJesse Brandeburg /**
18115ec8b7d1SJesse Brandeburg * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
18125ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
18135ec8b7d1SJesse Brandeburg *
18145ec8b7d1SJesse Brandeburg * We allocate one q_vector per queue interrupt. If allocation fails we
18155ec8b7d1SJesse Brandeburg * return -ENOMEM.
18165ec8b7d1SJesse Brandeburg **/
iavf_alloc_q_vectors(struct iavf_adapter * adapter)18175ec8b7d1SJesse Brandeburg static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
18185ec8b7d1SJesse Brandeburg {
18195ec8b7d1SJesse Brandeburg int q_idx = 0, num_q_vectors;
182056184e01SJesse Brandeburg struct iavf_q_vector *q_vector;
18215ec8b7d1SJesse Brandeburg
18225ec8b7d1SJesse Brandeburg num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
18235ec8b7d1SJesse Brandeburg adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
18245ec8b7d1SJesse Brandeburg GFP_KERNEL);
18255ec8b7d1SJesse Brandeburg if (!adapter->q_vectors)
18265ec8b7d1SJesse Brandeburg return -ENOMEM;
18275ec8b7d1SJesse Brandeburg
18285ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
18295ec8b7d1SJesse Brandeburg q_vector = &adapter->q_vectors[q_idx];
18305ec8b7d1SJesse Brandeburg q_vector->adapter = adapter;
18315ec8b7d1SJesse Brandeburg q_vector->vsi = &adapter->vsi;
18325ec8b7d1SJesse Brandeburg q_vector->v_idx = q_idx;
18335ec8b7d1SJesse Brandeburg q_vector->reg_idx = q_idx;
18345ec8b7d1SJesse Brandeburg cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
18355ec8b7d1SJesse Brandeburg netif_napi_add(adapter->netdev, &q_vector->napi,
1836b48b89f9SJakub Kicinski iavf_napi_poll);
18375ec8b7d1SJesse Brandeburg }
18385ec8b7d1SJesse Brandeburg
18395ec8b7d1SJesse Brandeburg return 0;
18405ec8b7d1SJesse Brandeburg }
18415ec8b7d1SJesse Brandeburg
18425ec8b7d1SJesse Brandeburg /**
18435ec8b7d1SJesse Brandeburg * iavf_free_q_vectors - Free memory allocated for interrupt vectors
18445ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
18455ec8b7d1SJesse Brandeburg *
18465ec8b7d1SJesse Brandeburg * This function frees the memory allocated to the q_vectors. In addition if
18475ec8b7d1SJesse Brandeburg * NAPI is enabled it will delete any references to the NAPI struct prior
18485ec8b7d1SJesse Brandeburg * to freeing the q_vector.
18495ec8b7d1SJesse Brandeburg **/
iavf_free_q_vectors(struct iavf_adapter * adapter)18505ec8b7d1SJesse Brandeburg static void iavf_free_q_vectors(struct iavf_adapter *adapter)
18515ec8b7d1SJesse Brandeburg {
18525ec8b7d1SJesse Brandeburg int q_idx, num_q_vectors;
18535ec8b7d1SJesse Brandeburg
18545ec8b7d1SJesse Brandeburg if (!adapter->q_vectors)
18555ec8b7d1SJesse Brandeburg return;
18565ec8b7d1SJesse Brandeburg
18575ec8b7d1SJesse Brandeburg num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
18585ec8b7d1SJesse Brandeburg
18595ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
186056184e01SJesse Brandeburg struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
18615ec8b7d1SJesse Brandeburg
18625ec8b7d1SJesse Brandeburg netif_napi_del(&q_vector->napi);
18635ec8b7d1SJesse Brandeburg }
18645ec8b7d1SJesse Brandeburg kfree(adapter->q_vectors);
18655ec8b7d1SJesse Brandeburg adapter->q_vectors = NULL;
18665ec8b7d1SJesse Brandeburg }
18675ec8b7d1SJesse Brandeburg
18685ec8b7d1SJesse Brandeburg /**
18695ec8b7d1SJesse Brandeburg * iavf_reset_interrupt_capability - Reset MSIX setup
18705ec8b7d1SJesse Brandeburg * @adapter: board private structure
18715ec8b7d1SJesse Brandeburg *
18725ec8b7d1SJesse Brandeburg **/
iavf_reset_interrupt_capability(struct iavf_adapter * adapter)1873a4aadf0fSPrzemek Kitszel static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
18745ec8b7d1SJesse Brandeburg {
18755ec8b7d1SJesse Brandeburg if (!adapter->msix_entries)
18765ec8b7d1SJesse Brandeburg return;
18775ec8b7d1SJesse Brandeburg
18785ec8b7d1SJesse Brandeburg pci_disable_msix(adapter->pdev);
18795ec8b7d1SJesse Brandeburg kfree(adapter->msix_entries);
18805ec8b7d1SJesse Brandeburg adapter->msix_entries = NULL;
18815ec8b7d1SJesse Brandeburg }
18825ec8b7d1SJesse Brandeburg
18835ec8b7d1SJesse Brandeburg /**
18845ec8b7d1SJesse Brandeburg * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
18855ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
18865ec8b7d1SJesse Brandeburg *
18875ec8b7d1SJesse Brandeburg **/
iavf_init_interrupt_scheme(struct iavf_adapter * adapter)1888a4aadf0fSPrzemek Kitszel static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
18895ec8b7d1SJesse Brandeburg {
18905ec8b7d1SJesse Brandeburg int err;
18915ec8b7d1SJesse Brandeburg
18925ec8b7d1SJesse Brandeburg err = iavf_alloc_queues(adapter);
18935ec8b7d1SJesse Brandeburg if (err) {
18945ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
18955ec8b7d1SJesse Brandeburg "Unable to allocate memory for queues\n");
18965ec8b7d1SJesse Brandeburg goto err_alloc_queues;
18975ec8b7d1SJesse Brandeburg }
18985ec8b7d1SJesse Brandeburg
18995ec8b7d1SJesse Brandeburg err = iavf_set_interrupt_capability(adapter);
19005ec8b7d1SJesse Brandeburg if (err) {
19015ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
19025ec8b7d1SJesse Brandeburg "Unable to setup interrupt capabilities\n");
19035ec8b7d1SJesse Brandeburg goto err_set_interrupt;
19045ec8b7d1SJesse Brandeburg }
19055ec8b7d1SJesse Brandeburg
19065ec8b7d1SJesse Brandeburg err = iavf_alloc_q_vectors(adapter);
19075ec8b7d1SJesse Brandeburg if (err) {
19085ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
19095ec8b7d1SJesse Brandeburg "Unable to allocate memory for queue vectors\n");
19105ec8b7d1SJesse Brandeburg goto err_alloc_q_vectors;
19115ec8b7d1SJesse Brandeburg }
19125ec8b7d1SJesse Brandeburg
19135ec8b7d1SJesse Brandeburg /* If we've made it so far while ADq flag being ON, then we haven't
19145ec8b7d1SJesse Brandeburg * bailed out anywhere in middle. And ADq isn't just enabled but actual
19155ec8b7d1SJesse Brandeburg * resources have been allocated in the reset path.
19165ec8b7d1SJesse Brandeburg * Now we can truly claim that ADq is enabled.
19175ec8b7d1SJesse Brandeburg */
19185ec8b7d1SJesse Brandeburg if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
19195ec8b7d1SJesse Brandeburg adapter->num_tc)
19205ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
19215ec8b7d1SJesse Brandeburg adapter->num_tc);
19225ec8b7d1SJesse Brandeburg
19235ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
19245ec8b7d1SJesse Brandeburg (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
19255ec8b7d1SJesse Brandeburg adapter->num_active_queues);
19265ec8b7d1SJesse Brandeburg
19275ec8b7d1SJesse Brandeburg return 0;
19285ec8b7d1SJesse Brandeburg err_alloc_q_vectors:
19295ec8b7d1SJesse Brandeburg iavf_reset_interrupt_capability(adapter);
19305ec8b7d1SJesse Brandeburg err_set_interrupt:
19315ec8b7d1SJesse Brandeburg iavf_free_queues(adapter);
19325ec8b7d1SJesse Brandeburg err_alloc_queues:
19335ec8b7d1SJesse Brandeburg return err;
19345ec8b7d1SJesse Brandeburg }
19355ec8b7d1SJesse Brandeburg
19365ec8b7d1SJesse Brandeburg /**
19375ec8b7d1SJesse Brandeburg * iavf_free_rss - Free memory used by RSS structs
19385ec8b7d1SJesse Brandeburg * @adapter: board private structure
19395ec8b7d1SJesse Brandeburg **/
iavf_free_rss(struct iavf_adapter * adapter)19405ec8b7d1SJesse Brandeburg static void iavf_free_rss(struct iavf_adapter *adapter)
19415ec8b7d1SJesse Brandeburg {
19425ec8b7d1SJesse Brandeburg kfree(adapter->rss_key);
19435ec8b7d1SJesse Brandeburg adapter->rss_key = NULL;
19445ec8b7d1SJesse Brandeburg
19455ec8b7d1SJesse Brandeburg kfree(adapter->rss_lut);
19465ec8b7d1SJesse Brandeburg adapter->rss_lut = NULL;
19475ec8b7d1SJesse Brandeburg }
19485ec8b7d1SJesse Brandeburg
/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 * @running: true if adapter->state == __IAVF_RUNNING
 *
 * Tears down the existing IRQs, vectors and queues, then rebuilds them
 * from scratch and re-requests the misc (admin queue) interrupt. The
 * teardown order mirrors the reverse of the setup order and must not be
 * reordered: IRQs are released before the vectors backing them are freed.
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	/* Traffic IRQs are only requested while running, so only then do
	 * they need to be freed.
	 */
	if (running)
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	/* Queues stay stopped until a later open/up path restarts them */
	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	/* Mark the VSI down; bring-up happens elsewhere after the reinit */
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}
19845ec8b7d1SJesse Brandeburg
/**
 * iavf_finish_config - do all netdev work that needs RTNL
 * @work: our work_struct
 *
 * Do work that needs both RTNL and crit_lock.
 **/
static void iavf_finish_config(struct work_struct *work)
{
	struct iavf_adapter *adapter;
	int pairs, err;

	adapter = container_of(work, struct iavf_adapter, finish_config);

	/* Always take RTNL first to prevent circular lock dependency */
	rtnl_lock();
	mutex_lock(&adapter->crit_lock);

	/* Apply a deferred features update, but only while the netdev is
	 * registered and we are not in the middle of remove.
	 */
	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
	    adapter->netdev_registered &&
	    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
		netdev_update_features(adapter->netdev);
		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
	}

	switch (adapter->state) {
	case __IAVF_DOWN:
		if (!adapter->netdev_registered) {
			/* register_netdevice() (not register_netdev()) is
			 * used because RTNL is already held here.
			 */
			err = register_netdevice(adapter->netdev);
			if (err) {
				dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
					err);

				/* go back and try again.*/
				iavf_free_rss(adapter);
				iavf_free_misc_irq(adapter);
				iavf_reset_interrupt_capability(adapter);
				iavf_change_state(adapter,
						  __IAVF_INIT_CONFIG_ADAPTER);
				goto out;
			}
			adapter->netdev_registered = true;
		}

		/* Set the real number of queues when reset occurs while
		 * state == __IAVF_DOWN
		 */
		fallthrough;
	case __IAVF_RUNNING:
		pairs = adapter->num_active_queues;
		netif_set_real_num_rx_queues(adapter->netdev, pairs);
		netif_set_real_num_tx_queues(adapter->netdev, pairs);
		break;

	default:
		break;
	}

out:
	mutex_unlock(&adapter->crit_lock);
	rtnl_unlock();
}
2046d1639a17SAhmed Zaki
2047d1639a17SAhmed Zaki /**
2048d1639a17SAhmed Zaki * iavf_schedule_finish_config - Set the flags and schedule a reset event
2049d1639a17SAhmed Zaki * @adapter: board private structure
2050d1639a17SAhmed Zaki **/
iavf_schedule_finish_config(struct iavf_adapter * adapter)2051d1639a17SAhmed Zaki void iavf_schedule_finish_config(struct iavf_adapter *adapter)
2052d1639a17SAhmed Zaki {
2053d1639a17SAhmed Zaki if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2054d1639a17SAhmed Zaki queue_work(adapter->wq, &adapter->finish_config);
2055d1639a17SAhmed Zaki }
2056d1639a17SAhmed Zaki
2057d1639a17SAhmed Zaki /**
2058b476b003SJakub Pawlak * iavf_process_aq_command - process aq_required flags
2059b476b003SJakub Pawlak * and sends aq command
2060b476b003SJakub Pawlak * @adapter: pointer to iavf adapter structure
2061b476b003SJakub Pawlak *
2062b476b003SJakub Pawlak * Returns 0 on success
2063b476b003SJakub Pawlak * Returns error code if no command was sent
2064b476b003SJakub Pawlak * or error code if the command failed.
2065b476b003SJakub Pawlak **/
iavf_process_aq_command(struct iavf_adapter * adapter)2066b476b003SJakub Pawlak static int iavf_process_aq_command(struct iavf_adapter *adapter)
2067b476b003SJakub Pawlak {
2068b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
2069b476b003SJakub Pawlak return iavf_send_vf_config_msg(adapter);
2070209f2f9cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
2071209f2f9cSBrett Creeley return iavf_send_vf_offload_vlan_v2_msg(adapter);
2072b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
2073b476b003SJakub Pawlak iavf_disable_queues(adapter);
2074b476b003SJakub Pawlak return 0;
2075b476b003SJakub Pawlak }
2076b476b003SJakub Pawlak
2077b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2078b476b003SJakub Pawlak iavf_map_queues(adapter);
2079b476b003SJakub Pawlak return 0;
2080b476b003SJakub Pawlak }
2081b476b003SJakub Pawlak
2082b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2083b476b003SJakub Pawlak iavf_add_ether_addrs(adapter);
2084b476b003SJakub Pawlak return 0;
2085b476b003SJakub Pawlak }
2086b476b003SJakub Pawlak
2087b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2088b476b003SJakub Pawlak iavf_add_vlans(adapter);
2089b476b003SJakub Pawlak return 0;
2090b476b003SJakub Pawlak }
2091b476b003SJakub Pawlak
2092b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2093b476b003SJakub Pawlak iavf_del_ether_addrs(adapter);
2094b476b003SJakub Pawlak return 0;
2095b476b003SJakub Pawlak }
2096b476b003SJakub Pawlak
2097b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2098b476b003SJakub Pawlak iavf_del_vlans(adapter);
2099b476b003SJakub Pawlak return 0;
2100b476b003SJakub Pawlak }
2101b476b003SJakub Pawlak
2102b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2103b476b003SJakub Pawlak iavf_enable_vlan_stripping(adapter);
2104b476b003SJakub Pawlak return 0;
2105b476b003SJakub Pawlak }
2106b476b003SJakub Pawlak
2107b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2108b476b003SJakub Pawlak iavf_disable_vlan_stripping(adapter);
2109b476b003SJakub Pawlak return 0;
2110b476b003SJakub Pawlak }
2111b476b003SJakub Pawlak
2112b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2113b476b003SJakub Pawlak iavf_configure_queues(adapter);
2114b476b003SJakub Pawlak return 0;
2115b476b003SJakub Pawlak }
2116b476b003SJakub Pawlak
2117b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2118b476b003SJakub Pawlak iavf_enable_queues(adapter);
2119b476b003SJakub Pawlak return 0;
2120b476b003SJakub Pawlak }
2121b476b003SJakub Pawlak
2122b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2123b476b003SJakub Pawlak /* This message goes straight to the firmware, not the
2124b476b003SJakub Pawlak * PF, so we don't have to set current_op as we will
2125b476b003SJakub Pawlak * not get a response through the ARQ.
2126b476b003SJakub Pawlak */
2127b476b003SJakub Pawlak adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2128b476b003SJakub Pawlak return 0;
2129b476b003SJakub Pawlak }
2130b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2131b476b003SJakub Pawlak iavf_get_hena(adapter);
2132b476b003SJakub Pawlak return 0;
2133b476b003SJakub Pawlak }
2134b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2135b476b003SJakub Pawlak iavf_set_hena(adapter);
2136b476b003SJakub Pawlak return 0;
2137b476b003SJakub Pawlak }
2138b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2139b476b003SJakub Pawlak iavf_set_rss_key(adapter);
2140b476b003SJakub Pawlak return 0;
2141b476b003SJakub Pawlak }
2142b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2143b476b003SJakub Pawlak iavf_set_rss_lut(adapter);
2144b476b003SJakub Pawlak return 0;
2145b476b003SJakub Pawlak }
2146b476b003SJakub Pawlak
21477e85cf09SBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
21487e85cf09SBrett Creeley iavf_set_promiscuous(adapter);
2149b476b003SJakub Pawlak return 0;
2150b476b003SJakub Pawlak }
2151b476b003SJakub Pawlak
2152b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2153b476b003SJakub Pawlak iavf_enable_channels(adapter);
2154b476b003SJakub Pawlak return 0;
2155b476b003SJakub Pawlak }
2156b476b003SJakub Pawlak
2157b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2158b476b003SJakub Pawlak iavf_disable_channels(adapter);
2159b476b003SJakub Pawlak return 0;
2160b476b003SJakub Pawlak }
2161b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2162b476b003SJakub Pawlak iavf_add_cloud_filter(adapter);
2163b476b003SJakub Pawlak return 0;
2164b476b003SJakub Pawlak }
2165b476b003SJakub Pawlak
2166b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2167b476b003SJakub Pawlak iavf_del_cloud_filter(adapter);
2168b476b003SJakub Pawlak return 0;
2169b476b003SJakub Pawlak }
217068dfe634SPaul Greenwalt if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
217168dfe634SPaul Greenwalt iavf_del_cloud_filter(adapter);
217268dfe634SPaul Greenwalt return 0;
217368dfe634SPaul Greenwalt }
217468dfe634SPaul Greenwalt if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
217568dfe634SPaul Greenwalt iavf_add_cloud_filter(adapter);
217668dfe634SPaul Greenwalt return 0;
217768dfe634SPaul Greenwalt }
21780dbfbabbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
21790dbfbabbSHaiyue Wang iavf_add_fdir_filter(adapter);
21800dbfbabbSHaiyue Wang return IAVF_SUCCESS;
21810dbfbabbSHaiyue Wang }
21820dbfbabbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
21830dbfbabbSHaiyue Wang iavf_del_fdir_filter(adapter);
21840dbfbabbSHaiyue Wang return IAVF_SUCCESS;
21850dbfbabbSHaiyue Wang }
21860aaeb4fbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
21870aaeb4fbSHaiyue Wang iavf_add_adv_rss_cfg(adapter);
21880aaeb4fbSHaiyue Wang return 0;
21890aaeb4fbSHaiyue Wang }
21900aaeb4fbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
21910aaeb4fbSHaiyue Wang iavf_del_adv_rss_cfg(adapter);
21920aaeb4fbSHaiyue Wang return 0;
21930aaeb4fbSHaiyue Wang }
21948afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
21958afadd1cSBrett Creeley iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
21968afadd1cSBrett Creeley return 0;
21978afadd1cSBrett Creeley }
21988afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
21998afadd1cSBrett Creeley iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
22008afadd1cSBrett Creeley return 0;
22018afadd1cSBrett Creeley }
22028afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
22038afadd1cSBrett Creeley iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
22048afadd1cSBrett Creeley return 0;
22058afadd1cSBrett Creeley }
22068afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
22078afadd1cSBrett Creeley iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
22088afadd1cSBrett Creeley return 0;
22098afadd1cSBrett Creeley }
22108afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
22118afadd1cSBrett Creeley iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
22128afadd1cSBrett Creeley return 0;
22138afadd1cSBrett Creeley }
22148afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
22158afadd1cSBrett Creeley iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
22168afadd1cSBrett Creeley return 0;
22178afadd1cSBrett Creeley }
22188afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
22198afadd1cSBrett Creeley iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
22208afadd1cSBrett Creeley return 0;
22218afadd1cSBrett Creeley }
22228afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
22238afadd1cSBrett Creeley iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
22248afadd1cSBrett Creeley return 0;
22258afadd1cSBrett Creeley }
22268afadd1cSBrett Creeley
22273b5bdd18SJedrzej Jagielski if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
22283b5bdd18SJedrzej Jagielski iavf_request_stats(adapter);
22293b5bdd18SJedrzej Jagielski return 0;
22303b5bdd18SJedrzej Jagielski }
22313b5bdd18SJedrzej Jagielski
2232b476b003SJakub Pawlak return -EAGAIN;
2233b476b003SJakub Pawlak }
2234b476b003SJakub Pawlak
2235b476b003SJakub Pawlak /**
22368afadd1cSBrett Creeley * iavf_set_vlan_offload_features - set VLAN offload configuration
22378afadd1cSBrett Creeley * @adapter: board private structure
22388afadd1cSBrett Creeley * @prev_features: previous features used for comparison
22398afadd1cSBrett Creeley * @features: updated features used for configuration
22408afadd1cSBrett Creeley *
22418afadd1cSBrett Creeley * Set the aq_required bit(s) based on the requested features passed in to
22428afadd1cSBrett Creeley * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
22438afadd1cSBrett Creeley * the watchdog if any changes are requested to expedite the request via
22448afadd1cSBrett Creeley * virtchnl.
22458afadd1cSBrett Creeley **/
2246a4aadf0fSPrzemek Kitszel static void
iavf_set_vlan_offload_features(struct iavf_adapter * adapter,netdev_features_t prev_features,netdev_features_t features)22478afadd1cSBrett Creeley iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
22488afadd1cSBrett Creeley netdev_features_t prev_features,
22498afadd1cSBrett Creeley netdev_features_t features)
22508afadd1cSBrett Creeley {
22518afadd1cSBrett Creeley bool enable_stripping = true, enable_insertion = true;
22528afadd1cSBrett Creeley u16 vlan_ethertype = 0;
22538afadd1cSBrett Creeley u64 aq_required = 0;
22548afadd1cSBrett Creeley
22558afadd1cSBrett Creeley /* keep cases separate because one ethertype for offloads can be
22568afadd1cSBrett Creeley * disabled at the same time as another is disabled, so check for an
22578afadd1cSBrett Creeley * enabled ethertype first, then check for disabled. Default to
22588afadd1cSBrett Creeley * ETH_P_8021Q so an ethertype is specified if disabling insertion and
22598afadd1cSBrett Creeley * stripping.
22608afadd1cSBrett Creeley */
22618afadd1cSBrett Creeley if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
22628afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021AD;
22638afadd1cSBrett Creeley else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
22648afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021Q;
22658afadd1cSBrett Creeley else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
22668afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021AD;
22678afadd1cSBrett Creeley else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
22688afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021Q;
22698afadd1cSBrett Creeley else
22708afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021Q;
22718afadd1cSBrett Creeley
22728afadd1cSBrett Creeley if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
22738afadd1cSBrett Creeley enable_stripping = false;
22748afadd1cSBrett Creeley if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
22758afadd1cSBrett Creeley enable_insertion = false;
22768afadd1cSBrett Creeley
22778afadd1cSBrett Creeley if (VLAN_ALLOWED(adapter)) {
22788afadd1cSBrett Creeley /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
22798afadd1cSBrett Creeley * stripping via virtchnl. VLAN insertion can be toggled on the
22808afadd1cSBrett Creeley * netdev, but it doesn't require a virtchnl message
22818afadd1cSBrett Creeley */
22828afadd1cSBrett Creeley if (enable_stripping)
22838afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
22848afadd1cSBrett Creeley else
22858afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
22868afadd1cSBrett Creeley
22878afadd1cSBrett Creeley } else if (VLAN_V2_ALLOWED(adapter)) {
22888afadd1cSBrett Creeley switch (vlan_ethertype) {
22898afadd1cSBrett Creeley case ETH_P_8021Q:
22908afadd1cSBrett Creeley if (enable_stripping)
22918afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
22928afadd1cSBrett Creeley else
22938afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
22948afadd1cSBrett Creeley
22958afadd1cSBrett Creeley if (enable_insertion)
22968afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
22978afadd1cSBrett Creeley else
22988afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
22998afadd1cSBrett Creeley break;
23008afadd1cSBrett Creeley case ETH_P_8021AD:
23018afadd1cSBrett Creeley if (enable_stripping)
23028afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
23038afadd1cSBrett Creeley else
23048afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
23058afadd1cSBrett Creeley
23068afadd1cSBrett Creeley if (enable_insertion)
23078afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
23088afadd1cSBrett Creeley else
23098afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
23108afadd1cSBrett Creeley break;
23118afadd1cSBrett Creeley }
23128afadd1cSBrett Creeley }
23138afadd1cSBrett Creeley
23148afadd1cSBrett Creeley if (aq_required) {
23158afadd1cSBrett Creeley adapter->aq_required |= aq_required;
23164411a608SMichal Schmidt mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
23178afadd1cSBrett Creeley }
23188afadd1cSBrett Creeley }
23198afadd1cSBrett Creeley
23208afadd1cSBrett Creeley /**
2321b66c7bc1SJakub Pawlak * iavf_startup - first step of driver startup
2322b66c7bc1SJakub Pawlak * @adapter: board private structure
2323b66c7bc1SJakub Pawlak *
2324b66c7bc1SJakub Pawlak * Function process __IAVF_STARTUP driver state.
2325b66c7bc1SJakub Pawlak * When success the state is changed to __IAVF_INIT_VERSION_CHECK
232659756ad6SMateusz Palczewski * when fails the state is changed to __IAVF_INIT_FAILED
2327b66c7bc1SJakub Pawlak **/
iavf_startup(struct iavf_adapter * adapter)232859756ad6SMateusz Palczewski static void iavf_startup(struct iavf_adapter *adapter)
2329b66c7bc1SJakub Pawlak {
2330b66c7bc1SJakub Pawlak struct pci_dev *pdev = adapter->pdev;
2331b66c7bc1SJakub Pawlak struct iavf_hw *hw = &adapter->hw;
2332bae569d0SMateusz Palczewski enum iavf_status status;
2333bae569d0SMateusz Palczewski int ret;
2334b66c7bc1SJakub Pawlak
2335b66c7bc1SJakub Pawlak WARN_ON(adapter->state != __IAVF_STARTUP);
2336b66c7bc1SJakub Pawlak
2337b66c7bc1SJakub Pawlak /* driver loaded, probe complete */
2338b66c7bc1SJakub Pawlak adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2339b66c7bc1SJakub Pawlak adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2340bae569d0SMateusz Palczewski status = iavf_set_mac_type(hw);
2341bae569d0SMateusz Palczewski if (status) {
2342bae569d0SMateusz Palczewski dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2343b66c7bc1SJakub Pawlak goto err;
2344b66c7bc1SJakub Pawlak }
2345b66c7bc1SJakub Pawlak
2346bae569d0SMateusz Palczewski ret = iavf_check_reset_complete(hw);
2347bae569d0SMateusz Palczewski if (ret) {
2348b66c7bc1SJakub Pawlak dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2349bae569d0SMateusz Palczewski ret);
2350b66c7bc1SJakub Pawlak goto err;
2351b66c7bc1SJakub Pawlak }
2352b66c7bc1SJakub Pawlak hw->aq.num_arq_entries = IAVF_AQ_LEN;
2353b66c7bc1SJakub Pawlak hw->aq.num_asq_entries = IAVF_AQ_LEN;
2354b66c7bc1SJakub Pawlak hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2355b66c7bc1SJakub Pawlak hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2356b66c7bc1SJakub Pawlak
2357bae569d0SMateusz Palczewski status = iavf_init_adminq(hw);
2358bae569d0SMateusz Palczewski if (status) {
2359bae569d0SMateusz Palczewski dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2360bae569d0SMateusz Palczewski status);
2361b66c7bc1SJakub Pawlak goto err;
2362b66c7bc1SJakub Pawlak }
2363bae569d0SMateusz Palczewski ret = iavf_send_api_ver(adapter);
2364bae569d0SMateusz Palczewski if (ret) {
2365bae569d0SMateusz Palczewski dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2366b66c7bc1SJakub Pawlak iavf_shutdown_adminq(hw);
2367b66c7bc1SJakub Pawlak goto err;
2368b66c7bc1SJakub Pawlak }
236945eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
237059756ad6SMateusz Palczewski return;
2371b66c7bc1SJakub Pawlak err:
237259756ad6SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_FAILED);
2373b66c7bc1SJakub Pawlak }
2374b66c7bc1SJakub Pawlak
2375b66c7bc1SJakub Pawlak /**
2376b66c7bc1SJakub Pawlak * iavf_init_version_check - second step of driver startup
2377b66c7bc1SJakub Pawlak * @adapter: board private structure
2378b66c7bc1SJakub Pawlak *
2379b66c7bc1SJakub Pawlak * Function process __IAVF_INIT_VERSION_CHECK driver state.
2380b66c7bc1SJakub Pawlak * When success the state is changed to __IAVF_INIT_GET_RESOURCES
238159756ad6SMateusz Palczewski * when fails the state is changed to __IAVF_INIT_FAILED
2382b66c7bc1SJakub Pawlak **/
iavf_init_version_check(struct iavf_adapter * adapter)238359756ad6SMateusz Palczewski static void iavf_init_version_check(struct iavf_adapter *adapter)
2384b66c7bc1SJakub Pawlak {
2385b66c7bc1SJakub Pawlak struct pci_dev *pdev = adapter->pdev;
2386b66c7bc1SJakub Pawlak struct iavf_hw *hw = &adapter->hw;
2387b66c7bc1SJakub Pawlak int err = -EAGAIN;
2388b66c7bc1SJakub Pawlak
2389b66c7bc1SJakub Pawlak WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2390b66c7bc1SJakub Pawlak
2391b66c7bc1SJakub Pawlak if (!iavf_asq_done(hw)) {
2392b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Admin queue command never completed\n");
2393b66c7bc1SJakub Pawlak iavf_shutdown_adminq(hw);
239445eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_STARTUP);
2395b66c7bc1SJakub Pawlak goto err;
2396b66c7bc1SJakub Pawlak }
2397b66c7bc1SJakub Pawlak
2398b66c7bc1SJakub Pawlak /* aq msg sent, awaiting reply */
2399b66c7bc1SJakub Pawlak err = iavf_verify_api_ver(adapter);
2400b66c7bc1SJakub Pawlak if (err) {
2401bae569d0SMateusz Palczewski if (err == -EALREADY)
2402b66c7bc1SJakub Pawlak err = iavf_send_api_ver(adapter);
2403b66c7bc1SJakub Pawlak else
2404b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2405b66c7bc1SJakub Pawlak adapter->pf_version.major,
2406b66c7bc1SJakub Pawlak adapter->pf_version.minor,
2407b66c7bc1SJakub Pawlak VIRTCHNL_VERSION_MAJOR,
2408b66c7bc1SJakub Pawlak VIRTCHNL_VERSION_MINOR);
2409b66c7bc1SJakub Pawlak goto err;
2410b66c7bc1SJakub Pawlak }
2411b66c7bc1SJakub Pawlak err = iavf_send_vf_config_msg(adapter);
2412b66c7bc1SJakub Pawlak if (err) {
2413b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2414b66c7bc1SJakub Pawlak err);
2415b66c7bc1SJakub Pawlak goto err;
2416b66c7bc1SJakub Pawlak }
241745eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
241859756ad6SMateusz Palczewski return;
2419b66c7bc1SJakub Pawlak err:
242059756ad6SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_FAILED);
2421b66c7bc1SJakub Pawlak }
2422b66c7bc1SJakub Pawlak
2423b66c7bc1SJakub Pawlak /**
2424209f2f9cSBrett Creeley * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2425209f2f9cSBrett Creeley * @adapter: board private structure
2426209f2f9cSBrett Creeley */
iavf_parse_vf_resource_msg(struct iavf_adapter * adapter)2427209f2f9cSBrett Creeley int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2428209f2f9cSBrett Creeley {
2429209f2f9cSBrett Creeley int i, num_req_queues = adapter->num_req_queues;
2430209f2f9cSBrett Creeley struct iavf_vsi *vsi = &adapter->vsi;
2431209f2f9cSBrett Creeley
2432209f2f9cSBrett Creeley for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2433209f2f9cSBrett Creeley if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2434209f2f9cSBrett Creeley adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2435209f2f9cSBrett Creeley }
2436209f2f9cSBrett Creeley if (!adapter->vsi_res) {
2437209f2f9cSBrett Creeley dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2438209f2f9cSBrett Creeley return -ENODEV;
2439209f2f9cSBrett Creeley }
2440209f2f9cSBrett Creeley
2441209f2f9cSBrett Creeley if (num_req_queues &&
2442209f2f9cSBrett Creeley num_req_queues > adapter->vsi_res->num_queue_pairs) {
2443209f2f9cSBrett Creeley /* Problem. The PF gave us fewer queues than what we had
2444209f2f9cSBrett Creeley * negotiated in our request. Need a reset to see if we can't
2445209f2f9cSBrett Creeley * get back to a working state.
2446209f2f9cSBrett Creeley */
2447209f2f9cSBrett Creeley dev_err(&adapter->pdev->dev,
2448209f2f9cSBrett Creeley "Requested %d queues, but PF only gave us %d.\n",
2449209f2f9cSBrett Creeley num_req_queues,
2450209f2f9cSBrett Creeley adapter->vsi_res->num_queue_pairs);
245157d03f56SMichal Maloszewski adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2452209f2f9cSBrett Creeley adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2453c34743daSAhmed Zaki iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
2454209f2f9cSBrett Creeley
2455209f2f9cSBrett Creeley return -EAGAIN;
2456209f2f9cSBrett Creeley }
2457209f2f9cSBrett Creeley adapter->num_req_queues = 0;
2458209f2f9cSBrett Creeley adapter->vsi.id = adapter->vsi_res->vsi_id;
2459209f2f9cSBrett Creeley
2460209f2f9cSBrett Creeley adapter->vsi.back = adapter;
2461209f2f9cSBrett Creeley adapter->vsi.base_vector = 1;
2462209f2f9cSBrett Creeley vsi->netdev = adapter->netdev;
2463209f2f9cSBrett Creeley vsi->qs_handle = adapter->vsi_res->qset_handle;
2464209f2f9cSBrett Creeley if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2465209f2f9cSBrett Creeley adapter->rss_key_size = adapter->vf_res->rss_key_size;
2466209f2f9cSBrett Creeley adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2467209f2f9cSBrett Creeley } else {
2468209f2f9cSBrett Creeley adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2469209f2f9cSBrett Creeley adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2470209f2f9cSBrett Creeley }
2471209f2f9cSBrett Creeley
2472209f2f9cSBrett Creeley return 0;
2473209f2f9cSBrett Creeley }
2474209f2f9cSBrett Creeley
/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_GET_RESOURCES driver state and
 * finishes driver initialization procedure.
 * When success the state is changed to __IAVF_DOWN
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		/* First pass through this state: allocate the buffer that
		 * holds the PF's VIRTCHNL_OP_GET_VF_RESOURCES reply. The
		 * buffer is kept across watchdog retries and freed only on
		 * the err_alloc path below.
		 */
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == -EALREADY) {
		/* Reply has not arrived yet; resend the request and drop to
		 * __IAVF_INIT_FAILED so the watchdog retries this state.
		 */
		err = iavf_send_vf_config_msg(adapter);
		goto err;
	} else if (err == -EINVAL) {
		/* We only get -EINVAL if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	err = iavf_parse_vf_resource_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
			err);
		goto err_alloc;
	}
	/* Some features require additional messages to negotiate extended
	 * capabilities. These are processed in sequence by the
	 * __IAVF_INIT_EXTENDED_CAPS driver state.
	 */
	adapter->extended_caps = IAVF_EXTENDED_CAPS;

	iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
	return;

err_alloc:
	/* discard the resource buffer so the next attempt reallocates it */
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2539209f2f9cSBrett Creeley
2540209f2f9cSBrett Creeley /**
254187dba256SMateusz Palczewski * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2542209f2f9cSBrett Creeley * @adapter: board private structure
2543209f2f9cSBrett Creeley *
254487dba256SMateusz Palczewski * Function processes send of the extended VLAN V2 capability message to the
254587dba256SMateusz Palczewski * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
254687dba256SMateusz Palczewski * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
254787dba256SMateusz Palczewski */
iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter * adapter)254887dba256SMateusz Palczewski static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2549209f2f9cSBrett Creeley {
2550209f2f9cSBrett Creeley int ret;
2551209f2f9cSBrett Creeley
255287dba256SMateusz Palczewski WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
255387dba256SMateusz Palczewski
255487dba256SMateusz Palczewski ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
255587dba256SMateusz Palczewski if (ret && ret == -EOPNOTSUPP) {
255687dba256SMateusz Palczewski /* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case,
255787dba256SMateusz Palczewski * we did not send the capability exchange message and do not
255887dba256SMateusz Palczewski * expect a response.
255987dba256SMateusz Palczewski */
256087dba256SMateusz Palczewski adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
256187dba256SMateusz Palczewski }
256287dba256SMateusz Palczewski
256387dba256SMateusz Palczewski /* We sent the message, so move on to the next step */
256487dba256SMateusz Palczewski adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
256587dba256SMateusz Palczewski }
256687dba256SMateusz Palczewski
256787dba256SMateusz Palczewski /**
256887dba256SMateusz Palczewski * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
256987dba256SMateusz Palczewski * @adapter: board private structure
257087dba256SMateusz Palczewski *
257187dba256SMateusz Palczewski * Function processes receipt of the extended VLAN V2 capability message from
257287dba256SMateusz Palczewski * the PF.
257387dba256SMateusz Palczewski **/
iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter * adapter)257487dba256SMateusz Palczewski static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
257587dba256SMateusz Palczewski {
257687dba256SMateusz Palczewski int ret;
257787dba256SMateusz Palczewski
257887dba256SMateusz Palczewski WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2579209f2f9cSBrett Creeley
2580209f2f9cSBrett Creeley memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2581209f2f9cSBrett Creeley
2582209f2f9cSBrett Creeley ret = iavf_get_vf_vlan_v2_caps(adapter);
258387dba256SMateusz Palczewski if (ret)
2584209f2f9cSBrett Creeley goto err;
2585209f2f9cSBrett Creeley
258687dba256SMateusz Palczewski /* We've processed receipt of the VLAN V2 caps message */
258787dba256SMateusz Palczewski adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2588209f2f9cSBrett Creeley return;
2589209f2f9cSBrett Creeley err:
259087dba256SMateusz Palczewski /* We didn't receive a reply. Make sure we try sending again when
259187dba256SMateusz Palczewski * __IAVF_INIT_FAILED attempts to recover.
259287dba256SMateusz Palczewski */
259387dba256SMateusz Palczewski adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2594209f2f9cSBrett Creeley iavf_change_state(adapter, __IAVF_INIT_FAILED);
2595209f2f9cSBrett Creeley }
2596209f2f9cSBrett Creeley
2597209f2f9cSBrett Creeley /**
259887dba256SMateusz Palczewski * iavf_init_process_extended_caps - Part of driver startup
259987dba256SMateusz Palczewski * @adapter: board private structure
260087dba256SMateusz Palczewski *
260187dba256SMateusz Palczewski * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
260287dba256SMateusz Palczewski * handles negotiating capabilities for features which require an additional
260387dba256SMateusz Palczewski * message.
260487dba256SMateusz Palczewski *
260587dba256SMateusz Palczewski * Once all extended capabilities exchanges are finished, the driver will
260687dba256SMateusz Palczewski * transition into __IAVF_INIT_CONFIG_ADAPTER.
260787dba256SMateusz Palczewski */
iavf_init_process_extended_caps(struct iavf_adapter * adapter)260887dba256SMateusz Palczewski static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
260987dba256SMateusz Palczewski {
261087dba256SMateusz Palczewski WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
261187dba256SMateusz Palczewski
261287dba256SMateusz Palczewski /* Process capability exchange for VLAN V2 */
261387dba256SMateusz Palczewski if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
261487dba256SMateusz Palczewski iavf_init_send_offload_vlan_v2_caps(adapter);
261587dba256SMateusz Palczewski return;
261687dba256SMateusz Palczewski } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
261787dba256SMateusz Palczewski iavf_init_recv_offload_vlan_v2_caps(adapter);
261887dba256SMateusz Palczewski return;
261987dba256SMateusz Palczewski }
262087dba256SMateusz Palczewski
262187dba256SMateusz Palczewski /* When we reach here, no further extended capabilities exchanges are
262287dba256SMateusz Palczewski * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
262387dba256SMateusz Palczewski */
262487dba256SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
262587dba256SMateusz Palczewski }
262687dba256SMateusz Palczewski
/**
 * iavf_init_config_adapter - last part of driver startup
 * @adapter: board private structure
 *
 * After all the supported capabilities are negotiated, then the
 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
 */
static void iavf_init_config_adapter(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);

	if (iavf_process_config(adapter))
		goto err;

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	/* hook up netdev callbacks and ethtool operations */
	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	/* Fall back to a random MAC if the PF did not assign a valid one;
	 * otherwise adopt the PF-provided address as both current and
	 * permanent address.
	 */
	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	/* default descriptor ring sizes; interrupt scheme must be set up
	 * before rings can be mapped to vectors
	 */
	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	/* start link-down; the watchdog/virtchnl path brings the link up */
	netif_carrier_off(netdev);
	adapter->link_up = false;
	netif_tx_stop_all_queues(netdev);

	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		/* client registration failure is non-fatal; log and continue */
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	iavf_change_state(adapter, __IAVF_DOWN);
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_misc_irq_enable(adapter);
	/* release anyone sleeping in iavf_close() waiting for __IAVF_DOWN */
	wake_up(&adapter->down_waitqueue);

	/* sizes were chosen by iavf_parse_vf_resource_msg() earlier */
	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut) {
		err = -ENOMEM;
		goto err_mem;
	}
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		iavf_init_rss(adapter);

	if (VLAN_V2_ALLOWED(adapter))
		/* request initial VLAN offload settings */
		iavf_set_vlan_offload_features(adapter, 0, netdev->features);

	iavf_schedule_finish_config(adapter);
	return;

	/* unwind in reverse acquisition order */
err_mem:
	iavf_free_rss(adapter);
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2727b66c7bc1SJakub Pawlak
2728b66c7bc1SJakub Pawlak /**
27295ec8b7d1SJesse Brandeburg * iavf_watchdog_task - Periodic call-back task
27305ec8b7d1SJesse Brandeburg * @work: pointer to work_struct
27315ec8b7d1SJesse Brandeburg **/
iavf_watchdog_task(struct work_struct * work)27325ec8b7d1SJesse Brandeburg static void iavf_watchdog_task(struct work_struct *work)
27335ec8b7d1SJesse Brandeburg {
27345ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = container_of(work,
27355ec8b7d1SJesse Brandeburg struct iavf_adapter,
2736fdd4044fSJakub Pawlak watchdog_task.work);
2737f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
27385ec8b7d1SJesse Brandeburg u32 reg_val;
27395ec8b7d1SJesse Brandeburg
2740fc2e6b3bSSlawomir Laba if (!mutex_trylock(&adapter->crit_lock)) {
2741fc2e6b3bSSlawomir Laba if (adapter->state == __IAVF_REMOVE)
2742fc2e6b3bSSlawomir Laba return;
2743fc2e6b3bSSlawomir Laba
27445ec8b7d1SJesse Brandeburg goto restart_watchdog;
2745fc2e6b3bSSlawomir Laba }
27465ec8b7d1SJesse Brandeburg
2747bac84861SJan Sokolowski if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
274845eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_COMM_FAILED);
2749bac84861SJan Sokolowski
2750bac84861SJan Sokolowski switch (adapter->state) {
2751898ef1cbSMateusz Palczewski case __IAVF_STARTUP:
2752898ef1cbSMateusz Palczewski iavf_startup(adapter);
2753898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27544411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2755898ef1cbSMateusz Palczewski msecs_to_jiffies(30));
2756898ef1cbSMateusz Palczewski return;
2757898ef1cbSMateusz Palczewski case __IAVF_INIT_VERSION_CHECK:
2758898ef1cbSMateusz Palczewski iavf_init_version_check(adapter);
2759898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27604411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2761898ef1cbSMateusz Palczewski msecs_to_jiffies(30));
2762898ef1cbSMateusz Palczewski return;
2763898ef1cbSMateusz Palczewski case __IAVF_INIT_GET_RESOURCES:
2764898ef1cbSMateusz Palczewski iavf_init_get_resources(adapter);
2765898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27664411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2767898ef1cbSMateusz Palczewski msecs_to_jiffies(1));
2768898ef1cbSMateusz Palczewski return;
276987dba256SMateusz Palczewski case __IAVF_INIT_EXTENDED_CAPS:
277087dba256SMateusz Palczewski iavf_init_process_extended_caps(adapter);
2771209f2f9cSBrett Creeley mutex_unlock(&adapter->crit_lock);
27724411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2773209f2f9cSBrett Creeley msecs_to_jiffies(1));
2774209f2f9cSBrett Creeley return;
2775209f2f9cSBrett Creeley case __IAVF_INIT_CONFIG_ADAPTER:
2776209f2f9cSBrett Creeley iavf_init_config_adapter(adapter);
2777209f2f9cSBrett Creeley mutex_unlock(&adapter->crit_lock);
27784411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2779209f2f9cSBrett Creeley msecs_to_jiffies(1));
2780209f2f9cSBrett Creeley return;
2781898ef1cbSMateusz Palczewski case __IAVF_INIT_FAILED:
27823ccd54efSSlawomir Laba if (test_bit(__IAVF_IN_REMOVE_TASK,
27833ccd54efSSlawomir Laba &adapter->crit_section)) {
27843ccd54efSSlawomir Laba /* Do not update the state and do not reschedule
27853ccd54efSSlawomir Laba * watchdog task, iavf_remove should handle this state
27863ccd54efSSlawomir Laba * as it can loop forever
27873ccd54efSSlawomir Laba */
27883ccd54efSSlawomir Laba mutex_unlock(&adapter->crit_lock);
27893ccd54efSSlawomir Laba return;
27903ccd54efSSlawomir Laba }
2791898ef1cbSMateusz Palczewski if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2792898ef1cbSMateusz Palczewski dev_err(&adapter->pdev->dev,
2793898ef1cbSMateusz Palczewski "Failed to communicate with PF; waiting before retry\n");
2794898ef1cbSMateusz Palczewski adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2795898ef1cbSMateusz Palczewski iavf_shutdown_adminq(hw);
2796898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27974411a608SMichal Schmidt queue_delayed_work(adapter->wq,
2798898ef1cbSMateusz Palczewski &adapter->watchdog_task, (5 * HZ));
2799898ef1cbSMateusz Palczewski return;
2800898ef1cbSMateusz Palczewski }
2801898ef1cbSMateusz Palczewski /* Try again from failed step*/
2802898ef1cbSMateusz Palczewski iavf_change_state(adapter, adapter->last_state);
2803898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
28044411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
2805898ef1cbSMateusz Palczewski return;
2806bac84861SJan Sokolowski case __IAVF_COMM_FAILED:
28073ccd54efSSlawomir Laba if (test_bit(__IAVF_IN_REMOVE_TASK,
28083ccd54efSSlawomir Laba &adapter->crit_section)) {
28093ccd54efSSlawomir Laba /* Set state to __IAVF_INIT_FAILED and perform remove
28103ccd54efSSlawomir Laba * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
28113ccd54efSSlawomir Laba * doesn't bring the state back to __IAVF_COMM_FAILED.
28123ccd54efSSlawomir Laba */
28133ccd54efSSlawomir Laba iavf_change_state(adapter, __IAVF_INIT_FAILED);
28143ccd54efSSlawomir Laba adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
28153ccd54efSSlawomir Laba mutex_unlock(&adapter->crit_lock);
28163ccd54efSSlawomir Laba return;
28173ccd54efSSlawomir Laba }
2818f1cad2ceSJesse Brandeburg reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2819f1cad2ceSJesse Brandeburg IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2820b476b003SJakub Pawlak if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2821b476b003SJakub Pawlak reg_val == VIRTCHNL_VFR_COMPLETED) {
28225ec8b7d1SJesse Brandeburg /* A chance for redemption! */
2823bac84861SJan Sokolowski dev_err(&adapter->pdev->dev,
2824bac84861SJan Sokolowski "Hardware came out of reset. Attempting reinit.\n");
2825898ef1cbSMateusz Palczewski /* When init task contacts the PF and
28265ec8b7d1SJesse Brandeburg * gets everything set up again, it'll restart the
28275ec8b7d1SJesse Brandeburg * watchdog for us. Down, boy. Sit. Stay. Woof.
28285ec8b7d1SJesse Brandeburg */
2829898ef1cbSMateusz Palczewski iavf_change_state(adapter, __IAVF_STARTUP);
2830898ef1cbSMateusz Palczewski adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
28315ec8b7d1SJesse Brandeburg }
28325ec8b7d1SJesse Brandeburg adapter->aq_required = 0;
28335ec8b7d1SJesse Brandeburg adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2834bc2f39a6SDan Carpenter mutex_unlock(&adapter->crit_lock);
28354411a608SMichal Schmidt queue_delayed_work(adapter->wq,
2836bac84861SJan Sokolowski &adapter->watchdog_task,
2837bac84861SJan Sokolowski msecs_to_jiffies(10));
2838898ef1cbSMateusz Palczewski return;
2839bac84861SJan Sokolowski case __IAVF_RESETTING:
28405ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
28414411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
28424411a608SMichal Schmidt HZ * 2);
2843bac84861SJan Sokolowski return;
2844bac84861SJan Sokolowski case __IAVF_DOWN:
2845bac84861SJan Sokolowski case __IAVF_DOWN_PENDING:
2846bac84861SJan Sokolowski case __IAVF_TESTING:
2847bac84861SJan Sokolowski case __IAVF_RUNNING:
28485ec8b7d1SJesse Brandeburg if (adapter->current_op) {
28495ec8b7d1SJesse Brandeburg if (!iavf_asq_done(hw)) {
2850bac84861SJan Sokolowski dev_dbg(&adapter->pdev->dev,
2851bac84861SJan Sokolowski "Admin queue timeout\n");
28525ec8b7d1SJesse Brandeburg iavf_send_api_ver(adapter);
28535ec8b7d1SJesse Brandeburg }
2854bac84861SJan Sokolowski } else {
2855209f2f9cSBrett Creeley int ret = iavf_process_aq_command(adapter);
2856209f2f9cSBrett Creeley
285793580766STony Nguyen /* An error will be returned if no commands were
285893580766STony Nguyen * processed; use this opportunity to update stats
2859209f2f9cSBrett Creeley * if the error isn't -ENOTSUPP
286093580766STony Nguyen */
2861209f2f9cSBrett Creeley if (ret && ret != -EOPNOTSUPP &&
2862bac84861SJan Sokolowski adapter->state == __IAVF_RUNNING)
2863b476b003SJakub Pawlak iavf_request_stats(adapter);
28645ec8b7d1SJesse Brandeburg }
2865898ef1cbSMateusz Palczewski if (adapter->state == __IAVF_RUNNING)
2866898ef1cbSMateusz Palczewski iavf_detect_recover_hung(&adapter->vsi);
2867bac84861SJan Sokolowski break;
2868bac84861SJan Sokolowski case __IAVF_REMOVE:
2869bac84861SJan Sokolowski default:
2870bc2f39a6SDan Carpenter mutex_unlock(&adapter->crit_lock);
2871898ef1cbSMateusz Palczewski return;
2872bac84861SJan Sokolowski }
2873bac84861SJan Sokolowski
2874bac84861SJan Sokolowski /* check for hw reset */
2875bac84861SJan Sokolowski reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2876bac84861SJan Sokolowski if (!reg_val) {
2877bac84861SJan Sokolowski adapter->aq_required = 0;
2878bac84861SJan Sokolowski adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2879bac84861SJan Sokolowski dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2880c34743daSAhmed Zaki iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2881898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
28824411a608SMichal Schmidt queue_delayed_work(adapter->wq,
2883898ef1cbSMateusz Palczewski &adapter->watchdog_task, HZ * 2);
2884898ef1cbSMateusz Palczewski return;
2885bac84861SJan Sokolowski }
28865ec8b7d1SJesse Brandeburg
28875ec8b7d1SJesse Brandeburg schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
28885ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
28895ec8b7d1SJesse Brandeburg restart_watchdog:
2890a472eb5cSSlawomir Laba if (adapter->state >= __IAVF_DOWN)
28914411a608SMichal Schmidt queue_work(adapter->wq, &adapter->adminq_task);
28925ec8b7d1SJesse Brandeburg if (adapter->aq_required)
28934411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2894fdd4044fSJakub Pawlak msecs_to_jiffies(20));
28955ec8b7d1SJesse Brandeburg else
28964411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
28974411a608SMichal Schmidt HZ * 2);
28985ec8b7d1SJesse Brandeburg }
28995ec8b7d1SJesse Brandeburg
290016b2dd8cSPrzemyslaw Patynowski /**
290116b2dd8cSPrzemyslaw Patynowski * iavf_disable_vf - disable VF
290216b2dd8cSPrzemyslaw Patynowski * @adapter: board private structure
290316b2dd8cSPrzemyslaw Patynowski *
290416b2dd8cSPrzemyslaw Patynowski * Set communication failed flag and free all resources.
290516b2dd8cSPrzemyslaw Patynowski * NOTE: This function is expected to be called with crit_lock being held.
290616b2dd8cSPrzemyslaw Patynowski **/
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	/* Mark PF communication as broken so other tasks back off until a
	 * later watchdog cycle re-establishes contact.
	 */
	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		/* Quiesce the data path before freeing its resources:
		 * stop the stack, NAPI and IRQs first, then release
		 * traffic vectors and ring memory.
		 */
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	/* mac_vlan_list_lock guards both the MAC and VLAN filter lists */
	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}
	/* keep the bookkeeping counter in sync with the now-empty list */
	adapter->num_vlan_filters = 0;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* cloud filters live on a separate list under their own lock */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* Tear down interrupt/queue infrastructure and the admin queue;
	 * vf_res is zeroed so stale PF capabilities are not trusted after
	 * a future re-init.
	 */
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);
	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
	iavf_shutdown_adminq(&adapter->hw);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	iavf_change_state(adapter, __IAVF_DOWN);
	/* wake anyone in iavf_close() waiting for the DOWN transition */
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
29665ec8b7d1SJesse Brandeburg
29675ec8b7d1SJesse Brandeburg /**
29685ec8b7d1SJesse Brandeburg * iavf_reset_task - Call-back task to handle hardware reset
29695ec8b7d1SJesse Brandeburg * @work: pointer to work_struct
29705ec8b7d1SJesse Brandeburg *
29715ec8b7d1SJesse Brandeburg * During reset we need to shut down and reinitialize the admin queue
29725ec8b7d1SJesse Brandeburg * before we can use it to communicate with the PF again. We also clear
29735ec8b7d1SJesse Brandeburg * and reinit the rings because that context is lost as well.
29745ec8b7d1SJesse Brandeburg **/
static void iavf_reset_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						      struct iavf_adapter,
						      reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf;
	enum iavf_status status;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
	if (!mutex_trylock(&adapter->crit_lock)) {
		/* Lock is held by someone else; unless remove is in
		 * progress, requeue ourselves and try again later.
		 */
		if (adapter->state != __IAVF_REMOVE)
			queue_work(adapter->wq, &adapter->reset_task);

		return;
	}

	/* client_lock must also be held for the duration of the reset;
	 * busy-wait (with sleeps) rather than blocking on the mutex.
	 */
	while (!mutex_trylock(&adapter->client_lock))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		/* cancel any pending client work and tell the client
		 * (e.g. RDMA) that the VSI is going away
		 */
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		iavf_notify_client_close(&adapter->vsi, true);
	}
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		/* ask the PF to reset this VF explicitly */
		iavf_request_reset(adapter);
	}
	adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
		/* ARQ enable bit clearing indicates the HW reset has begun */
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(IAVF_RESET_WAIT_MS);

		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	/* a PF reset may have cleared bus-master and MSI state; restore it */
	pci_set_master(adapter->pdev);
	pci_restore_msi_state(adapter->pdev);

	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		/* give up on this VF entirely; iavf_disable_vf() expects
		 * crit_lock held, which we still hold here
		 */
		iavf_disable_vf(adapter);
		mutex_unlock(&adapter->client_lock);
		mutex_unlock(&adapter->crit_lock);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = adapter->state == __IAVF_RUNNING;

	if (running) {
		/* quiesce the data path before tearing down rings */
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
	}
	iavf_irq_disable(adapter);

	iavf_change_state(adapter, __IAVF_RESETTING);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);

	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	iavf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	status = iavf_init_adminq(hw);
	if (status) {
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 status);
		goto reset_err;
	}
	adapter->aq_required = 0;

	if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
	    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
		err = iavf_reinit_interrupt_scheme(adapter, running);
		if (err)
			goto reset_err;
	}

	if (RSS_AQ(adapter)) {
		/* PF handles RSS config via the admin queue */
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	} else {
		err = iavf_init_rss(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	/* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
	 * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
	 * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
	 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
	 * been successfully sent and negotiated
	 */
	adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete filter for the current MAC address, it could have
	 * been changed by the PF via administratively set MAC.
	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
			list_del(&f->list);
			kfree(f);
		}
	}
	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);

	/* kick the watchdog so the queued aq_required work runs soon */
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = iavf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
		    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
		}

		iavf_configure(adapter);

		/* iavf_up_complete() will switch device back
		 * to __IAVF_RUNNING
		 */
		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		iavf_change_state(adapter, __IAVF_DOWN);
		/* wake anyone in iavf_close() waiting for DOWN */
		wake_up(&adapter->down_waitqueue);
	}

	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	/* wake waiters blocked until the reset completes */
	wake_up(&adapter->reset_waitqueue);
	mutex_unlock(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);

	return;
reset_err:
	if (running) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		iavf_free_traffic_irqs(adapter);
	}
	/* reinit failed; fall back to fully disabling the VF */
	iavf_disable_vf(adapter);

	mutex_unlock(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
32075ec8b7d1SJesse Brandeburg
32085ec8b7d1SJesse Brandeburg /**
32095ec8b7d1SJesse Brandeburg * iavf_adminq_task - worker thread to clean the admin queue
32105ec8b7d1SJesse Brandeburg * @work: pointer to work_struct containing our data
32115ec8b7d1SJesse Brandeburg **/
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	enum iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (!mutex_trylock(&adapter->crit_lock)) {
		/* During remove, bail out entirely; otherwise requeue and
		 * fall through to "out" so the AQ interrupt cause is
		 * re-enabled even though we did no work this time.
		 */
		if (adapter->state == __IAVF_REMOVE)
			return;

		queue_work(adapter->wq, &adapter->adminq_task);
		goto out;
	}

	/* nothing to do while PF communication is marked broken */
	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto unlock;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto unlock;

	/* drain every pending ARQ element and dispatch each virtchnl
	 * message to the completion handler
	 */
	do {
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		/* clear the buffer so the next message starts clean */
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	/* skip register sanity checks while a reset is underway */
	if (iavf_is_reset_in_progress(adapter))
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
		goto freedom;
	oldval = val;
	/* clear any latched ARQ error bits, logging each one */
	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
	}
	/* only write back if we actually cleared something */
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	/* same treatment for the send (ASQ) side */
	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
unlock:
	mutex_unlock(&adapter->crit_lock);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}
33015ec8b7d1SJesse Brandeburg
33025ec8b7d1SJesse Brandeburg /**
33035ec8b7d1SJesse Brandeburg * iavf_client_task - worker thread to perform client work
33045ec8b7d1SJesse Brandeburg * @work: pointer to work_struct containing our data
33055ec8b7d1SJesse Brandeburg *
33065ec8b7d1SJesse Brandeburg * This task handles client interactions. Because client calls can be
33075ec8b7d1SJesse Brandeburg * reentrant, we can't handle them in the watchdog.
33085ec8b7d1SJesse Brandeburg **/
iavf_client_task(struct work_struct * work)33095ec8b7d1SJesse Brandeburg static void iavf_client_task(struct work_struct *work)
33105ec8b7d1SJesse Brandeburg {
33115ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter =
33125ec8b7d1SJesse Brandeburg container_of(work, struct iavf_adapter, client_task.work);
33135ec8b7d1SJesse Brandeburg
33145ec8b7d1SJesse Brandeburg /* If we can't get the client bit, just give up. We'll be rescheduled
33155ec8b7d1SJesse Brandeburg * later.
33165ec8b7d1SJesse Brandeburg */
33175ec8b7d1SJesse Brandeburg
33185ac49f3cSStefan Assmann if (!mutex_trylock(&adapter->client_lock))
33195ec8b7d1SJesse Brandeburg return;
33205ec8b7d1SJesse Brandeburg
33215ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
33225ec8b7d1SJesse Brandeburg iavf_client_subtask(adapter);
33235ec8b7d1SJesse Brandeburg adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
33245ec8b7d1SJesse Brandeburg goto out;
33255ec8b7d1SJesse Brandeburg }
33265ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
33275ec8b7d1SJesse Brandeburg iavf_notify_client_l2_params(&adapter->vsi);
33285ec8b7d1SJesse Brandeburg adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
33295ec8b7d1SJesse Brandeburg goto out;
33305ec8b7d1SJesse Brandeburg }
33315ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
33325ec8b7d1SJesse Brandeburg iavf_notify_client_close(&adapter->vsi, false);
33335ec8b7d1SJesse Brandeburg adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
33345ec8b7d1SJesse Brandeburg goto out;
33355ec8b7d1SJesse Brandeburg }
33365ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
33375ec8b7d1SJesse Brandeburg iavf_notify_client_open(&adapter->vsi);
33385ec8b7d1SJesse Brandeburg adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
33395ec8b7d1SJesse Brandeburg }
33405ec8b7d1SJesse Brandeburg out:
33415ac49f3cSStefan Assmann mutex_unlock(&adapter->client_lock);
33425ec8b7d1SJesse Brandeburg }
33435ec8b7d1SJesse Brandeburg
33445ec8b7d1SJesse Brandeburg /**
33455ec8b7d1SJesse Brandeburg * iavf_free_all_tx_resources - Free Tx Resources for All Queues
33465ec8b7d1SJesse Brandeburg * @adapter: board private structure
33475ec8b7d1SJesse Brandeburg *
33485ec8b7d1SJesse Brandeburg * Free all transmit software resources
33495ec8b7d1SJesse Brandeburg **/
iavf_free_all_tx_resources(struct iavf_adapter * adapter)33505ec8b7d1SJesse Brandeburg void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
33515ec8b7d1SJesse Brandeburg {
33525ec8b7d1SJesse Brandeburg int i;
33535ec8b7d1SJesse Brandeburg
33545ec8b7d1SJesse Brandeburg if (!adapter->tx_rings)
33555ec8b7d1SJesse Brandeburg return;
33565ec8b7d1SJesse Brandeburg
33575ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++)
33585ec8b7d1SJesse Brandeburg if (adapter->tx_rings[i].desc)
33595ec8b7d1SJesse Brandeburg iavf_free_tx_resources(&adapter->tx_rings[i]);
33605ec8b7d1SJesse Brandeburg }
33615ec8b7d1SJesse Brandeburg
33625ec8b7d1SJesse Brandeburg /**
33635ec8b7d1SJesse Brandeburg * iavf_setup_all_tx_resources - allocate all queues Tx resources
33645ec8b7d1SJesse Brandeburg * @adapter: board private structure
33655ec8b7d1SJesse Brandeburg *
33665ec8b7d1SJesse Brandeburg * If this function returns with an error, then it's possible one or
33675ec8b7d1SJesse Brandeburg * more of the rings is populated (while the rest are not). It is the
33685ec8b7d1SJesse Brandeburg * callers duty to clean those orphaned rings.
33695ec8b7d1SJesse Brandeburg *
33705ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
33715ec8b7d1SJesse Brandeburg **/
iavf_setup_all_tx_resources(struct iavf_adapter * adapter)33725ec8b7d1SJesse Brandeburg static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
33735ec8b7d1SJesse Brandeburg {
33745ec8b7d1SJesse Brandeburg int i, err = 0;
33755ec8b7d1SJesse Brandeburg
33765ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++) {
33775ec8b7d1SJesse Brandeburg adapter->tx_rings[i].count = adapter->tx_desc_count;
33785ec8b7d1SJesse Brandeburg err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
33795ec8b7d1SJesse Brandeburg if (!err)
33805ec8b7d1SJesse Brandeburg continue;
33815ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
33825ec8b7d1SJesse Brandeburg "Allocation for Tx Queue %u failed\n", i);
33835ec8b7d1SJesse Brandeburg break;
33845ec8b7d1SJesse Brandeburg }
33855ec8b7d1SJesse Brandeburg
33865ec8b7d1SJesse Brandeburg return err;
33875ec8b7d1SJesse Brandeburg }
33885ec8b7d1SJesse Brandeburg
33895ec8b7d1SJesse Brandeburg /**
33905ec8b7d1SJesse Brandeburg * iavf_setup_all_rx_resources - allocate all queues Rx resources
33915ec8b7d1SJesse Brandeburg * @adapter: board private structure
33925ec8b7d1SJesse Brandeburg *
33935ec8b7d1SJesse Brandeburg * If this function returns with an error, then it's possible one or
33945ec8b7d1SJesse Brandeburg * more of the rings is populated (while the rest are not). It is the
33955ec8b7d1SJesse Brandeburg * callers duty to clean those orphaned rings.
33965ec8b7d1SJesse Brandeburg *
33975ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
33985ec8b7d1SJesse Brandeburg **/
iavf_setup_all_rx_resources(struct iavf_adapter * adapter)33995ec8b7d1SJesse Brandeburg static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
34005ec8b7d1SJesse Brandeburg {
34015ec8b7d1SJesse Brandeburg int i, err = 0;
34025ec8b7d1SJesse Brandeburg
34035ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++) {
34045ec8b7d1SJesse Brandeburg adapter->rx_rings[i].count = adapter->rx_desc_count;
34055ec8b7d1SJesse Brandeburg err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
34065ec8b7d1SJesse Brandeburg if (!err)
34075ec8b7d1SJesse Brandeburg continue;
34085ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
34095ec8b7d1SJesse Brandeburg "Allocation for Rx Queue %u failed\n", i);
34105ec8b7d1SJesse Brandeburg break;
34115ec8b7d1SJesse Brandeburg }
34125ec8b7d1SJesse Brandeburg return err;
34135ec8b7d1SJesse Brandeburg }
34145ec8b7d1SJesse Brandeburg
34155ec8b7d1SJesse Brandeburg /**
34165ec8b7d1SJesse Brandeburg * iavf_free_all_rx_resources - Free Rx Resources for All Queues
34175ec8b7d1SJesse Brandeburg * @adapter: board private structure
34185ec8b7d1SJesse Brandeburg *
34195ec8b7d1SJesse Brandeburg * Free all receive software resources
34205ec8b7d1SJesse Brandeburg **/
iavf_free_all_rx_resources(struct iavf_adapter * adapter)34215ec8b7d1SJesse Brandeburg void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
34225ec8b7d1SJesse Brandeburg {
34235ec8b7d1SJesse Brandeburg int i;
34245ec8b7d1SJesse Brandeburg
34255ec8b7d1SJesse Brandeburg if (!adapter->rx_rings)
34265ec8b7d1SJesse Brandeburg return;
34275ec8b7d1SJesse Brandeburg
34285ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++)
34295ec8b7d1SJesse Brandeburg if (adapter->rx_rings[i].desc)
34305ec8b7d1SJesse Brandeburg iavf_free_rx_resources(&adapter->rx_rings[i]);
34315ec8b7d1SJesse Brandeburg }
34325ec8b7d1SJesse Brandeburg
34335ec8b7d1SJesse Brandeburg /**
34345ec8b7d1SJesse Brandeburg * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
34355ec8b7d1SJesse Brandeburg * @adapter: board private structure
34365ec8b7d1SJesse Brandeburg * @max_tx_rate: max Tx bw for a tc
34375ec8b7d1SJesse Brandeburg **/
iavf_validate_tx_bandwidth(struct iavf_adapter * adapter,u64 max_tx_rate)34385ec8b7d1SJesse Brandeburg static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
34395ec8b7d1SJesse Brandeburg u64 max_tx_rate)
34405ec8b7d1SJesse Brandeburg {
34415ec8b7d1SJesse Brandeburg int speed = 0, ret = 0;
34425ec8b7d1SJesse Brandeburg
3443e0ef26fbSBrett Creeley if (ADV_LINK_SUPPORT(adapter)) {
3444e0ef26fbSBrett Creeley if (adapter->link_speed_mbps < U32_MAX) {
3445e0ef26fbSBrett Creeley speed = adapter->link_speed_mbps;
3446e0ef26fbSBrett Creeley goto validate_bw;
3447e0ef26fbSBrett Creeley } else {
3448e0ef26fbSBrett Creeley dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3449e0ef26fbSBrett Creeley return -EINVAL;
3450e0ef26fbSBrett Creeley }
3451e0ef26fbSBrett Creeley }
3452e0ef26fbSBrett Creeley
34535ec8b7d1SJesse Brandeburg switch (adapter->link_speed) {
34545071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_40GB:
345518c012d9SBrett Creeley speed = SPEED_40000;
34565ec8b7d1SJesse Brandeburg break;
34575071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_25GB:
345818c012d9SBrett Creeley speed = SPEED_25000;
34595ec8b7d1SJesse Brandeburg break;
34605071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_20GB:
346118c012d9SBrett Creeley speed = SPEED_20000;
34625ec8b7d1SJesse Brandeburg break;
34635071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_10GB:
346418c012d9SBrett Creeley speed = SPEED_10000;
346518c012d9SBrett Creeley break;
346618c012d9SBrett Creeley case VIRTCHNL_LINK_SPEED_5GB:
346718c012d9SBrett Creeley speed = SPEED_5000;
346818c012d9SBrett Creeley break;
346918c012d9SBrett Creeley case VIRTCHNL_LINK_SPEED_2_5GB:
347018c012d9SBrett Creeley speed = SPEED_2500;
34715ec8b7d1SJesse Brandeburg break;
34725071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_1GB:
347318c012d9SBrett Creeley speed = SPEED_1000;
34745ec8b7d1SJesse Brandeburg break;
34755071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_100MB:
347618c012d9SBrett Creeley speed = SPEED_100;
34775ec8b7d1SJesse Brandeburg break;
34785ec8b7d1SJesse Brandeburg default:
34795ec8b7d1SJesse Brandeburg break;
34805ec8b7d1SJesse Brandeburg }
34815ec8b7d1SJesse Brandeburg
3482e0ef26fbSBrett Creeley validate_bw:
34835ec8b7d1SJesse Brandeburg if (max_tx_rate > speed) {
34845ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
34855ec8b7d1SJesse Brandeburg "Invalid tx rate specified\n");
34865ec8b7d1SJesse Brandeburg ret = -EINVAL;
34875ec8b7d1SJesse Brandeburg }
34885ec8b7d1SJesse Brandeburg
34895ec8b7d1SJesse Brandeburg return ret;
34905ec8b7d1SJesse Brandeburg }
34915ec8b7d1SJesse Brandeburg
34925ec8b7d1SJesse Brandeburg /**
3493262de08fSJesse Brandeburg * iavf_validate_ch_config - validate queue mapping info
34945ec8b7d1SJesse Brandeburg * @adapter: board private structure
34955ec8b7d1SJesse Brandeburg * @mqprio_qopt: queue parameters
34965ec8b7d1SJesse Brandeburg *
34975ec8b7d1SJesse Brandeburg * This function validates if the config provided by the user to
34985ec8b7d1SJesse Brandeburg * configure queue channels is valid or not. Returns 0 on a valid
34995ec8b7d1SJesse Brandeburg * config.
35005ec8b7d1SJesse Brandeburg **/
iavf_validate_ch_config(struct iavf_adapter * adapter,struct tc_mqprio_qopt_offload * mqprio_qopt)35015ec8b7d1SJesse Brandeburg static int iavf_validate_ch_config(struct iavf_adapter *adapter,
35025ec8b7d1SJesse Brandeburg struct tc_mqprio_qopt_offload *mqprio_qopt)
35035ec8b7d1SJesse Brandeburg {
35045ec8b7d1SJesse Brandeburg u64 total_max_rate = 0;
3505ec60d54cSPrzemyslaw Patynowski u32 tx_rate_rem = 0;
35065ec8b7d1SJesse Brandeburg int i, num_qps = 0;
35075ec8b7d1SJesse Brandeburg u64 tx_rate = 0;
35085ec8b7d1SJesse Brandeburg int ret = 0;
35095ec8b7d1SJesse Brandeburg
35105ec8b7d1SJesse Brandeburg if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
35115ec8b7d1SJesse Brandeburg mqprio_qopt->qopt.num_tc < 1)
35125ec8b7d1SJesse Brandeburg return -EINVAL;
35135ec8b7d1SJesse Brandeburg
35145ec8b7d1SJesse Brandeburg for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
35155ec8b7d1SJesse Brandeburg if (!mqprio_qopt->qopt.count[i] ||
35165ec8b7d1SJesse Brandeburg mqprio_qopt->qopt.offset[i] != num_qps)
35175ec8b7d1SJesse Brandeburg return -EINVAL;
35185ec8b7d1SJesse Brandeburg if (mqprio_qopt->min_rate[i]) {
35195ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
3520ec60d54cSPrzemyslaw Patynowski "Invalid min tx rate (greater than 0) specified for TC%d\n",
3521ec60d54cSPrzemyslaw Patynowski i);
35225ec8b7d1SJesse Brandeburg return -EINVAL;
35235ec8b7d1SJesse Brandeburg }
3524ec60d54cSPrzemyslaw Patynowski
35255ec8b7d1SJesse Brandeburg /* convert to Mbps */
35265ec8b7d1SJesse Brandeburg tx_rate = div_u64(mqprio_qopt->max_rate[i],
35275ec8b7d1SJesse Brandeburg IAVF_MBPS_DIVISOR);
3528ec60d54cSPrzemyslaw Patynowski
3529ec60d54cSPrzemyslaw Patynowski if (mqprio_qopt->max_rate[i] &&
3530ec60d54cSPrzemyslaw Patynowski tx_rate < IAVF_MBPS_QUANTA) {
3531ec60d54cSPrzemyslaw Patynowski dev_err(&adapter->pdev->dev,
3532ec60d54cSPrzemyslaw Patynowski "Invalid max tx rate for TC%d, minimum %dMbps\n",
3533ec60d54cSPrzemyslaw Patynowski i, IAVF_MBPS_QUANTA);
3534ec60d54cSPrzemyslaw Patynowski return -EINVAL;
3535ec60d54cSPrzemyslaw Patynowski }
3536ec60d54cSPrzemyslaw Patynowski
3537ec60d54cSPrzemyslaw Patynowski (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3538ec60d54cSPrzemyslaw Patynowski
3539ec60d54cSPrzemyslaw Patynowski if (tx_rate_rem != 0) {
3540ec60d54cSPrzemyslaw Patynowski dev_err(&adapter->pdev->dev,
3541ec60d54cSPrzemyslaw Patynowski "Invalid max tx rate for TC%d, not divisible by %d\n",
3542ec60d54cSPrzemyslaw Patynowski i, IAVF_MBPS_QUANTA);
3543ec60d54cSPrzemyslaw Patynowski return -EINVAL;
3544ec60d54cSPrzemyslaw Patynowski }
3545ec60d54cSPrzemyslaw Patynowski
35465ec8b7d1SJesse Brandeburg total_max_rate += tx_rate;
35475ec8b7d1SJesse Brandeburg num_qps += mqprio_qopt->qopt.count[i];
35485ec8b7d1SJesse Brandeburg }
3549b712941cSKaren Sornek if (num_qps > adapter->num_active_queues) {
3550b712941cSKaren Sornek dev_err(&adapter->pdev->dev,
3551b712941cSKaren Sornek "Cannot support requested number of queues\n");
35525ec8b7d1SJesse Brandeburg return -EINVAL;
3553b712941cSKaren Sornek }
35545ec8b7d1SJesse Brandeburg
35555ec8b7d1SJesse Brandeburg ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
35565ec8b7d1SJesse Brandeburg return ret;
35575ec8b7d1SJesse Brandeburg }
35585ec8b7d1SJesse Brandeburg
35595ec8b7d1SJesse Brandeburg /**
3560b50f7bcaSJesse Brandeburg * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3561b50f7bcaSJesse Brandeburg * @adapter: board private structure
35625ec8b7d1SJesse Brandeburg **/
iavf_del_all_cloud_filters(struct iavf_adapter * adapter)35635ec8b7d1SJesse Brandeburg static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
35645ec8b7d1SJesse Brandeburg {
35655ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *cf, *cftmp;
35665ec8b7d1SJesse Brandeburg
35675ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
35685ec8b7d1SJesse Brandeburg list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
35695ec8b7d1SJesse Brandeburg list) {
35705ec8b7d1SJesse Brandeburg list_del(&cf->list);
35715ec8b7d1SJesse Brandeburg kfree(cf);
35725ec8b7d1SJesse Brandeburg adapter->num_cloud_filters--;
35735ec8b7d1SJesse Brandeburg }
35745ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
35755ec8b7d1SJesse Brandeburg }
35765ec8b7d1SJesse Brandeburg
35775ec8b7d1SJesse Brandeburg /**
35782f7cc2dfSSudheer Mogilappagari * iavf_is_tc_config_same - Compare the mqprio TC config with the
35792f7cc2dfSSudheer Mogilappagari * TC config already configured on this adapter.
35802f7cc2dfSSudheer Mogilappagari * @adapter: board private structure
35812f7cc2dfSSudheer Mogilappagari * @mqprio_qopt: TC config received from kernel.
35822f7cc2dfSSudheer Mogilappagari *
35832f7cc2dfSSudheer Mogilappagari * This function compares the TC config received from the kernel
35842f7cc2dfSSudheer Mogilappagari * with the config already configured on the adapter.
35852f7cc2dfSSudheer Mogilappagari *
35862f7cc2dfSSudheer Mogilappagari * Return: True if configuration is same, false otherwise.
35872f7cc2dfSSudheer Mogilappagari **/
iavf_is_tc_config_same(struct iavf_adapter * adapter,struct tc_mqprio_qopt * mqprio_qopt)35882f7cc2dfSSudheer Mogilappagari static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
35892f7cc2dfSSudheer Mogilappagari struct tc_mqprio_qopt *mqprio_qopt)
35902f7cc2dfSSudheer Mogilappagari {
35912f7cc2dfSSudheer Mogilappagari struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
35922f7cc2dfSSudheer Mogilappagari int i;
35932f7cc2dfSSudheer Mogilappagari
35942f7cc2dfSSudheer Mogilappagari if (adapter->num_tc != mqprio_qopt->num_tc)
35952f7cc2dfSSudheer Mogilappagari return false;
35962f7cc2dfSSudheer Mogilappagari
35972f7cc2dfSSudheer Mogilappagari for (i = 0; i < adapter->num_tc; i++) {
35982f7cc2dfSSudheer Mogilappagari if (ch[i].count != mqprio_qopt->count[i] ||
35992f7cc2dfSSudheer Mogilappagari ch[i].offset != mqprio_qopt->offset[i])
36002f7cc2dfSSudheer Mogilappagari return false;
36012f7cc2dfSSudheer Mogilappagari }
36022f7cc2dfSSudheer Mogilappagari return true;
36032f7cc2dfSSudheer Mogilappagari }
36042f7cc2dfSSudheer Mogilappagari
/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data (struct tc_mqprio_qopt_offload)
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Two paths: if qopt.hw is clear, an existing ADQ config is torn down
 * (queues are restored from the snapshot taken at setup); otherwise, in
 * TC_MQPRIO_MODE_CHANNEL mode, the requested config is validated,
 * cached in adapter->ch_config, and a virtchnl request is queued via
 * aq_required.
 *
 * Returns 0 on success.
 **/
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			/* NOTE(review): plain assignment here clobbers any
			 * other pending aq_required flags, while the enable
			 * path below ORs its flag in — confirm this is
			 * intentional.
			 */
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			/* restore the pre-ADQ queue count snapshotted below */
			total_qps = adapter->orig_num_active_queues;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
			return 0;
		adapter->num_tc = num_tc;

		/* cache per-TC queue count/offset/rate; unused TCs get a
		 * benign single-queue placeholder at offset 0
		 */
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}

		/* Take snapshot of original config such as "num_active_queues"
		 * It is used later when delete ADQ flow is exercised, so that
		 * once delete ADQ flow completes, VF shall go back to its
		 * original queue configuration
		 */

		adapter->orig_num_active_queues = adapter->num_active_queues;

		/* Store queue info based on TC so that VF gets configured
		 * with correct number of queues when VF completes ADQ config
		 * flow
		 */
		adapter->ch_config.total_qps = total_qps;

		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	/* skip the netdev queue-count update while the remove task is
	 * tearing the device down
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return 0;

	/* NOTE(review): if qopt.hw is set but mode is not
	 * TC_MQPRIO_MODE_CHANNEL, we reach here with total_qps still 0 —
	 * confirm netif_set_real_num_*_queues(netdev, 0) is the intended
	 * behavior for that case.
	 */
	netif_set_real_num_rx_queues(netdev, total_qps);
	netif_set_real_num_tx_queues(netdev, total_qps);

	return ret;
}
37235ec8b7d1SJesse Brandeburg
37245ec8b7d1SJesse Brandeburg /**
37255ec8b7d1SJesse Brandeburg * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
37265ec8b7d1SJesse Brandeburg * @adapter: board private structure
3727b50f7bcaSJesse Brandeburg * @f: pointer to struct flow_cls_offload
37285ec8b7d1SJesse Brandeburg * @filter: pointer to cloud filter structure
37295ec8b7d1SJesse Brandeburg */
iavf_parse_cls_flower(struct iavf_adapter * adapter,struct flow_cls_offload * f,struct iavf_cloud_filter * filter)37305ec8b7d1SJesse Brandeburg static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3731f9e30088SPablo Neira Ayuso struct flow_cls_offload *f,
37325ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter)
37335ec8b7d1SJesse Brandeburg {
3734f9e30088SPablo Neira Ayuso struct flow_rule *rule = flow_cls_offload_flow_rule(f);
37358f256622SPablo Neira Ayuso struct flow_dissector *dissector = rule->match.dissector;
37365ec8b7d1SJesse Brandeburg u16 n_proto_mask = 0;
37375ec8b7d1SJesse Brandeburg u16 n_proto_key = 0;
37385ec8b7d1SJesse Brandeburg u8 field_flags = 0;
37395ec8b7d1SJesse Brandeburg u16 addr_type = 0;
37405ec8b7d1SJesse Brandeburg u16 n_proto = 0;
37415ec8b7d1SJesse Brandeburg int i = 0;
37425ec8b7d1SJesse Brandeburg struct virtchnl_filter *vf = &filter->f;
37435ec8b7d1SJesse Brandeburg
37448f256622SPablo Neira Ayuso if (dissector->used_keys &
37452b3082c6SRatheesh Kannoth ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
37462b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
37472b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
37482b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
37492b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
37502b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
37512b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
37522b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
37532b3082c6SRatheesh Kannoth dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n",
37548f256622SPablo Neira Ayuso dissector->used_keys);
37555ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
37565ec8b7d1SJesse Brandeburg }
37575ec8b7d1SJesse Brandeburg
37588f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
37598f256622SPablo Neira Ayuso struct flow_match_enc_keyid match;
37605ec8b7d1SJesse Brandeburg
37618f256622SPablo Neira Ayuso flow_rule_match_enc_keyid(rule, &match);
37628f256622SPablo Neira Ayuso if (match.mask->keyid != 0)
37635ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
37645ec8b7d1SJesse Brandeburg }
37655ec8b7d1SJesse Brandeburg
37668f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
37678f256622SPablo Neira Ayuso struct flow_match_basic match;
37685ec8b7d1SJesse Brandeburg
37698f256622SPablo Neira Ayuso flow_rule_match_basic(rule, &match);
37708f256622SPablo Neira Ayuso n_proto_key = ntohs(match.key->n_proto);
37718f256622SPablo Neira Ayuso n_proto_mask = ntohs(match.mask->n_proto);
37725ec8b7d1SJesse Brandeburg
37735ec8b7d1SJesse Brandeburg if (n_proto_key == ETH_P_ALL) {
37745ec8b7d1SJesse Brandeburg n_proto_key = 0;
37755ec8b7d1SJesse Brandeburg n_proto_mask = 0;
37765ec8b7d1SJesse Brandeburg }
37775ec8b7d1SJesse Brandeburg n_proto = n_proto_key & n_proto_mask;
37785ec8b7d1SJesse Brandeburg if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
37795ec8b7d1SJesse Brandeburg return -EINVAL;
37805ec8b7d1SJesse Brandeburg if (n_proto == ETH_P_IPV6) {
37815ec8b7d1SJesse Brandeburg /* specify flow type as TCP IPv6 */
37825ec8b7d1SJesse Brandeburg vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
37835ec8b7d1SJesse Brandeburg }
37845ec8b7d1SJesse Brandeburg
37858f256622SPablo Neira Ayuso if (match.key->ip_proto != IPPROTO_TCP) {
37865ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
37875ec8b7d1SJesse Brandeburg return -EINVAL;
37885ec8b7d1SJesse Brandeburg }
37895ec8b7d1SJesse Brandeburg }
37905ec8b7d1SJesse Brandeburg
37918f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
37928f256622SPablo Neira Ayuso struct flow_match_eth_addrs match;
37935ec8b7d1SJesse Brandeburg
37948f256622SPablo Neira Ayuso flow_rule_match_eth_addrs(rule, &match);
37958f256622SPablo Neira Ayuso
37965ec8b7d1SJesse Brandeburg /* use is_broadcast and is_zero to check for all 0xf or 0 */
37978f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.mask->dst)) {
37988f256622SPablo Neira Ayuso if (is_broadcast_ether_addr(match.mask->dst)) {
37995ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_OMAC;
38005ec8b7d1SJesse Brandeburg } else {
38015ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
38028f256622SPablo Neira Ayuso match.mask->dst);
38039f4651eaSJacob Keller return -EINVAL;
38045ec8b7d1SJesse Brandeburg }
38055ec8b7d1SJesse Brandeburg }
38065ec8b7d1SJesse Brandeburg
38078f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.mask->src)) {
38088f256622SPablo Neira Ayuso if (is_broadcast_ether_addr(match.mask->src)) {
38095ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IMAC;
38105ec8b7d1SJesse Brandeburg } else {
38115ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
38128f256622SPablo Neira Ayuso match.mask->src);
38139f4651eaSJacob Keller return -EINVAL;
38145ec8b7d1SJesse Brandeburg }
38155ec8b7d1SJesse Brandeburg }
38165ec8b7d1SJesse Brandeburg
38178f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.key->dst))
38188f256622SPablo Neira Ayuso if (is_valid_ether_addr(match.key->dst) ||
38198f256622SPablo Neira Ayuso is_multicast_ether_addr(match.key->dst)) {
38205ec8b7d1SJesse Brandeburg /* set the mask if a valid dst_mac address */
38215ec8b7d1SJesse Brandeburg for (i = 0; i < ETH_ALEN; i++)
38225ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_mac[i] |= 0xff;
38235ec8b7d1SJesse Brandeburg ether_addr_copy(vf->data.tcp_spec.dst_mac,
38248f256622SPablo Neira Ayuso match.key->dst);
38255ec8b7d1SJesse Brandeburg }
38265ec8b7d1SJesse Brandeburg
38278f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.key->src))
38288f256622SPablo Neira Ayuso if (is_valid_ether_addr(match.key->src) ||
38298f256622SPablo Neira Ayuso is_multicast_ether_addr(match.key->src)) {
38305ec8b7d1SJesse Brandeburg /* set the mask if a valid dst_mac address */
38315ec8b7d1SJesse Brandeburg for (i = 0; i < ETH_ALEN; i++)
38325ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_mac[i] |= 0xff;
38335ec8b7d1SJesse Brandeburg ether_addr_copy(vf->data.tcp_spec.src_mac,
38348f256622SPablo Neira Ayuso match.key->src);
38355ec8b7d1SJesse Brandeburg }
38365ec8b7d1SJesse Brandeburg }
38375ec8b7d1SJesse Brandeburg
38388f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
38398f256622SPablo Neira Ayuso struct flow_match_vlan match;
38405ec8b7d1SJesse Brandeburg
38418f256622SPablo Neira Ayuso flow_rule_match_vlan(rule, &match);
38428f256622SPablo Neira Ayuso if (match.mask->vlan_id) {
38438f256622SPablo Neira Ayuso if (match.mask->vlan_id == VLAN_VID_MASK) {
38445ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IVLAN;
38455ec8b7d1SJesse Brandeburg } else {
38465ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
38478f256622SPablo Neira Ayuso match.mask->vlan_id);
38489f4651eaSJacob Keller return -EINVAL;
38495ec8b7d1SJesse Brandeburg }
38505ec8b7d1SJesse Brandeburg }
38515ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
38528f256622SPablo Neira Ayuso vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
38535ec8b7d1SJesse Brandeburg }
38545ec8b7d1SJesse Brandeburg
38558f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
38568f256622SPablo Neira Ayuso struct flow_match_control match;
38575ec8b7d1SJesse Brandeburg
38588f256622SPablo Neira Ayuso flow_rule_match_control(rule, &match);
38598f256622SPablo Neira Ayuso addr_type = match.key->addr_type;
38605ec8b7d1SJesse Brandeburg }
38615ec8b7d1SJesse Brandeburg
38625ec8b7d1SJesse Brandeburg if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
38638f256622SPablo Neira Ayuso struct flow_match_ipv4_addrs match;
38645ec8b7d1SJesse Brandeburg
38658f256622SPablo Neira Ayuso flow_rule_match_ipv4_addrs(rule, &match);
38668f256622SPablo Neira Ayuso if (match.mask->dst) {
38678f256622SPablo Neira Ayuso if (match.mask->dst == cpu_to_be32(0xffffffff)) {
38685ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
38695ec8b7d1SJesse Brandeburg } else {
38705ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
38718f256622SPablo Neira Ayuso be32_to_cpu(match.mask->dst));
38729f4651eaSJacob Keller return -EINVAL;
38735ec8b7d1SJesse Brandeburg }
38745ec8b7d1SJesse Brandeburg }
38755ec8b7d1SJesse Brandeburg
38768f256622SPablo Neira Ayuso if (match.mask->src) {
38778f256622SPablo Neira Ayuso if (match.mask->src == cpu_to_be32(0xffffffff)) {
38785ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
38795ec8b7d1SJesse Brandeburg } else {
38805ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
38816650c8e9SDaniil Tatianin be32_to_cpu(match.mask->src));
38829f4651eaSJacob Keller return -EINVAL;
38835ec8b7d1SJesse Brandeburg }
38845ec8b7d1SJesse Brandeburg }
38855ec8b7d1SJesse Brandeburg
38865ec8b7d1SJesse Brandeburg if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
38875ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
38889f4651eaSJacob Keller return -EINVAL;
38895ec8b7d1SJesse Brandeburg }
38908f256622SPablo Neira Ayuso if (match.key->dst) {
38915ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
38928f256622SPablo Neira Ayuso vf->data.tcp_spec.dst_ip[0] = match.key->dst;
38935ec8b7d1SJesse Brandeburg }
38948f256622SPablo Neira Ayuso if (match.key->src) {
38955ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
38968f256622SPablo Neira Ayuso vf->data.tcp_spec.src_ip[0] = match.key->src;
38975ec8b7d1SJesse Brandeburg }
38985ec8b7d1SJesse Brandeburg }
38995ec8b7d1SJesse Brandeburg
39005ec8b7d1SJesse Brandeburg if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
39018f256622SPablo Neira Ayuso struct flow_match_ipv6_addrs match;
39028f256622SPablo Neira Ayuso
39038f256622SPablo Neira Ayuso flow_rule_match_ipv6_addrs(rule, &match);
39045ec8b7d1SJesse Brandeburg
39055ec8b7d1SJesse Brandeburg /* validate mask, make sure it is not IPV6_ADDR_ANY */
39068f256622SPablo Neira Ayuso if (ipv6_addr_any(&match.mask->dst)) {
39075ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
39085ec8b7d1SJesse Brandeburg IPV6_ADDR_ANY);
39099f4651eaSJacob Keller return -EINVAL;
39105ec8b7d1SJesse Brandeburg }
39115ec8b7d1SJesse Brandeburg
39125ec8b7d1SJesse Brandeburg /* src and dest IPv6 address should not be LOOPBACK
39135ec8b7d1SJesse Brandeburg * (0:0:0:0:0:0:0:1) which can be represented as ::1
39145ec8b7d1SJesse Brandeburg */
39158f256622SPablo Neira Ayuso if (ipv6_addr_loopback(&match.key->dst) ||
39168f256622SPablo Neira Ayuso ipv6_addr_loopback(&match.key->src)) {
39175ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
39185ec8b7d1SJesse Brandeburg "ipv6 addr should not be loopback\n");
39199f4651eaSJacob Keller return -EINVAL;
39205ec8b7d1SJesse Brandeburg }
39218f256622SPablo Neira Ayuso if (!ipv6_addr_any(&match.mask->dst) ||
39228f256622SPablo Neira Ayuso !ipv6_addr_any(&match.mask->src))
39235ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
39245ec8b7d1SJesse Brandeburg
39255ec8b7d1SJesse Brandeburg for (i = 0; i < 4; i++)
39265ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
39278f256622SPablo Neira Ayuso memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
39285ec8b7d1SJesse Brandeburg sizeof(vf->data.tcp_spec.dst_ip));
39295ec8b7d1SJesse Brandeburg for (i = 0; i < 4; i++)
39305ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
39318f256622SPablo Neira Ayuso memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
39325ec8b7d1SJesse Brandeburg sizeof(vf->data.tcp_spec.src_ip));
39335ec8b7d1SJesse Brandeburg }
39348f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
39358f256622SPablo Neira Ayuso struct flow_match_ports match;
39365ec8b7d1SJesse Brandeburg
39378f256622SPablo Neira Ayuso flow_rule_match_ports(rule, &match);
39388f256622SPablo Neira Ayuso if (match.mask->src) {
39398f256622SPablo Neira Ayuso if (match.mask->src == cpu_to_be16(0xffff)) {
39405ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
39415ec8b7d1SJesse Brandeburg } else {
39425ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
39438f256622SPablo Neira Ayuso be16_to_cpu(match.mask->src));
39449f4651eaSJacob Keller return -EINVAL;
39455ec8b7d1SJesse Brandeburg }
39465ec8b7d1SJesse Brandeburg }
39475ec8b7d1SJesse Brandeburg
39488f256622SPablo Neira Ayuso if (match.mask->dst) {
39498f256622SPablo Neira Ayuso if (match.mask->dst == cpu_to_be16(0xffff)) {
39505ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
39515ec8b7d1SJesse Brandeburg } else {
39525ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
39538f256622SPablo Neira Ayuso be16_to_cpu(match.mask->dst));
39549f4651eaSJacob Keller return -EINVAL;
39555ec8b7d1SJesse Brandeburg }
39565ec8b7d1SJesse Brandeburg }
39578f256622SPablo Neira Ayuso if (match.key->dst) {
39585ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
39598f256622SPablo Neira Ayuso vf->data.tcp_spec.dst_port = match.key->dst;
39605ec8b7d1SJesse Brandeburg }
39615ec8b7d1SJesse Brandeburg
39628f256622SPablo Neira Ayuso if (match.key->src) {
39635ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
39648f256622SPablo Neira Ayuso vf->data.tcp_spec.src_port = match.key->src;
39655ec8b7d1SJesse Brandeburg }
39665ec8b7d1SJesse Brandeburg }
39675ec8b7d1SJesse Brandeburg vf->field_flags = field_flags;
39685ec8b7d1SJesse Brandeburg
39695ec8b7d1SJesse Brandeburg return 0;
39705ec8b7d1SJesse Brandeburg }
39715ec8b7d1SJesse Brandeburg
39725ec8b7d1SJesse Brandeburg /**
39735ec8b7d1SJesse Brandeburg * iavf_handle_tclass - Forward to a traffic class on the device
39745ec8b7d1SJesse Brandeburg * @adapter: board private structure
39755ec8b7d1SJesse Brandeburg * @tc: traffic class index on the device
39765ec8b7d1SJesse Brandeburg * @filter: pointer to cloud filter structure
39775ec8b7d1SJesse Brandeburg */
iavf_handle_tclass(struct iavf_adapter * adapter,u32 tc,struct iavf_cloud_filter * filter)39785ec8b7d1SJesse Brandeburg static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
39795ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter)
39805ec8b7d1SJesse Brandeburg {
39815ec8b7d1SJesse Brandeburg if (tc == 0)
39825ec8b7d1SJesse Brandeburg return 0;
39835ec8b7d1SJesse Brandeburg if (tc < adapter->num_tc) {
39845ec8b7d1SJesse Brandeburg if (!filter->f.data.tcp_spec.dst_port) {
39855ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
39865ec8b7d1SJesse Brandeburg "Specify destination port to redirect to traffic class other than TC0\n");
39875ec8b7d1SJesse Brandeburg return -EINVAL;
39885ec8b7d1SJesse Brandeburg }
39895ec8b7d1SJesse Brandeburg }
39905ec8b7d1SJesse Brandeburg /* redirect to a traffic class on the same device */
39915ec8b7d1SJesse Brandeburg filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
39925ec8b7d1SJesse Brandeburg filter->f.action_meta = tc;
39935ec8b7d1SJesse Brandeburg return 0;
39945ec8b7d1SJesse Brandeburg }
39955ec8b7d1SJesse Brandeburg
39965ec8b7d1SJesse Brandeburg /**
399740e589baSAvinash Dayanand * iavf_find_cf - Find the cloud filter in the list
399840e589baSAvinash Dayanand * @adapter: Board private structure
399940e589baSAvinash Dayanand * @cookie: filter specific cookie
400040e589baSAvinash Dayanand *
400140e589baSAvinash Dayanand * Returns ptr to the filter object or NULL. Must be called while holding the
400240e589baSAvinash Dayanand * cloud_filter_list_lock.
400340e589baSAvinash Dayanand */
iavf_find_cf(struct iavf_adapter * adapter,unsigned long * cookie)400440e589baSAvinash Dayanand static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
400540e589baSAvinash Dayanand unsigned long *cookie)
400640e589baSAvinash Dayanand {
400740e589baSAvinash Dayanand struct iavf_cloud_filter *filter = NULL;
400840e589baSAvinash Dayanand
400940e589baSAvinash Dayanand if (!cookie)
401040e589baSAvinash Dayanand return NULL;
401140e589baSAvinash Dayanand
401240e589baSAvinash Dayanand list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
401340e589baSAvinash Dayanand if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
401440e589baSAvinash Dayanand return filter;
401540e589baSAvinash Dayanand }
401640e589baSAvinash Dayanand return NULL;
401740e589baSAvinash Dayanand }
401840e589baSAvinash Dayanand
401940e589baSAvinash Dayanand /**
40205ec8b7d1SJesse Brandeburg * iavf_configure_clsflower - Add tc flower filters
40215ec8b7d1SJesse Brandeburg * @adapter: board private structure
4022f9e30088SPablo Neira Ayuso * @cls_flower: Pointer to struct flow_cls_offload
40235ec8b7d1SJesse Brandeburg */
iavf_configure_clsflower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)40245ec8b7d1SJesse Brandeburg static int iavf_configure_clsflower(struct iavf_adapter *adapter,
4025f9e30088SPablo Neira Ayuso struct flow_cls_offload *cls_flower)
40265ec8b7d1SJesse Brandeburg {
40275ec8b7d1SJesse Brandeburg int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
40285ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter = NULL;
40295ec8b7d1SJesse Brandeburg int err = -EINVAL, count = 50;
40305ec8b7d1SJesse Brandeburg
40315ec8b7d1SJesse Brandeburg if (tc < 0) {
40325ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
40335ec8b7d1SJesse Brandeburg return -EINVAL;
40345ec8b7d1SJesse Brandeburg }
40355ec8b7d1SJesse Brandeburg
40365ec8b7d1SJesse Brandeburg filter = kzalloc(sizeof(*filter), GFP_KERNEL);
40375ec8b7d1SJesse Brandeburg if (!filter)
40385ec8b7d1SJesse Brandeburg return -ENOMEM;
40395ec8b7d1SJesse Brandeburg
40405ac49f3cSStefan Assmann while (!mutex_trylock(&adapter->crit_lock)) {
40412135a8d5SNicholas Nunley if (--count == 0) {
40422135a8d5SNicholas Nunley kfree(filter);
40432135a8d5SNicholas Nunley return err;
40442135a8d5SNicholas Nunley }
40455ec8b7d1SJesse Brandeburg udelay(1);
40465ec8b7d1SJesse Brandeburg }
40475ec8b7d1SJesse Brandeburg
40485ec8b7d1SJesse Brandeburg filter->cookie = cls_flower->cookie;
40495ec8b7d1SJesse Brandeburg
405040e589baSAvinash Dayanand /* bail out here if filter already exists */
405140e589baSAvinash Dayanand spin_lock_bh(&adapter->cloud_filter_list_lock);
405240e589baSAvinash Dayanand if (iavf_find_cf(adapter, &cls_flower->cookie)) {
405340e589baSAvinash Dayanand dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
405440e589baSAvinash Dayanand err = -EEXIST;
405540e589baSAvinash Dayanand goto spin_unlock;
405640e589baSAvinash Dayanand }
405740e589baSAvinash Dayanand spin_unlock_bh(&adapter->cloud_filter_list_lock);
405840e589baSAvinash Dayanand
40595ec8b7d1SJesse Brandeburg /* set the mask to all zeroes to begin with */
40605ec8b7d1SJesse Brandeburg memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
40615ec8b7d1SJesse Brandeburg /* start out with flow type and eth type IPv4 to begin with */
40625ec8b7d1SJesse Brandeburg filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
40635ec8b7d1SJesse Brandeburg err = iavf_parse_cls_flower(adapter, cls_flower, filter);
40644f040080SJacob Keller if (err)
40655ec8b7d1SJesse Brandeburg goto err;
40665ec8b7d1SJesse Brandeburg
40675ec8b7d1SJesse Brandeburg err = iavf_handle_tclass(adapter, tc, filter);
40684f040080SJacob Keller if (err)
40695ec8b7d1SJesse Brandeburg goto err;
40705ec8b7d1SJesse Brandeburg
40715ec8b7d1SJesse Brandeburg /* add filter to the list */
40725ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
40735ec8b7d1SJesse Brandeburg list_add_tail(&filter->list, &adapter->cloud_filter_list);
40745ec8b7d1SJesse Brandeburg adapter->num_cloud_filters++;
40755ec8b7d1SJesse Brandeburg filter->add = true;
40765ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
407740e589baSAvinash Dayanand spin_unlock:
40785ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
40795ec8b7d1SJesse Brandeburg err:
40805ec8b7d1SJesse Brandeburg if (err)
40815ec8b7d1SJesse Brandeburg kfree(filter);
40825ec8b7d1SJesse Brandeburg
40835ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
40845ec8b7d1SJesse Brandeburg return err;
40855ec8b7d1SJesse Brandeburg }
40865ec8b7d1SJesse Brandeburg
40875ec8b7d1SJesse Brandeburg /**
40885ec8b7d1SJesse Brandeburg * iavf_delete_clsflower - Remove tc flower filters
40895ec8b7d1SJesse Brandeburg * @adapter: board private structure
4090f9e30088SPablo Neira Ayuso * @cls_flower: Pointer to struct flow_cls_offload
40915ec8b7d1SJesse Brandeburg */
iavf_delete_clsflower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)40925ec8b7d1SJesse Brandeburg static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4093f9e30088SPablo Neira Ayuso struct flow_cls_offload *cls_flower)
40945ec8b7d1SJesse Brandeburg {
40955ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter = NULL;
40965ec8b7d1SJesse Brandeburg int err = 0;
40975ec8b7d1SJesse Brandeburg
40985ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
40995ec8b7d1SJesse Brandeburg filter = iavf_find_cf(adapter, &cls_flower->cookie);
41005ec8b7d1SJesse Brandeburg if (filter) {
41015ec8b7d1SJesse Brandeburg filter->del = true;
41025ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
41035ec8b7d1SJesse Brandeburg } else {
41045ec8b7d1SJesse Brandeburg err = -EINVAL;
41055ec8b7d1SJesse Brandeburg }
41065ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
41075ec8b7d1SJesse Brandeburg
41085ec8b7d1SJesse Brandeburg return err;
41095ec8b7d1SJesse Brandeburg }
41105ec8b7d1SJesse Brandeburg
41115ec8b7d1SJesse Brandeburg /**
41125ec8b7d1SJesse Brandeburg * iavf_setup_tc_cls_flower - flower classifier offloads
4113b50f7bcaSJesse Brandeburg * @adapter: board private structure
4114b50f7bcaSJesse Brandeburg * @cls_flower: pointer to flow_cls_offload struct with flow info
41155ec8b7d1SJesse Brandeburg */
iavf_setup_tc_cls_flower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)41165ec8b7d1SJesse Brandeburg static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4117f9e30088SPablo Neira Ayuso struct flow_cls_offload *cls_flower)
41185ec8b7d1SJesse Brandeburg {
41195ec8b7d1SJesse Brandeburg switch (cls_flower->command) {
4120f9e30088SPablo Neira Ayuso case FLOW_CLS_REPLACE:
41215ec8b7d1SJesse Brandeburg return iavf_configure_clsflower(adapter, cls_flower);
4122f9e30088SPablo Neira Ayuso case FLOW_CLS_DESTROY:
41235ec8b7d1SJesse Brandeburg return iavf_delete_clsflower(adapter, cls_flower);
4124f9e30088SPablo Neira Ayuso case FLOW_CLS_STATS:
41255ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
41265ec8b7d1SJesse Brandeburg default:
41275ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
41285ec8b7d1SJesse Brandeburg }
41295ec8b7d1SJesse Brandeburg }
41305ec8b7d1SJesse Brandeburg
41315ec8b7d1SJesse Brandeburg /**
41325ec8b7d1SJesse Brandeburg * iavf_setup_tc_block_cb - block callback for tc
41335ec8b7d1SJesse Brandeburg * @type: type of offload
41345ec8b7d1SJesse Brandeburg * @type_data: offload data
41355ec8b7d1SJesse Brandeburg * @cb_priv:
41365ec8b7d1SJesse Brandeburg *
41375ec8b7d1SJesse Brandeburg * This function is the block callback for traffic classes
41385ec8b7d1SJesse Brandeburg **/
iavf_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)41395ec8b7d1SJesse Brandeburg static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
41405ec8b7d1SJesse Brandeburg void *cb_priv)
41415ec8b7d1SJesse Brandeburg {
4142bb0858d8SJiri Pirko struct iavf_adapter *adapter = cb_priv;
4143bb0858d8SJiri Pirko
4144bb0858d8SJiri Pirko if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4145bb0858d8SJiri Pirko return -EOPNOTSUPP;
4146bb0858d8SJiri Pirko
41475ec8b7d1SJesse Brandeburg switch (type) {
41485ec8b7d1SJesse Brandeburg case TC_SETUP_CLSFLOWER:
41495ec8b7d1SJesse Brandeburg return iavf_setup_tc_cls_flower(cb_priv, type_data);
41505ec8b7d1SJesse Brandeburg default:
41515ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
41525ec8b7d1SJesse Brandeburg }
41535ec8b7d1SJesse Brandeburg }
41545ec8b7d1SJesse Brandeburg
/* List of tc flow block callbacks, managed by flow_block_cb_setup_simple() */
static LIST_HEAD(iavf_block_cb_list);
4156955bcb6eSPablo Neira Ayuso
41575ec8b7d1SJesse Brandeburg /**
41585ec8b7d1SJesse Brandeburg * iavf_setup_tc - configure multiple traffic classes
41595ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
41605ec8b7d1SJesse Brandeburg * @type: type of offload
4161b50f7bcaSJesse Brandeburg * @type_data: tc offload data
41625ec8b7d1SJesse Brandeburg *
41635ec8b7d1SJesse Brandeburg * This function is the callback to ndo_setup_tc in the
41645ec8b7d1SJesse Brandeburg * netdev_ops.
41655ec8b7d1SJesse Brandeburg *
41665ec8b7d1SJesse Brandeburg * Returns 0 on success
41675ec8b7d1SJesse Brandeburg **/
iavf_setup_tc(struct net_device * netdev,enum tc_setup_type type,void * type_data)41685ec8b7d1SJesse Brandeburg static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
41695ec8b7d1SJesse Brandeburg void *type_data)
41705ec8b7d1SJesse Brandeburg {
41714e95bc26SPablo Neira Ayuso struct iavf_adapter *adapter = netdev_priv(netdev);
41724e95bc26SPablo Neira Ayuso
41735ec8b7d1SJesse Brandeburg switch (type) {
41745ec8b7d1SJesse Brandeburg case TC_SETUP_QDISC_MQPRIO:
41755ec8b7d1SJesse Brandeburg return __iavf_setup_tc(netdev, type_data);
41765ec8b7d1SJesse Brandeburg case TC_SETUP_BLOCK:
4177955bcb6eSPablo Neira Ayuso return flow_block_cb_setup_simple(type_data,
4178955bcb6eSPablo Neira Ayuso &iavf_block_cb_list,
41794e95bc26SPablo Neira Ayuso iavf_setup_tc_block_cb,
41804e95bc26SPablo Neira Ayuso adapter, adapter, true);
41815ec8b7d1SJesse Brandeburg default:
41825ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
41835ec8b7d1SJesse Brandeburg }
41845ec8b7d1SJesse Brandeburg }
41855ec8b7d1SJesse Brandeburg
41865ec8b7d1SJesse Brandeburg /**
41873beb9d66SPiotr Gardocki * iavf_restore_fdir_filters
41883beb9d66SPiotr Gardocki * @adapter: board private structure
41893beb9d66SPiotr Gardocki *
41903beb9d66SPiotr Gardocki * Restore existing FDIR filters when VF netdev comes back up.
41913beb9d66SPiotr Gardocki **/
iavf_restore_fdir_filters(struct iavf_adapter * adapter)41923beb9d66SPiotr Gardocki static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
41933beb9d66SPiotr Gardocki {
41943beb9d66SPiotr Gardocki struct iavf_fdir_fltr *f;
41953beb9d66SPiotr Gardocki
41963beb9d66SPiotr Gardocki spin_lock_bh(&adapter->fdir_fltr_lock);
41973beb9d66SPiotr Gardocki list_for_each_entry(f, &adapter->fdir_list_head, list) {
41983beb9d66SPiotr Gardocki if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
41993beb9d66SPiotr Gardocki /* Cancel a request, keep filter as active */
42003beb9d66SPiotr Gardocki f->state = IAVF_FDIR_FLTR_ACTIVE;
42013beb9d66SPiotr Gardocki } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
42023beb9d66SPiotr Gardocki f->state == IAVF_FDIR_FLTR_INACTIVE) {
42033beb9d66SPiotr Gardocki /* Add filters which are inactive or have a pending
42043beb9d66SPiotr Gardocki * request to PF to be deleted
42053beb9d66SPiotr Gardocki */
42063beb9d66SPiotr Gardocki f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
42073beb9d66SPiotr Gardocki adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
42083beb9d66SPiotr Gardocki }
42093beb9d66SPiotr Gardocki }
42103beb9d66SPiotr Gardocki spin_unlock_bh(&adapter->fdir_fltr_lock);
42113beb9d66SPiotr Gardocki }
42123beb9d66SPiotr Gardocki
42133beb9d66SPiotr Gardocki /**
42145ec8b7d1SJesse Brandeburg * iavf_open - Called when a network interface is made active
42155ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
42165ec8b7d1SJesse Brandeburg *
42175ec8b7d1SJesse Brandeburg * Returns 0 on success, negative value on failure
42185ec8b7d1SJesse Brandeburg *
42195ec8b7d1SJesse Brandeburg * The open entry point is called when a network interface is made
42205ec8b7d1SJesse Brandeburg * active by the system (IFF_UP). At this point all resources needed
42215ec8b7d1SJesse Brandeburg * for transmit and receive operations are allocated, the interrupt
4222fdd4044fSJakub Pawlak * handler is registered with the OS, the watchdog is started,
42235ec8b7d1SJesse Brandeburg * and the stack is notified that the interface is ready.
42245ec8b7d1SJesse Brandeburg **/
iavf_open(struct net_device * netdev)42255ec8b7d1SJesse Brandeburg static int iavf_open(struct net_device *netdev)
42265ec8b7d1SJesse Brandeburg {
42275ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
42285ec8b7d1SJesse Brandeburg int err;
42295ec8b7d1SJesse Brandeburg
42305ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
42315ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
42325ec8b7d1SJesse Brandeburg return -EIO;
42335ec8b7d1SJesse Brandeburg }
42345ec8b7d1SJesse Brandeburg
4235cbe9e511SIvan Vecera while (!mutex_trylock(&adapter->crit_lock)) {
4236cbe9e511SIvan Vecera /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4237cbe9e511SIvan Vecera * is already taken and iavf_open is called from an upper
4238cbe9e511SIvan Vecera * device's notifier reacting on NETDEV_REGISTER event.
4239cbe9e511SIvan Vecera * We have to leave here to avoid dead lock.
4240cbe9e511SIvan Vecera */
4241cbe9e511SIvan Vecera if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4242cbe9e511SIvan Vecera return -EBUSY;
4243cbe9e511SIvan Vecera
42445ec8b7d1SJesse Brandeburg usleep_range(500, 1000);
4245cbe9e511SIvan Vecera }
42465ec8b7d1SJesse Brandeburg
42475ec8b7d1SJesse Brandeburg if (adapter->state != __IAVF_DOWN) {
42485ec8b7d1SJesse Brandeburg err = -EBUSY;
42495ec8b7d1SJesse Brandeburg goto err_unlock;
42505ec8b7d1SJesse Brandeburg }
42515ec8b7d1SJesse Brandeburg
4252605ca7c5SPrzemyslaw Patynowski if (adapter->state == __IAVF_RUNNING &&
4253605ca7c5SPrzemyslaw Patynowski !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4254605ca7c5SPrzemyslaw Patynowski dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4255605ca7c5SPrzemyslaw Patynowski err = 0;
4256605ca7c5SPrzemyslaw Patynowski goto err_unlock;
4257605ca7c5SPrzemyslaw Patynowski }
4258605ca7c5SPrzemyslaw Patynowski
42595ec8b7d1SJesse Brandeburg /* allocate transmit descriptors */
42605ec8b7d1SJesse Brandeburg err = iavf_setup_all_tx_resources(adapter);
42615ec8b7d1SJesse Brandeburg if (err)
42625ec8b7d1SJesse Brandeburg goto err_setup_tx;
42635ec8b7d1SJesse Brandeburg
42645ec8b7d1SJesse Brandeburg /* allocate receive descriptors */
42655ec8b7d1SJesse Brandeburg err = iavf_setup_all_rx_resources(adapter);
42665ec8b7d1SJesse Brandeburg if (err)
42675ec8b7d1SJesse Brandeburg goto err_setup_rx;
42685ec8b7d1SJesse Brandeburg
42695ec8b7d1SJesse Brandeburg /* clear any pending interrupts, may auto mask */
42705ec8b7d1SJesse Brandeburg err = iavf_request_traffic_irqs(adapter, netdev->name);
42715ec8b7d1SJesse Brandeburg if (err)
42725ec8b7d1SJesse Brandeburg goto err_req_irq;
42735ec8b7d1SJesse Brandeburg
42745ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
42755ec8b7d1SJesse Brandeburg
42765ec8b7d1SJesse Brandeburg iavf_add_filter(adapter, adapter->hw.mac.addr);
42775ec8b7d1SJesse Brandeburg
42785ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
42795ec8b7d1SJesse Brandeburg
42803beb9d66SPiotr Gardocki /* Restore filters that were removed with IFF_DOWN */
428142930142SAkeem G Abodunrin iavf_restore_filters(adapter);
42823beb9d66SPiotr Gardocki iavf_restore_fdir_filters(adapter);
428342930142SAkeem G Abodunrin
42845ec8b7d1SJesse Brandeburg iavf_configure(adapter);
42855ec8b7d1SJesse Brandeburg
42865ec8b7d1SJesse Brandeburg iavf_up_complete(adapter);
42875ec8b7d1SJesse Brandeburg
42885ec8b7d1SJesse Brandeburg iavf_irq_enable(adapter, true);
42895ec8b7d1SJesse Brandeburg
42905ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
42915ec8b7d1SJesse Brandeburg
42925ec8b7d1SJesse Brandeburg return 0;
42935ec8b7d1SJesse Brandeburg
42945ec8b7d1SJesse Brandeburg err_req_irq:
42955ec8b7d1SJesse Brandeburg iavf_down(adapter);
42965ec8b7d1SJesse Brandeburg iavf_free_traffic_irqs(adapter);
42975ec8b7d1SJesse Brandeburg err_setup_rx:
42985ec8b7d1SJesse Brandeburg iavf_free_all_rx_resources(adapter);
42995ec8b7d1SJesse Brandeburg err_setup_tx:
43005ec8b7d1SJesse Brandeburg iavf_free_all_tx_resources(adapter);
43015ec8b7d1SJesse Brandeburg err_unlock:
43025ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
43035ec8b7d1SJesse Brandeburg
43045ec8b7d1SJesse Brandeburg return err;
43055ec8b7d1SJesse Brandeburg }
43065ec8b7d1SJesse Brandeburg
43075ec8b7d1SJesse Brandeburg /**
43085ec8b7d1SJesse Brandeburg * iavf_close - Disables a network interface
43095ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
43105ec8b7d1SJesse Brandeburg *
43115ec8b7d1SJesse Brandeburg * Returns 0, this is not allowed to fail
43125ec8b7d1SJesse Brandeburg *
43135ec8b7d1SJesse Brandeburg * The close entry point is called when an interface is de-activated
43145ec8b7d1SJesse Brandeburg * by the OS. The hardware is still under the drivers control, but
43155ec8b7d1SJesse Brandeburg * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
43165ec8b7d1SJesse Brandeburg * are freed, along with all transmit and receive resources.
43175ec8b7d1SJesse Brandeburg **/
iavf_close(struct net_device * netdev)43185ec8b7d1SJesse Brandeburg static int iavf_close(struct net_device *netdev)
43195ec8b7d1SJesse Brandeburg {
43205ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
432111c12adcSMichal Jaron u64 aq_to_restore;
43225ec8b7d1SJesse Brandeburg int status;
43235ec8b7d1SJesse Brandeburg
4324fc2e6b3bSSlawomir Laba mutex_lock(&adapter->crit_lock);
43255ec8b7d1SJesse Brandeburg
4326fc2e6b3bSSlawomir Laba if (adapter->state <= __IAVF_DOWN_PENDING) {
4327fc2e6b3bSSlawomir Laba mutex_unlock(&adapter->crit_lock);
4328fc2e6b3bSSlawomir Laba return 0;
4329fc2e6b3bSSlawomir Laba }
43305ec8b7d1SJesse Brandeburg
433156184e01SJesse Brandeburg set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
43325ec8b7d1SJesse Brandeburg if (CLIENT_ENABLED(adapter))
43335ec8b7d1SJesse Brandeburg adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
433411c12adcSMichal Jaron /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
433511c12adcSMichal Jaron * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
433611c12adcSMichal Jaron * deadlock with adminq_task() until iavf_close timeouts. We must send
433711c12adcSMichal Jaron * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
433811c12adcSMichal Jaron * disable queues possible for vf. Give only necessary flags to
433911c12adcSMichal Jaron * iavf_down and save other to set them right before iavf_close()
434011c12adcSMichal Jaron * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
434111c12adcSMichal Jaron * iavf will be in DOWN state.
434211c12adcSMichal Jaron */
434311c12adcSMichal Jaron aq_to_restore = adapter->aq_required;
434411c12adcSMichal Jaron adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
434511c12adcSMichal Jaron
434611c12adcSMichal Jaron /* Remove flags which we do not want to send after close or we want to
434711c12adcSMichal Jaron * send before disable queues.
434811c12adcSMichal Jaron */
434911c12adcSMichal Jaron aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
435011c12adcSMichal Jaron IAVF_FLAG_AQ_ENABLE_QUEUES |
435111c12adcSMichal Jaron IAVF_FLAG_AQ_CONFIGURE_QUEUES |
435211c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_VLAN_FILTER |
435311c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_MAC_FILTER |
435411c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
435511c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_FDIR_FILTER |
435611c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
43575ec8b7d1SJesse Brandeburg
43585ec8b7d1SJesse Brandeburg iavf_down(adapter);
435945eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_DOWN_PENDING);
43605ec8b7d1SJesse Brandeburg iavf_free_traffic_irqs(adapter);
43615ec8b7d1SJesse Brandeburg
43625ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
43635ec8b7d1SJesse Brandeburg
43645ec8b7d1SJesse Brandeburg /* We explicitly don't free resources here because the hardware is
43655ec8b7d1SJesse Brandeburg * still active and can DMA into memory. Resources are cleared in
43665ec8b7d1SJesse Brandeburg * iavf_virtchnl_completion() after we get confirmation from the PF
43675ec8b7d1SJesse Brandeburg * driver that the rings have been stopped.
43685ec8b7d1SJesse Brandeburg *
43695ec8b7d1SJesse Brandeburg * Also, we wait for state to transition to __IAVF_DOWN before
43705ec8b7d1SJesse Brandeburg * returning. State change occurs in iavf_virtchnl_completion() after
43715ec8b7d1SJesse Brandeburg * VF resources are released (which occurs after PF driver processes and
43725ec8b7d1SJesse Brandeburg * responds to admin queue commands).
43735ec8b7d1SJesse Brandeburg */
43745ec8b7d1SJesse Brandeburg
43755ec8b7d1SJesse Brandeburg status = wait_event_timeout(adapter->down_waitqueue,
43765ec8b7d1SJesse Brandeburg adapter->state == __IAVF_DOWN,
437788ec7308SMitch Williams msecs_to_jiffies(500));
43785ec8b7d1SJesse Brandeburg if (!status)
43795ec8b7d1SJesse Brandeburg netdev_warn(netdev, "Device resources not yet released\n");
438011c12adcSMichal Jaron
438111c12adcSMichal Jaron mutex_lock(&adapter->crit_lock);
438211c12adcSMichal Jaron adapter->aq_required |= aq_to_restore;
438311c12adcSMichal Jaron mutex_unlock(&adapter->crit_lock);
43845ec8b7d1SJesse Brandeburg return 0;
43855ec8b7d1SJesse Brandeburg }
43865ec8b7d1SJesse Brandeburg
43875ec8b7d1SJesse Brandeburg /**
43885ec8b7d1SJesse Brandeburg * iavf_change_mtu - Change the Maximum Transfer Unit
43895ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
43905ec8b7d1SJesse Brandeburg * @new_mtu: new value for maximum frame size
43915ec8b7d1SJesse Brandeburg *
43925ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
43935ec8b7d1SJesse Brandeburg **/
iavf_change_mtu(struct net_device * netdev,int new_mtu)43945ec8b7d1SJesse Brandeburg static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
43955ec8b7d1SJesse Brandeburg {
43965ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
4397c2ed2403SMarcin Szycik int ret = 0;
43985ec8b7d1SJesse Brandeburg
4399aeb5d11fSPatryk Małek netdev_dbg(netdev, "changing MTU from %d to %d\n",
4400aeb5d11fSPatryk Małek netdev->mtu, new_mtu);
44015ec8b7d1SJesse Brandeburg netdev->mtu = new_mtu;
44025ec8b7d1SJesse Brandeburg if (CLIENT_ENABLED(adapter)) {
44035ec8b7d1SJesse Brandeburg iavf_notify_client_l2_params(&adapter->vsi);
44045ec8b7d1SJesse Brandeburg adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
44055ec8b7d1SJesse Brandeburg }
4406d2c0f45fSSlawomir Laba
4407d2c0f45fSSlawomir Laba if (netif_running(netdev)) {
4408c34743daSAhmed Zaki iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
4409c2ed2403SMarcin Szycik ret = iavf_wait_for_reset(adapter);
4410c2ed2403SMarcin Szycik if (ret < 0)
4411c2ed2403SMarcin Szycik netdev_warn(netdev, "MTU change interrupted waiting for reset");
4412c2ed2403SMarcin Szycik else if (ret)
4413c2ed2403SMarcin Szycik netdev_warn(netdev, "MTU change timed out waiting for reset");
4414d2c0f45fSSlawomir Laba }
44155ec8b7d1SJesse Brandeburg
4416c2ed2403SMarcin Szycik return ret;
44175ec8b7d1SJesse Brandeburg }
44185ec8b7d1SJesse Brandeburg
4419e768a049SPiotr Gardocki /**
4420e768a049SPiotr Gardocki * iavf_disable_fdir - disable Flow Director and clear existing filters
4421e768a049SPiotr Gardocki * @adapter: board private structure
4422e768a049SPiotr Gardocki **/
iavf_disable_fdir(struct iavf_adapter * adapter)4423e768a049SPiotr Gardocki static void iavf_disable_fdir(struct iavf_adapter *adapter)
4424e768a049SPiotr Gardocki {
4425e768a049SPiotr Gardocki struct iavf_fdir_fltr *fdir, *fdirtmp;
4426e768a049SPiotr Gardocki bool del_filters = false;
4427e768a049SPiotr Gardocki
4428e768a049SPiotr Gardocki adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
4429e768a049SPiotr Gardocki
4430e768a049SPiotr Gardocki /* remove all Flow Director filters */
4431e768a049SPiotr Gardocki spin_lock_bh(&adapter->fdir_fltr_lock);
4432e768a049SPiotr Gardocki list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
4433e768a049SPiotr Gardocki list) {
4434e768a049SPiotr Gardocki if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
4435e768a049SPiotr Gardocki fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
4436e768a049SPiotr Gardocki /* Delete filters not registered in PF */
4437e768a049SPiotr Gardocki list_del(&fdir->list);
4438e768a049SPiotr Gardocki kfree(fdir);
4439e768a049SPiotr Gardocki adapter->fdir_active_fltr--;
4440e768a049SPiotr Gardocki } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
4441e768a049SPiotr Gardocki fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
4442e768a049SPiotr Gardocki fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
4443e768a049SPiotr Gardocki /* Filters registered in PF, schedule their deletion */
4444e768a049SPiotr Gardocki fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
4445e768a049SPiotr Gardocki del_filters = true;
4446e768a049SPiotr Gardocki } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
4447e768a049SPiotr Gardocki /* Request to delete filter already sent to PF, change
4448e768a049SPiotr Gardocki * state to DEL_PENDING to delete filter after PF's
4449e768a049SPiotr Gardocki * response, not set as INACTIVE
4450e768a049SPiotr Gardocki */
4451e768a049SPiotr Gardocki fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
4452e768a049SPiotr Gardocki }
4453e768a049SPiotr Gardocki }
4454e768a049SPiotr Gardocki spin_unlock_bh(&adapter->fdir_fltr_lock);
4455e768a049SPiotr Gardocki
4456e768a049SPiotr Gardocki if (del_filters) {
4457e768a049SPiotr Gardocki adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
4458e768a049SPiotr Gardocki mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4459e768a049SPiotr Gardocki }
4460e768a049SPiotr Gardocki }
4461e768a049SPiotr Gardocki
/* netdev features whose toggling triggers a VLAN offload reconfiguration
 * in iavf_set_features()
 */
#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_CTAG_TX | \
					 NETIF_F_HW_VLAN_STAG_RX | \
					 NETIF_F_HW_VLAN_STAG_TX)
44668afadd1cSBrett Creeley
44675ec8b7d1SJesse Brandeburg /**
446856184e01SJesse Brandeburg * iavf_set_features - set the netdev feature flags
44695ec8b7d1SJesse Brandeburg * @netdev: ptr to the netdev being adjusted
44705ec8b7d1SJesse Brandeburg * @features: the feature set that the stack is suggesting
44715ec8b7d1SJesse Brandeburg * Note: expects to be called while under rtnl_lock()
44725ec8b7d1SJesse Brandeburg **/
iavf_set_features(struct net_device * netdev,netdev_features_t features)44735ec8b7d1SJesse Brandeburg static int iavf_set_features(struct net_device *netdev,
44745ec8b7d1SJesse Brandeburg netdev_features_t features)
44755ec8b7d1SJesse Brandeburg {
44765ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
44775ec8b7d1SJesse Brandeburg
44788afadd1cSBrett Creeley /* trigger update on any VLAN feature change */
44798afadd1cSBrett Creeley if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
44808afadd1cSBrett Creeley (features & NETIF_VLAN_OFFLOAD_FEATURES))
44818afadd1cSBrett Creeley iavf_set_vlan_offload_features(adapter, netdev->features,
44828afadd1cSBrett Creeley features);
44835ec8b7d1SJesse Brandeburg
4484e768a049SPiotr Gardocki if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
4485e768a049SPiotr Gardocki if (features & NETIF_F_NTUPLE)
4486e768a049SPiotr Gardocki adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
4487e768a049SPiotr Gardocki else
4488e768a049SPiotr Gardocki iavf_disable_fdir(adapter);
4489e768a049SPiotr Gardocki }
4490e768a049SPiotr Gardocki
44915ec8b7d1SJesse Brandeburg return 0;
44925ec8b7d1SJesse Brandeburg }
44935ec8b7d1SJesse Brandeburg
44945ec8b7d1SJesse Brandeburg /**
44955ec8b7d1SJesse Brandeburg * iavf_features_check - Validate encapsulated packet conforms to limits
44965ec8b7d1SJesse Brandeburg * @skb: skb buff
44975ec8b7d1SJesse Brandeburg * @dev: This physical port's netdev
44985ec8b7d1SJesse Brandeburg * @features: Offload features that the stack believes apply
44995ec8b7d1SJesse Brandeburg **/
iavf_features_check(struct sk_buff * skb,struct net_device * dev,netdev_features_t features)45005ec8b7d1SJesse Brandeburg static netdev_features_t iavf_features_check(struct sk_buff *skb,
45015ec8b7d1SJesse Brandeburg struct net_device *dev,
45025ec8b7d1SJesse Brandeburg netdev_features_t features)
45035ec8b7d1SJesse Brandeburg {
45045ec8b7d1SJesse Brandeburg size_t len;
45055ec8b7d1SJesse Brandeburg
45065ec8b7d1SJesse Brandeburg /* No point in doing any of this if neither checksum nor GSO are
45075ec8b7d1SJesse Brandeburg * being requested for this frame. We can rule out both by just
45085ec8b7d1SJesse Brandeburg * checking for CHECKSUM_PARTIAL
45095ec8b7d1SJesse Brandeburg */
45105ec8b7d1SJesse Brandeburg if (skb->ip_summed != CHECKSUM_PARTIAL)
45115ec8b7d1SJesse Brandeburg return features;
45125ec8b7d1SJesse Brandeburg
45135ec8b7d1SJesse Brandeburg /* We cannot support GSO if the MSS is going to be less than
45145ec8b7d1SJesse Brandeburg * 64 bytes. If it is then we need to drop support for GSO.
45155ec8b7d1SJesse Brandeburg */
45165ec8b7d1SJesse Brandeburg if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
45175ec8b7d1SJesse Brandeburg features &= ~NETIF_F_GSO_MASK;
45185ec8b7d1SJesse Brandeburg
45195ec8b7d1SJesse Brandeburg /* MACLEN can support at most 63 words */
45205ec8b7d1SJesse Brandeburg len = skb_network_header(skb) - skb->data;
45215ec8b7d1SJesse Brandeburg if (len & ~(63 * 2))
45225ec8b7d1SJesse Brandeburg goto out_err;
45235ec8b7d1SJesse Brandeburg
45245ec8b7d1SJesse Brandeburg /* IPLEN and EIPLEN can support at most 127 dwords */
45255ec8b7d1SJesse Brandeburg len = skb_transport_header(skb) - skb_network_header(skb);
45265ec8b7d1SJesse Brandeburg if (len & ~(127 * 4))
45275ec8b7d1SJesse Brandeburg goto out_err;
45285ec8b7d1SJesse Brandeburg
45295ec8b7d1SJesse Brandeburg if (skb->encapsulation) {
45305ec8b7d1SJesse Brandeburg /* L4TUNLEN can support 127 words */
45315ec8b7d1SJesse Brandeburg len = skb_inner_network_header(skb) - skb_transport_header(skb);
45325ec8b7d1SJesse Brandeburg if (len & ~(127 * 2))
45335ec8b7d1SJesse Brandeburg goto out_err;
45345ec8b7d1SJesse Brandeburg
45355ec8b7d1SJesse Brandeburg /* IPLEN can support at most 127 dwords */
45365ec8b7d1SJesse Brandeburg len = skb_inner_transport_header(skb) -
45375ec8b7d1SJesse Brandeburg skb_inner_network_header(skb);
45385ec8b7d1SJesse Brandeburg if (len & ~(127 * 4))
45395ec8b7d1SJesse Brandeburg goto out_err;
45405ec8b7d1SJesse Brandeburg }
45415ec8b7d1SJesse Brandeburg
45425ec8b7d1SJesse Brandeburg /* No need to validate L4LEN as TCP is the only protocol with a
4543afdc8a54SJilin Yuan * flexible value and we support all possible values supported
45445ec8b7d1SJesse Brandeburg * by TCP, which is at most 15 dwords
45455ec8b7d1SJesse Brandeburg */
45465ec8b7d1SJesse Brandeburg
45475ec8b7d1SJesse Brandeburg return features;
45485ec8b7d1SJesse Brandeburg out_err:
45495ec8b7d1SJesse Brandeburg return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
45505ec8b7d1SJesse Brandeburg }
45515ec8b7d1SJesse Brandeburg
45525ec8b7d1SJesse Brandeburg /**
455348ccc43eSBrett Creeley * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
455448ccc43eSBrett Creeley * @adapter: board private structure
455548ccc43eSBrett Creeley *
455648ccc43eSBrett Creeley * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
455748ccc43eSBrett Creeley * were negotiated determine the VLAN features that can be toggled on and off.
455848ccc43eSBrett Creeley **/
455948ccc43eSBrett Creeley static netdev_features_t
iavf_get_netdev_vlan_hw_features(struct iavf_adapter * adapter)456048ccc43eSBrett Creeley iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
456148ccc43eSBrett Creeley {
456248ccc43eSBrett Creeley netdev_features_t hw_features = 0;
456348ccc43eSBrett Creeley
456448ccc43eSBrett Creeley if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
456548ccc43eSBrett Creeley return hw_features;
456648ccc43eSBrett Creeley
456748ccc43eSBrett Creeley /* Enable VLAN features if supported */
456848ccc43eSBrett Creeley if (VLAN_ALLOWED(adapter)) {
456948ccc43eSBrett Creeley hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
457048ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_RX);
457148ccc43eSBrett Creeley } else if (VLAN_V2_ALLOWED(adapter)) {
457248ccc43eSBrett Creeley struct virtchnl_vlan_caps *vlan_v2_caps =
457348ccc43eSBrett Creeley &adapter->vlan_v2_caps;
457448ccc43eSBrett Creeley struct virtchnl_vlan_supported_caps *stripping_support =
457548ccc43eSBrett Creeley &vlan_v2_caps->offloads.stripping_support;
457648ccc43eSBrett Creeley struct virtchnl_vlan_supported_caps *insertion_support =
457748ccc43eSBrett Creeley &vlan_v2_caps->offloads.insertion_support;
457848ccc43eSBrett Creeley
457948ccc43eSBrett Creeley if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
458048ccc43eSBrett Creeley stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
458148ccc43eSBrett Creeley if (stripping_support->outer &
458248ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
458348ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
458448ccc43eSBrett Creeley if (stripping_support->outer &
458548ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8)
458648ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_STAG_RX;
458748ccc43eSBrett Creeley } else if (stripping_support->inner !=
458848ccc43eSBrett Creeley VIRTCHNL_VLAN_UNSUPPORTED &&
458948ccc43eSBrett Creeley stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
459048ccc43eSBrett Creeley if (stripping_support->inner &
459148ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
459248ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
459348ccc43eSBrett Creeley }
459448ccc43eSBrett Creeley
459548ccc43eSBrett Creeley if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
459648ccc43eSBrett Creeley insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
459748ccc43eSBrett Creeley if (insertion_support->outer &
459848ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
459948ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
460048ccc43eSBrett Creeley if (insertion_support->outer &
460148ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8)
460248ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_STAG_TX;
460348ccc43eSBrett Creeley } else if (insertion_support->inner &&
460448ccc43eSBrett Creeley insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
460548ccc43eSBrett Creeley if (insertion_support->inner &
460648ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
460748ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
460848ccc43eSBrett Creeley }
460948ccc43eSBrett Creeley }
461048ccc43eSBrett Creeley
461148ccc43eSBrett Creeley return hw_features;
461248ccc43eSBrett Creeley }
461348ccc43eSBrett Creeley
/**
 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * were negotiated determine the VLAN features that are enabled by default.
 **/
static netdev_features_t
iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
{
	netdev_features_t features = 0;

	/* Nothing is enabled by default before resources are negotiated */
	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
		return features;

	if (VLAN_ALLOWED(adapter)) {
		/* Legacy VLAN offload: CTAG filter/strip/insert only */
		features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
	} else if (VLAN_V2_ALLOWED(adapter)) {
		struct virtchnl_vlan_caps *vlan_v2_caps =
			&adapter->vlan_v2_caps;
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vlan_v2_caps->filtering.filtering_support;
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vlan_v2_caps->offloads.stripping_support;
		struct virtchnl_vlan_supported_caps *insertion_support =
			&vlan_v2_caps->offloads.insertion_support;
		u32 ethertype_init;

		/* give priority to outer stripping and don't support both outer
		 * and inner stripping
		 */
		ethertype_init = vlan_v2_caps->offloads.ethertype_init;
		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			/* A feature bit is set only when the PF both supports
			 * the ethertype and initialized it as enabled.
			 */
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
			else if (stripping_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_RX;
		} else if (stripping_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (stripping_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
		}

		/* give priority to outer insertion and don't support both outer
		 * and inner insertion
		 */
		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
			else if (insertion_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_TX;
		} else if (insertion_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
		}

		/* give priority to outer filtering and don't bother if both
		 * outer and inner filtering are enabled
		 */
		/* filtering uses its own ethertype_init, distinct from the
		 * offloads one used above
		 */
		ethertype_init = vlan_v2_caps->filtering.ethertype_init;
		if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		} else if (filtering_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		}
	}

	return features;
}
471248ccc43eSBrett Creeley
/* True unless @requested asks for @feature_bit while @allowed lacks it,
 * i.e. true when the requested setting of that bit is permitted.
 */
#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
	(!(((requested) & (feature_bit)) && \
	   !((allowed) & (feature_bit))))
471648ccc43eSBrett Creeley
471748ccc43eSBrett Creeley /**
471848ccc43eSBrett Creeley * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
471948ccc43eSBrett Creeley * @adapter: board private structure
472048ccc43eSBrett Creeley * @requested_features: stack requested NETDEV features
472148ccc43eSBrett Creeley **/
472248ccc43eSBrett Creeley static netdev_features_t
iavf_fix_netdev_vlan_features(struct iavf_adapter * adapter,netdev_features_t requested_features)472348ccc43eSBrett Creeley iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
472448ccc43eSBrett Creeley netdev_features_t requested_features)
472548ccc43eSBrett Creeley {
472648ccc43eSBrett Creeley netdev_features_t allowed_features;
472748ccc43eSBrett Creeley
472848ccc43eSBrett Creeley allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
472948ccc43eSBrett Creeley iavf_get_netdev_vlan_features(adapter);
473048ccc43eSBrett Creeley
473148ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
473248ccc43eSBrett Creeley allowed_features,
473348ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_TX))
473448ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
473548ccc43eSBrett Creeley
473648ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
473748ccc43eSBrett Creeley allowed_features,
473848ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_RX))
473948ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
474048ccc43eSBrett Creeley
474148ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
474248ccc43eSBrett Creeley allowed_features,
474348ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_TX))
474448ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
474548ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
474648ccc43eSBrett Creeley allowed_features,
474748ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_RX))
474848ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
474948ccc43eSBrett Creeley
475048ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
475148ccc43eSBrett Creeley allowed_features,
475248ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_FILTER))
475348ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
475448ccc43eSBrett Creeley
475548ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
475648ccc43eSBrett Creeley allowed_features,
475748ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_FILTER))
475848ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
475948ccc43eSBrett Creeley
476048ccc43eSBrett Creeley if ((requested_features &
476148ccc43eSBrett Creeley (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
476248ccc43eSBrett Creeley (requested_features &
476348ccc43eSBrett Creeley (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
476448ccc43eSBrett Creeley adapter->vlan_v2_caps.offloads.ethertype_match ==
476548ccc43eSBrett Creeley VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
476648ccc43eSBrett Creeley netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
476748ccc43eSBrett Creeley requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
476848ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_TX);
476948ccc43eSBrett Creeley }
477048ccc43eSBrett Creeley
477148ccc43eSBrett Creeley return requested_features;
477248ccc43eSBrett Creeley }
477348ccc43eSBrett Creeley
477448ccc43eSBrett Creeley /**
47755ec8b7d1SJesse Brandeburg * iavf_fix_features - fix up the netdev feature bits
47765ec8b7d1SJesse Brandeburg * @netdev: our net device
47775ec8b7d1SJesse Brandeburg * @features: desired feature bits
47785ec8b7d1SJesse Brandeburg *
47795ec8b7d1SJesse Brandeburg * Returns fixed-up features bits
47805ec8b7d1SJesse Brandeburg **/
iavf_fix_features(struct net_device * netdev,netdev_features_t features)47815ec8b7d1SJesse Brandeburg static netdev_features_t iavf_fix_features(struct net_device *netdev,
47825ec8b7d1SJesse Brandeburg netdev_features_t features)
47835ec8b7d1SJesse Brandeburg {
47845ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
47855ec8b7d1SJesse Brandeburg
4786e768a049SPiotr Gardocki if (!FDIR_FLTR_SUPPORT(adapter))
4787e768a049SPiotr Gardocki features &= ~NETIF_F_NTUPLE;
4788e768a049SPiotr Gardocki
478948ccc43eSBrett Creeley return iavf_fix_netdev_vlan_features(adapter, features);
47905ec8b7d1SJesse Brandeburg }
47915ec8b7d1SJesse Brandeburg
/* Netdev operations table registered for every iavf interface */
static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open		= iavf_open,
	.ndo_stop		= iavf_close,
	.ndo_start_xmit		= iavf_xmit_frame,
	.ndo_set_rx_mode	= iavf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= iavf_set_mac,
	.ndo_change_mtu		= iavf_change_mtu,
	.ndo_tx_timeout		= iavf_tx_timeout,
	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
	.ndo_features_check	= iavf_features_check,
	.ndo_fix_features	= iavf_fix_features,
	.ndo_set_features	= iavf_set_features,
	.ndo_setup_tc		= iavf_setup_tc,
};
48085ec8b7d1SJesse Brandeburg
48095ec8b7d1SJesse Brandeburg /**
48105ec8b7d1SJesse Brandeburg * iavf_check_reset_complete - check that VF reset is complete
48115ec8b7d1SJesse Brandeburg * @hw: pointer to hw struct
48125ec8b7d1SJesse Brandeburg *
48135ec8b7d1SJesse Brandeburg * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
48145ec8b7d1SJesse Brandeburg **/
iavf_check_reset_complete(struct iavf_hw * hw)4815f349daa5SJesse Brandeburg static int iavf_check_reset_complete(struct iavf_hw *hw)
48165ec8b7d1SJesse Brandeburg {
48175ec8b7d1SJesse Brandeburg u32 rstat;
48185ec8b7d1SJesse Brandeburg int i;
48195ec8b7d1SJesse Brandeburg
48208e3e4b9dSPaul Greenwalt for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4821f1cad2ceSJesse Brandeburg rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4822f1cad2ceSJesse Brandeburg IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
48235ec8b7d1SJesse Brandeburg if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
48245ec8b7d1SJesse Brandeburg (rstat == VIRTCHNL_VFR_COMPLETED))
48255ec8b7d1SJesse Brandeburg return 0;
48265ec8b7d1SJesse Brandeburg usleep_range(10, 20);
48275ec8b7d1SJesse Brandeburg }
48285ec8b7d1SJesse Brandeburg return -EBUSY;
48295ec8b7d1SJesse Brandeburg }
48305ec8b7d1SJesse Brandeburg
/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 *
 * Returns 0 (no failure paths in the current implementation).
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	netdev_features_t hw_vlan_features, vlan_features;
	struct net_device *netdev = adapter->netdev;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	/* Baseline offloads advertised regardless of PF capabilities */
	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES	|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_GRE		|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   0;

		/* Without ENCAP_CSUM, tunnel checksums can only be done
		 * via GSO_PARTIAL.
		 */
		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
	hw_features = hw_enc_features;

	/* get HW VLAN features that can be toggled */
	hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);

	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
		hw_features |= NETIF_F_GSO_UDP_L4;

	netdev->hw_features |= hw_features | hw_vlan_features;
	vlan_features = iavf_get_netdev_vlan_features(adapter);

	netdev->features |= hw_features | vlan_features;

	/* CTAG filtering is always on with legacy VLAN offload (it is not
	 * user-togglable, hence not part of hw_features).
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Advertise and enable Flow Director when the PF supports it */
	if (FDIR_FLTR_SUPPORT(adapter)) {
		netdev->hw_features |= NETIF_F_NTUPLE;
		netdev->features |= NETIF_F_NTUPLE;
		adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	return 0;
}
49345ec8b7d1SJesse Brandeburg
49355ec8b7d1SJesse Brandeburg /**
49365ec8b7d1SJesse Brandeburg * iavf_probe - Device Initialization Routine
49375ec8b7d1SJesse Brandeburg * @pdev: PCI device information struct
49385ec8b7d1SJesse Brandeburg * @ent: entry in iavf_pci_tbl
49395ec8b7d1SJesse Brandeburg *
49405ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
49415ec8b7d1SJesse Brandeburg *
49425ec8b7d1SJesse Brandeburg * iavf_probe initializes an adapter identified by a pci_dev structure.
49435ec8b7d1SJesse Brandeburg * The OS initialization, configuring of the adapter private structure,
49445ec8b7d1SJesse Brandeburg * and a hardware reset occur.
49455ec8b7d1SJesse Brandeburg **/
iavf_probe(struct pci_dev * pdev,const struct pci_device_id * ent)49465ec8b7d1SJesse Brandeburg static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49475ec8b7d1SJesse Brandeburg {
49485ec8b7d1SJesse Brandeburg struct net_device *netdev;
49495ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = NULL;
4950f349daa5SJesse Brandeburg struct iavf_hw *hw = NULL;
49515ec8b7d1SJesse Brandeburg int err;
49525ec8b7d1SJesse Brandeburg
49535ec8b7d1SJesse Brandeburg err = pci_enable_device(pdev);
49545ec8b7d1SJesse Brandeburg if (err)
49555ec8b7d1SJesse Brandeburg return err;
49565ec8b7d1SJesse Brandeburg
49575ec8b7d1SJesse Brandeburg err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
49585ec8b7d1SJesse Brandeburg if (err) {
49595ec8b7d1SJesse Brandeburg dev_err(&pdev->dev,
49605ec8b7d1SJesse Brandeburg "DMA configuration failed: 0x%x\n", err);
49615ec8b7d1SJesse Brandeburg goto err_dma;
49625ec8b7d1SJesse Brandeburg }
49635ec8b7d1SJesse Brandeburg
49645ec8b7d1SJesse Brandeburg err = pci_request_regions(pdev, iavf_driver_name);
49655ec8b7d1SJesse Brandeburg if (err) {
49665ec8b7d1SJesse Brandeburg dev_err(&pdev->dev,
49675ec8b7d1SJesse Brandeburg "pci_request_regions failed 0x%x\n", err);
49685ec8b7d1SJesse Brandeburg goto err_pci_reg;
49695ec8b7d1SJesse Brandeburg }
49705ec8b7d1SJesse Brandeburg
49715ec8b7d1SJesse Brandeburg pci_set_master(pdev);
49725ec8b7d1SJesse Brandeburg
49735ec8b7d1SJesse Brandeburg netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
49745ec8b7d1SJesse Brandeburg IAVF_MAX_REQ_QUEUES);
49755ec8b7d1SJesse Brandeburg if (!netdev) {
49765ec8b7d1SJesse Brandeburg err = -ENOMEM;
49775ec8b7d1SJesse Brandeburg goto err_alloc_etherdev;
49785ec8b7d1SJesse Brandeburg }
49795ec8b7d1SJesse Brandeburg
49805ec8b7d1SJesse Brandeburg SET_NETDEV_DEV(netdev, &pdev->dev);
49815ec8b7d1SJesse Brandeburg
49825ec8b7d1SJesse Brandeburg pci_set_drvdata(pdev, netdev);
49835ec8b7d1SJesse Brandeburg adapter = netdev_priv(netdev);
49845ec8b7d1SJesse Brandeburg
49855ec8b7d1SJesse Brandeburg adapter->netdev = netdev;
49865ec8b7d1SJesse Brandeburg adapter->pdev = pdev;
49875ec8b7d1SJesse Brandeburg
49885ec8b7d1SJesse Brandeburg hw = &adapter->hw;
49895ec8b7d1SJesse Brandeburg hw->back = adapter;
49905ec8b7d1SJesse Brandeburg
49914411a608SMichal Schmidt adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
49924411a608SMichal Schmidt iavf_driver_name);
49934411a608SMichal Schmidt if (!adapter->wq) {
49944411a608SMichal Schmidt err = -ENOMEM;
49954411a608SMichal Schmidt goto err_alloc_wq;
49964411a608SMichal Schmidt }
49974411a608SMichal Schmidt
49985ec8b7d1SJesse Brandeburg adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
499945eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_STARTUP);
50005ec8b7d1SJesse Brandeburg
50015ec8b7d1SJesse Brandeburg /* Call save state here because it relies on the adapter struct. */
50025ec8b7d1SJesse Brandeburg pci_save_state(pdev);
50035ec8b7d1SJesse Brandeburg
50045ec8b7d1SJesse Brandeburg hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
50055ec8b7d1SJesse Brandeburg pci_resource_len(pdev, 0));
50065ec8b7d1SJesse Brandeburg if (!hw->hw_addr) {
50075ec8b7d1SJesse Brandeburg err = -EIO;
50085ec8b7d1SJesse Brandeburg goto err_ioremap;
50095ec8b7d1SJesse Brandeburg }
50105ec8b7d1SJesse Brandeburg hw->vendor_id = pdev->vendor;
50115ec8b7d1SJesse Brandeburg hw->device_id = pdev->device;
50125ec8b7d1SJesse Brandeburg pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
50135ec8b7d1SJesse Brandeburg hw->subsystem_vendor_id = pdev->subsystem_vendor;
50145ec8b7d1SJesse Brandeburg hw->subsystem_device_id = pdev->subsystem_device;
50155ec8b7d1SJesse Brandeburg hw->bus.device = PCI_SLOT(pdev->devfn);
50165ec8b7d1SJesse Brandeburg hw->bus.func = PCI_FUNC(pdev->devfn);
50175ec8b7d1SJesse Brandeburg hw->bus.bus_id = pdev->bus->number;
50185ec8b7d1SJesse Brandeburg
50195ec8b7d1SJesse Brandeburg /* set up the locks for the AQ, do this only once in probe
50205ec8b7d1SJesse Brandeburg * and destroy them only once in remove
50215ec8b7d1SJesse Brandeburg */
50225ac49f3cSStefan Assmann mutex_init(&adapter->crit_lock);
50235ac49f3cSStefan Assmann mutex_init(&adapter->client_lock);
50245ec8b7d1SJesse Brandeburg mutex_init(&hw->aq.asq_mutex);
50255ec8b7d1SJesse Brandeburg mutex_init(&hw->aq.arq_mutex);
50265ec8b7d1SJesse Brandeburg
50275ec8b7d1SJesse Brandeburg spin_lock_init(&adapter->mac_vlan_list_lock);
50285ec8b7d1SJesse Brandeburg spin_lock_init(&adapter->cloud_filter_list_lock);
50290dbfbabbSHaiyue Wang spin_lock_init(&adapter->fdir_fltr_lock);
50300aaeb4fbSHaiyue Wang spin_lock_init(&adapter->adv_rss_lock);
50317e85cf09SBrett Creeley spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
50325ec8b7d1SJesse Brandeburg
50335ec8b7d1SJesse Brandeburg INIT_LIST_HEAD(&adapter->mac_filter_list);
50345ec8b7d1SJesse Brandeburg INIT_LIST_HEAD(&adapter->vlan_filter_list);
50355ec8b7d1SJesse Brandeburg INIT_LIST_HEAD(&adapter->cloud_filter_list);
50360dbfbabbSHaiyue Wang INIT_LIST_HEAD(&adapter->fdir_list_head);
50370aaeb4fbSHaiyue Wang INIT_LIST_HEAD(&adapter->adv_rss_list_head);
50385ec8b7d1SJesse Brandeburg
50395ec8b7d1SJesse Brandeburg INIT_WORK(&adapter->reset_task, iavf_reset_task);
50405ec8b7d1SJesse Brandeburg INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
5041d1639a17SAhmed Zaki INIT_WORK(&adapter->finish_config, iavf_finish_config);
5042fdd4044fSJakub Pawlak INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
50435ec8b7d1SJesse Brandeburg INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
50445ec8b7d1SJesse Brandeburg
50455ec8b7d1SJesse Brandeburg /* Setup the wait queue for indicating transition to down status */
50465ec8b7d1SJesse Brandeburg init_waitqueue_head(&adapter->down_waitqueue);
50475ec8b7d1SJesse Brandeburg
5048c2ed2403SMarcin Szycik /* Setup the wait queue for indicating transition to running state */
5049c2ed2403SMarcin Szycik init_waitqueue_head(&adapter->reset_waitqueue);
5050c2ed2403SMarcin Szycik
505135a2443dSMateusz Palczewski /* Setup the wait queue for indicating virtchannel events */
505235a2443dSMateusz Palczewski init_waitqueue_head(&adapter->vc_waitqueue);
505335a2443dSMateusz Palczewski
50547db31110SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
50557db31110SMichal Schmidt msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
50567db31110SMichal Schmidt /* Initialization goes on in the work. Do not add more of it below. */
50575ec8b7d1SJesse Brandeburg return 0;
50585ec8b7d1SJesse Brandeburg
50595ec8b7d1SJesse Brandeburg err_ioremap:
50604411a608SMichal Schmidt destroy_workqueue(adapter->wq);
50614411a608SMichal Schmidt err_alloc_wq:
50625ec8b7d1SJesse Brandeburg free_netdev(netdev);
50635ec8b7d1SJesse Brandeburg err_alloc_etherdev:
50645ec8b7d1SJesse Brandeburg pci_release_regions(pdev);
50655ec8b7d1SJesse Brandeburg err_pci_reg:
50665ec8b7d1SJesse Brandeburg err_dma:
50675ec8b7d1SJesse Brandeburg pci_disable_device(pdev);
50685ec8b7d1SJesse Brandeburg return err;
50695ec8b7d1SJesse Brandeburg }
50705ec8b7d1SJesse Brandeburg
/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend. Detaches the
 * netdev from the stack, brings the interface down if it was running,
 * and releases the misc (admin queue) interrupt and MSI-X vectors so
 * they can be re-acquired in iavf_resume().
 *
 * Return: 0 always.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Stop the stack from using the netdev before tearing anything down */
	netif_device_detach(netdev);

	/* Spin (sleeping between attempts) rather than blocking on crit_lock;
	 * NOTE(review): presumably to avoid a lock-ordering dependency with
	 * the work items that also take this mutex — confirm.
	 */
	while (!mutex_trylock(&adapter->crit_lock))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		/* iavf_down() requires RTNL */
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	/* Release the admin-queue IRQ and all MSI-X vectors; iavf_resume()
	 * re-allocates both.
	 */
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	mutex_unlock(&adapter->crit_lock);

	return 0;
}
50995ec8b7d1SJesse Brandeburg
51005ec8b7d1SJesse Brandeburg /**
51015ec8b7d1SJesse Brandeburg * iavf_resume - Power management resume routine
5102b50f7bcaSJesse Brandeburg * @dev_d: device info pointer
51035ec8b7d1SJesse Brandeburg *
51045ec8b7d1SJesse Brandeburg * Called when the system (VM) is resumed from sleep/suspend.
51055ec8b7d1SJesse Brandeburg **/
iavf_resume(struct device * dev_d)5106bc5cbd73SVaibhav Gupta static int __maybe_unused iavf_resume(struct device *dev_d)
51075ec8b7d1SJesse Brandeburg {
5108bc5cbd73SVaibhav Gupta struct pci_dev *pdev = to_pci_dev(dev_d);
5109247aa001SKaren Sornek struct iavf_adapter *adapter;
51105ec8b7d1SJesse Brandeburg u32 err;
51115ec8b7d1SJesse Brandeburg
5112247aa001SKaren Sornek adapter = iavf_pdev_to_adapter(pdev);
5113247aa001SKaren Sornek
51145ec8b7d1SJesse Brandeburg pci_set_master(pdev);
51155ec8b7d1SJesse Brandeburg
51165ec8b7d1SJesse Brandeburg rtnl_lock();
51175ec8b7d1SJesse Brandeburg err = iavf_set_interrupt_capability(adapter);
51185ec8b7d1SJesse Brandeburg if (err) {
51195ec8b7d1SJesse Brandeburg rtnl_unlock();
51205ec8b7d1SJesse Brandeburg dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
51215ec8b7d1SJesse Brandeburg return err;
51225ec8b7d1SJesse Brandeburg }
51235ec8b7d1SJesse Brandeburg err = iavf_request_misc_irq(adapter);
51245ec8b7d1SJesse Brandeburg rtnl_unlock();
51255ec8b7d1SJesse Brandeburg if (err) {
51265ec8b7d1SJesse Brandeburg dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
51275ec8b7d1SJesse Brandeburg return err;
51285ec8b7d1SJesse Brandeburg }
51295ec8b7d1SJesse Brandeburg
51304411a608SMichal Schmidt queue_work(adapter->wq, &adapter->reset_task);
51315ec8b7d1SJesse Brandeburg
5132247aa001SKaren Sornek netif_device_attach(adapter->netdev);
51335ec8b7d1SJesse Brandeburg
51345ec8b7d1SJesse Brandeburg return err;
51355ec8b7d1SJesse Brandeburg }
51365ec8b7d1SJesse Brandeburg
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 *
 * Teardown order matters throughout this function: the netdev is
 * unregistered before the admin queue is shut down, work items are
 * cancelled before their resources are freed, and crit_lock is the
 * last mutex destroyed (after being released).
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_adapter *adapter;
	struct net_device *netdev;
	struct iavf_hw *hw;
	int err;

	/* Don't proceed with remove if netdev is already freed */
	netdev = pci_get_drvdata(pdev);
	if (!netdev)
		return;

	adapter = iavf_pdev_to_adapter(pdev);
	hw = &adapter->hw;

	/* Guard against concurrent remove paths (e.g. shutdown + hotplug);
	 * only the first caller past this bit proceeds.
	 */
	if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	/* Wait until port initialization is complete.
	 * There are flows where register/unregister netdev may race.
	 */
	while (1) {
		mutex_lock(&adapter->crit_lock);
		if (adapter->state == __IAVF_RUNNING ||
		    adapter->state == __IAVF_DOWN ||
		    adapter->state == __IAVF_INIT_FAILED) {
			mutex_unlock(&adapter->crit_lock);
			break;
		}
		/* Simply return if we already went through iavf_shutdown */
		if (adapter->state == __IAVF_REMOVE) {
			mutex_unlock(&adapter->crit_lock);
			return;
		}

		mutex_unlock(&adapter->crit_lock);
		usleep_range(500, 1000);
	}
	/* Stop work that could re-register the netdev underneath us */
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->finish_config);

	rtnl_lock();
	if (adapter->netdev_registered) {
		unregister_netdevice(netdev);
		adapter->netdev_registered = false;
	}
	rtnl_unlock();

	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	mutex_lock(&adapter->crit_lock);
	dev_info(&adapter->pdev->dev, "Removing device\n");
	iavf_change_state(adapter, __IAVF_REMOVE);

	/* Ask the PF/FW to reset the VF so it comes back clean */
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}

	iavf_misc_irq_disable(adapter);
	/* Shut down all the garbage mashers on the detention level */
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->adminq_task);
	cancel_delayed_work_sync(&adapter->client_task);

	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_misc_irq(adapter);

	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);
	mutex_destroy(&adapter->crit_lock);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	destroy_workqueue(adapter->wq);

	/* Clearing drvdata makes a second iavf_remove() call a no-op
	 * (see the !netdev early return above).
	 */
	pci_set_drvdata(pdev, NULL);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
52975ec8b7d1SJesse Brandeburg
/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 *
 * Performs a full device removal (iavf_remove() tolerates being called
 * again afterwards, via the pci drvdata NULL check), then drops the
 * device into D3hot when the system is powering off rather than
 * rebooting.
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
	iavf_remove(pdev);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}
530954f59a24SSlawomir Laba
/* System-sleep PM callbacks only (no runtime PM); wired into the PCI
 * driver below via .driver.pm.
 */
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

/* PCI driver registration: binds iavf_pci_tbl device IDs to the
 * probe/remove/shutdown/PM entry points defined above.
 */
static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};
53205ec8b7d1SJesse Brandeburg
53215ec8b7d1SJesse Brandeburg /**
532256184e01SJesse Brandeburg * iavf_init_module - Driver Registration Routine
53235ec8b7d1SJesse Brandeburg *
532456184e01SJesse Brandeburg * iavf_init_module is the first routine called when the driver is
53255ec8b7d1SJesse Brandeburg * loaded. All it does is register with the PCI subsystem.
53265ec8b7d1SJesse Brandeburg **/
iavf_init_module(void)53275ec8b7d1SJesse Brandeburg static int __init iavf_init_module(void)
53285ec8b7d1SJesse Brandeburg {
532934a2a3b8SJeff Kirsher pr_info("iavf: %s\n", iavf_driver_string);
53305ec8b7d1SJesse Brandeburg
53315ec8b7d1SJesse Brandeburg pr_info("%s\n", iavf_copyright);
53325ec8b7d1SJesse Brandeburg
53334411a608SMichal Schmidt return pci_register_driver(&iavf_driver);
53345ec8b7d1SJesse Brandeburg }
53355ec8b7d1SJesse Brandeburg
53365ec8b7d1SJesse Brandeburg module_init(iavf_init_module);
53375ec8b7d1SJesse Brandeburg
/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory. Unregistering the PCI driver causes the PCI core to
 * invoke iavf_remove() for every still-bound device.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
}

module_exit(iavf_exit_module);
53505ec8b7d1SJesse Brandeburg
53515ec8b7d1SJesse Brandeburg /* iavf_main.c */
5352