1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8  * be included exactly once across the whole kernel with
9  * CREATE_TRACE_POINTS defined
10  */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13 
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static void iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19 
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22 	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23 
24 static const char iavf_copyright[] =
25 	"Copyright (c) 2013 - 2018 Intel Corporation.";
26 
27 /* iavf_pci_tbl - PCI Device ID Table
28  *
29  * Wildcard entries (PCI_ANY_ID) should come last
30  * Last entry must be all 0s
31  *
32  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33  *   Class, Class Mask, private data (not used) }
34  */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40 	/* required last entry */
41 	{0, }
42 };
43 
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45 
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50 
51 static const struct net_device_ops iavf_netdev_ops;
52 
53 int iavf_status_to_errno(enum iavf_status status)
54 {
55 	switch (status) {
56 	case IAVF_SUCCESS:
57 		return 0;
58 	case IAVF_ERR_PARAM:
59 	case IAVF_ERR_MAC_TYPE:
60 	case IAVF_ERR_INVALID_MAC_ADDR:
61 	case IAVF_ERR_INVALID_LINK_SETTINGS:
62 	case IAVF_ERR_INVALID_PD_ID:
63 	case IAVF_ERR_INVALID_QP_ID:
64 	case IAVF_ERR_INVALID_CQ_ID:
65 	case IAVF_ERR_INVALID_CEQ_ID:
66 	case IAVF_ERR_INVALID_AEQ_ID:
67 	case IAVF_ERR_INVALID_SIZE:
68 	case IAVF_ERR_INVALID_ARP_INDEX:
69 	case IAVF_ERR_INVALID_FPM_FUNC_ID:
70 	case IAVF_ERR_QP_INVALID_MSG_SIZE:
71 	case IAVF_ERR_INVALID_FRAG_COUNT:
72 	case IAVF_ERR_INVALID_ALIGNMENT:
73 	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
74 	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
75 	case IAVF_ERR_INVALID_VF_ID:
76 	case IAVF_ERR_INVALID_HMCFN_ID:
77 	case IAVF_ERR_INVALID_PBLE_INDEX:
78 	case IAVF_ERR_INVALID_SD_INDEX:
79 	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
80 	case IAVF_ERR_INVALID_SD_TYPE:
81 	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
82 	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
83 	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
84 		return -EINVAL;
85 	case IAVF_ERR_NVM:
86 	case IAVF_ERR_NVM_CHECKSUM:
87 	case IAVF_ERR_PHY:
88 	case IAVF_ERR_CONFIG:
89 	case IAVF_ERR_UNKNOWN_PHY:
90 	case IAVF_ERR_LINK_SETUP:
91 	case IAVF_ERR_ADAPTER_STOPPED:
92 	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
93 	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
94 	case IAVF_ERR_RESET_FAILED:
95 	case IAVF_ERR_BAD_PTR:
96 	case IAVF_ERR_SWFW_SYNC:
97 	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
98 	case IAVF_ERR_QUEUE_EMPTY:
99 	case IAVF_ERR_FLUSHED_QUEUE:
100 	case IAVF_ERR_OPCODE_MISMATCH:
101 	case IAVF_ERR_CQP_COMPL_ERROR:
102 	case IAVF_ERR_BACKING_PAGE_ERROR:
103 	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
104 	case IAVF_ERR_MEMCPY_FAILED:
105 	case IAVF_ERR_SRQ_ENABLED:
106 	case IAVF_ERR_ADMIN_QUEUE_ERROR:
107 	case IAVF_ERR_ADMIN_QUEUE_FULL:
108 	case IAVF_ERR_BAD_RDMA_CQE:
109 	case IAVF_ERR_NVM_BLANK_MODE:
110 	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
111 	case IAVF_ERR_DIAG_TEST_FAILED:
112 	case IAVF_ERR_FIRMWARE_API_VERSION:
113 	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
114 		return -EIO;
115 	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
116 		return -ENODEV;
117 	case IAVF_ERR_NO_AVAILABLE_VSI:
118 	case IAVF_ERR_RING_FULL:
119 		return -ENOSPC;
120 	case IAVF_ERR_NO_MEMORY:
121 		return -ENOMEM;
122 	case IAVF_ERR_TIMEOUT:
123 	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
124 		return -ETIMEDOUT;
125 	case IAVF_ERR_NOT_IMPLEMENTED:
126 	case IAVF_NOT_SUPPORTED:
127 		return -EOPNOTSUPP;
128 	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
129 		return -EALREADY;
130 	case IAVF_ERR_NOT_READY:
131 		return -EBUSY;
132 	case IAVF_ERR_BUF_TOO_SHORT:
133 		return -EMSGSIZE;
134 	}
135 
136 	return -EIO;
137 }
138 
139 int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
140 {
141 	switch (v_status) {
142 	case VIRTCHNL_STATUS_SUCCESS:
143 		return 0;
144 	case VIRTCHNL_STATUS_ERR_PARAM:
145 	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
146 		return -EINVAL;
147 	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
148 		return -ENOMEM;
149 	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
150 	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
151 	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
152 		return -EIO;
153 	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
154 		return -EOPNOTSUPP;
155 	}
156 
157 	return -EIO;
158 }
159 
160 /**
161  * iavf_pdev_to_adapter - go from pci_dev to adapter
162  * @pdev: pci_dev pointer
163  */
164 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
165 {
166 	return netdev_priv(pci_get_drvdata(pdev));
167 }
168 
169 /**
170  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
171  * @hw:   pointer to the HW structure
172  * @mem:  ptr to mem struct to fill out
173  * @size: size of memory requested
174  * @alignment: what to align the allocation to
175  **/
176 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
177 					 struct iavf_dma_mem *mem,
178 					 u64 size, u32 alignment)
179 {
180 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
181 
182 	if (!mem)
183 		return IAVF_ERR_PARAM;
184 
185 	mem->size = ALIGN(size, alignment);
186 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
187 				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
188 	if (mem->va)
189 		return 0;
190 	else
191 		return IAVF_ERR_NO_MEMORY;
192 }
193 
194 /**
195  * iavf_free_dma_mem_d - OS specific memory free for shared code
196  * @hw:   pointer to the HW structure
197  * @mem:  ptr to mem struct to free
198  **/
199 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
200 				     struct iavf_dma_mem *mem)
201 {
202 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
203 
204 	if (!mem || !mem->va)
205 		return IAVF_ERR_PARAM;
206 	dma_free_coherent(&adapter->pdev->dev, mem->size,
207 			  mem->va, (dma_addr_t)mem->pa);
208 	return 0;
209 }
210 
211 /**
212  * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
213  * @hw:   pointer to the HW structure
214  * @mem:  ptr to mem struct to fill out
215  * @size: size of memory requested
216  **/
217 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
218 					  struct iavf_virt_mem *mem, u32 size)
219 {
220 	if (!mem)
221 		return IAVF_ERR_PARAM;
222 
223 	mem->size = size;
224 	mem->va = kzalloc(size, GFP_KERNEL);
225 
226 	if (mem->va)
227 		return 0;
228 	else
229 		return IAVF_ERR_NO_MEMORY;
230 }
231 
232 /**
233  * iavf_free_virt_mem_d - OS specific memory free for shared code
234  * @hw:   pointer to the HW structure
235  * @mem:  ptr to mem struct to free
236  **/
237 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
238 				      struct iavf_virt_mem *mem)
239 {
240 	if (!mem)
241 		return IAVF_ERR_PARAM;
242 
243 	/* it's ok to kfree a NULL pointer */
244 	kfree(mem->va);
245 
246 	return 0;
247 }
248 
249 /**
250  * iavf_lock_timeout - try to lock mutex but give up after timeout
251  * @lock: mutex that should be locked
252  * @msecs: timeout in msecs
253  *
254  * Returns 0 on success, negative on failure
255  **/
256 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
257 {
258 	unsigned int wait, delay = 10;
259 
260 	for (wait = 0; wait < msecs; wait += delay) {
261 		if (mutex_trylock(lock))
262 			return 0;
263 
264 		msleep(delay);
265 	}
266 
267 	return -1;
268 }
269 
270 /**
271  * iavf_schedule_reset - Set the flags and schedule a reset event
272  * @adapter: board private structure
273  **/
274 void iavf_schedule_reset(struct iavf_adapter *adapter)
275 {
276 	if (!(adapter->flags &
277 	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
278 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
279 		queue_work(adapter->wq, &adapter->reset_task);
280 	}
281 }
282 
283 /**
284  * iavf_schedule_request_stats - Set the flags and schedule statistics request
285  * @adapter: board private structure
286  *
287  * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
288  * request and refresh ethtool stats
289  **/
290 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
291 {
292 	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
293 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
294 }
295 
296 /**
297  * iavf_tx_timeout - Respond to a Tx Hang
298  * @netdev: network interface device structure
299  * @txqueue: queue number that is timing out
300  **/
301 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
302 {
303 	struct iavf_adapter *adapter = netdev_priv(netdev);
304 
305 	adapter->tx_timeout_count++;
306 	iavf_schedule_reset(adapter);
307 }
308 
309 /**
310  * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
311  * @adapter: board private structure
312  **/
313 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
314 {
315 	struct iavf_hw *hw = &adapter->hw;
316 
317 	if (!adapter->msix_entries)
318 		return;
319 
320 	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
321 
322 	iavf_flush(hw);
323 
324 	synchronize_irq(adapter->msix_entries[0].vector);
325 }
326 
327 /**
328  * iavf_misc_irq_enable - Enable default interrupt generation settings
329  * @adapter: board private structure
330  **/
331 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
332 {
333 	struct iavf_hw *hw = &adapter->hw;
334 
335 	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
336 				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
337 	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
338 
339 	iavf_flush(hw);
340 }
341 
342 /**
343  * iavf_irq_disable - Mask off interrupt generation on the NIC
344  * @adapter: board private structure
345  **/
346 static void iavf_irq_disable(struct iavf_adapter *adapter)
347 {
348 	int i;
349 	struct iavf_hw *hw = &adapter->hw;
350 
351 	if (!adapter->msix_entries)
352 		return;
353 
354 	for (i = 1; i < adapter->num_msix_vectors; i++) {
355 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
356 		synchronize_irq(adapter->msix_entries[i].vector);
357 	}
358 	iavf_flush(hw);
359 }
360 
361 /**
362  * iavf_irq_enable_queues - Enable interrupt for all queues
363  * @adapter: board private structure
364  **/
365 void iavf_irq_enable_queues(struct iavf_adapter *adapter)
366 {
367 	struct iavf_hw *hw = &adapter->hw;
368 	int i;
369 
370 	for (i = 1; i < adapter->num_msix_vectors; i++) {
371 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
372 		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
373 		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
374 	}
375 }
376 
377 /**
378  * iavf_irq_enable - Enable default interrupt generation settings
379  * @adapter: board private structure
380  * @flush: boolean value whether to run rd32()
381  **/
382 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
383 {
384 	struct iavf_hw *hw = &adapter->hw;
385 
386 	iavf_misc_irq_enable(adapter);
387 	iavf_irq_enable_queues(adapter);
388 
389 	if (flush)
390 		iavf_flush(hw);
391 }
392 
393 /**
394  * iavf_msix_aq - Interrupt handler for vector 0
395  * @irq: interrupt number
396  * @data: pointer to netdev
397  **/
398 static irqreturn_t iavf_msix_aq(int irq, void *data)
399 {
400 	struct net_device *netdev = data;
401 	struct iavf_adapter *adapter = netdev_priv(netdev);
402 	struct iavf_hw *hw = &adapter->hw;
403 
404 	/* handle non-queue interrupts, these reads clear the registers */
405 	rd32(hw, IAVF_VFINT_ICR01);
406 	rd32(hw, IAVF_VFINT_ICR0_ENA1);
407 
408 	if (adapter->state != __IAVF_REMOVE)
409 		/* schedule work on the private workqueue */
410 		queue_work(adapter->wq, &adapter->adminq_task);
411 
412 	return IRQ_HANDLED;
413 }
414 
415 /**
416  * iavf_msix_clean_rings - MSIX mode Interrupt Handler
417  * @irq: interrupt number
418  * @data: pointer to a q_vector
419  **/
420 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
421 {
422 	struct iavf_q_vector *q_vector = data;
423 
424 	if (!q_vector->tx.ring && !q_vector->rx.ring)
425 		return IRQ_HANDLED;
426 
427 	napi_schedule_irqoff(&q_vector->napi);
428 
429 	return IRQ_HANDLED;
430 }
431 
432 /**
433  * iavf_map_vector_to_rxq - associate irqs with rx queues
434  * @adapter: board private structure
435  * @v_idx: interrupt number
436  * @r_idx: queue number
437  **/
438 static void
439 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
440 {
441 	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
442 	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
443 	struct iavf_hw *hw = &adapter->hw;
444 
445 	rx_ring->q_vector = q_vector;
446 	rx_ring->next = q_vector->rx.ring;
447 	rx_ring->vsi = &adapter->vsi;
448 	q_vector->rx.ring = rx_ring;
449 	q_vector->rx.count++;
450 	q_vector->rx.next_update = jiffies + 1;
451 	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
452 	q_vector->ring_mask |= BIT(r_idx);
453 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
454 	     q_vector->rx.current_itr >> 1);
455 	q_vector->rx.current_itr = q_vector->rx.target_itr;
456 }
457 
458 /**
459  * iavf_map_vector_to_txq - associate irqs with tx queues
460  * @adapter: board private structure
461  * @v_idx: interrupt number
462  * @t_idx: queue number
463  **/
464 static void
465 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
466 {
467 	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
468 	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
469 	struct iavf_hw *hw = &adapter->hw;
470 
471 	tx_ring->q_vector = q_vector;
472 	tx_ring->next = q_vector->tx.ring;
473 	tx_ring->vsi = &adapter->vsi;
474 	q_vector->tx.ring = tx_ring;
475 	q_vector->tx.count++;
476 	q_vector->tx.next_update = jiffies + 1;
477 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
478 	q_vector->num_ringpairs++;
479 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
480 	     q_vector->tx.target_itr >> 1);
481 	q_vector->tx.current_itr = q_vector->tx.target_itr;
482 }
483 
484 /**
485  * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
486  * @adapter: board private structure to initialize
487  *
488  * This function maps descriptor rings to the queue-specific vectors
489  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
490  * one vector per ring/queue, but on a constrained vector budget, we
491  * group the rings as "efficiently" as possible.  You would add new
492  * mapping configurations in here.
493  **/
494 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
495 {
496 	int rings_remaining = adapter->num_active_queues;
497 	int ridx = 0, vidx = 0;
498 	int q_vectors;
499 
500 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
501 
502 	for (; ridx < rings_remaining; ridx++) {
503 		iavf_map_vector_to_rxq(adapter, vidx, ridx);
504 		iavf_map_vector_to_txq(adapter, vidx, ridx);
505 
506 		/* In the case where we have more queues than vectors, continue
507 		 * round-robin on vectors until all queues are mapped.
508 		 */
509 		if (++vidx >= q_vectors)
510 			vidx = 0;
511 	}
512 
513 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
514 }
515 
516 /**
517  * iavf_irq_affinity_notify - Callback for affinity changes
518  * @notify: context as to what irq was changed
519  * @mask: the new affinity mask
520  *
521  * This is a callback function used by the irq_set_affinity_notifier function
522  * so that we may register to receive changes to the irq affinity masks.
523  **/
524 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
525 				     const cpumask_t *mask)
526 {
527 	struct iavf_q_vector *q_vector =
528 		container_of(notify, struct iavf_q_vector, affinity_notify);
529 
530 	cpumask_copy(&q_vector->affinity_mask, mask);
531 }
532 
533 /**
534  * iavf_irq_affinity_release - Callback for affinity notifier release
535  * @ref: internal core kernel usage
536  *
537  * This is a callback function used by the irq_set_affinity_notifier function
538  * to inform the current notification subscriber that they will no longer
539  * receive notifications.
540  **/
541 static void iavf_irq_affinity_release(struct kref *ref) {}
542 
543 /**
544  * iavf_request_traffic_irqs - Initialize MSI-X interrupts
545  * @adapter: board private structure
546  * @basename: device basename
547  *
548  * Allocates MSI-X vectors for tx and rx handling, and requests
549  * interrupts from the kernel.
550  **/
551 static int
552 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
553 {
554 	unsigned int vector, q_vectors;
555 	unsigned int rx_int_idx = 0, tx_int_idx = 0;
556 	int irq_num, err;
557 	int cpu;
558 
559 	iavf_irq_disable(adapter);
560 	/* Decrement for Other and TCP Timer vectors */
561 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
562 
563 	for (vector = 0; vector < q_vectors; vector++) {
564 		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
565 
566 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
567 
568 		if (q_vector->tx.ring && q_vector->rx.ring) {
569 			snprintf(q_vector->name, sizeof(q_vector->name),
570 				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
571 			tx_int_idx++;
572 		} else if (q_vector->rx.ring) {
573 			snprintf(q_vector->name, sizeof(q_vector->name),
574 				 "iavf-%s-rx-%u", basename, rx_int_idx++);
575 		} else if (q_vector->tx.ring) {
576 			snprintf(q_vector->name, sizeof(q_vector->name),
577 				 "iavf-%s-tx-%u", basename, tx_int_idx++);
578 		} else {
579 			/* skip this unused q_vector */
580 			continue;
581 		}
582 		err = request_irq(irq_num,
583 				  iavf_msix_clean_rings,
584 				  0,
585 				  q_vector->name,
586 				  q_vector);
587 		if (err) {
588 			dev_info(&adapter->pdev->dev,
589 				 "Request_irq failed, error: %d\n", err);
590 			goto free_queue_irqs;
591 		}
592 		/* register for affinity change notifications */
593 		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
594 		q_vector->affinity_notify.release =
595 						   iavf_irq_affinity_release;
596 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
597 		/* Spread the IRQ affinity hints across online CPUs. Note that
598 		 * get_cpu_mask returns a mask with a permanent lifetime so
599 		 * it's safe to use as a hint for irq_update_affinity_hint.
600 		 */
601 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
602 		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
603 	}
604 
605 	return 0;
606 
607 free_queue_irqs:
608 	while (vector) {
609 		vector--;
610 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
611 		irq_set_affinity_notifier(irq_num, NULL);
612 		irq_update_affinity_hint(irq_num, NULL);
613 		free_irq(irq_num, &adapter->q_vectors[vector]);
614 	}
615 	return err;
616 }
617 
618 /**
619  * iavf_request_misc_irq - Initialize MSI-X interrupts
620  * @adapter: board private structure
621  *
622  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
623  * vector is only for the admin queue, and stays active even when the netdev
624  * is closed.
625  **/
626 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
627 {
628 	struct net_device *netdev = adapter->netdev;
629 	int err;
630 
631 	snprintf(adapter->misc_vector_name,
632 		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
633 		 dev_name(&adapter->pdev->dev));
634 	err = request_irq(adapter->msix_entries[0].vector,
635 			  &iavf_msix_aq, 0,
636 			  adapter->misc_vector_name, netdev);
637 	if (err) {
638 		dev_err(&adapter->pdev->dev,
639 			"request_irq for %s failed: %d\n",
640 			adapter->misc_vector_name, err);
641 		free_irq(adapter->msix_entries[0].vector, netdev);
642 	}
643 	return err;
644 }
645 
646 /**
647  * iavf_free_traffic_irqs - Free MSI-X interrupts
648  * @adapter: board private structure
649  *
650  * Frees all MSI-X vectors other than 0.
651  **/
652 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
653 {
654 	int vector, irq_num, q_vectors;
655 
656 	if (!adapter->msix_entries)
657 		return;
658 
659 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
660 
661 	for (vector = 0; vector < q_vectors; vector++) {
662 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
663 		irq_set_affinity_notifier(irq_num, NULL);
664 		irq_update_affinity_hint(irq_num, NULL);
665 		free_irq(irq_num, &adapter->q_vectors[vector]);
666 	}
667 }
668 
669 /**
670  * iavf_free_misc_irq - Free MSI-X miscellaneous vector
671  * @adapter: board private structure
672  *
673  * Frees MSI-X vector 0.
674  **/
675 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
676 {
677 	struct net_device *netdev = adapter->netdev;
678 
679 	if (!adapter->msix_entries)
680 		return;
681 
682 	free_irq(adapter->msix_entries[0].vector, netdev);
683 }
684 
685 /**
686  * iavf_configure_tx - Configure Transmit Unit after Reset
687  * @adapter: board private structure
688  *
689  * Configure the Tx unit of the MAC after a reset.
690  **/
691 static void iavf_configure_tx(struct iavf_adapter *adapter)
692 {
693 	struct iavf_hw *hw = &adapter->hw;
694 	int i;
695 
696 	for (i = 0; i < adapter->num_active_queues; i++)
697 		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
698 }
699 
700 /**
701  * iavf_configure_rx - Configure Receive Unit after Reset
702  * @adapter: board private structure
703  *
704  * Configure the Rx unit of the MAC after a reset.
705  **/
706 static void iavf_configure_rx(struct iavf_adapter *adapter)
707 {
708 	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
709 	struct iavf_hw *hw = &adapter->hw;
710 	int i;
711 
712 	/* Legacy Rx will always default to a 2048 buffer size. */
713 #if (PAGE_SIZE < 8192)
714 	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
715 		struct net_device *netdev = adapter->netdev;
716 
717 		/* For jumbo frames on systems with 4K pages we have to use
718 		 * an order 1 page, so we might as well increase the size
719 		 * of our Rx buffer to make better use of the available space
720 		 */
721 		rx_buf_len = IAVF_RXBUFFER_3072;
722 
723 		/* We use a 1536 buffer size for configurations with
724 		 * standard Ethernet mtu.  On x86 this gives us enough room
725 		 * for shared info and 192 bytes of padding.
726 		 */
727 		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
728 		    (netdev->mtu <= ETH_DATA_LEN))
729 			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
730 	}
731 #endif
732 
733 	for (i = 0; i < adapter->num_active_queues; i++) {
734 		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
735 		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
736 
737 		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
738 			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
739 		else
740 			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
741 	}
742 }
743 
744 /**
745  * iavf_find_vlan - Search filter list for specific vlan filter
746  * @adapter: board private structure
747  * @vlan: vlan tag
748  *
749  * Returns ptr to the filter object or NULL. Must be called while holding the
750  * mac_vlan_list_lock.
751  **/
752 static struct
753 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
754 				 struct iavf_vlan vlan)
755 {
756 	struct iavf_vlan_filter *f;
757 
758 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
759 		if (f->vlan.vid == vlan.vid &&
760 		    f->vlan.tpid == vlan.tpid)
761 			return f;
762 	}
763 
764 	return NULL;
765 }
766 
767 /**
768  * iavf_add_vlan - Add a vlan filter to the list
769  * @adapter: board private structure
770  * @vlan: VLAN tag
771  *
772  * Returns ptr to the filter object or NULL when no memory available.
773  **/
774 static struct
775 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
776 				struct iavf_vlan vlan)
777 {
778 	struct iavf_vlan_filter *f = NULL;
779 
780 	spin_lock_bh(&adapter->mac_vlan_list_lock);
781 
782 	f = iavf_find_vlan(adapter, vlan);
783 	if (!f) {
784 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
785 		if (!f)
786 			goto clearout;
787 
788 		f->vlan = vlan;
789 
790 		list_add_tail(&f->list, &adapter->vlan_filter_list);
791 		f->state = IAVF_VLAN_ADD;
792 		adapter->num_vlan_filters++;
793 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
794 	}
795 
796 clearout:
797 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
798 	return f;
799 }
800 
801 /**
802  * iavf_del_vlan - Remove a vlan filter from the list
803  * @adapter: board private structure
804  * @vlan: VLAN tag
805  **/
806 static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
807 {
808 	struct iavf_vlan_filter *f;
809 
810 	spin_lock_bh(&adapter->mac_vlan_list_lock);
811 
812 	f = iavf_find_vlan(adapter, vlan);
813 	if (f) {
814 		f->state = IAVF_VLAN_REMOVE;
815 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
816 	}
817 
818 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
819 }
820 
821 /**
822  * iavf_restore_filters
823  * @adapter: board private structure
824  *
825  * Restore existing non MAC filters when VF netdev comes back up
826  **/
827 static void iavf_restore_filters(struct iavf_adapter *adapter)
828 {
829 	struct iavf_vlan_filter *f;
830 
831 	/* re-add all VLAN filters */
832 	spin_lock_bh(&adapter->mac_vlan_list_lock);
833 
834 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
835 		if (f->state == IAVF_VLAN_INACTIVE)
836 			f->state = IAVF_VLAN_ADD;
837 	}
838 
839 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
840 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
841 }
842 
843 /**
844  * iavf_get_num_vlans_added - get number of VLANs added
845  * @adapter: board private structure
846  */
847 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
848 {
849 	return adapter->num_vlan_filters;
850 }
851 
852 /**
853  * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
854  * @adapter: board private structure
855  *
856  * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
857  * do not impose a limit as that maintains current behavior and for
858  * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
859  **/
860 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
861 {
862 	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
863 	 * never been a limit on the VF driver side
864 	 */
865 	if (VLAN_ALLOWED(adapter))
866 		return VLAN_N_VID;
867 	else if (VLAN_V2_ALLOWED(adapter))
868 		return adapter->vlan_v2_caps.filtering.max_filters;
869 
870 	return 0;
871 }
872 
873 /**
874  * iavf_max_vlans_added - check if maximum VLANs allowed already exist
875  * @adapter: board private structure
876  **/
877 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
878 {
879 	if (iavf_get_num_vlans_added(adapter) <
880 	    iavf_get_max_vlans_allowed(adapter))
881 		return false;
882 
883 	return true;
884 }
885 
886 /**
887  * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
888  * @netdev: network device struct
889  * @proto: unused protocol data
890  * @vid: VLAN tag
891  **/
892 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
893 				__always_unused __be16 proto, u16 vid)
894 {
895 	struct iavf_adapter *adapter = netdev_priv(netdev);
896 
897 	/* Do not track VLAN 0 filter, always added by the PF on VF init */
898 	if (!vid)
899 		return 0;
900 
901 	if (!VLAN_FILTERING_ALLOWED(adapter))
902 		return -EIO;
903 
904 	if (iavf_max_vlans_added(adapter)) {
905 		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
906 			   iavf_get_max_vlans_allowed(adapter));
907 		return -EIO;
908 	}
909 
910 	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
911 		return -ENOMEM;
912 
913 	return 0;
914 }
915 
916 /**
917  * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
918  * @netdev: network device struct
919  * @proto: unused protocol data
920  * @vid: VLAN tag
921  **/
922 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
923 				 __always_unused __be16 proto, u16 vid)
924 {
925 	struct iavf_adapter *adapter = netdev_priv(netdev);
926 
927 	/* We do not track VLAN 0 filter */
928 	if (!vid)
929 		return 0;
930 
931 	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
932 	return 0;
933 }
934 
935 /**
936  * iavf_find_filter - Search filter list for specific mac filter
937  * @adapter: board private structure
938  * @macaddr: the MAC address
939  *
940  * Returns ptr to the filter object or NULL. Must be called while holding the
941  * mac_vlan_list_lock.
942  **/
943 static struct
944 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
945 				  const u8 *macaddr)
946 {
947 	struct iavf_mac_filter *f;
948 
949 	if (!macaddr)
950 		return NULL;
951 
952 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
953 		if (ether_addr_equal(macaddr, f->macaddr))
954 			return f;
955 	}
956 	return NULL;
957 }
958 
959 /**
960  * iavf_add_filter - Add a mac filter to the filter list
961  * @adapter: board private structure
962  * @macaddr: the MAC address
963  *
964  * Returns ptr to the filter object or NULL when no memory available.
965  **/
966 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
967 					const u8 *macaddr)
968 {
969 	struct iavf_mac_filter *f;
970 
971 	if (!macaddr)
972 		return NULL;
973 
974 	f = iavf_find_filter(adapter, macaddr);
975 	if (!f) {
976 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
977 		if (!f)
978 			return f;
979 
980 		ether_addr_copy(f->macaddr, macaddr);
981 
982 		list_add_tail(&f->list, &adapter->mac_filter_list);
983 		f->add = true;
984 		f->add_handled = false;
985 		f->is_new_mac = true;
986 		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
987 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
988 	} else {
989 		f->remove = false;
990 	}
991 
992 	return f;
993 }
994 
995 /**
996  * iavf_replace_primary_mac - Replace current primary address
997  * @adapter: board private structure
998  * @new_mac: new MAC address to be applied
999  *
1000  * Replace current dev_addr and send request to PF for removal of previous
1001  * primary MAC address filter and addition of new primary MAC filter.
1002  * Return 0 for success, -ENOMEM for failure.
1003  *
1004  * Do not call this with mac_vlan_list_lock!
1005  **/
1006 int iavf_replace_primary_mac(struct iavf_adapter *adapter,
1007 			     const u8 *new_mac)
1008 {
1009 	struct iavf_hw *hw = &adapter->hw;
1010 	struct iavf_mac_filter *f;
1011 
1012 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1013 
1014 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
1015 		f->is_primary = false;
1016 	}
1017 
1018 	f = iavf_find_filter(adapter, hw->mac.addr);
1019 	if (f) {
1020 		f->remove = true;
1021 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1022 	}
1023 
1024 	f = iavf_add_filter(adapter, new_mac);
1025 
1026 	if (f) {
1027 		/* Always send the request to add if changing primary MAC
1028 		 * even if filter is already present on the list
1029 		 */
1030 		f->is_primary = true;
1031 		f->add = true;
1032 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1033 		ether_addr_copy(hw->mac.addr, new_mac);
1034 	}
1035 
1036 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1037 
1038 	/* schedule the watchdog task to immediately process the request */
1039 	if (f) {
1040 		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1041 		return 0;
1042 	}
1043 	return -ENOMEM;
1044 }
1045 
1046 /**
1047  * iavf_is_mac_set_handled - wait for a response to set MAC from PF
1048  * @netdev: network interface device structure
1049  * @macaddr: MAC address to set
1050  *
1051  * Returns true on success, false on failure
1052  */
1053 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1054 				    const u8 *macaddr)
1055 {
1056 	struct iavf_adapter *adapter = netdev_priv(netdev);
1057 	struct iavf_mac_filter *f;
1058 	bool ret = false;
1059 
1060 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1061 
1062 	f = iavf_find_filter(adapter, macaddr);
1063 
1064 	if (!f || (!f->add && f->add_handled))
1065 		ret = true;
1066 
1067 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1068 
1069 	return ret;
1070 }
1071 
1072 /**
1073  * iavf_set_mac - NDO callback to set port MAC address
1074  * @netdev: network interface device structure
1075  * @p: pointer to an address structure
1076  *
1077  * Returns 0 on success, negative on failure
1078  */
1079 static int iavf_set_mac(struct net_device *netdev, void *p)
1080 {
1081 	struct iavf_adapter *adapter = netdev_priv(netdev);
1082 	struct sockaddr *addr = p;
1083 	int ret;
1084 
1085 	if (!is_valid_ether_addr(addr->sa_data))
1086 		return -EADDRNOTAVAIL;
1087 
1088 	ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1089 
1090 	if (ret)
1091 		return ret;
1092 
1093 	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1094 					       iavf_is_mac_set_handled(netdev, addr->sa_data),
1095 					       msecs_to_jiffies(2500));
1096 
1097 	/* If ret < 0 then it means wait was interrupted.
1098 	 * If ret == 0 then it means we got a timeout.
1099 	 * else it means we got response for set MAC from PF,
1100 	 * check if netdev MAC was updated to requested MAC,
1101 	 * if yes then set MAC succeeded otherwise it failed return -EACCES
1102 	 */
1103 	if (ret < 0)
1104 		return ret;
1105 
1106 	if (!ret)
1107 		return -EAGAIN;
1108 
1109 	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1110 		return -EACCES;
1111 
1112 	return 0;
1113 }
1114 
1115 /**
1116  * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1117  * @netdev: the netdevice
1118  * @addr: address to add
1119  *
1120  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1121  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1122  */
1123 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1124 {
1125 	struct iavf_adapter *adapter = netdev_priv(netdev);
1126 
1127 	if (iavf_add_filter(adapter, addr))
1128 		return 0;
1129 	else
1130 		return -ENOMEM;
1131 }
1132 
1133 /**
1134  * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1135  * @netdev: the netdevice
1136  * @addr: address to add
1137  *
1138  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1139  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1140  */
1141 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1142 {
1143 	struct iavf_adapter *adapter = netdev_priv(netdev);
1144 	struct iavf_mac_filter *f;
1145 
1146 	/* Under some circumstances, we might receive a request to delete
1147 	 * our own device address from our uc list. Because we store the
1148 	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1149 	 * such requests and not delete our device address from this list.
1150 	 */
1151 	if (ether_addr_equal(addr, netdev->dev_addr))
1152 		return 0;
1153 
1154 	f = iavf_find_filter(adapter, addr);
1155 	if (f) {
1156 		f->remove = true;
1157 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1158 	}
1159 	return 0;
1160 }
1161 
1162 /**
1163  * iavf_set_rx_mode - NDO callback to set the netdev filters
1164  * @netdev: network interface device structure
1165  **/
1166 static void iavf_set_rx_mode(struct net_device *netdev)
1167 {
1168 	struct iavf_adapter *adapter = netdev_priv(netdev);
1169 
1170 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1171 	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1172 	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1173 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1174 
1175 	if (netdev->flags & IFF_PROMISC &&
1176 	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
1177 		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
1178 	else if (!(netdev->flags & IFF_PROMISC) &&
1179 		 adapter->flags & IAVF_FLAG_PROMISC_ON)
1180 		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
1181 
1182 	if (netdev->flags & IFF_ALLMULTI &&
1183 	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
1184 		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
1185 	else if (!(netdev->flags & IFF_ALLMULTI) &&
1186 		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
1187 		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
1188 }
1189 
1190 /**
1191  * iavf_napi_enable_all - enable NAPI on all queue vectors
1192  * @adapter: board private structure
1193  **/
1194 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1195 {
1196 	int q_idx;
1197 	struct iavf_q_vector *q_vector;
1198 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1199 
1200 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1201 		struct napi_struct *napi;
1202 
1203 		q_vector = &adapter->q_vectors[q_idx];
1204 		napi = &q_vector->napi;
1205 		napi_enable(napi);
1206 	}
1207 }
1208 
1209 /**
1210  * iavf_napi_disable_all - disable NAPI on all queue vectors
1211  * @adapter: board private structure
1212  **/
1213 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1214 {
1215 	int q_idx;
1216 	struct iavf_q_vector *q_vector;
1217 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1218 
1219 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1220 		q_vector = &adapter->q_vectors[q_idx];
1221 		napi_disable(&q_vector->napi);
1222 	}
1223 }
1224 
1225 /**
1226  * iavf_configure - set up transmit and receive data structures
1227  * @adapter: board private structure
1228  **/
1229 static void iavf_configure(struct iavf_adapter *adapter)
1230 {
1231 	struct net_device *netdev = adapter->netdev;
1232 	int i;
1233 
1234 	iavf_set_rx_mode(netdev);
1235 
1236 	iavf_configure_tx(adapter);
1237 	iavf_configure_rx(adapter);
1238 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1239 
1240 	for (i = 0; i < adapter->num_active_queues; i++) {
1241 		struct iavf_ring *ring = &adapter->rx_rings[i];
1242 
1243 		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1244 	}
1245 }
1246 
1247 /**
1248  * iavf_up_complete - Finish the last steps of bringing up a connection
1249  * @adapter: board private structure
1250  *
1251  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1252  **/
1253 static void iavf_up_complete(struct iavf_adapter *adapter)
1254 {
1255 	iavf_change_state(adapter, __IAVF_RUNNING);
1256 	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1257 
1258 	iavf_napi_enable_all(adapter);
1259 
1260 	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1261 	if (CLIENT_ENABLED(adapter))
1262 		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1263 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1264 }
1265 
1266 /**
1267  * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
1268  * yet and mark other to be removed.
1269  * @adapter: board private structure
1270  **/
1271 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1272 {
1273 	struct iavf_vlan_filter *vlf, *vlftmp;
1274 	struct iavf_mac_filter *f, *ftmp;
1275 
1276 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1277 	/* clear the sync flag on all filters */
1278 	__dev_uc_unsync(adapter->netdev, NULL);
1279 	__dev_mc_unsync(adapter->netdev, NULL);
1280 
1281 	/* remove all MAC filters */
1282 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1283 				 list) {
1284 		if (f->add) {
1285 			list_del(&f->list);
1286 			kfree(f);
1287 		} else {
1288 			f->remove = true;
1289 		}
1290 	}
1291 
1292 	/* disable all VLAN filters */
1293 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1294 				 list)
1295 		vlf->state = IAVF_VLAN_DISABLE;
1296 
1297 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1298 }
1299 
1300 /**
1301  * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
1302  * mark other to be removed.
1303  * @adapter: board private structure
1304  **/
1305 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1306 {
1307 	struct iavf_cloud_filter *cf, *cftmp;
1308 
1309 	/* remove all cloud filters */
1310 	spin_lock_bh(&adapter->cloud_filter_list_lock);
1311 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1312 				 list) {
1313 		if (cf->add) {
1314 			list_del(&cf->list);
1315 			kfree(cf);
1316 			adapter->num_cloud_filters--;
1317 		} else {
1318 			cf->del = true;
1319 		}
1320 	}
1321 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
1322 }
1323 
1324 /**
1325  * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
1326  * other to be removed.
1327  * @adapter: board private structure
1328  **/
1329 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1330 {
1331 	struct iavf_fdir_fltr *fdir, *fdirtmp;
1332 
1333 	/* remove all Flow Director filters */
1334 	spin_lock_bh(&adapter->fdir_fltr_lock);
1335 	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
1336 				 list) {
1337 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1338 			list_del(&fdir->list);
1339 			kfree(fdir);
1340 			adapter->fdir_active_fltr--;
1341 		} else {
1342 			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1343 		}
1344 	}
1345 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1346 }
1347 
1348 /**
1349  * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
1350  * other to be removed.
1351  * @adapter: board private structure
1352  **/
1353 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1354 {
1355 	struct iavf_adv_rss *rss, *rsstmp;
1356 
1357 	/* remove all advance RSS configuration */
1358 	spin_lock_bh(&adapter->adv_rss_lock);
1359 	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1360 				 list) {
1361 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1362 			list_del(&rss->list);
1363 			kfree(rss);
1364 		} else {
1365 			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1366 		}
1367 	}
1368 	spin_unlock_bh(&adapter->adv_rss_lock);
1369 }
1370 
1371 /**
1372  * iavf_down - Shutdown the connection processing
1373  * @adapter: board private structure
1374  *
1375  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1376  **/
1377 void iavf_down(struct iavf_adapter *adapter)
1378 {
1379 	struct net_device *netdev = adapter->netdev;
1380 
1381 	if (adapter->state <= __IAVF_DOWN_PENDING)
1382 		return;
1383 
1384 	netif_carrier_off(netdev);
1385 	netif_tx_disable(netdev);
1386 	adapter->link_up = false;
1387 	iavf_napi_disable_all(adapter);
1388 	iavf_irq_disable(adapter);
1389 
1390 	iavf_clear_mac_vlan_filters(adapter);
1391 	iavf_clear_cloud_filters(adapter);
1392 	iavf_clear_fdir_filters(adapter);
1393 	iavf_clear_adv_rss_conf(adapter);
1394 
1395 	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
1396 		/* cancel any current operation */
1397 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1398 		/* Schedule operations to close down the HW. Don't wait
1399 		 * here for this to complete. The watchdog is still running
1400 		 * and it will take care of this.
1401 		 */
1402 		if (!list_empty(&adapter->mac_filter_list))
1403 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1404 		if (!list_empty(&adapter->vlan_filter_list))
1405 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1406 		if (!list_empty(&adapter->cloud_filter_list))
1407 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1408 		if (!list_empty(&adapter->fdir_list_head))
1409 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1410 		if (!list_empty(&adapter->adv_rss_list_head))
1411 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1412 		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1413 	}
1414 
1415 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1416 }
1417 
1418 /**
1419  * iavf_acquire_msix_vectors - Setup the MSIX capability
1420  * @adapter: board private structure
1421  * @vectors: number of vectors to request
1422  *
1423  * Work with the OS to set up the MSIX vectors needed.
1424  *
1425  * Returns 0 on success, negative on failure
1426  **/
1427 static int
1428 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1429 {
1430 	int err, vector_threshold;
1431 
1432 	/* We'll want at least 3 (vector_threshold):
1433 	 * 0) Other (Admin Queue and link, mostly)
1434 	 * 1) TxQ[0] Cleanup
1435 	 * 2) RxQ[0] Cleanup
1436 	 */
1437 	vector_threshold = MIN_MSIX_COUNT;
1438 
1439 	/* The more we get, the more we will assign to Tx/Rx Cleanup
1440 	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1441 	 * Right now, we simply care about how many we'll get; we'll
1442 	 * set them up later while requesting irq's.
1443 	 */
1444 	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1445 				    vector_threshold, vectors);
1446 	if (err < 0) {
1447 		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1448 		kfree(adapter->msix_entries);
1449 		adapter->msix_entries = NULL;
1450 		return err;
1451 	}
1452 
1453 	/* Adjust for only the vectors we'll use, which is minimum
1454 	 * of max_msix_q_vectors + NONQ_VECS, or the number of
1455 	 * vectors we were allocated.
1456 	 */
1457 	adapter->num_msix_vectors = err;
1458 	return 0;
1459 }
1460 
1461 /**
1462  * iavf_free_queues - Free memory for all rings
1463  * @adapter: board private structure to initialize
1464  *
1465  * Free all of the memory associated with queue pairs.
1466  **/
1467 static void iavf_free_queues(struct iavf_adapter *adapter)
1468 {
1469 	if (!adapter->vsi_res)
1470 		return;
1471 	adapter->num_active_queues = 0;
1472 	kfree(adapter->tx_rings);
1473 	adapter->tx_rings = NULL;
1474 	kfree(adapter->rx_rings);
1475 	adapter->rx_rings = NULL;
1476 }
1477 
1478 /**
1479  * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1480  * @adapter: board private structure
1481  *
1482  * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1483  * stripped in certain descriptor fields. Instead of checking the offload
1484  * capability bits in the hot path, cache the location the ring specific
1485  * flags.
1486  */
1487 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1488 {
1489 	int i;
1490 
1491 	for (i = 0; i < adapter->num_active_queues; i++) {
1492 		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1493 		struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1494 
1495 		/* prevent multiple L2TAG bits being set after VFR */
1496 		tx_ring->flags &=
1497 			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1498 			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1499 		rx_ring->flags &=
1500 			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1501 			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1502 
1503 		if (VLAN_ALLOWED(adapter)) {
1504 			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1505 			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1506 		} else if (VLAN_V2_ALLOWED(adapter)) {
1507 			struct virtchnl_vlan_supported_caps *stripping_support;
1508 			struct virtchnl_vlan_supported_caps *insertion_support;
1509 
1510 			stripping_support =
1511 				&adapter->vlan_v2_caps.offloads.stripping_support;
1512 			insertion_support =
1513 				&adapter->vlan_v2_caps.offloads.insertion_support;
1514 
1515 			if (stripping_support->outer) {
1516 				if (stripping_support->outer &
1517 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1518 					rx_ring->flags |=
1519 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1520 				else if (stripping_support->outer &
1521 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1522 					rx_ring->flags |=
1523 						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1524 			} else if (stripping_support->inner) {
1525 				if (stripping_support->inner &
1526 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1527 					rx_ring->flags |=
1528 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1529 				else if (stripping_support->inner &
1530 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1531 					rx_ring->flags |=
1532 						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1533 			}
1534 
1535 			if (insertion_support->outer) {
1536 				if (insertion_support->outer &
1537 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1538 					tx_ring->flags |=
1539 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1540 				else if (insertion_support->outer &
1541 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1542 					tx_ring->flags |=
1543 						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1544 			} else if (insertion_support->inner) {
1545 				if (insertion_support->inner &
1546 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1547 					tx_ring->flags |=
1548 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1549 				else if (insertion_support->inner &
1550 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1551 					tx_ring->flags |=
1552 						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1553 			}
1554 		}
1555 	}
1556 }
1557 
1558 /**
1559  * iavf_alloc_queues - Allocate memory for all rings
1560  * @adapter: board private structure to initialize
1561  *
1562  * We allocate one ring per queue at run-time since we don't know the
1563  * number of queues at compile-time.  The polling_netdev array is
1564  * intended for Multiqueue, but should work fine with a single queue.
1565  **/
1566 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1567 {
1568 	int i, num_active_queues;
1569 
1570 	/* If we're in reset reallocating queues we don't actually know yet for
1571 	 * certain the PF gave us the number of queues we asked for but we'll
1572 	 * assume it did.  Once basic reset is finished we'll confirm once we
1573 	 * start negotiating config with PF.
1574 	 */
1575 	if (adapter->num_req_queues)
1576 		num_active_queues = adapter->num_req_queues;
1577 	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1578 		 adapter->num_tc)
1579 		num_active_queues = adapter->ch_config.total_qps;
1580 	else
1581 		num_active_queues = min_t(int,
1582 					  adapter->vsi_res->num_queue_pairs,
1583 					  (int)(num_online_cpus()));
1584 
1585 
1586 	adapter->tx_rings = kcalloc(num_active_queues,
1587 				    sizeof(struct iavf_ring), GFP_KERNEL);
1588 	if (!adapter->tx_rings)
1589 		goto err_out;
1590 	adapter->rx_rings = kcalloc(num_active_queues,
1591 				    sizeof(struct iavf_ring), GFP_KERNEL);
1592 	if (!adapter->rx_rings)
1593 		goto err_out;
1594 
1595 	for (i = 0; i < num_active_queues; i++) {
1596 		struct iavf_ring *tx_ring;
1597 		struct iavf_ring *rx_ring;
1598 
1599 		tx_ring = &adapter->tx_rings[i];
1600 
1601 		tx_ring->queue_index = i;
1602 		tx_ring->netdev = adapter->netdev;
1603 		tx_ring->dev = &adapter->pdev->dev;
1604 		tx_ring->count = adapter->tx_desc_count;
1605 		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1606 		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1607 			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1608 
1609 		rx_ring = &adapter->rx_rings[i];
1610 		rx_ring->queue_index = i;
1611 		rx_ring->netdev = adapter->netdev;
1612 		rx_ring->dev = &adapter->pdev->dev;
1613 		rx_ring->count = adapter->rx_desc_count;
1614 		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1615 	}
1616 
1617 	adapter->num_active_queues = num_active_queues;
1618 
1619 	iavf_set_queue_vlan_tag_loc(adapter);
1620 
1621 	return 0;
1622 
1623 err_out:
1624 	iavf_free_queues(adapter);
1625 	return -ENOMEM;
1626 }
1627 
1628 /**
1629  * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1630  * @adapter: board private structure to initialize
1631  *
1632  * Attempt to configure the interrupts using the best available
1633  * capabilities of the hardware and the kernel.
1634  **/
1635 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1636 {
1637 	int vector, v_budget;
1638 	int pairs = 0;
1639 	int err = 0;
1640 
1641 	if (!adapter->vsi_res) {
1642 		err = -EIO;
1643 		goto out;
1644 	}
1645 	pairs = adapter->num_active_queues;
1646 
1647 	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1648 	 * us much good if we have more vectors than CPUs. However, we already
1649 	 * limit the total number of queues by the number of CPUs so we do not
1650 	 * need any further limiting here.
1651 	 */
1652 	v_budget = min_t(int, pairs + NONQ_VECS,
1653 			 (int)adapter->vf_res->max_vectors);
1654 
1655 	adapter->msix_entries = kcalloc(v_budget,
1656 					sizeof(struct msix_entry), GFP_KERNEL);
1657 	if (!adapter->msix_entries) {
1658 		err = -ENOMEM;
1659 		goto out;
1660 	}
1661 
1662 	for (vector = 0; vector < v_budget; vector++)
1663 		adapter->msix_entries[vector].entry = vector;
1664 
1665 	err = iavf_acquire_msix_vectors(adapter, v_budget);
1666 
1667 out:
1668 	netif_set_real_num_rx_queues(adapter->netdev, pairs);
1669 	netif_set_real_num_tx_queues(adapter->netdev, pairs);
1670 	return err;
1671 }
1672 
1673 /**
1674  * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1675  * @adapter: board private structure
1676  *
1677  * Return 0 on success, negative on failure
1678  **/
1679 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1680 {
1681 	struct iavf_aqc_get_set_rss_key_data *rss_key =
1682 		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1683 	struct iavf_hw *hw = &adapter->hw;
1684 	enum iavf_status status;
1685 
1686 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1687 		/* bail because we already have a command pending */
1688 		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1689 			adapter->current_op);
1690 		return -EBUSY;
1691 	}
1692 
1693 	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1694 	if (status) {
1695 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1696 			iavf_stat_str(hw, status),
1697 			iavf_aq_str(hw, hw->aq.asq_last_status));
1698 		return iavf_status_to_errno(status);
1699 
1700 	}
1701 
1702 	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1703 				     adapter->rss_lut, adapter->rss_lut_size);
1704 	if (status) {
1705 		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1706 			iavf_stat_str(hw, status),
1707 			iavf_aq_str(hw, hw->aq.asq_last_status));
1708 		return iavf_status_to_errno(status);
1709 	}
1710 
1711 	return 0;
1712 
1713 }
1714 
1715 /**
1716  * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1717  * @adapter: board private structure
1718  *
1719  * Returns 0 on success, negative on failure
1720  **/
1721 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1722 {
1723 	struct iavf_hw *hw = &adapter->hw;
1724 	u32 *dw;
1725 	u16 i;
1726 
1727 	dw = (u32 *)adapter->rss_key;
1728 	for (i = 0; i <= adapter->rss_key_size / 4; i++)
1729 		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1730 
1731 	dw = (u32 *)adapter->rss_lut;
1732 	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1733 		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1734 
1735 	iavf_flush(hw);
1736 
1737 	return 0;
1738 }
1739 
1740 /**
1741  * iavf_config_rss - Configure RSS keys and lut
1742  * @adapter: board private structure
1743  *
1744  * Returns 0 on success, negative on failure
1745  **/
1746 int iavf_config_rss(struct iavf_adapter *adapter)
1747 {
1748 
1749 	if (RSS_PF(adapter)) {
1750 		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1751 					IAVF_FLAG_AQ_SET_RSS_KEY;
1752 		return 0;
1753 	} else if (RSS_AQ(adapter)) {
1754 		return iavf_config_rss_aq(adapter);
1755 	} else {
1756 		return iavf_config_rss_reg(adapter);
1757 	}
1758 }
1759 
1760 /**
1761  * iavf_fill_rss_lut - Fill the lut with default values
1762  * @adapter: board private structure
1763  **/
1764 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1765 {
1766 	u16 i;
1767 
1768 	for (i = 0; i < adapter->rss_lut_size; i++)
1769 		adapter->rss_lut[i] = i % adapter->num_active_queues;
1770 }
1771 
1772 /**
1773  * iavf_init_rss - Prepare for RSS
1774  * @adapter: board private structure
1775  *
1776  * Return 0 on success, negative on failure
1777  **/
1778 static int iavf_init_rss(struct iavf_adapter *adapter)
1779 {
1780 	struct iavf_hw *hw = &adapter->hw;
1781 
1782 	if (!RSS_PF(adapter)) {
1783 		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1784 		if (adapter->vf_res->vf_cap_flags &
1785 		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1786 			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1787 		else
1788 			adapter->hena = IAVF_DEFAULT_RSS_HENA;
1789 
1790 		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1791 		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1792 	}
1793 
1794 	iavf_fill_rss_lut(adapter);
1795 	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1796 
1797 	return iavf_config_rss(adapter);
1798 }
1799 
1800 /**
1801  * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1802  * @adapter: board private structure to initialize
1803  *
1804  * We allocate one q_vector per queue interrupt.  If allocation fails we
1805  * return -ENOMEM.
1806  **/
1807 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1808 {
1809 	int q_idx = 0, num_q_vectors;
1810 	struct iavf_q_vector *q_vector;
1811 
1812 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1813 	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1814 				     GFP_KERNEL);
1815 	if (!adapter->q_vectors)
1816 		return -ENOMEM;
1817 
1818 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1819 		q_vector = &adapter->q_vectors[q_idx];
1820 		q_vector->adapter = adapter;
1821 		q_vector->vsi = &adapter->vsi;
1822 		q_vector->v_idx = q_idx;
1823 		q_vector->reg_idx = q_idx;
1824 		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1825 		netif_napi_add(adapter->netdev, &q_vector->napi,
1826 			       iavf_napi_poll);
1827 	}
1828 
1829 	return 0;
1830 }
1831 
1832 /**
1833  * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1834  * @adapter: board private structure to initialize
1835  *
1836  * This function frees the memory allocated to the q_vectors.  In addition if
1837  * NAPI is enabled it will delete any references to the NAPI struct prior
1838  * to freeing the q_vector.
1839  **/
1840 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1841 {
1842 	int q_idx, num_q_vectors;
1843 	int napi_vectors;
1844 
1845 	if (!adapter->q_vectors)
1846 		return;
1847 
1848 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1849 	napi_vectors = adapter->num_active_queues;
1850 
1851 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1852 		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1853 
1854 		if (q_idx < napi_vectors)
1855 			netif_napi_del(&q_vector->napi);
1856 	}
1857 	kfree(adapter->q_vectors);
1858 	adapter->q_vectors = NULL;
1859 }
1860 
1861 /**
1862  * iavf_reset_interrupt_capability - Reset MSIX setup
1863  * @adapter: board private structure
1864  *
1865  **/
1866 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1867 {
1868 	if (!adapter->msix_entries)
1869 		return;
1870 
1871 	pci_disable_msix(adapter->pdev);
1872 	kfree(adapter->msix_entries);
1873 	adapter->msix_entries = NULL;
1874 }
1875 
1876 /**
1877  * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1878  * @adapter: board private structure to initialize
1879  *
1880  **/
1881 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1882 {
1883 	int err;
1884 
1885 	err = iavf_alloc_queues(adapter);
1886 	if (err) {
1887 		dev_err(&adapter->pdev->dev,
1888 			"Unable to allocate memory for queues\n");
1889 		goto err_alloc_queues;
1890 	}
1891 
1892 	rtnl_lock();
1893 	err = iavf_set_interrupt_capability(adapter);
1894 	rtnl_unlock();
1895 	if (err) {
1896 		dev_err(&adapter->pdev->dev,
1897 			"Unable to setup interrupt capabilities\n");
1898 		goto err_set_interrupt;
1899 	}
1900 
1901 	err = iavf_alloc_q_vectors(adapter);
1902 	if (err) {
1903 		dev_err(&adapter->pdev->dev,
1904 			"Unable to allocate memory for queue vectors\n");
1905 		goto err_alloc_q_vectors;
1906 	}
1907 
1908 	/* If we've made it so far while ADq flag being ON, then we haven't
1909 	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
1910 	 * resources have been allocated in the reset path.
1911 	 * Now we can truly claim that ADq is enabled.
1912 	 */
1913 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1914 	    adapter->num_tc)
1915 		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1916 			 adapter->num_tc);
1917 
1918 	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1919 		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1920 		 adapter->num_active_queues);
1921 
1922 	return 0;
1923 err_alloc_q_vectors:
1924 	iavf_reset_interrupt_capability(adapter);
1925 err_set_interrupt:
1926 	iavf_free_queues(adapter);
1927 err_alloc_queues:
1928 	return err;
1929 }
1930 
1931 /**
1932  * iavf_free_rss - Free memory used by RSS structs
1933  * @adapter: board private structure
1934  **/
1935 static void iavf_free_rss(struct iavf_adapter *adapter)
1936 {
1937 	kfree(adapter->rss_key);
1938 	adapter->rss_key = NULL;
1939 
1940 	kfree(adapter->rss_lut);
1941 	adapter->rss_lut = NULL;
1942 }
1943 
1944 /**
1945  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1946  * @adapter: board private structure
1947  *
1948  * Returns 0 on success, negative on failure
1949  **/
1950 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1951 {
1952 	struct net_device *netdev = adapter->netdev;
1953 	int err;
1954 
1955 	if (netif_running(netdev))
1956 		iavf_free_traffic_irqs(adapter);
1957 	iavf_free_misc_irq(adapter);
1958 	iavf_reset_interrupt_capability(adapter);
1959 	iavf_free_q_vectors(adapter);
1960 	iavf_free_queues(adapter);
1961 
1962 	err =  iavf_init_interrupt_scheme(adapter);
1963 	if (err)
1964 		goto err;
1965 
1966 	netif_tx_stop_all_queues(netdev);
1967 
1968 	err = iavf_request_misc_irq(adapter);
1969 	if (err)
1970 		goto err;
1971 
1972 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1973 
1974 	iavf_map_rings_to_vectors(adapter);
1975 err:
1976 	return err;
1977 }
1978 
1979 /**
1980  * iavf_process_aq_command - process aq_required flags
1981  * and sends aq command
1982  * @adapter: pointer to iavf adapter structure
1983  *
1984  * Returns 0 on success
1985  * Returns error code if no command was sent
1986  * or error code if the command failed.
1987  **/
1988 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1989 {
1990 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1991 		return iavf_send_vf_config_msg(adapter);
1992 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
1993 		return iavf_send_vf_offload_vlan_v2_msg(adapter);
1994 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1995 		iavf_disable_queues(adapter);
1996 		return 0;
1997 	}
1998 
1999 	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2000 		iavf_map_queues(adapter);
2001 		return 0;
2002 	}
2003 
2004 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2005 		iavf_add_ether_addrs(adapter);
2006 		return 0;
2007 	}
2008 
2009 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2010 		iavf_add_vlans(adapter);
2011 		return 0;
2012 	}
2013 
2014 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2015 		iavf_del_ether_addrs(adapter);
2016 		return 0;
2017 	}
2018 
2019 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2020 		iavf_del_vlans(adapter);
2021 		return 0;
2022 	}
2023 
2024 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2025 		iavf_enable_vlan_stripping(adapter);
2026 		return 0;
2027 	}
2028 
2029 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2030 		iavf_disable_vlan_stripping(adapter);
2031 		return 0;
2032 	}
2033 
2034 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2035 		iavf_configure_queues(adapter);
2036 		return 0;
2037 	}
2038 
2039 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2040 		iavf_enable_queues(adapter);
2041 		return 0;
2042 	}
2043 
2044 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2045 		/* This message goes straight to the firmware, not the
2046 		 * PF, so we don't have to set current_op as we will
2047 		 * not get a response through the ARQ.
2048 		 */
2049 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2050 		return 0;
2051 	}
2052 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2053 		iavf_get_hena(adapter);
2054 		return 0;
2055 	}
2056 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2057 		iavf_set_hena(adapter);
2058 		return 0;
2059 	}
2060 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2061 		iavf_set_rss_key(adapter);
2062 		return 0;
2063 	}
2064 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2065 		iavf_set_rss_lut(adapter);
2066 		return 0;
2067 	}
2068 
2069 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2070 		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2071 				       FLAG_VF_MULTICAST_PROMISC);
2072 		return 0;
2073 	}
2074 
2075 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2076 		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2077 		return 0;
2078 	}
2079 	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2080 	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2081 		iavf_set_promiscuous(adapter, 0);
2082 		return 0;
2083 	}
2084 
2085 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2086 		iavf_enable_channels(adapter);
2087 		return 0;
2088 	}
2089 
2090 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2091 		iavf_disable_channels(adapter);
2092 		return 0;
2093 	}
2094 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2095 		iavf_add_cloud_filter(adapter);
2096 		return 0;
2097 	}
2098 
2099 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2100 		iavf_del_cloud_filter(adapter);
2101 		return 0;
2102 	}
2103 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2104 		iavf_del_cloud_filter(adapter);
2105 		return 0;
2106 	}
2107 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2108 		iavf_add_cloud_filter(adapter);
2109 		return 0;
2110 	}
2111 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2112 		iavf_add_fdir_filter(adapter);
2113 		return IAVF_SUCCESS;
2114 	}
2115 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2116 		iavf_del_fdir_filter(adapter);
2117 		return IAVF_SUCCESS;
2118 	}
2119 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2120 		iavf_add_adv_rss_cfg(adapter);
2121 		return 0;
2122 	}
2123 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2124 		iavf_del_adv_rss_cfg(adapter);
2125 		return 0;
2126 	}
2127 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2128 		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2129 		return 0;
2130 	}
2131 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2132 		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2133 		return 0;
2134 	}
2135 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2136 		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2137 		return 0;
2138 	}
2139 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2140 		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2141 		return 0;
2142 	}
2143 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2144 		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2145 		return 0;
2146 	}
2147 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2148 		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2149 		return 0;
2150 	}
2151 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2152 		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2153 		return 0;
2154 	}
2155 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2156 		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2157 		return 0;
2158 	}
2159 
2160 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2161 		iavf_request_stats(adapter);
2162 		return 0;
2163 	}
2164 
2165 	return -EAGAIN;
2166 }
2167 
2168 /**
2169  * iavf_set_vlan_offload_features - set VLAN offload configuration
2170  * @adapter: board private structure
2171  * @prev_features: previous features used for comparison
2172  * @features: updated features used for configuration
2173  *
2174  * Set the aq_required bit(s) based on the requested features passed in to
2175  * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2176  * the watchdog if any changes are requested to expedite the request via
2177  * virtchnl.
2178  **/
2179 void
2180 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2181 			       netdev_features_t prev_features,
2182 			       netdev_features_t features)
2183 {
2184 	bool enable_stripping = true, enable_insertion = true;
2185 	u16 vlan_ethertype = 0;
2186 	u64 aq_required = 0;
2187 
2188 	/* keep cases separate because one ethertype for offloads can be
2189 	 * disabled at the same time as another is disabled, so check for an
2190 	 * enabled ethertype first, then check for disabled. Default to
2191 	 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2192 	 * stripping.
2193 	 */
2194 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2195 		vlan_ethertype = ETH_P_8021AD;
2196 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2197 		vlan_ethertype = ETH_P_8021Q;
2198 	else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2199 		vlan_ethertype = ETH_P_8021AD;
2200 	else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2201 		vlan_ethertype = ETH_P_8021Q;
2202 	else
2203 		vlan_ethertype = ETH_P_8021Q;
2204 
2205 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2206 		enable_stripping = false;
2207 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2208 		enable_insertion = false;
2209 
2210 	if (VLAN_ALLOWED(adapter)) {
2211 		/* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2212 		 * stripping via virtchnl. VLAN insertion can be toggled on the
2213 		 * netdev, but it doesn't require a virtchnl message
2214 		 */
2215 		if (enable_stripping)
2216 			aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2217 		else
2218 			aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2219 
2220 	} else if (VLAN_V2_ALLOWED(adapter)) {
2221 		switch (vlan_ethertype) {
2222 		case ETH_P_8021Q:
2223 			if (enable_stripping)
2224 				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2225 			else
2226 				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2227 
2228 			if (enable_insertion)
2229 				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2230 			else
2231 				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2232 			break;
2233 		case ETH_P_8021AD:
2234 			if (enable_stripping)
2235 				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2236 			else
2237 				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2238 
2239 			if (enable_insertion)
2240 				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2241 			else
2242 				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2243 			break;
2244 		}
2245 	}
2246 
2247 	if (aq_required) {
2248 		adapter->aq_required |= aq_required;
2249 		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
2250 	}
2251 }
2252 
2253 /**
2254  * iavf_startup - first step of driver startup
2255  * @adapter: board private structure
2256  *
2257  * Function process __IAVF_STARTUP driver state.
2258  * When success the state is changed to __IAVF_INIT_VERSION_CHECK
2259  * when fails the state is changed to __IAVF_INIT_FAILED
2260  **/
2261 static void iavf_startup(struct iavf_adapter *adapter)
2262 {
2263 	struct pci_dev *pdev = adapter->pdev;
2264 	struct iavf_hw *hw = &adapter->hw;
2265 	enum iavf_status status;
2266 	int ret;
2267 
2268 	WARN_ON(adapter->state != __IAVF_STARTUP);
2269 
2270 	/* driver loaded, probe complete */
2271 	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2272 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2273 	status = iavf_set_mac_type(hw);
2274 	if (status) {
2275 		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2276 		goto err;
2277 	}
2278 
2279 	ret = iavf_check_reset_complete(hw);
2280 	if (ret) {
2281 		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2282 			 ret);
2283 		goto err;
2284 	}
2285 	hw->aq.num_arq_entries = IAVF_AQ_LEN;
2286 	hw->aq.num_asq_entries = IAVF_AQ_LEN;
2287 	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2288 	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2289 
2290 	status = iavf_init_adminq(hw);
2291 	if (status) {
2292 		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2293 			status);
2294 		goto err;
2295 	}
2296 	ret = iavf_send_api_ver(adapter);
2297 	if (ret) {
2298 		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2299 		iavf_shutdown_adminq(hw);
2300 		goto err;
2301 	}
2302 	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2303 	return;
2304 err:
2305 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2306 }
2307 
2308 /**
2309  * iavf_init_version_check - second step of driver startup
2310  * @adapter: board private structure
2311  *
2312  * Function process __IAVF_INIT_VERSION_CHECK driver state.
2313  * When success the state is changed to __IAVF_INIT_GET_RESOURCES
2314  * when fails the state is changed to __IAVF_INIT_FAILED
2315  **/
2316 static void iavf_init_version_check(struct iavf_adapter *adapter)
2317 {
2318 	struct pci_dev *pdev = adapter->pdev;
2319 	struct iavf_hw *hw = &adapter->hw;
2320 	int err = -EAGAIN;
2321 
2322 	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2323 
2324 	if (!iavf_asq_done(hw)) {
2325 		dev_err(&pdev->dev, "Admin queue command never completed\n");
2326 		iavf_shutdown_adminq(hw);
2327 		iavf_change_state(adapter, __IAVF_STARTUP);
2328 		goto err;
2329 	}
2330 
2331 	/* aq msg sent, awaiting reply */
2332 	err = iavf_verify_api_ver(adapter);
2333 	if (err) {
2334 		if (err == -EALREADY)
2335 			err = iavf_send_api_ver(adapter);
2336 		else
2337 			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2338 				adapter->pf_version.major,
2339 				adapter->pf_version.minor,
2340 				VIRTCHNL_VERSION_MAJOR,
2341 				VIRTCHNL_VERSION_MINOR);
2342 		goto err;
2343 	}
2344 	err = iavf_send_vf_config_msg(adapter);
2345 	if (err) {
2346 		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2347 			err);
2348 		goto err;
2349 	}
2350 	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2351 	return;
2352 err:
2353 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2354 }
2355 
2356 /**
2357  * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2358  * @adapter: board private structure
2359  */
2360 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2361 {
2362 	int i, num_req_queues = adapter->num_req_queues;
2363 	struct iavf_vsi *vsi = &adapter->vsi;
2364 
2365 	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2366 		if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2367 			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2368 	}
2369 	if (!adapter->vsi_res) {
2370 		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2371 		return -ENODEV;
2372 	}
2373 
2374 	if (num_req_queues &&
2375 	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
2376 		/* Problem.  The PF gave us fewer queues than what we had
2377 		 * negotiated in our request.  Need a reset to see if we can't
2378 		 * get back to a working state.
2379 		 */
2380 		dev_err(&adapter->pdev->dev,
2381 			"Requested %d queues, but PF only gave us %d.\n",
2382 			num_req_queues,
2383 			adapter->vsi_res->num_queue_pairs);
2384 		adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2385 		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2386 		iavf_schedule_reset(adapter);
2387 
2388 		return -EAGAIN;
2389 	}
2390 	adapter->num_req_queues = 0;
2391 	adapter->vsi.id = adapter->vsi_res->vsi_id;
2392 
2393 	adapter->vsi.back = adapter;
2394 	adapter->vsi.base_vector = 1;
2395 	vsi->netdev = adapter->netdev;
2396 	vsi->qs_handle = adapter->vsi_res->qset_handle;
2397 	if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2398 		adapter->rss_key_size = adapter->vf_res->rss_key_size;
2399 		adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2400 	} else {
2401 		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2402 		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2403 	}
2404 
2405 	return 0;
2406 }
2407 
2408 /**
2409  * iavf_init_get_resources - third step of driver startup
2410  * @adapter: board private structure
2411  *
2412  * Function process __IAVF_INIT_GET_RESOURCES driver state and
2413  * finishes driver initialization procedure.
2414  * When success the state is changed to __IAVF_DOWN
2415  * when fails the state is changed to __IAVF_INIT_FAILED
2416  **/
2417 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2418 {
2419 	struct pci_dev *pdev = adapter->pdev;
2420 	struct iavf_hw *hw = &adapter->hw;
2421 	int err;
2422 
2423 	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2424 	/* aq msg sent, awaiting reply */
2425 	if (!adapter->vf_res) {
2426 		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2427 					  GFP_KERNEL);
2428 		if (!adapter->vf_res) {
2429 			err = -ENOMEM;
2430 			goto err;
2431 		}
2432 	}
2433 	err = iavf_get_vf_config(adapter);
2434 	if (err == -EALREADY) {
2435 		err = iavf_send_vf_config_msg(adapter);
2436 		goto err;
2437 	} else if (err == -EINVAL) {
2438 		/* We only get -EINVAL if the device is in a very bad
2439 		 * state or if we've been disabled for previous bad
2440 		 * behavior. Either way, we're done now.
2441 		 */
2442 		iavf_shutdown_adminq(hw);
2443 		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2444 		return;
2445 	}
2446 	if (err) {
2447 		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2448 		goto err_alloc;
2449 	}
2450 
2451 	err = iavf_parse_vf_resource_msg(adapter);
2452 	if (err) {
2453 		dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2454 			err);
2455 		goto err_alloc;
2456 	}
2457 	/* Some features require additional messages to negotiate extended
2458 	 * capabilities. These are processed in sequence by the
2459 	 * __IAVF_INIT_EXTENDED_CAPS driver state.
2460 	 */
2461 	adapter->extended_caps = IAVF_EXTENDED_CAPS;
2462 
2463 	iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2464 	return;
2465 
2466 err_alloc:
2467 	kfree(adapter->vf_res);
2468 	adapter->vf_res = NULL;
2469 err:
2470 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2471 }
2472 
2473 /**
2474  * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2475  * @adapter: board private structure
2476  *
2477  * Function processes send of the extended VLAN V2 capability message to the
2478  * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2479  * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2480  */
2481 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2482 {
2483 	int ret;
2484 
2485 	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2486 
2487 	ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2488 	if (ret && ret == -EOPNOTSUPP) {
2489 		/* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case,
2490 		 * we did not send the capability exchange message and do not
2491 		 * expect a response.
2492 		 */
2493 		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2494 	}
2495 
2496 	/* We sent the message, so move on to the next step */
2497 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2498 }
2499 
2500 /**
2501  * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2502  * @adapter: board private structure
2503  *
2504  * Function processes receipt of the extended VLAN V2 capability message from
2505  * the PF.
2506  **/
2507 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2508 {
2509 	int ret;
2510 
2511 	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2512 
2513 	memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2514 
2515 	ret = iavf_get_vf_vlan_v2_caps(adapter);
2516 	if (ret)
2517 		goto err;
2518 
2519 	/* We've processed receipt of the VLAN V2 caps message */
2520 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2521 	return;
2522 err:
2523 	/* We didn't receive a reply. Make sure we try sending again when
2524 	 * __IAVF_INIT_FAILED attempts to recover.
2525 	 */
2526 	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2527 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2528 }
2529 
2530 /**
2531  * iavf_init_process_extended_caps - Part of driver startup
2532  * @adapter: board private structure
2533  *
2534  * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2535  * handles negotiating capabilities for features which require an additional
2536  * message.
2537  *
2538  * Once all extended capabilities exchanges are finished, the driver will
2539  * transition into __IAVF_INIT_CONFIG_ADAPTER.
2540  */
2541 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2542 {
2543 	WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2544 
2545 	/* Process capability exchange for VLAN V2 */
2546 	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2547 		iavf_init_send_offload_vlan_v2_caps(adapter);
2548 		return;
2549 	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2550 		iavf_init_recv_offload_vlan_v2_caps(adapter);
2551 		return;
2552 	}
2553 
2554 	/* When we reach here, no further extended capabilities exchanges are
2555 	 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2556 	 */
2557 	iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2558 }
2559 
2560 /**
2561  * iavf_init_config_adapter - last part of driver startup
2562  * @adapter: board private structure
2563  *
2564  * After all the supported capabilities are negotiated, then the
2565  * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2566  */
2567 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2568 {
2569 	struct net_device *netdev = adapter->netdev;
2570 	struct pci_dev *pdev = adapter->pdev;
2571 	int err;
2572 
2573 	WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2574 
2575 	if (iavf_process_config(adapter))
2576 		goto err;
2577 
2578 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2579 
2580 	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2581 
2582 	netdev->netdev_ops = &iavf_netdev_ops;
2583 	iavf_set_ethtool_ops(netdev);
2584 	netdev->watchdog_timeo = 5 * HZ;
2585 
2586 	/* MTU range: 68 - 9710 */
2587 	netdev->min_mtu = ETH_MIN_MTU;
2588 	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2589 
2590 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2591 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2592 			 adapter->hw.mac.addr);
2593 		eth_hw_addr_random(netdev);
2594 		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2595 	} else {
2596 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2597 		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2598 	}
2599 
2600 	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2601 	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2602 	err = iavf_init_interrupt_scheme(adapter);
2603 	if (err)
2604 		goto err_sw_init;
2605 	iavf_map_rings_to_vectors(adapter);
2606 	if (adapter->vf_res->vf_cap_flags &
2607 		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2608 		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2609 
2610 	err = iavf_request_misc_irq(adapter);
2611 	if (err)
2612 		goto err_sw_init;
2613 
2614 	netif_carrier_off(netdev);
2615 	adapter->link_up = false;
2616 
2617 	/* set the semaphore to prevent any callbacks after device registration
2618 	 * up to time when state of driver will be set to __IAVF_DOWN
2619 	 */
2620 	rtnl_lock();
2621 	if (!adapter->netdev_registered) {
2622 		err = register_netdevice(netdev);
2623 		if (err) {
2624 			rtnl_unlock();
2625 			goto err_register;
2626 		}
2627 	}
2628 
2629 	adapter->netdev_registered = true;
2630 
2631 	netif_tx_stop_all_queues(netdev);
2632 	if (CLIENT_ALLOWED(adapter)) {
2633 		err = iavf_lan_add_device(adapter);
2634 		if (err)
2635 			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2636 				 err);
2637 	}
2638 	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2639 	if (netdev->features & NETIF_F_GRO)
2640 		dev_info(&pdev->dev, "GRO is enabled\n");
2641 
2642 	iavf_change_state(adapter, __IAVF_DOWN);
2643 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2644 	rtnl_unlock();
2645 
2646 	iavf_misc_irq_enable(adapter);
2647 	wake_up(&adapter->down_waitqueue);
2648 
2649 	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2650 	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2651 	if (!adapter->rss_key || !adapter->rss_lut) {
2652 		err = -ENOMEM;
2653 		goto err_mem;
2654 	}
2655 	if (RSS_AQ(adapter))
2656 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2657 	else
2658 		iavf_init_rss(adapter);
2659 
2660 	if (VLAN_V2_ALLOWED(adapter))
2661 		/* request initial VLAN offload settings */
2662 		iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2663 
2664 	return;
2665 err_mem:
2666 	iavf_free_rss(adapter);
2667 err_register:
2668 	iavf_free_misc_irq(adapter);
2669 err_sw_init:
2670 	iavf_reset_interrupt_capability(adapter);
2671 err:
2672 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2673 }
2674 
2675 /**
2676  * iavf_watchdog_task - Periodic call-back task
2677  * @work: pointer to work_struct
2678  **/
2679 static void iavf_watchdog_task(struct work_struct *work)
2680 {
2681 	struct iavf_adapter *adapter = container_of(work,
2682 						    struct iavf_adapter,
2683 						    watchdog_task.work);
2684 	struct iavf_hw *hw = &adapter->hw;
2685 	u32 reg_val;
2686 
2687 	if (!mutex_trylock(&adapter->crit_lock)) {
2688 		if (adapter->state == __IAVF_REMOVE)
2689 			return;
2690 
2691 		goto restart_watchdog;
2692 	}
2693 
2694 	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
2695 	    adapter->netdev_registered &&
2696 	    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
2697 	    rtnl_trylock()) {
2698 		netdev_update_features(adapter->netdev);
2699 		rtnl_unlock();
2700 		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
2701 	}
2702 
2703 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2704 		iavf_change_state(adapter, __IAVF_COMM_FAILED);
2705 
2706 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2707 		adapter->aq_required = 0;
2708 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2709 		mutex_unlock(&adapter->crit_lock);
2710 		queue_work(adapter->wq, &adapter->reset_task);
2711 		return;
2712 	}
2713 
2714 	switch (adapter->state) {
2715 	case __IAVF_STARTUP:
2716 		iavf_startup(adapter);
2717 		mutex_unlock(&adapter->crit_lock);
2718 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2719 				   msecs_to_jiffies(30));
2720 		return;
2721 	case __IAVF_INIT_VERSION_CHECK:
2722 		iavf_init_version_check(adapter);
2723 		mutex_unlock(&adapter->crit_lock);
2724 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2725 				   msecs_to_jiffies(30));
2726 		return;
2727 	case __IAVF_INIT_GET_RESOURCES:
2728 		iavf_init_get_resources(adapter);
2729 		mutex_unlock(&adapter->crit_lock);
2730 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2731 				   msecs_to_jiffies(1));
2732 		return;
2733 	case __IAVF_INIT_EXTENDED_CAPS:
2734 		iavf_init_process_extended_caps(adapter);
2735 		mutex_unlock(&adapter->crit_lock);
2736 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2737 				   msecs_to_jiffies(1));
2738 		return;
2739 	case __IAVF_INIT_CONFIG_ADAPTER:
2740 		iavf_init_config_adapter(adapter);
2741 		mutex_unlock(&adapter->crit_lock);
2742 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2743 				   msecs_to_jiffies(1));
2744 		return;
2745 	case __IAVF_INIT_FAILED:
2746 		if (test_bit(__IAVF_IN_REMOVE_TASK,
2747 			     &adapter->crit_section)) {
2748 			/* Do not update the state and do not reschedule
2749 			 * watchdog task, iavf_remove should handle this state
2750 			 * as it can loop forever
2751 			 */
2752 			mutex_unlock(&adapter->crit_lock);
2753 			return;
2754 		}
2755 		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2756 			dev_err(&adapter->pdev->dev,
2757 				"Failed to communicate with PF; waiting before retry\n");
2758 			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2759 			iavf_shutdown_adminq(hw);
2760 			mutex_unlock(&adapter->crit_lock);
2761 			queue_delayed_work(adapter->wq,
2762 					   &adapter->watchdog_task, (5 * HZ));
2763 			return;
2764 		}
2765 		/* Try again from failed step*/
2766 		iavf_change_state(adapter, adapter->last_state);
2767 		mutex_unlock(&adapter->crit_lock);
2768 		queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
2769 		return;
2770 	case __IAVF_COMM_FAILED:
2771 		if (test_bit(__IAVF_IN_REMOVE_TASK,
2772 			     &adapter->crit_section)) {
2773 			/* Set state to __IAVF_INIT_FAILED and perform remove
2774 			 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2775 			 * doesn't bring the state back to __IAVF_COMM_FAILED.
2776 			 */
2777 			iavf_change_state(adapter, __IAVF_INIT_FAILED);
2778 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2779 			mutex_unlock(&adapter->crit_lock);
2780 			return;
2781 		}
2782 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2783 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2784 		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2785 		    reg_val == VIRTCHNL_VFR_COMPLETED) {
2786 			/* A chance for redemption! */
2787 			dev_err(&adapter->pdev->dev,
2788 				"Hardware came out of reset. Attempting reinit.\n");
2789 			/* When init task contacts the PF and
2790 			 * gets everything set up again, it'll restart the
2791 			 * watchdog for us. Down, boy. Sit. Stay. Woof.
2792 			 */
2793 			iavf_change_state(adapter, __IAVF_STARTUP);
2794 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2795 		}
2796 		adapter->aq_required = 0;
2797 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2798 		mutex_unlock(&adapter->crit_lock);
2799 		queue_delayed_work(adapter->wq,
2800 				   &adapter->watchdog_task,
2801 				   msecs_to_jiffies(10));
2802 		return;
2803 	case __IAVF_RESETTING:
2804 		mutex_unlock(&adapter->crit_lock);
2805 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2806 				   HZ * 2);
2807 		return;
2808 	case __IAVF_DOWN:
2809 	case __IAVF_DOWN_PENDING:
2810 	case __IAVF_TESTING:
2811 	case __IAVF_RUNNING:
2812 		if (adapter->current_op) {
2813 			if (!iavf_asq_done(hw)) {
2814 				dev_dbg(&adapter->pdev->dev,
2815 					"Admin queue timeout\n");
2816 				iavf_send_api_ver(adapter);
2817 			}
2818 		} else {
2819 			int ret = iavf_process_aq_command(adapter);
2820 
2821 			/* An error will be returned if no commands were
2822 			 * processed; use this opportunity to update stats
2823 			 * if the error isn't -ENOTSUPP
2824 			 */
2825 			if (ret && ret != -EOPNOTSUPP &&
2826 			    adapter->state == __IAVF_RUNNING)
2827 				iavf_request_stats(adapter);
2828 		}
2829 		if (adapter->state == __IAVF_RUNNING)
2830 			iavf_detect_recover_hung(&adapter->vsi);
2831 		break;
2832 	case __IAVF_REMOVE:
2833 	default:
2834 		mutex_unlock(&adapter->crit_lock);
2835 		return;
2836 	}
2837 
2838 	/* check for hw reset */
2839 	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2840 	if (!reg_val) {
2841 		adapter->flags |= IAVF_FLAG_RESET_PENDING;
2842 		adapter->aq_required = 0;
2843 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2844 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2845 		queue_work(adapter->wq, &adapter->reset_task);
2846 		mutex_unlock(&adapter->crit_lock);
2847 		queue_delayed_work(adapter->wq,
2848 				   &adapter->watchdog_task, HZ * 2);
2849 		return;
2850 	}
2851 
2852 	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2853 	mutex_unlock(&adapter->crit_lock);
2854 restart_watchdog:
2855 	if (adapter->state >= __IAVF_DOWN)
2856 		queue_work(adapter->wq, &adapter->adminq_task);
2857 	if (adapter->aq_required)
2858 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2859 				   msecs_to_jiffies(20));
2860 	else
2861 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2862 				   HZ * 2);
2863 }
2864 
2865 /**
2866  * iavf_disable_vf - disable VF
2867  * @adapter: board private structure
2868  *
2869  * Set communication failed flag and free all resources.
2870  * NOTE: This function is expected to be called with crit_lock being held.
2871  **/
2872 static void iavf_disable_vf(struct iavf_adapter *adapter)
2873 {
2874 	struct iavf_mac_filter *f, *ftmp;
2875 	struct iavf_vlan_filter *fv, *fvtmp;
2876 	struct iavf_cloud_filter *cf, *cftmp;
2877 
2878 	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2879 
2880 	/* We don't use netif_running() because it may be true prior to
2881 	 * ndo_open() returning, so we can't assume it means all our open
2882 	 * tasks have finished, since we're not holding the rtnl_lock here.
2883 	 */
2884 	if (adapter->state == __IAVF_RUNNING) {
2885 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2886 		netif_carrier_off(adapter->netdev);
2887 		netif_tx_disable(adapter->netdev);
2888 		adapter->link_up = false;
2889 		iavf_napi_disable_all(adapter);
2890 		iavf_irq_disable(adapter);
2891 		iavf_free_traffic_irqs(adapter);
2892 		iavf_free_all_tx_resources(adapter);
2893 		iavf_free_all_rx_resources(adapter);
2894 	}
2895 
2896 	spin_lock_bh(&adapter->mac_vlan_list_lock);
2897 
2898 	/* Delete all of the filters */
2899 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2900 		list_del(&f->list);
2901 		kfree(f);
2902 	}
2903 
2904 	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2905 		list_del(&fv->list);
2906 		kfree(fv);
2907 	}
2908 	adapter->num_vlan_filters = 0;
2909 
2910 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
2911 
2912 	spin_lock_bh(&adapter->cloud_filter_list_lock);
2913 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2914 		list_del(&cf->list);
2915 		kfree(cf);
2916 		adapter->num_cloud_filters--;
2917 	}
2918 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
2919 
2920 	iavf_free_misc_irq(adapter);
2921 	iavf_reset_interrupt_capability(adapter);
2922 	iavf_free_q_vectors(adapter);
2923 	iavf_free_queues(adapter);
2924 	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2925 	iavf_shutdown_adminq(&adapter->hw);
2926 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2927 	iavf_change_state(adapter, __IAVF_DOWN);
2928 	wake_up(&adapter->down_waitqueue);
2929 	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2930 }
2931 
2932 /**
2933  * iavf_reset_task - Call-back task to handle hardware reset
2934  * @work: pointer to work_struct
2935  *
2936  * During reset we need to shut down and reinitialize the admin queue
2937  * before we can use it to communicate with the PF again. We also clear
2938  * and reinit the rings because that context is lost as well.
2939  **/
2940 static void iavf_reset_task(struct work_struct *work)
2941 {
2942 	struct iavf_adapter *adapter = container_of(work,
2943 						      struct iavf_adapter,
2944 						      reset_task);
2945 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
2946 	struct net_device *netdev = adapter->netdev;
2947 	struct iavf_hw *hw = &adapter->hw;
2948 	struct iavf_mac_filter *f, *ftmp;
2949 	struct iavf_cloud_filter *cf;
2950 	enum iavf_status status;
2951 	u32 reg_val;
2952 	int i = 0, err;
2953 	bool running;
2954 
2955 	/* Detach interface to avoid subsequent NDO callbacks */
2956 	rtnl_lock();
2957 	netif_device_detach(netdev);
2958 	rtnl_unlock();
2959 
2960 	/* When device is being removed it doesn't make sense to run the reset
2961 	 * task, just return in such a case.
2962 	 */
2963 	if (!mutex_trylock(&adapter->crit_lock)) {
2964 		if (adapter->state != __IAVF_REMOVE)
2965 			queue_work(adapter->wq, &adapter->reset_task);
2966 
2967 		goto reset_finish;
2968 	}
2969 
2970 	while (!mutex_trylock(&adapter->client_lock))
2971 		usleep_range(500, 1000);
2972 	if (CLIENT_ENABLED(adapter)) {
2973 		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2974 				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2975 				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2976 				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2977 		cancel_delayed_work_sync(&adapter->client_task);
2978 		iavf_notify_client_close(&adapter->vsi, true);
2979 	}
2980 	iavf_misc_irq_disable(adapter);
2981 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2982 		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2983 		/* Restart the AQ here. If we have been reset but didn't
2984 		 * detect it, or if the PF had to reinit, our AQ will be hosed.
2985 		 */
2986 		iavf_shutdown_adminq(hw);
2987 		iavf_init_adminq(hw);
2988 		iavf_request_reset(adapter);
2989 	}
2990 	adapter->flags |= IAVF_FLAG_RESET_PENDING;
2991 
2992 	/* poll until we see the reset actually happen */
2993 	for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2994 		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2995 			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2996 		if (!reg_val)
2997 			break;
2998 		usleep_range(5000, 10000);
2999 	}
3000 	if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
3001 		dev_info(&adapter->pdev->dev, "Never saw reset\n");
3002 		goto continue_reset; /* act like the reset happened */
3003 	}
3004 
3005 	/* wait until the reset is complete and the PF is responding to us */
3006 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3007 		/* sleep first to make sure a minimum wait time is met */
3008 		msleep(IAVF_RESET_WAIT_MS);
3009 
3010 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3011 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3012 		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3013 			break;
3014 	}
3015 
3016 	pci_set_master(adapter->pdev);
3017 	pci_restore_msi_state(adapter->pdev);
3018 
3019 	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3020 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3021 			reg_val);
3022 		iavf_disable_vf(adapter);
3023 		mutex_unlock(&adapter->client_lock);
3024 		mutex_unlock(&adapter->crit_lock);
3025 		if (netif_running(netdev)) {
3026 			rtnl_lock();
3027 			dev_close(netdev);
3028 			rtnl_unlock();
3029 		}
3030 		return; /* Do not attempt to reinit. It's dead, Jim. */
3031 	}
3032 
3033 continue_reset:
3034 	/* We don't use netif_running() because it may be true prior to
3035 	 * ndo_open() returning, so we can't assume it means all our open
3036 	 * tasks have finished, since we're not holding the rtnl_lock here.
3037 	 */
3038 	running = adapter->state == __IAVF_RUNNING;
3039 
3040 	if (running) {
3041 		netif_carrier_off(netdev);
3042 		netif_tx_stop_all_queues(netdev);
3043 		adapter->link_up = false;
3044 		iavf_napi_disable_all(adapter);
3045 	}
3046 	iavf_irq_disable(adapter);
3047 
3048 	iavf_change_state(adapter, __IAVF_RESETTING);
3049 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3050 
3051 	/* free the Tx/Rx rings and descriptors, might be better to just
3052 	 * re-use them sometime in the future
3053 	 */
3054 	iavf_free_all_rx_resources(adapter);
3055 	iavf_free_all_tx_resources(adapter);
3056 
3057 	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3058 	/* kill and reinit the admin queue */
3059 	iavf_shutdown_adminq(hw);
3060 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3061 	status = iavf_init_adminq(hw);
3062 	if (status) {
3063 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3064 			 status);
3065 		goto reset_err;
3066 	}
3067 	adapter->aq_required = 0;
3068 
3069 	if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3070 	    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3071 		err = iavf_reinit_interrupt_scheme(adapter);
3072 		if (err)
3073 			goto reset_err;
3074 	}
3075 
3076 	if (RSS_AQ(adapter)) {
3077 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3078 	} else {
3079 		err = iavf_init_rss(adapter);
3080 		if (err)
3081 			goto reset_err;
3082 	}
3083 
3084 	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3085 	/* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3086 	 * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
3087 	 * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3088 	 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3089 	 * been successfully sent and negotiated
3090 	 */
3091 	adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3092 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3093 
3094 	spin_lock_bh(&adapter->mac_vlan_list_lock);
3095 
3096 	/* Delete filter for the current MAC address, it could have
3097 	 * been changed by the PF via administratively set MAC.
3098 	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3099 	 */
3100 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3101 		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3102 			list_del(&f->list);
3103 			kfree(f);
3104 		}
3105 	}
3106 	/* re-add all MAC filters */
3107 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
3108 		f->add = true;
3109 	}
3110 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
3111 
3112 	/* check if TCs are running and re-add all cloud filters */
3113 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3114 	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3115 	    adapter->num_tc) {
3116 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3117 			cf->add = true;
3118 		}
3119 	}
3120 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3121 
3122 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3123 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3124 	iavf_misc_irq_enable(adapter);
3125 
3126 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
3127 
3128 	/* We were running when the reset started, so we need to restore some
3129 	 * state here.
3130 	 */
3131 	if (running) {
3132 		/* allocate transmit descriptors */
3133 		err = iavf_setup_all_tx_resources(adapter);
3134 		if (err)
3135 			goto reset_err;
3136 
3137 		/* allocate receive descriptors */
3138 		err = iavf_setup_all_rx_resources(adapter);
3139 		if (err)
3140 			goto reset_err;
3141 
3142 		if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3143 		    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3144 			err = iavf_request_traffic_irqs(adapter, netdev->name);
3145 			if (err)
3146 				goto reset_err;
3147 
3148 			adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3149 		}
3150 
3151 		iavf_configure(adapter);
3152 
3153 		/* iavf_up_complete() will switch device back
3154 		 * to __IAVF_RUNNING
3155 		 */
3156 		iavf_up_complete(adapter);
3157 
3158 		iavf_irq_enable(adapter, true);
3159 	} else {
3160 		iavf_change_state(adapter, __IAVF_DOWN);
3161 		wake_up(&adapter->down_waitqueue);
3162 	}
3163 
3164 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3165 
3166 	mutex_unlock(&adapter->client_lock);
3167 	mutex_unlock(&adapter->crit_lock);
3168 
3169 	goto reset_finish;
3170 reset_err:
3171 	if (running) {
3172 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3173 		iavf_free_traffic_irqs(adapter);
3174 	}
3175 	iavf_disable_vf(adapter);
3176 
3177 	mutex_unlock(&adapter->client_lock);
3178 	mutex_unlock(&adapter->crit_lock);
3179 
3180 	if (netif_running(netdev)) {
3181 		/* Close device to ensure that Tx queues will not be started
3182 		 * during netif_device_attach() at the end of the reset task.
3183 		 */
3184 		rtnl_lock();
3185 		dev_close(netdev);
3186 		rtnl_unlock();
3187 	}
3188 
3189 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3190 reset_finish:
3191 	rtnl_lock();
3192 	netif_device_attach(netdev);
3193 	rtnl_unlock();
3194 }
3195 
3196 /**
3197  * iavf_adminq_task - worker thread to clean the admin queue
3198  * @work: pointer to work_struct containing our data
3199  **/
3200 static void iavf_adminq_task(struct work_struct *work)
3201 {
3202 	struct iavf_adapter *adapter =
3203 		container_of(work, struct iavf_adapter, adminq_task);
3204 	struct iavf_hw *hw = &adapter->hw;
3205 	struct iavf_arq_event_info event;
3206 	enum virtchnl_ops v_op;
3207 	enum iavf_status ret, v_ret;
3208 	u32 val, oldval;
3209 	u16 pending;
3210 
3211 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3212 		goto out;
3213 
3214 	if (!mutex_trylock(&adapter->crit_lock)) {
3215 		if (adapter->state == __IAVF_REMOVE)
3216 			return;
3217 
3218 		queue_work(adapter->wq, &adapter->adminq_task);
3219 		goto out;
3220 	}
3221 
3222 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3223 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3224 	if (!event.msg_buf)
3225 		goto out;
3226 
3227 	do {
3228 		ret = iavf_clean_arq_element(hw, &event, &pending);
3229 		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3230 		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3231 
3232 		if (ret || !v_op)
3233 			break; /* No event to process or error cleaning ARQ */
3234 
3235 		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3236 					 event.msg_len);
3237 		if (pending != 0)
3238 			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3239 	} while (pending);
3240 	mutex_unlock(&adapter->crit_lock);
3241 
3242 	if ((adapter->flags &
3243 	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3244 	    adapter->state == __IAVF_RESETTING)
3245 		goto freedom;
3246 
3247 	/* check for error indications */
3248 	val = rd32(hw, hw->aq.arq.len);
3249 	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3250 		goto freedom;
3251 	oldval = val;
3252 	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3253 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3254 		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3255 	}
3256 	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3257 		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3258 		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3259 	}
3260 	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3261 		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3262 		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3263 	}
3264 	if (oldval != val)
3265 		wr32(hw, hw->aq.arq.len, val);
3266 
3267 	val = rd32(hw, hw->aq.asq.len);
3268 	oldval = val;
3269 	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3270 		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3271 		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3272 	}
3273 	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3274 		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3275 		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3276 	}
3277 	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3278 		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3279 		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3280 	}
3281 	if (oldval != val)
3282 		wr32(hw, hw->aq.asq.len, val);
3283 
3284 freedom:
3285 	kfree(event.msg_buf);
3286 out:
3287 	/* re-enable Admin queue interrupt cause */
3288 	iavf_misc_irq_enable(adapter);
3289 }
3290 
3291 /**
3292  * iavf_client_task - worker thread to perform client work
3293  * @work: pointer to work_struct containing our data
3294  *
3295  * This task handles client interactions. Because client calls can be
3296  * reentrant, we can't handle them in the watchdog.
3297  **/
3298 static void iavf_client_task(struct work_struct *work)
3299 {
3300 	struct iavf_adapter *adapter =
3301 		container_of(work, struct iavf_adapter, client_task.work);
3302 
3303 	/* If we can't get the client bit, just give up. We'll be rescheduled
3304 	 * later.
3305 	 */
3306 
3307 	if (!mutex_trylock(&adapter->client_lock))
3308 		return;
3309 
3310 	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3311 		iavf_client_subtask(adapter);
3312 		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3313 		goto out;
3314 	}
3315 	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3316 		iavf_notify_client_l2_params(&adapter->vsi);
3317 		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3318 		goto out;
3319 	}
3320 	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3321 		iavf_notify_client_close(&adapter->vsi, false);
3322 		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3323 		goto out;
3324 	}
3325 	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3326 		iavf_notify_client_open(&adapter->vsi);
3327 		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3328 	}
3329 out:
3330 	mutex_unlock(&adapter->client_lock);
3331 }
3332 
3333 /**
3334  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3335  * @adapter: board private structure
3336  *
3337  * Free all transmit software resources
3338  **/
3339 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3340 {
3341 	int i;
3342 
3343 	if (!adapter->tx_rings)
3344 		return;
3345 
3346 	for (i = 0; i < adapter->num_active_queues; i++)
3347 		if (adapter->tx_rings[i].desc)
3348 			iavf_free_tx_resources(&adapter->tx_rings[i]);
3349 }
3350 
3351 /**
3352  * iavf_setup_all_tx_resources - allocate all queues Tx resources
3353  * @adapter: board private structure
3354  *
3355  * If this function returns with an error, then it's possible one or
3356  * more of the rings is populated (while the rest are not).  It is the
3357  * callers duty to clean those orphaned rings.
3358  *
3359  * Return 0 on success, negative on failure
3360  **/
3361 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3362 {
3363 	int i, err = 0;
3364 
3365 	for (i = 0; i < adapter->num_active_queues; i++) {
3366 		adapter->tx_rings[i].count = adapter->tx_desc_count;
3367 		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3368 		if (!err)
3369 			continue;
3370 		dev_err(&adapter->pdev->dev,
3371 			"Allocation for Tx Queue %u failed\n", i);
3372 		break;
3373 	}
3374 
3375 	return err;
3376 }
3377 
3378 /**
3379  * iavf_setup_all_rx_resources - allocate all queues Rx resources
3380  * @adapter: board private structure
3381  *
3382  * If this function returns with an error, then it's possible one or
3383  * more of the rings is populated (while the rest are not).  It is the
3384  * callers duty to clean those orphaned rings.
3385  *
3386  * Return 0 on success, negative on failure
3387  **/
3388 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3389 {
3390 	int i, err = 0;
3391 
3392 	for (i = 0; i < adapter->num_active_queues; i++) {
3393 		adapter->rx_rings[i].count = adapter->rx_desc_count;
3394 		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3395 		if (!err)
3396 			continue;
3397 		dev_err(&adapter->pdev->dev,
3398 			"Allocation for Rx Queue %u failed\n", i);
3399 		break;
3400 	}
3401 	return err;
3402 }
3403 
3404 /**
3405  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3406  * @adapter: board private structure
3407  *
3408  * Free all receive software resources
3409  **/
3410 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3411 {
3412 	int i;
3413 
3414 	if (!adapter->rx_rings)
3415 		return;
3416 
3417 	for (i = 0; i < adapter->num_active_queues; i++)
3418 		if (adapter->rx_rings[i].desc)
3419 			iavf_free_rx_resources(&adapter->rx_rings[i]);
3420 }
3421 
3422 /**
3423  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3424  * @adapter: board private structure
3425  * @max_tx_rate: max Tx bw for a tc
3426  **/
3427 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3428 				      u64 max_tx_rate)
3429 {
3430 	int speed = 0, ret = 0;
3431 
3432 	if (ADV_LINK_SUPPORT(adapter)) {
3433 		if (adapter->link_speed_mbps < U32_MAX) {
3434 			speed = adapter->link_speed_mbps;
3435 			goto validate_bw;
3436 		} else {
3437 			dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3438 			return -EINVAL;
3439 		}
3440 	}
3441 
3442 	switch (adapter->link_speed) {
3443 	case VIRTCHNL_LINK_SPEED_40GB:
3444 		speed = SPEED_40000;
3445 		break;
3446 	case VIRTCHNL_LINK_SPEED_25GB:
3447 		speed = SPEED_25000;
3448 		break;
3449 	case VIRTCHNL_LINK_SPEED_20GB:
3450 		speed = SPEED_20000;
3451 		break;
3452 	case VIRTCHNL_LINK_SPEED_10GB:
3453 		speed = SPEED_10000;
3454 		break;
3455 	case VIRTCHNL_LINK_SPEED_5GB:
3456 		speed = SPEED_5000;
3457 		break;
3458 	case VIRTCHNL_LINK_SPEED_2_5GB:
3459 		speed = SPEED_2500;
3460 		break;
3461 	case VIRTCHNL_LINK_SPEED_1GB:
3462 		speed = SPEED_1000;
3463 		break;
3464 	case VIRTCHNL_LINK_SPEED_100MB:
3465 		speed = SPEED_100;
3466 		break;
3467 	default:
3468 		break;
3469 	}
3470 
3471 validate_bw:
3472 	if (max_tx_rate > speed) {
3473 		dev_err(&adapter->pdev->dev,
3474 			"Invalid tx rate specified\n");
3475 		ret = -EINVAL;
3476 	}
3477 
3478 	return ret;
3479 }
3480 
3481 /**
3482  * iavf_validate_ch_config - validate queue mapping info
3483  * @adapter: board private structure
3484  * @mqprio_qopt: queue parameters
3485  *
3486  * This function validates if the config provided by the user to
3487  * configure queue channels is valid or not. Returns 0 on a valid
3488  * config.
3489  **/
3490 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3491 				   struct tc_mqprio_qopt_offload *mqprio_qopt)
3492 {
3493 	u64 total_max_rate = 0;
3494 	u32 tx_rate_rem = 0;
3495 	int i, num_qps = 0;
3496 	u64 tx_rate = 0;
3497 	int ret = 0;
3498 
3499 	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3500 	    mqprio_qopt->qopt.num_tc < 1)
3501 		return -EINVAL;
3502 
3503 	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
3504 		if (!mqprio_qopt->qopt.count[i] ||
3505 		    mqprio_qopt->qopt.offset[i] != num_qps)
3506 			return -EINVAL;
3507 		if (mqprio_qopt->min_rate[i]) {
3508 			dev_err(&adapter->pdev->dev,
3509 				"Invalid min tx rate (greater than 0) specified for TC%d\n",
3510 				i);
3511 			return -EINVAL;
3512 		}
3513 
3514 		/* convert to Mbps */
3515 		tx_rate = div_u64(mqprio_qopt->max_rate[i],
3516 				  IAVF_MBPS_DIVISOR);
3517 
3518 		if (mqprio_qopt->max_rate[i] &&
3519 		    tx_rate < IAVF_MBPS_QUANTA) {
3520 			dev_err(&adapter->pdev->dev,
3521 				"Invalid max tx rate for TC%d, minimum %dMbps\n",
3522 				i, IAVF_MBPS_QUANTA);
3523 			return -EINVAL;
3524 		}
3525 
3526 		(void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3527 
3528 		if (tx_rate_rem != 0) {
3529 			dev_err(&adapter->pdev->dev,
3530 				"Invalid max tx rate for TC%d, not divisible by %d\n",
3531 				i, IAVF_MBPS_QUANTA);
3532 			return -EINVAL;
3533 		}
3534 
3535 		total_max_rate += tx_rate;
3536 		num_qps += mqprio_qopt->qopt.count[i];
3537 	}
3538 	if (num_qps > adapter->num_active_queues) {
3539 		dev_err(&adapter->pdev->dev,
3540 			"Cannot support requested number of queues\n");
3541 		return -EINVAL;
3542 	}
3543 
3544 	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3545 	return ret;
3546 }
3547 
3548 /**
3549  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3550  * @adapter: board private structure
3551  **/
3552 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3553 {
3554 	struct iavf_cloud_filter *cf, *cftmp;
3555 
3556 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3557 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3558 				 list) {
3559 		list_del(&cf->list);
3560 		kfree(cf);
3561 		adapter->num_cloud_filters--;
3562 	}
3563 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3564 }
3565 
3566 /**
3567  * __iavf_setup_tc - configure multiple traffic classes
3568  * @netdev: network interface device structure
3569  * @type_data: tc offload data
3570  *
3571  * This function processes the config information provided by the
3572  * user to configure traffic classes/queue channels and packages the
3573  * information to request the PF to setup traffic classes.
3574  *
3575  * Returns 0 on success.
3576  **/
3577 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3578 {
3579 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3580 	struct iavf_adapter *adapter = netdev_priv(netdev);
3581 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
3582 	u8 num_tc = 0, total_qps = 0;
3583 	int ret = 0, netdev_tc = 0;
3584 	u64 max_tx_rate;
3585 	u16 mode;
3586 	int i;
3587 
3588 	num_tc = mqprio_qopt->qopt.num_tc;
3589 	mode = mqprio_qopt->mode;
3590 
3591 	/* delete queue_channel */
3592 	if (!mqprio_qopt->qopt.hw) {
3593 		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3594 			/* reset the tc configuration */
3595 			netdev_reset_tc(netdev);
3596 			adapter->num_tc = 0;
3597 			netif_tx_stop_all_queues(netdev);
3598 			netif_tx_disable(netdev);
3599 			iavf_del_all_cloud_filters(adapter);
3600 			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3601 			total_qps = adapter->orig_num_active_queues;
3602 			goto exit;
3603 		} else {
3604 			return -EINVAL;
3605 		}
3606 	}
3607 
3608 	/* add queue channel */
3609 	if (mode == TC_MQPRIO_MODE_CHANNEL) {
3610 		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3611 			dev_err(&adapter->pdev->dev, "ADq not supported\n");
3612 			return -EOPNOTSUPP;
3613 		}
3614 		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3615 			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3616 			return -EINVAL;
3617 		}
3618 
3619 		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3620 		if (ret)
3621 			return ret;
3622 		/* Return if same TC config is requested */
3623 		if (adapter->num_tc == num_tc)
3624 			return 0;
3625 		adapter->num_tc = num_tc;
3626 
3627 		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3628 			if (i < num_tc) {
3629 				adapter->ch_config.ch_info[i].count =
3630 					mqprio_qopt->qopt.count[i];
3631 				adapter->ch_config.ch_info[i].offset =
3632 					mqprio_qopt->qopt.offset[i];
3633 				total_qps += mqprio_qopt->qopt.count[i];
3634 				max_tx_rate = mqprio_qopt->max_rate[i];
3635 				/* convert to Mbps */
3636 				max_tx_rate = div_u64(max_tx_rate,
3637 						      IAVF_MBPS_DIVISOR);
3638 				adapter->ch_config.ch_info[i].max_tx_rate =
3639 					max_tx_rate;
3640 			} else {
3641 				adapter->ch_config.ch_info[i].count = 1;
3642 				adapter->ch_config.ch_info[i].offset = 0;
3643 			}
3644 		}
3645 
3646 		/* Take snapshot of original config such as "num_active_queues"
3647 		 * It is used later when delete ADQ flow is exercised, so that
3648 		 * once delete ADQ flow completes, VF shall go back to its
3649 		 * original queue configuration
3650 		 */
3651 
3652 		adapter->orig_num_active_queues = adapter->num_active_queues;
3653 
3654 		/* Store queue info based on TC so that VF gets configured
3655 		 * with correct number of queues when VF completes ADQ config
3656 		 * flow
3657 		 */
3658 		adapter->ch_config.total_qps = total_qps;
3659 
3660 		netif_tx_stop_all_queues(netdev);
3661 		netif_tx_disable(netdev);
3662 		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3663 		netdev_reset_tc(netdev);
3664 		/* Report the tc mapping up the stack */
3665 		netdev_set_num_tc(adapter->netdev, num_tc);
3666 		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3667 			u16 qcount = mqprio_qopt->qopt.count[i];
3668 			u16 qoffset = mqprio_qopt->qopt.offset[i];
3669 
3670 			if (i < num_tc)
3671 				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3672 						    qoffset);
3673 		}
3674 	}
3675 exit:
3676 	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3677 		return 0;
3678 
3679 	netif_set_real_num_rx_queues(netdev, total_qps);
3680 	netif_set_real_num_tx_queues(netdev, total_qps);
3681 
3682 	return ret;
3683 }
3684 
3685 /**
3686  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3687  * @adapter: board private structure
3688  * @f: pointer to struct flow_cls_offload
3689  * @filter: pointer to cloud filter structure
3690  */
3691 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3692 				 struct flow_cls_offload *f,
3693 				 struct iavf_cloud_filter *filter)
3694 {
3695 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3696 	struct flow_dissector *dissector = rule->match.dissector;
3697 	u16 n_proto_mask = 0;
3698 	u16 n_proto_key = 0;
3699 	u8 field_flags = 0;
3700 	u16 addr_type = 0;
3701 	u16 n_proto = 0;
3702 	int i = 0;
3703 	struct virtchnl_filter *vf = &filter->f;
3704 
3705 	if (dissector->used_keys &
3706 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3707 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
3708 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3709 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
3710 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3711 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3712 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
3713 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3714 		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3715 			dissector->used_keys);
3716 		return -EOPNOTSUPP;
3717 	}
3718 
3719 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3720 		struct flow_match_enc_keyid match;
3721 
3722 		flow_rule_match_enc_keyid(rule, &match);
3723 		if (match.mask->keyid != 0)
3724 			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3725 	}
3726 
3727 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3728 		struct flow_match_basic match;
3729 
3730 		flow_rule_match_basic(rule, &match);
3731 		n_proto_key = ntohs(match.key->n_proto);
3732 		n_proto_mask = ntohs(match.mask->n_proto);
3733 
3734 		if (n_proto_key == ETH_P_ALL) {
3735 			n_proto_key = 0;
3736 			n_proto_mask = 0;
3737 		}
3738 		n_proto = n_proto_key & n_proto_mask;
3739 		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3740 			return -EINVAL;
3741 		if (n_proto == ETH_P_IPV6) {
3742 			/* specify flow type as TCP IPv6 */
3743 			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3744 		}
3745 
3746 		if (match.key->ip_proto != IPPROTO_TCP) {
3747 			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3748 			return -EINVAL;
3749 		}
3750 	}
3751 
3752 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3753 		struct flow_match_eth_addrs match;
3754 
3755 		flow_rule_match_eth_addrs(rule, &match);
3756 
3757 		/* use is_broadcast and is_zero to check for all 0xf or 0 */
3758 		if (!is_zero_ether_addr(match.mask->dst)) {
3759 			if (is_broadcast_ether_addr(match.mask->dst)) {
3760 				field_flags |= IAVF_CLOUD_FIELD_OMAC;
3761 			} else {
3762 				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3763 					match.mask->dst);
3764 				return -EINVAL;
3765 			}
3766 		}
3767 
3768 		if (!is_zero_ether_addr(match.mask->src)) {
3769 			if (is_broadcast_ether_addr(match.mask->src)) {
3770 				field_flags |= IAVF_CLOUD_FIELD_IMAC;
3771 			} else {
3772 				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3773 					match.mask->src);
3774 				return -EINVAL;
3775 			}
3776 		}
3777 
3778 		if (!is_zero_ether_addr(match.key->dst))
3779 			if (is_valid_ether_addr(match.key->dst) ||
3780 			    is_multicast_ether_addr(match.key->dst)) {
3781 				/* set the mask if a valid dst_mac address */
3782 				for (i = 0; i < ETH_ALEN; i++)
3783 					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3784 				ether_addr_copy(vf->data.tcp_spec.dst_mac,
3785 						match.key->dst);
3786 			}
3787 
3788 		if (!is_zero_ether_addr(match.key->src))
3789 			if (is_valid_ether_addr(match.key->src) ||
3790 			    is_multicast_ether_addr(match.key->src)) {
3791 				/* set the mask if a valid dst_mac address */
3792 				for (i = 0; i < ETH_ALEN; i++)
3793 					vf->mask.tcp_spec.src_mac[i] |= 0xff;
3794 				ether_addr_copy(vf->data.tcp_spec.src_mac,
3795 						match.key->src);
3796 		}
3797 	}
3798 
3799 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3800 		struct flow_match_vlan match;
3801 
3802 		flow_rule_match_vlan(rule, &match);
3803 		if (match.mask->vlan_id) {
3804 			if (match.mask->vlan_id == VLAN_VID_MASK) {
3805 				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3806 			} else {
3807 				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3808 					match.mask->vlan_id);
3809 				return -EINVAL;
3810 			}
3811 		}
3812 		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3813 		vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3814 	}
3815 
3816 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3817 		struct flow_match_control match;
3818 
3819 		flow_rule_match_control(rule, &match);
3820 		addr_type = match.key->addr_type;
3821 	}
3822 
3823 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3824 		struct flow_match_ipv4_addrs match;
3825 
3826 		flow_rule_match_ipv4_addrs(rule, &match);
3827 		if (match.mask->dst) {
3828 			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3829 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3830 			} else {
3831 				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3832 					be32_to_cpu(match.mask->dst));
3833 				return -EINVAL;
3834 			}
3835 		}
3836 
3837 		if (match.mask->src) {
3838 			if (match.mask->src == cpu_to_be32(0xffffffff)) {
3839 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3840 			} else {
3841 				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3842 					be32_to_cpu(match.mask->src));
3843 				return -EINVAL;
3844 			}
3845 		}
3846 
3847 		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3848 			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3849 			return -EINVAL;
3850 		}
3851 		if (match.key->dst) {
3852 			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3853 			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3854 		}
3855 		if (match.key->src) {
3856 			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3857 			vf->data.tcp_spec.src_ip[0] = match.key->src;
3858 		}
3859 	}
3860 
3861 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3862 		struct flow_match_ipv6_addrs match;
3863 
3864 		flow_rule_match_ipv6_addrs(rule, &match);
3865 
3866 		/* validate mask, make sure it is not IPV6_ADDR_ANY */
3867 		if (ipv6_addr_any(&match.mask->dst)) {
3868 			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3869 				IPV6_ADDR_ANY);
3870 			return -EINVAL;
3871 		}
3872 
3873 		/* src and dest IPv6 address should not be LOOPBACK
3874 		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3875 		 */
3876 		if (ipv6_addr_loopback(&match.key->dst) ||
3877 		    ipv6_addr_loopback(&match.key->src)) {
3878 			dev_err(&adapter->pdev->dev,
3879 				"ipv6 addr should not be loopback\n");
3880 			return -EINVAL;
3881 		}
3882 		if (!ipv6_addr_any(&match.mask->dst) ||
3883 		    !ipv6_addr_any(&match.mask->src))
3884 			field_flags |= IAVF_CLOUD_FIELD_IIP;
3885 
3886 		for (i = 0; i < 4; i++)
3887 			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3888 		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3889 		       sizeof(vf->data.tcp_spec.dst_ip));
3890 		for (i = 0; i < 4; i++)
3891 			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3892 		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3893 		       sizeof(vf->data.tcp_spec.src_ip));
3894 	}
3895 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3896 		struct flow_match_ports match;
3897 
3898 		flow_rule_match_ports(rule, &match);
3899 		if (match.mask->src) {
3900 			if (match.mask->src == cpu_to_be16(0xffff)) {
3901 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3902 			} else {
3903 				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3904 					be16_to_cpu(match.mask->src));
3905 				return -EINVAL;
3906 			}
3907 		}
3908 
3909 		if (match.mask->dst) {
3910 			if (match.mask->dst == cpu_to_be16(0xffff)) {
3911 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3912 			} else {
3913 				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3914 					be16_to_cpu(match.mask->dst));
3915 				return -EINVAL;
3916 			}
3917 		}
3918 		if (match.key->dst) {
3919 			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3920 			vf->data.tcp_spec.dst_port = match.key->dst;
3921 		}
3922 
3923 		if (match.key->src) {
3924 			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3925 			vf->data.tcp_spec.src_port = match.key->src;
3926 		}
3927 	}
3928 	vf->field_flags = field_flags;
3929 
3930 	return 0;
3931 }
3932 
3933 /**
3934  * iavf_handle_tclass - Forward to a traffic class on the device
3935  * @adapter: board private structure
3936  * @tc: traffic class index on the device
3937  * @filter: pointer to cloud filter structure
3938  */
3939 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3940 			      struct iavf_cloud_filter *filter)
3941 {
3942 	if (tc == 0)
3943 		return 0;
3944 	if (tc < adapter->num_tc) {
3945 		if (!filter->f.data.tcp_spec.dst_port) {
3946 			dev_err(&adapter->pdev->dev,
3947 				"Specify destination port to redirect to traffic class other than TC0\n");
3948 			return -EINVAL;
3949 		}
3950 	}
3951 	/* redirect to a traffic class on the same device */
3952 	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3953 	filter->f.action_meta = tc;
3954 	return 0;
3955 }
3956 
3957 /**
3958  * iavf_find_cf - Find the cloud filter in the list
3959  * @adapter: Board private structure
3960  * @cookie: filter specific cookie
3961  *
3962  * Returns ptr to the filter object or NULL. Must be called while holding the
3963  * cloud_filter_list_lock.
3964  */
3965 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3966 					      unsigned long *cookie)
3967 {
3968 	struct iavf_cloud_filter *filter = NULL;
3969 
3970 	if (!cookie)
3971 		return NULL;
3972 
3973 	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3974 		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3975 			return filter;
3976 	}
3977 	return NULL;
3978 }
3979 
3980 /**
3981  * iavf_configure_clsflower - Add tc flower filters
3982  * @adapter: board private structure
3983  * @cls_flower: Pointer to struct flow_cls_offload
3984  */
3985 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3986 				    struct flow_cls_offload *cls_flower)
3987 {
3988 	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3989 	struct iavf_cloud_filter *filter = NULL;
3990 	int err = -EINVAL, count = 50;
3991 
3992 	if (tc < 0) {
3993 		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3994 		return -EINVAL;
3995 	}
3996 
3997 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3998 	if (!filter)
3999 		return -ENOMEM;
4000 
4001 	while (!mutex_trylock(&adapter->crit_lock)) {
4002 		if (--count == 0) {
4003 			kfree(filter);
4004 			return err;
4005 		}
4006 		udelay(1);
4007 	}
4008 
4009 	filter->cookie = cls_flower->cookie;
4010 
4011 	/* bail out here if filter already exists */
4012 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4013 	if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4014 		dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4015 		err = -EEXIST;
4016 		goto spin_unlock;
4017 	}
4018 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4019 
4020 	/* set the mask to all zeroes to begin with */
4021 	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4022 	/* start out with flow type and eth type IPv4 to begin with */
4023 	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4024 	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4025 	if (err)
4026 		goto err;
4027 
4028 	err = iavf_handle_tclass(adapter, tc, filter);
4029 	if (err)
4030 		goto err;
4031 
4032 	/* add filter to the list */
4033 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4034 	list_add_tail(&filter->list, &adapter->cloud_filter_list);
4035 	adapter->num_cloud_filters++;
4036 	filter->add = true;
4037 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4038 spin_unlock:
4039 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4040 err:
4041 	if (err)
4042 		kfree(filter);
4043 
4044 	mutex_unlock(&adapter->crit_lock);
4045 	return err;
4046 }
4047 
4048 /**
4049  * iavf_delete_clsflower - Remove tc flower filters
4050  * @adapter: board private structure
4051  * @cls_flower: Pointer to struct flow_cls_offload
4052  */
4053 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4054 				 struct flow_cls_offload *cls_flower)
4055 {
4056 	struct iavf_cloud_filter *filter = NULL;
4057 	int err = 0;
4058 
4059 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4060 	filter = iavf_find_cf(adapter, &cls_flower->cookie);
4061 	if (filter) {
4062 		filter->del = true;
4063 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4064 	} else {
4065 		err = -EINVAL;
4066 	}
4067 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4068 
4069 	return err;
4070 }
4071 
4072 /**
4073  * iavf_setup_tc_cls_flower - flower classifier offloads
4074  * @adapter: board private structure
4075  * @cls_flower: pointer to flow_cls_offload struct with flow info
4076  */
4077 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4078 				    struct flow_cls_offload *cls_flower)
4079 {
4080 	switch (cls_flower->command) {
4081 	case FLOW_CLS_REPLACE:
4082 		return iavf_configure_clsflower(adapter, cls_flower);
4083 	case FLOW_CLS_DESTROY:
4084 		return iavf_delete_clsflower(adapter, cls_flower);
4085 	case FLOW_CLS_STATS:
4086 		return -EOPNOTSUPP;
4087 	default:
4088 		return -EOPNOTSUPP;
4089 	}
4090 }
4091 
4092 /**
4093  * iavf_setup_tc_block_cb - block callback for tc
4094  * @type: type of offload
4095  * @type_data: offload data
4096  * @cb_priv:
4097  *
4098  * This function is the block callback for traffic classes
4099  **/
4100 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4101 				  void *cb_priv)
4102 {
4103 	struct iavf_adapter *adapter = cb_priv;
4104 
4105 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4106 		return -EOPNOTSUPP;
4107 
4108 	switch (type) {
4109 	case TC_SETUP_CLSFLOWER:
4110 		return iavf_setup_tc_cls_flower(cb_priv, type_data);
4111 	default:
4112 		return -EOPNOTSUPP;
4113 	}
4114 }
4115 
4116 static LIST_HEAD(iavf_block_cb_list);
4117 
4118 /**
4119  * iavf_setup_tc - configure multiple traffic classes
4120  * @netdev: network interface device structure
4121  * @type: type of offload
4122  * @type_data: tc offload data
4123  *
4124  * This function is the callback to ndo_setup_tc in the
4125  * netdev_ops.
4126  *
4127  * Returns 0 on success
4128  **/
4129 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4130 			 void *type_data)
4131 {
4132 	struct iavf_adapter *adapter = netdev_priv(netdev);
4133 
4134 	switch (type) {
4135 	case TC_SETUP_QDISC_MQPRIO:
4136 		return __iavf_setup_tc(netdev, type_data);
4137 	case TC_SETUP_BLOCK:
4138 		return flow_block_cb_setup_simple(type_data,
4139 						  &iavf_block_cb_list,
4140 						  iavf_setup_tc_block_cb,
4141 						  adapter, adapter, true);
4142 	default:
4143 		return -EOPNOTSUPP;
4144 	}
4145 }
4146 
4147 /**
4148  * iavf_open - Called when a network interface is made active
4149  * @netdev: network interface device structure
4150  *
4151  * Returns 0 on success, negative value on failure
4152  *
4153  * The open entry point is called when a network interface is made
4154  * active by the system (IFF_UP).  At this point all resources needed
4155  * for transmit and receive operations are allocated, the interrupt
4156  * handler is registered with the OS, the watchdog is started,
4157  * and the stack is notified that the interface is ready.
4158  **/
4159 static int iavf_open(struct net_device *netdev)
4160 {
4161 	struct iavf_adapter *adapter = netdev_priv(netdev);
4162 	int err;
4163 
4164 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4165 		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4166 		return -EIO;
4167 	}
4168 
4169 	while (!mutex_trylock(&adapter->crit_lock)) {
4170 		/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4171 		 * is already taken and iavf_open is called from an upper
4172 		 * device's notifier reacting on NETDEV_REGISTER event.
4173 		 * We have to leave here to avoid dead lock.
4174 		 */
4175 		if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4176 			return -EBUSY;
4177 
4178 		usleep_range(500, 1000);
4179 	}
4180 
4181 	if (adapter->state != __IAVF_DOWN) {
4182 		err = -EBUSY;
4183 		goto err_unlock;
4184 	}
4185 
4186 	if (adapter->state == __IAVF_RUNNING &&
4187 	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4188 		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4189 		err = 0;
4190 		goto err_unlock;
4191 	}
4192 
4193 	/* allocate transmit descriptors */
4194 	err = iavf_setup_all_tx_resources(adapter);
4195 	if (err)
4196 		goto err_setup_tx;
4197 
4198 	/* allocate receive descriptors */
4199 	err = iavf_setup_all_rx_resources(adapter);
4200 	if (err)
4201 		goto err_setup_rx;
4202 
4203 	/* clear any pending interrupts, may auto mask */
4204 	err = iavf_request_traffic_irqs(adapter, netdev->name);
4205 	if (err)
4206 		goto err_req_irq;
4207 
4208 	spin_lock_bh(&adapter->mac_vlan_list_lock);
4209 
4210 	iavf_add_filter(adapter, adapter->hw.mac.addr);
4211 
4212 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
4213 
4214 	/* Restore VLAN filters that were removed with IFF_DOWN */
4215 	iavf_restore_filters(adapter);
4216 
4217 	iavf_configure(adapter);
4218 
4219 	iavf_up_complete(adapter);
4220 
4221 	iavf_irq_enable(adapter, true);
4222 
4223 	mutex_unlock(&adapter->crit_lock);
4224 
4225 	return 0;
4226 
4227 err_req_irq:
4228 	iavf_down(adapter);
4229 	iavf_free_traffic_irqs(adapter);
4230 err_setup_rx:
4231 	iavf_free_all_rx_resources(adapter);
4232 err_setup_tx:
4233 	iavf_free_all_tx_resources(adapter);
4234 err_unlock:
4235 	mutex_unlock(&adapter->crit_lock);
4236 
4237 	return err;
4238 }
4239 
4240 /**
4241  * iavf_close - Disables a network interface
4242  * @netdev: network interface device structure
4243  *
4244  * Returns 0, this is not allowed to fail
4245  *
4246  * The close entry point is called when an interface is de-activated
4247  * by the OS.  The hardware is still under the drivers control, but
4248  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4249  * are freed, along with all transmit and receive resources.
4250  **/
4251 static int iavf_close(struct net_device *netdev)
4252 {
4253 	struct iavf_adapter *adapter = netdev_priv(netdev);
4254 	u64 aq_to_restore;
4255 	int status;
4256 
4257 	mutex_lock(&adapter->crit_lock);
4258 
4259 	if (adapter->state <= __IAVF_DOWN_PENDING) {
4260 		mutex_unlock(&adapter->crit_lock);
4261 		return 0;
4262 	}
4263 
4264 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4265 	if (CLIENT_ENABLED(adapter))
4266 		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4267 	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
4268 	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
4269 	 * deadlock with adminq_task() until iavf_close timeouts. We must send
4270 	 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
4271 	 * disable queues possible for vf. Give only necessary flags to
4272 	 * iavf_down and save other to set them right before iavf_close()
4273 	 * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
4274 	 * iavf will be in DOWN state.
4275 	 */
4276 	aq_to_restore = adapter->aq_required;
4277 	adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
4278 
4279 	/* Remove flags which we do not want to send after close or we want to
4280 	 * send before disable queues.
4281 	 */
4282 	aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG		|
4283 			   IAVF_FLAG_AQ_ENABLE_QUEUES		|
4284 			   IAVF_FLAG_AQ_CONFIGURE_QUEUES	|
4285 			   IAVF_FLAG_AQ_ADD_VLAN_FILTER		|
4286 			   IAVF_FLAG_AQ_ADD_MAC_FILTER		|
4287 			   IAVF_FLAG_AQ_ADD_CLOUD_FILTER	|
4288 			   IAVF_FLAG_AQ_ADD_FDIR_FILTER		|
4289 			   IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
4290 
4291 	iavf_down(adapter);
4292 	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4293 	iavf_free_traffic_irqs(adapter);
4294 
4295 	mutex_unlock(&adapter->crit_lock);
4296 
4297 	/* We explicitly don't free resources here because the hardware is
4298 	 * still active and can DMA into memory. Resources are cleared in
4299 	 * iavf_virtchnl_completion() after we get confirmation from the PF
4300 	 * driver that the rings have been stopped.
4301 	 *
4302 	 * Also, we wait for state to transition to __IAVF_DOWN before
4303 	 * returning. State change occurs in iavf_virtchnl_completion() after
4304 	 * VF resources are released (which occurs after PF driver processes and
4305 	 * responds to admin queue commands).
4306 	 */
4307 
4308 	status = wait_event_timeout(adapter->down_waitqueue,
4309 				    adapter->state == __IAVF_DOWN,
4310 				    msecs_to_jiffies(500));
4311 	if (!status)
4312 		netdev_warn(netdev, "Device resources not yet released\n");
4313 
4314 	mutex_lock(&adapter->crit_lock);
4315 	adapter->aq_required |= aq_to_restore;
4316 	mutex_unlock(&adapter->crit_lock);
4317 	return 0;
4318 }
4319 
4320 /**
4321  * iavf_change_mtu - Change the Maximum Transfer Unit
4322  * @netdev: network interface device structure
4323  * @new_mtu: new value for maximum frame size
4324  *
4325  * Returns 0 on success, negative on failure
4326  **/
4327 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4328 {
4329 	struct iavf_adapter *adapter = netdev_priv(netdev);
4330 
4331 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
4332 		   netdev->mtu, new_mtu);
4333 	netdev->mtu = new_mtu;
4334 	if (CLIENT_ENABLED(adapter)) {
4335 		iavf_notify_client_l2_params(&adapter->vsi);
4336 		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4337 	}
4338 
4339 	if (netif_running(netdev)) {
4340 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4341 		queue_work(adapter->wq, &adapter->reset_task);
4342 	}
4343 
4344 	return 0;
4345 }
4346 
4347 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
4348 					 NETIF_F_HW_VLAN_CTAG_TX | \
4349 					 NETIF_F_HW_VLAN_STAG_RX | \
4350 					 NETIF_F_HW_VLAN_STAG_TX)
4351 
4352 /**
4353  * iavf_set_features - set the netdev feature flags
4354  * @netdev: ptr to the netdev being adjusted
4355  * @features: the feature set that the stack is suggesting
4356  * Note: expects to be called while under rtnl_lock()
4357  **/
4358 static int iavf_set_features(struct net_device *netdev,
4359 			     netdev_features_t features)
4360 {
4361 	struct iavf_adapter *adapter = netdev_priv(netdev);
4362 
4363 	/* trigger update on any VLAN feature change */
4364 	if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4365 	    (features & NETIF_VLAN_OFFLOAD_FEATURES))
4366 		iavf_set_vlan_offload_features(adapter, netdev->features,
4367 					       features);
4368 
4369 	return 0;
4370 }
4371 
4372 /**
4373  * iavf_features_check - Validate encapsulated packet conforms to limits
4374  * @skb: skb buff
4375  * @dev: This physical port's netdev
4376  * @features: Offload features that the stack believes apply
4377  **/
4378 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4379 					     struct net_device *dev,
4380 					     netdev_features_t features)
4381 {
4382 	size_t len;
4383 
4384 	/* No point in doing any of this if neither checksum nor GSO are
4385 	 * being requested for this frame.  We can rule out both by just
4386 	 * checking for CHECKSUM_PARTIAL
4387 	 */
4388 	if (skb->ip_summed != CHECKSUM_PARTIAL)
4389 		return features;
4390 
4391 	/* We cannot support GSO if the MSS is going to be less than
4392 	 * 64 bytes.  If it is then we need to drop support for GSO.
4393 	 */
4394 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4395 		features &= ~NETIF_F_GSO_MASK;
4396 
4397 	/* MACLEN can support at most 63 words */
4398 	len = skb_network_header(skb) - skb->data;
4399 	if (len & ~(63 * 2))
4400 		goto out_err;
4401 
4402 	/* IPLEN and EIPLEN can support at most 127 dwords */
4403 	len = skb_transport_header(skb) - skb_network_header(skb);
4404 	if (len & ~(127 * 4))
4405 		goto out_err;
4406 
4407 	if (skb->encapsulation) {
4408 		/* L4TUNLEN can support 127 words */
4409 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
4410 		if (len & ~(127 * 2))
4411 			goto out_err;
4412 
4413 		/* IPLEN can support at most 127 dwords */
4414 		len = skb_inner_transport_header(skb) -
4415 		      skb_inner_network_header(skb);
4416 		if (len & ~(127 * 4))
4417 			goto out_err;
4418 	}
4419 
4420 	/* No need to validate L4LEN as TCP is the only protocol with a
4421 	 * flexible value and we support all possible values supported
4422 	 * by TCP, which is at most 15 dwords
4423 	 */
4424 
4425 	return features;
4426 out_err:
4427 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4428 }
4429 
4430 /**
4431  * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4432  * @adapter: board private structure
4433  *
4434  * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4435  * were negotiated determine the VLAN features that can be toggled on and off.
4436  **/
4437 static netdev_features_t
4438 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4439 {
4440 	netdev_features_t hw_features = 0;
4441 
4442 	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4443 		return hw_features;
4444 
4445 	/* Enable VLAN features if supported */
4446 	if (VLAN_ALLOWED(adapter)) {
4447 		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4448 				NETIF_F_HW_VLAN_CTAG_RX);
4449 	} else if (VLAN_V2_ALLOWED(adapter)) {
4450 		struct virtchnl_vlan_caps *vlan_v2_caps =
4451 			&adapter->vlan_v2_caps;
4452 		struct virtchnl_vlan_supported_caps *stripping_support =
4453 			&vlan_v2_caps->offloads.stripping_support;
4454 		struct virtchnl_vlan_supported_caps *insertion_support =
4455 			&vlan_v2_caps->offloads.insertion_support;
4456 
4457 		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4458 		    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4459 			if (stripping_support->outer &
4460 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4461 				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4462 			if (stripping_support->outer &
4463 			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
4464 				hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4465 		} else if (stripping_support->inner !=
4466 			   VIRTCHNL_VLAN_UNSUPPORTED &&
4467 			   stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4468 			if (stripping_support->inner &
4469 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4470 				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4471 		}
4472 
4473 		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4474 		    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4475 			if (insertion_support->outer &
4476 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4477 				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4478 			if (insertion_support->outer &
4479 			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
4480 				hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4481 		} else if (insertion_support->inner &&
4482 			   insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4483 			if (insertion_support->inner &
4484 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4485 				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4486 		}
4487 	}
4488 
4489 	return hw_features;
4490 }
4491 
4492 /**
4493  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN fetures
4494  * @adapter: board private structure
4495  *
4496  * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4497  * were negotiated determine the VLAN features that are enabled by default.
4498  **/
4499 static netdev_features_t
4500 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4501 {
4502 	netdev_features_t features = 0;
4503 
4504 	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4505 		return features;
4506 
4507 	if (VLAN_ALLOWED(adapter)) {
4508 		features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4509 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4510 	} else if (VLAN_V2_ALLOWED(adapter)) {
4511 		struct virtchnl_vlan_caps *vlan_v2_caps =
4512 			&adapter->vlan_v2_caps;
4513 		struct virtchnl_vlan_supported_caps *filtering_support =
4514 			&vlan_v2_caps->filtering.filtering_support;
4515 		struct virtchnl_vlan_supported_caps *stripping_support =
4516 			&vlan_v2_caps->offloads.stripping_support;
4517 		struct virtchnl_vlan_supported_caps *insertion_support =
4518 			&vlan_v2_caps->offloads.insertion_support;
4519 		u32 ethertype_init;
4520 
4521 		/* give priority to outer stripping and don't support both outer
4522 		 * and inner stripping
4523 		 */
4524 		ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4525 		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4526 			if (stripping_support->outer &
4527 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4528 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4529 				features |= NETIF_F_HW_VLAN_CTAG_RX;
4530 			else if (stripping_support->outer &
4531 				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4532 				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4533 				features |= NETIF_F_HW_VLAN_STAG_RX;
4534 		} else if (stripping_support->inner !=
4535 			   VIRTCHNL_VLAN_UNSUPPORTED) {
4536 			if (stripping_support->inner &
4537 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4538 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4539 				features |= NETIF_F_HW_VLAN_CTAG_RX;
4540 		}
4541 
4542 		/* give priority to outer insertion and don't support both outer
4543 		 * and inner insertion
4544 		 */
4545 		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4546 			if (insertion_support->outer &
4547 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4548 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4549 				features |= NETIF_F_HW_VLAN_CTAG_TX;
4550 			else if (insertion_support->outer &
4551 				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4552 				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4553 				features |= NETIF_F_HW_VLAN_STAG_TX;
4554 		} else if (insertion_support->inner !=
4555 			   VIRTCHNL_VLAN_UNSUPPORTED) {
4556 			if (insertion_support->inner &
4557 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4558 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4559 				features |= NETIF_F_HW_VLAN_CTAG_TX;
4560 		}
4561 
4562 		/* give priority to outer filtering and don't bother if both
4563 		 * outer and inner filtering are enabled
4564 		 */
4565 		ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4566 		if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4567 			if (filtering_support->outer &
4568 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4569 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4570 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4571 			if (filtering_support->outer &
4572 			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4573 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4574 				features |= NETIF_F_HW_VLAN_STAG_FILTER;
4575 		} else if (filtering_support->inner !=
4576 			   VIRTCHNL_VLAN_UNSUPPORTED) {
4577 			if (filtering_support->inner &
4578 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4579 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4580 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4581 			if (filtering_support->inner &
4582 			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4583 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4584 				features |= NETIF_F_HW_VLAN_STAG_FILTER;
4585 		}
4586 	}
4587 
4588 	return features;
4589 }
4590 
4591 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4592 	(!(((requested) & (feature_bit)) && \
4593 	   !((allowed) & (feature_bit))))
4594 
4595 /**
4596  * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4597  * @adapter: board private structure
4598  * @requested_features: stack requested NETDEV features
4599  **/
4600 static netdev_features_t
4601 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4602 			      netdev_features_t requested_features)
4603 {
4604 	netdev_features_t allowed_features;
4605 
4606 	allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4607 		iavf_get_netdev_vlan_features(adapter);
4608 
4609 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4610 					      allowed_features,
4611 					      NETIF_F_HW_VLAN_CTAG_TX))
4612 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4613 
4614 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4615 					      allowed_features,
4616 					      NETIF_F_HW_VLAN_CTAG_RX))
4617 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4618 
4619 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4620 					      allowed_features,
4621 					      NETIF_F_HW_VLAN_STAG_TX))
4622 		requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4623 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4624 					      allowed_features,
4625 					      NETIF_F_HW_VLAN_STAG_RX))
4626 		requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4627 
4628 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4629 					      allowed_features,
4630 					      NETIF_F_HW_VLAN_CTAG_FILTER))
4631 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4632 
4633 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4634 					      allowed_features,
4635 					      NETIF_F_HW_VLAN_STAG_FILTER))
4636 		requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4637 
4638 	if ((requested_features &
4639 	     (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4640 	    (requested_features &
4641 	     (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4642 	    adapter->vlan_v2_caps.offloads.ethertype_match ==
4643 	    VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4644 		netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4645 		requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4646 					NETIF_F_HW_VLAN_STAG_TX);
4647 	}
4648 
4649 	return requested_features;
4650 }
4651 
4652 /**
4653  * iavf_fix_features - fix up the netdev feature bits
4654  * @netdev: our net device
4655  * @features: desired feature bits
4656  *
4657  * Returns fixed-up features bits
4658  **/
4659 static netdev_features_t iavf_fix_features(struct net_device *netdev,
4660 					   netdev_features_t features)
4661 {
4662 	struct iavf_adapter *adapter = netdev_priv(netdev);
4663 
4664 	return iavf_fix_netdev_vlan_features(adapter, features);
4665 }
4666 
4667 static const struct net_device_ops iavf_netdev_ops = {
4668 	.ndo_open		= iavf_open,
4669 	.ndo_stop		= iavf_close,
4670 	.ndo_start_xmit		= iavf_xmit_frame,
4671 	.ndo_set_rx_mode	= iavf_set_rx_mode,
4672 	.ndo_validate_addr	= eth_validate_addr,
4673 	.ndo_set_mac_address	= iavf_set_mac,
4674 	.ndo_change_mtu		= iavf_change_mtu,
4675 	.ndo_tx_timeout		= iavf_tx_timeout,
4676 	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
4677 	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
4678 	.ndo_features_check	= iavf_features_check,
4679 	.ndo_fix_features	= iavf_fix_features,
4680 	.ndo_set_features	= iavf_set_features,
4681 	.ndo_setup_tc		= iavf_setup_tc,
4682 };
4683 
4684 /**
4685  * iavf_check_reset_complete - check that VF reset is complete
4686  * @hw: pointer to hw struct
4687  *
4688  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
4689  **/
4690 static int iavf_check_reset_complete(struct iavf_hw *hw)
4691 {
4692 	u32 rstat;
4693 	int i;
4694 
4695 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4696 		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4697 			     IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4698 		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4699 		    (rstat == VIRTCHNL_VFR_COMPLETED))
4700 			return 0;
4701 		usleep_range(10, 20);
4702 	}
4703 	return -EBUSY;
4704 }
4705 
4706 /**
4707  * iavf_process_config - Process the config information we got from the PF
4708  * @adapter: board private structure
4709  *
4710  * Verify that we have a valid config struct, and set up our netdev features
4711  * and our VSI struct.
4712  **/
4713 int iavf_process_config(struct iavf_adapter *adapter)
4714 {
4715 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
4716 	netdev_features_t hw_vlan_features, vlan_features;
4717 	struct net_device *netdev = adapter->netdev;
4718 	netdev_features_t hw_enc_features;
4719 	netdev_features_t hw_features;
4720 
4721 	hw_enc_features = NETIF_F_SG			|
4722 			  NETIF_F_IP_CSUM		|
4723 			  NETIF_F_IPV6_CSUM		|
4724 			  NETIF_F_HIGHDMA		|
4725 			  NETIF_F_SOFT_FEATURES	|
4726 			  NETIF_F_TSO			|
4727 			  NETIF_F_TSO_ECN		|
4728 			  NETIF_F_TSO6			|
4729 			  NETIF_F_SCTP_CRC		|
4730 			  NETIF_F_RXHASH		|
4731 			  NETIF_F_RXCSUM		|
4732 			  0;
4733 
4734 	/* advertise to stack only if offloads for encapsulated packets is
4735 	 * supported
4736 	 */
4737 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4738 		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
4739 				   NETIF_F_GSO_GRE		|
4740 				   NETIF_F_GSO_GRE_CSUM		|
4741 				   NETIF_F_GSO_IPXIP4		|
4742 				   NETIF_F_GSO_IPXIP6		|
4743 				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
4744 				   NETIF_F_GSO_PARTIAL		|
4745 				   0;
4746 
4747 		if (!(vfres->vf_cap_flags &
4748 		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4749 			netdev->gso_partial_features |=
4750 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
4751 
4752 		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4753 		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4754 		netdev->hw_enc_features |= hw_enc_features;
4755 	}
4756 	/* record features VLANs can make use of */
4757 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4758 
4759 	/* Write features and hw_features separately to avoid polluting
4760 	 * with, or dropping, features that are set when we registered.
4761 	 */
4762 	hw_features = hw_enc_features;
4763 
4764 	/* get HW VLAN features that can be toggled */
4765 	hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4766 
4767 	/* Enable cloud filter if ADQ is supported */
4768 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4769 		hw_features |= NETIF_F_HW_TC;
4770 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4771 		hw_features |= NETIF_F_GSO_UDP_L4;
4772 
4773 	netdev->hw_features |= hw_features | hw_vlan_features;
4774 	vlan_features = iavf_get_netdev_vlan_features(adapter);
4775 
4776 	netdev->features |= hw_features | vlan_features;
4777 
4778 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4779 		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4780 
4781 	netdev->priv_flags |= IFF_UNICAST_FLT;
4782 
4783 	/* Do not turn on offloads when they are requested to be turned off.
4784 	 * TSO needs minimum 576 bytes to work correctly.
4785 	 */
4786 	if (netdev->wanted_features) {
4787 		if (!(netdev->wanted_features & NETIF_F_TSO) ||
4788 		    netdev->mtu < 576)
4789 			netdev->features &= ~NETIF_F_TSO;
4790 		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4791 		    netdev->mtu < 576)
4792 			netdev->features &= ~NETIF_F_TSO6;
4793 		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4794 			netdev->features &= ~NETIF_F_TSO_ECN;
4795 		if (!(netdev->wanted_features & NETIF_F_GRO))
4796 			netdev->features &= ~NETIF_F_GRO;
4797 		if (!(netdev->wanted_features & NETIF_F_GSO))
4798 			netdev->features &= ~NETIF_F_GSO;
4799 	}
4800 
4801 	return 0;
4802 }
4803 
4804 /**
4805  * iavf_shutdown - Shutdown the device in preparation for a reboot
4806  * @pdev: pci device structure
4807  **/
4808 static void iavf_shutdown(struct pci_dev *pdev)
4809 {
4810 	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4811 	struct net_device *netdev = adapter->netdev;
4812 
4813 	netif_device_detach(netdev);
4814 
4815 	if (netif_running(netdev))
4816 		iavf_close(netdev);
4817 
4818 	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4819 		dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
4820 	/* Prevent the watchdog from running. */
4821 	iavf_change_state(adapter, __IAVF_REMOVE);
4822 	adapter->aq_required = 0;
4823 	mutex_unlock(&adapter->crit_lock);
4824 
4825 #ifdef CONFIG_PM
4826 	pci_save_state(pdev);
4827 
4828 #endif
4829 	pci_disable_device(pdev);
4830 }
4831 
4832 /**
4833  * iavf_probe - Device Initialization Routine
4834  * @pdev: PCI device information struct
4835  * @ent: entry in iavf_pci_tbl
4836  *
4837  * Returns 0 on success, negative on failure
4838  *
4839  * iavf_probe initializes an adapter identified by a pci_dev structure.
4840  * The OS initialization, configuring of the adapter private structure,
4841  * and a hardware reset occur.
4842  **/
4843 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4844 {
4845 	struct net_device *netdev;
4846 	struct iavf_adapter *adapter = NULL;
4847 	struct iavf_hw *hw = NULL;
4848 	int err;
4849 
4850 	err = pci_enable_device(pdev);
4851 	if (err)
4852 		return err;
4853 
4854 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4855 	if (err) {
4856 		dev_err(&pdev->dev,
4857 			"DMA configuration failed: 0x%x\n", err);
4858 		goto err_dma;
4859 	}
4860 
4861 	err = pci_request_regions(pdev, iavf_driver_name);
4862 	if (err) {
4863 		dev_err(&pdev->dev,
4864 			"pci_request_regions failed 0x%x\n", err);
4865 		goto err_pci_reg;
4866 	}
4867 
4868 	pci_set_master(pdev);
4869 
4870 	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4871 				   IAVF_MAX_REQ_QUEUES);
4872 	if (!netdev) {
4873 		err = -ENOMEM;
4874 		goto err_alloc_etherdev;
4875 	}
4876 
4877 	SET_NETDEV_DEV(netdev, &pdev->dev);
4878 
4879 	pci_set_drvdata(pdev, netdev);
4880 	adapter = netdev_priv(netdev);
4881 
4882 	adapter->netdev = netdev;
4883 	adapter->pdev = pdev;
4884 
4885 	hw = &adapter->hw;
4886 	hw->back = adapter;
4887 
4888 	adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4889 					      iavf_driver_name);
4890 	if (!adapter->wq) {
4891 		err = -ENOMEM;
4892 		goto err_alloc_wq;
4893 	}
4894 
4895 	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4896 	iavf_change_state(adapter, __IAVF_STARTUP);
4897 
4898 	/* Call save state here because it relies on the adapter struct. */
4899 	pci_save_state(pdev);
4900 
4901 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4902 			      pci_resource_len(pdev, 0));
4903 	if (!hw->hw_addr) {
4904 		err = -EIO;
4905 		goto err_ioremap;
4906 	}
4907 	hw->vendor_id = pdev->vendor;
4908 	hw->device_id = pdev->device;
4909 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4910 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4911 	hw->subsystem_device_id = pdev->subsystem_device;
4912 	hw->bus.device = PCI_SLOT(pdev->devfn);
4913 	hw->bus.func = PCI_FUNC(pdev->devfn);
4914 	hw->bus.bus_id = pdev->bus->number;
4915 
4916 	/* set up the locks for the AQ, do this only once in probe
4917 	 * and destroy them only once in remove
4918 	 */
4919 	mutex_init(&adapter->crit_lock);
4920 	mutex_init(&adapter->client_lock);
4921 	mutex_init(&hw->aq.asq_mutex);
4922 	mutex_init(&hw->aq.arq_mutex);
4923 
4924 	spin_lock_init(&adapter->mac_vlan_list_lock);
4925 	spin_lock_init(&adapter->cloud_filter_list_lock);
4926 	spin_lock_init(&adapter->fdir_fltr_lock);
4927 	spin_lock_init(&adapter->adv_rss_lock);
4928 
4929 	INIT_LIST_HEAD(&adapter->mac_filter_list);
4930 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
4931 	INIT_LIST_HEAD(&adapter->cloud_filter_list);
4932 	INIT_LIST_HEAD(&adapter->fdir_list_head);
4933 	INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4934 
4935 	INIT_WORK(&adapter->reset_task, iavf_reset_task);
4936 	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4937 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4938 	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
4939 	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4940 			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4941 
4942 	/* Setup the wait queue for indicating transition to down status */
4943 	init_waitqueue_head(&adapter->down_waitqueue);
4944 
4945 	/* Setup the wait queue for indicating virtchannel events */
4946 	init_waitqueue_head(&adapter->vc_waitqueue);
4947 
4948 	return 0;
4949 
4950 err_ioremap:
4951 	destroy_workqueue(adapter->wq);
4952 err_alloc_wq:
4953 	free_netdev(netdev);
4954 err_alloc_etherdev:
4955 	pci_release_regions(pdev);
4956 err_pci_reg:
4957 err_dma:
4958 	pci_disable_device(pdev);
4959 	return err;
4960 }
4961 
4962 /**
4963  * iavf_suspend - Power management suspend routine
4964  * @dev_d: device info pointer
4965  *
4966  * Called when the system (VM) is entering sleep/suspend.
4967  **/
4968 static int __maybe_unused iavf_suspend(struct device *dev_d)
4969 {
4970 	struct net_device *netdev = dev_get_drvdata(dev_d);
4971 	struct iavf_adapter *adapter = netdev_priv(netdev);
4972 
4973 	netif_device_detach(netdev);
4974 
4975 	while (!mutex_trylock(&adapter->crit_lock))
4976 		usleep_range(500, 1000);
4977 
4978 	if (netif_running(netdev)) {
4979 		rtnl_lock();
4980 		iavf_down(adapter);
4981 		rtnl_unlock();
4982 	}
4983 	iavf_free_misc_irq(adapter);
4984 	iavf_reset_interrupt_capability(adapter);
4985 
4986 	mutex_unlock(&adapter->crit_lock);
4987 
4988 	return 0;
4989 }
4990 
4991 /**
4992  * iavf_resume - Power management resume routine
4993  * @dev_d: device info pointer
4994  *
4995  * Called when the system (VM) is resumed from sleep/suspend.
4996  **/
4997 static int __maybe_unused iavf_resume(struct device *dev_d)
4998 {
4999 	struct pci_dev *pdev = to_pci_dev(dev_d);
5000 	struct iavf_adapter *adapter;
5001 	u32 err;
5002 
5003 	adapter = iavf_pdev_to_adapter(pdev);
5004 
5005 	pci_set_master(pdev);
5006 
5007 	rtnl_lock();
5008 	err = iavf_set_interrupt_capability(adapter);
5009 	if (err) {
5010 		rtnl_unlock();
5011 		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5012 		return err;
5013 	}
5014 	err = iavf_request_misc_irq(adapter);
5015 	rtnl_unlock();
5016 	if (err) {
5017 		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5018 		return err;
5019 	}
5020 
5021 	queue_work(adapter->wq, &adapter->reset_task);
5022 
5023 	netif_device_attach(adapter->netdev);
5024 
5025 	return err;
5026 }
5027 
5028 /**
5029  * iavf_remove - Device Removal Routine
5030  * @pdev: PCI device information struct
5031  *
5032  * iavf_remove is called by the PCI subsystem to alert the driver
5033  * that it should release a PCI device.  The could be caused by a
5034  * Hot-Plug event, or because the driver is going to be removed from
5035  * memory.
5036  **/
5037 static void iavf_remove(struct pci_dev *pdev)
5038 {
5039 	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
5040 	struct iavf_fdir_fltr *fdir, *fdirtmp;
5041 	struct iavf_vlan_filter *vlf, *vlftmp;
5042 	struct iavf_cloud_filter *cf, *cftmp;
5043 	struct iavf_adv_rss *rss, *rsstmp;
5044 	struct iavf_mac_filter *f, *ftmp;
5045 	struct net_device *netdev;
5046 	struct iavf_hw *hw;
5047 	int err;
5048 
5049 	netdev = adapter->netdev;
5050 	hw = &adapter->hw;
5051 
5052 	if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
5053 		return;
5054 
5055 	/* Wait until port initialization is complete.
5056 	 * There are flows where register/unregister netdev may race.
5057 	 */
5058 	while (1) {
5059 		mutex_lock(&adapter->crit_lock);
5060 		if (adapter->state == __IAVF_RUNNING ||
5061 		    adapter->state == __IAVF_DOWN ||
5062 		    adapter->state == __IAVF_INIT_FAILED) {
5063 			mutex_unlock(&adapter->crit_lock);
5064 			break;
5065 		}
5066 		/* Simply return if we already went through iavf_shutdown */
5067 		if (adapter->state == __IAVF_REMOVE) {
5068 			mutex_unlock(&adapter->crit_lock);
5069 			return;
5070 		}
5071 
5072 		mutex_unlock(&adapter->crit_lock);
5073 		usleep_range(500, 1000);
5074 	}
5075 	cancel_delayed_work_sync(&adapter->watchdog_task);
5076 
5077 	if (adapter->netdev_registered) {
5078 		rtnl_lock();
5079 		unregister_netdevice(netdev);
5080 		adapter->netdev_registered = false;
5081 		rtnl_unlock();
5082 	}
5083 	if (CLIENT_ALLOWED(adapter)) {
5084 		err = iavf_lan_del_device(adapter);
5085 		if (err)
5086 			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
5087 				 err);
5088 	}
5089 
5090 	mutex_lock(&adapter->crit_lock);
5091 	dev_info(&adapter->pdev->dev, "Removing device\n");
5092 	iavf_change_state(adapter, __IAVF_REMOVE);
5093 
5094 	iavf_request_reset(adapter);
5095 	msleep(50);
5096 	/* If the FW isn't responding, kick it once, but only once. */
5097 	if (!iavf_asq_done(hw)) {
5098 		iavf_request_reset(adapter);
5099 		msleep(50);
5100 	}
5101 
5102 	iavf_misc_irq_disable(adapter);
5103 	/* Shut down all the garbage mashers on the detention level */
5104 	cancel_work_sync(&adapter->reset_task);
5105 	cancel_delayed_work_sync(&adapter->watchdog_task);
5106 	cancel_work_sync(&adapter->adminq_task);
5107 	cancel_delayed_work_sync(&adapter->client_task);
5108 
5109 	adapter->aq_required = 0;
5110 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5111 
5112 	iavf_free_all_tx_resources(adapter);
5113 	iavf_free_all_rx_resources(adapter);
5114 	iavf_free_misc_irq(adapter);
5115 
5116 	iavf_reset_interrupt_capability(adapter);
5117 	iavf_free_q_vectors(adapter);
5118 
5119 	iavf_free_rss(adapter);
5120 
5121 	if (hw->aq.asq.count)
5122 		iavf_shutdown_adminq(hw);
5123 
5124 	/* destroy the locks only once, here */
5125 	mutex_destroy(&hw->aq.arq_mutex);
5126 	mutex_destroy(&hw->aq.asq_mutex);
5127 	mutex_destroy(&adapter->client_lock);
5128 	mutex_unlock(&adapter->crit_lock);
5129 	mutex_destroy(&adapter->crit_lock);
5130 
5131 	iounmap(hw->hw_addr);
5132 	pci_release_regions(pdev);
5133 	iavf_free_queues(adapter);
5134 	kfree(adapter->vf_res);
5135 	spin_lock_bh(&adapter->mac_vlan_list_lock);
5136 	/* If we got removed before an up/down sequence, we've got a filter
5137 	 * hanging out there that we need to get rid of.
5138 	 */
5139 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5140 		list_del(&f->list);
5141 		kfree(f);
5142 	}
5143 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5144 				 list) {
5145 		list_del(&vlf->list);
5146 		kfree(vlf);
5147 	}
5148 
5149 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
5150 
5151 	spin_lock_bh(&adapter->cloud_filter_list_lock);
5152 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5153 		list_del(&cf->list);
5154 		kfree(cf);
5155 	}
5156 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
5157 
5158 	spin_lock_bh(&adapter->fdir_fltr_lock);
5159 	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5160 		list_del(&fdir->list);
5161 		kfree(fdir);
5162 	}
5163 	spin_unlock_bh(&adapter->fdir_fltr_lock);
5164 
5165 	spin_lock_bh(&adapter->adv_rss_lock);
5166 	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5167 				 list) {
5168 		list_del(&rss->list);
5169 		kfree(rss);
5170 	}
5171 	spin_unlock_bh(&adapter->adv_rss_lock);
5172 
5173 	destroy_workqueue(adapter->wq);
5174 
5175 	free_netdev(netdev);
5176 
5177 	pci_disable_device(pdev);
5178 }
5179 
5180 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5181 
5182 static struct pci_driver iavf_driver = {
5183 	.name      = iavf_driver_name,
5184 	.id_table  = iavf_pci_tbl,
5185 	.probe     = iavf_probe,
5186 	.remove    = iavf_remove,
5187 	.driver.pm = &iavf_pm_ops,
5188 	.shutdown  = iavf_shutdown,
5189 };
5190 
5191 /**
5192  * iavf_init_module - Driver Registration Routine
5193  *
5194  * iavf_init_module is the first routine called when the driver is
5195  * loaded. All it does is register with the PCI subsystem.
5196  **/
5197 static int __init iavf_init_module(void)
5198 {
5199 	pr_info("iavf: %s\n", iavf_driver_string);
5200 
5201 	pr_info("%s\n", iavf_copyright);
5202 
5203 	return pci_register_driver(&iavf_driver);
5204 }
5205 
5206 module_init(iavf_init_module);
5207 
5208 /**
5209  * iavf_exit_module - Driver Exit Cleanup Routine
5210  *
5211  * iavf_exit_module is called just before the driver is removed
5212  * from memory.
5213  **/
5214 static void __exit iavf_exit_module(void)
5215 {
5216 	pci_unregister_driver(&iavf_driver);
5217 }
5218 
5219 module_exit(iavf_exit_module);
5220 
5221 /* iavf_main.c */
5222