1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 
8 /* busy wait delay in msec */
9 #define IAVF_BUSY_WAIT_DELAY 10
10 #define IAVF_BUSY_WAIT_COUNT 50
11 
12 /**
13  * iavf_send_pf_msg
14  * @adapter: adapter structure
15  * @op: virtual channel opcode
16  * @msg: pointer to message buffer
17  * @len: message length
18  *
19  * Send message to PF and print status if failure.
20  **/
21 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
22 			    enum virtchnl_ops op, u8 *msg, u16 len)
23 {
24 	struct iavf_hw *hw = &adapter->hw;
25 	enum iavf_status err;
26 
27 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
28 		return 0; /* nothing to see here, move along */
29 
30 	err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
31 	if (err)
32 		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
33 			op, iavf_stat_str(hw, err),
34 			iavf_aq_str(hw, hw->aq.asq_last_status));
35 	return err;
36 }
37 
38 /**
39  * iavf_send_api_ver
40  * @adapter: adapter structure
41  *
42  * Send API version admin queue message to the PF. The reply is not checked
43  * in this function. Returns 0 if the message was successfully
44  * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
45  **/
46 int iavf_send_api_ver(struct iavf_adapter *adapter)
47 {
48 	struct virtchnl_version_info vvi;
49 
50 	vvi.major = VIRTCHNL_VERSION_MAJOR;
51 	vvi.minor = VIRTCHNL_VERSION_MINOR;
52 
53 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
54 				sizeof(vvi));
55 }
56 
57 /**
58  * iavf_verify_api_ver
59  * @adapter: adapter structure
60  *
61  * Compare API versions with the PF. Must be called after admin queue is
62  * initialized. Returns 0 if API versions match, -EIO if they do not,
63  * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
64  * from the firmware are propagated.
65  **/
66 int iavf_verify_api_ver(struct iavf_adapter *adapter)
67 {
68 	struct virtchnl_version_info *pf_vvi;
69 	struct iavf_hw *hw = &adapter->hw;
70 	struct iavf_arq_event_info event;
71 	enum virtchnl_ops op;
72 	enum iavf_status err;
73 
74 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
75 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
76 	if (!event.msg_buf) {
77 		err = -ENOMEM;
78 		goto out;
79 	}
80 
81 	while (1) {
82 		err = iavf_clean_arq_element(hw, &event, NULL);
83 		/* When the AQ is empty, iavf_clean_arq_element will return
84 		 * nonzero and this loop will terminate.
85 		 */
86 		if (err)
87 			goto out_alloc;
88 		op =
89 		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
90 		if (op == VIRTCHNL_OP_VERSION)
91 			break;
92 	}
93 
94 
95 	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
96 	if (err)
97 		goto out_alloc;
98 
99 	if (op != VIRTCHNL_OP_VERSION) {
100 		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
101 			op);
102 		err = -EIO;
103 		goto out_alloc;
104 	}
105 
106 	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
107 	adapter->pf_version = *pf_vvi;
108 
109 	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
110 	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
111 	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
112 		err = -EIO;
113 
114 out_alloc:
115 	kfree(event.msg_buf);
116 out:
117 	return err;
118 }
119 
120 /**
121  * iavf_send_vf_config_msg
122  * @adapter: adapter structure
123  *
124  * Send VF configuration request admin queue message to the PF. The reply
125  * is not checked in this function. Returns 0 if the message was
126  * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
127  **/
128 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
129 {
130 	u32 caps;
131 
132 	caps = VIRTCHNL_VF_OFFLOAD_L2 |
133 	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
134 	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
135 	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
136 	       VIRTCHNL_VF_OFFLOAD_VLAN |
137 	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
138 	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
139 	       VIRTCHNL_VF_OFFLOAD_ENCAP |
140 	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
141 	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
142 	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
143 	       VIRTCHNL_VF_OFFLOAD_ADQ |
144 	       VIRTCHNL_VF_OFFLOAD_USO |
145 	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
146 	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
147 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
148 
149 	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
150 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
151 	if (PF_IS_V11(adapter))
152 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
153 					(u8 *)&caps, sizeof(caps));
154 	else
155 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
156 					NULL, 0);
157 }
158 
/**
 * iavf_send_vf_offload_vlan_v2_msg - request VLAN V2 offload capabilities
 * @adapter: adapter structure
 *
 * Ask the PF for its VLAN V2 offload capabilities. The aq_required bit is
 * cleared up front so a disallowed request is not retried. Returns 0 if the
 * message was sent, -EOPNOTSUPP if VLAN V2 was not negotiated, or an admin
 * queue status code on send failure.
 **/
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
{
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;

	if (!VLAN_V2_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				NULL, 0);
}
171 
172 /**
173  * iavf_validate_num_queues
174  * @adapter: adapter structure
175  *
176  * Validate that the number of queues the PF has sent in
177  * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
178  **/
179 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
180 {
181 	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
182 		struct virtchnl_vsi_resource *vsi_res;
183 		int i;
184 
185 		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
186 			 adapter->vf_res->num_queue_pairs,
187 			 IAVF_MAX_REQ_QUEUES);
188 		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
189 			 IAVF_MAX_REQ_QUEUES);
190 		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
191 		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
192 			vsi_res = &adapter->vf_res->vsi_res[i];
193 			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
194 		}
195 	}
196 }
197 
198 /**
199  * iavf_get_vf_config
200  * @adapter: private adapter structure
201  *
202  * Get VF configuration from PF and populate hw structure. Must be called after
203  * admin queue is initialized. Busy waits until response is received from PF,
204  * with maximum timeout. Response from PF is returned in the buffer for further
205  * processing by the caller.
206  **/
207 int iavf_get_vf_config(struct iavf_adapter *adapter)
208 {
209 	struct iavf_hw *hw = &adapter->hw;
210 	struct iavf_arq_event_info event;
211 	enum virtchnl_ops op;
212 	enum iavf_status err;
213 	u16 len;
214 
215 	len =  sizeof(struct virtchnl_vf_resource) +
216 		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
217 	event.buf_len = len;
218 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
219 	if (!event.msg_buf) {
220 		err = -ENOMEM;
221 		goto out;
222 	}
223 
224 	while (1) {
225 		/* When the AQ is empty, iavf_clean_arq_element will return
226 		 * nonzero and this loop will terminate.
227 		 */
228 		err = iavf_clean_arq_element(hw, &event, NULL);
229 		if (err)
230 			goto out_alloc;
231 		op =
232 		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
233 		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
234 			break;
235 	}
236 
237 	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
238 	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
239 
240 	/* some PFs send more queues than we should have so validate that
241 	 * we aren't getting too many queues
242 	 */
243 	if (!err)
244 		iavf_validate_num_queues(adapter);
245 	iavf_vf_parse_hw_config(hw, adapter->vf_res);
246 out_alloc:
247 	kfree(event.msg_buf);
248 out:
249 	return err;
250 }
251 
/**
 * iavf_get_vf_vlan_v2_caps - receive the PF's VLAN V2 capability reply
 * @adapter: adapter structure
 *
 * Busy-poll the admin receive queue for the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * reply and copy the capabilities into adapter->vlan_v2_caps. Returns 0 on
 * success, -ENOMEM on allocation failure, a nonzero status when the AQ is
 * empty, or the PF-reported error for the request.
 **/
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops op;
	enum iavf_status err;
	u16 len;

	len =  sizeof(struct virtchnl_vlan_caps);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	/* discard unrelated messages until the caps reply arrives */
	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = iavf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
			break;
	}

	/* PF-reported status for the request */
	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
	if (err)
		goto out_alloc;

	/* copy is bounded by both the reply length and our struct size */
	memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
290 
291 /**
292  * iavf_configure_queues
293  * @adapter: adapter structure
294  *
295  * Request that the PF set up our (previously allocated) queues.
296  **/
297 void iavf_configure_queues(struct iavf_adapter *adapter)
298 {
299 	struct virtchnl_vsi_queue_config_info *vqci;
300 	struct virtchnl_queue_pair_info *vqpi;
301 	int pairs = adapter->num_active_queues;
302 	int i, max_frame = IAVF_MAX_RXBUFFER;
303 	size_t len;
304 
305 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
306 		/* bail because we already have a command pending */
307 		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
308 			adapter->current_op);
309 		return;
310 	}
311 	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
312 	len = struct_size(vqci, qpair, pairs);
313 	vqci = kzalloc(len, GFP_KERNEL);
314 	if (!vqci)
315 		return;
316 
317 	/* Limit maximum frame size when jumbo frames is not enabled */
318 	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
319 	    (adapter->netdev->mtu <= ETH_DATA_LEN))
320 		max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
321 
322 	vqci->vsi_id = adapter->vsi_res->vsi_id;
323 	vqci->num_queue_pairs = pairs;
324 	vqpi = vqci->qpair;
325 	/* Size check is not needed here - HW max is 16 queue pairs, and we
326 	 * can fit info for 31 of them into the AQ buffer before it overflows.
327 	 */
328 	for (i = 0; i < pairs; i++) {
329 		vqpi->txq.vsi_id = vqci->vsi_id;
330 		vqpi->txq.queue_id = i;
331 		vqpi->txq.ring_len = adapter->tx_rings[i].count;
332 		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
333 		vqpi->rxq.vsi_id = vqci->vsi_id;
334 		vqpi->rxq.queue_id = i;
335 		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
336 		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
337 		vqpi->rxq.max_pkt_size = max_frame;
338 		vqpi->rxq.databuffer_size =
339 			ALIGN(adapter->rx_rings[i].rx_buf_len,
340 			      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
341 		vqpi++;
342 	}
343 
344 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
345 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
346 			 (u8 *)vqci, len);
347 	kfree(vqci);
348 }
349 
350 /**
351  * iavf_enable_queues
352  * @adapter: adapter structure
353  *
354  * Request that the PF enable all of our queues.
355  **/
356 void iavf_enable_queues(struct iavf_adapter *adapter)
357 {
358 	struct virtchnl_queue_select vqs;
359 
360 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
361 		/* bail because we already have a command pending */
362 		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
363 			adapter->current_op);
364 		return;
365 	}
366 	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
367 	vqs.vsi_id = adapter->vsi_res->vsi_id;
368 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
369 	vqs.rx_queues = vqs.tx_queues;
370 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
371 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
372 			 (u8 *)&vqs, sizeof(vqs));
373 }
374 
375 /**
376  * iavf_disable_queues
377  * @adapter: adapter structure
378  *
379  * Request that the PF disable all of our queues.
380  **/
381 void iavf_disable_queues(struct iavf_adapter *adapter)
382 {
383 	struct virtchnl_queue_select vqs;
384 
385 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
386 		/* bail because we already have a command pending */
387 		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
388 			adapter->current_op);
389 		return;
390 	}
391 	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
392 	vqs.vsi_id = adapter->vsi_res->vsi_id;
393 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
394 	vqs.rx_queues = vqs.tx_queues;
395 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
396 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
397 			 (u8 *)&vqs, sizeof(vqs));
398 }
399 
400 /**
401  * iavf_map_queues
402  * @adapter: adapter structure
403  *
404  * Request that the PF map queues to interrupt vectors. Misc causes, including
405  * admin queue, are always mapped to vector 0.
406  **/
407 void iavf_map_queues(struct iavf_adapter *adapter)
408 {
409 	struct virtchnl_irq_map_info *vimi;
410 	struct virtchnl_vector_map *vecmap;
411 	struct iavf_q_vector *q_vector;
412 	int v_idx, q_vectors;
413 	size_t len;
414 
415 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
416 		/* bail because we already have a command pending */
417 		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
418 			adapter->current_op);
419 		return;
420 	}
421 	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
422 
423 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
424 
425 	len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
426 	vimi = kzalloc(len, GFP_KERNEL);
427 	if (!vimi)
428 		return;
429 
430 	vimi->num_vectors = adapter->num_msix_vectors;
431 	/* Queue vectors first */
432 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
433 		q_vector = &adapter->q_vectors[v_idx];
434 		vecmap = &vimi->vecmap[v_idx];
435 
436 		vecmap->vsi_id = adapter->vsi_res->vsi_id;
437 		vecmap->vector_id = v_idx + NONQ_VECS;
438 		vecmap->txq_map = q_vector->ring_mask;
439 		vecmap->rxq_map = q_vector->ring_mask;
440 		vecmap->rxitr_idx = IAVF_RX_ITR;
441 		vecmap->txitr_idx = IAVF_TX_ITR;
442 	}
443 	/* Misc vector last - this is only for AdminQ messages */
444 	vecmap = &vimi->vecmap[v_idx];
445 	vecmap->vsi_id = adapter->vsi_res->vsi_id;
446 	vecmap->vector_id = 0;
447 	vecmap->txq_map = 0;
448 	vecmap->rxq_map = 0;
449 
450 	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
451 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
452 			 (u8 *)vimi, len);
453 	kfree(vimi);
454 }
455 
456 /**
457  * iavf_add_ether_addrs
458  * @adapter: adapter structure
459  *
460  * Request that the PF add one or more addresses to our filters.
461  **/
462 void iavf_add_ether_addrs(struct iavf_adapter *adapter)
463 {
464 	struct virtchnl_ether_addr_list *veal;
465 	struct iavf_mac_filter *f;
466 	int i = 0, count = 0;
467 	bool more = false;
468 	size_t len;
469 
470 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
471 		/* bail because we already have a command pending */
472 		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
473 			adapter->current_op);
474 		return;
475 	}
476 
477 	spin_lock_bh(&adapter->mac_vlan_list_lock);
478 
479 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
480 		if (f->add)
481 			count++;
482 	}
483 	if (!count) {
484 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
485 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
486 		return;
487 	}
488 	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
489 
490 	len = struct_size(veal, list, count);
491 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
492 		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
493 		count = (IAVF_MAX_AQ_BUF_SIZE -
494 			 sizeof(struct virtchnl_ether_addr_list)) /
495 			sizeof(struct virtchnl_ether_addr);
496 		len = struct_size(veal, list, count);
497 		more = true;
498 	}
499 
500 	veal = kzalloc(len, GFP_ATOMIC);
501 	if (!veal) {
502 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
503 		return;
504 	}
505 
506 	veal->vsi_id = adapter->vsi_res->vsi_id;
507 	veal->num_elements = count;
508 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
509 		if (f->add) {
510 			ether_addr_copy(veal->list[i].addr, f->macaddr);
511 			i++;
512 			f->add = false;
513 			if (i == count)
514 				break;
515 		}
516 	}
517 	if (!more)
518 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
519 
520 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
521 
522 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
523 	kfree(veal);
524 }
525 
526 /**
527  * iavf_del_ether_addrs
528  * @adapter: adapter structure
529  *
530  * Request that the PF remove one or more addresses from our filters.
531  **/
532 void iavf_del_ether_addrs(struct iavf_adapter *adapter)
533 {
534 	struct virtchnl_ether_addr_list *veal;
535 	struct iavf_mac_filter *f, *ftmp;
536 	int i = 0, count = 0;
537 	bool more = false;
538 	size_t len;
539 
540 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
541 		/* bail because we already have a command pending */
542 		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
543 			adapter->current_op);
544 		return;
545 	}
546 
547 	spin_lock_bh(&adapter->mac_vlan_list_lock);
548 
549 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
550 		if (f->remove)
551 			count++;
552 	}
553 	if (!count) {
554 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
555 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
556 		return;
557 	}
558 	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
559 
560 	len = struct_size(veal, list, count);
561 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
562 		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
563 		count = (IAVF_MAX_AQ_BUF_SIZE -
564 			 sizeof(struct virtchnl_ether_addr_list)) /
565 			sizeof(struct virtchnl_ether_addr);
566 		len = struct_size(veal, list, count);
567 		more = true;
568 	}
569 	veal = kzalloc(len, GFP_ATOMIC);
570 	if (!veal) {
571 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
572 		return;
573 	}
574 
575 	veal->vsi_id = adapter->vsi_res->vsi_id;
576 	veal->num_elements = count;
577 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
578 		if (f->remove) {
579 			ether_addr_copy(veal->list[i].addr, f->macaddr);
580 			i++;
581 			list_del(&f->list);
582 			kfree(f);
583 			if (i == count)
584 				break;
585 		}
586 	}
587 	if (!more)
588 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
589 
590 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
591 
592 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
593 	kfree(veal);
594 }
595 
596 /**
597  * iavf_mac_add_ok
598  * @adapter: adapter structure
599  *
600  * Submit list of filters based on PF response.
601  **/
602 static void iavf_mac_add_ok(struct iavf_adapter *adapter)
603 {
604 	struct iavf_mac_filter *f, *ftmp;
605 
606 	spin_lock_bh(&adapter->mac_vlan_list_lock);
607 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
608 		f->is_new_mac = false;
609 	}
610 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
611 }
612 
613 /**
614  * iavf_mac_add_reject
615  * @adapter: adapter structure
616  *
617  * Remove filters from list based on PF response.
618  **/
619 static void iavf_mac_add_reject(struct iavf_adapter *adapter)
620 {
621 	struct net_device *netdev = adapter->netdev;
622 	struct iavf_mac_filter *f, *ftmp;
623 
624 	spin_lock_bh(&adapter->mac_vlan_list_lock);
625 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
626 		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
627 			f->remove = false;
628 
629 		if (f->is_new_mac) {
630 			list_del(&f->list);
631 			kfree(f);
632 		}
633 	}
634 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
635 }
636 
637 /**
638  * iavf_add_vlans
639  * @adapter: adapter structure
640  *
641  * Request that the PF add one or more VLAN filters to our VSI.
642  **/
643 void iavf_add_vlans(struct iavf_adapter *adapter)
644 {
645 	int len, i = 0, count = 0;
646 	struct iavf_vlan_filter *f;
647 	bool more = false;
648 
649 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
650 		/* bail because we already have a command pending */
651 		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
652 			adapter->current_op);
653 		return;
654 	}
655 
656 	spin_lock_bh(&adapter->mac_vlan_list_lock);
657 
658 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
659 		if (f->add)
660 			count++;
661 	}
662 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
663 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
664 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
665 		return;
666 	}
667 
668 	if (VLAN_ALLOWED(adapter)) {
669 		struct virtchnl_vlan_filter_list *vvfl;
670 
671 		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
672 
673 		len = sizeof(*vvfl) + (count * sizeof(u16));
674 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
675 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
676 			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
677 				sizeof(u16);
678 			len = sizeof(*vvfl) + (count * sizeof(u16));
679 			more = true;
680 		}
681 		vvfl = kzalloc(len, GFP_ATOMIC);
682 		if (!vvfl) {
683 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
684 			return;
685 		}
686 
687 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
688 		vvfl->num_elements = count;
689 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
690 			if (f->add) {
691 				vvfl->vlan_id[i] = f->vlan.vid;
692 				i++;
693 				f->add = false;
694 				if (i == count)
695 					break;
696 			}
697 		}
698 		if (!more)
699 			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
700 
701 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
702 
703 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
704 		kfree(vvfl);
705 	} else {
706 		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
707 
708 		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
709 
710 		len = sizeof(*vvfl_v2) + ((count - 1) *
711 					  sizeof(struct virtchnl_vlan_filter));
712 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
713 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
714 			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
715 				sizeof(struct virtchnl_vlan_filter);
716 			len = sizeof(*vvfl_v2) +
717 				((count - 1) *
718 				 sizeof(struct virtchnl_vlan_filter));
719 			more = true;
720 		}
721 
722 		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
723 		if (!vvfl_v2) {
724 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
725 			return;
726 		}
727 
728 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
729 		vvfl_v2->num_elements = count;
730 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
731 			if (f->add) {
732 				struct virtchnl_vlan_supported_caps *filtering_support =
733 					&adapter->vlan_v2_caps.filtering.filtering_support;
734 				struct virtchnl_vlan *vlan;
735 
736 				/* give priority over outer if it's enabled */
737 				if (filtering_support->outer)
738 					vlan = &vvfl_v2->filters[i].outer;
739 				else
740 					vlan = &vvfl_v2->filters[i].inner;
741 
742 				vlan->tci = f->vlan.vid;
743 				vlan->tpid = f->vlan.tpid;
744 
745 				i++;
746 				f->add = false;
747 				if (i == count)
748 					break;
749 			}
750 		}
751 
752 		if (!more)
753 			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
754 
755 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
756 
757 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
758 				 (u8 *)vvfl_v2, len);
759 		kfree(vvfl_v2);
760 	}
761 }
762 
763 /**
764  * iavf_del_vlans
765  * @adapter: adapter structure
766  *
767  * Request that the PF remove one or more VLAN filters from our VSI.
768  **/
769 void iavf_del_vlans(struct iavf_adapter *adapter)
770 {
771 	struct iavf_vlan_filter *f, *ftmp;
772 	int len, i = 0, count = 0;
773 	bool more = false;
774 
775 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
776 		/* bail because we already have a command pending */
777 		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
778 			adapter->current_op);
779 		return;
780 	}
781 
782 	spin_lock_bh(&adapter->mac_vlan_list_lock);
783 
784 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
785 		/* since VLAN capabilities are not allowed, we dont want to send
786 		 * a VLAN delete request because it will most likely fail and
787 		 * create unnecessary errors/noise, so just free the VLAN
788 		 * filters marked for removal to enable bailing out before
789 		 * sending a virtchnl message
790 		 */
791 		if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
792 			list_del(&f->list);
793 			kfree(f);
794 		} else if (f->remove) {
795 			count++;
796 		}
797 	}
798 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
799 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
800 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
801 		return;
802 	}
803 
804 	if (VLAN_ALLOWED(adapter)) {
805 		struct virtchnl_vlan_filter_list *vvfl;
806 
807 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
808 
809 		len = sizeof(*vvfl) + (count * sizeof(u16));
810 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
811 			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
812 			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
813 				sizeof(u16);
814 			len = sizeof(*vvfl) + (count * sizeof(u16));
815 			more = true;
816 		}
817 		vvfl = kzalloc(len, GFP_ATOMIC);
818 		if (!vvfl) {
819 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
820 			return;
821 		}
822 
823 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
824 		vvfl->num_elements = count;
825 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
826 			if (f->remove) {
827 				vvfl->vlan_id[i] = f->vlan.vid;
828 				i++;
829 				list_del(&f->list);
830 				kfree(f);
831 				if (i == count)
832 					break;
833 			}
834 		}
835 
836 		if (!more)
837 			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
838 
839 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
840 
841 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
842 		kfree(vvfl);
843 	} else {
844 		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
845 
846 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
847 
848 		len = sizeof(*vvfl_v2) +
849 			((count - 1) * sizeof(struct virtchnl_vlan_filter));
850 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
851 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
852 			count = (IAVF_MAX_AQ_BUF_SIZE -
853 				 sizeof(*vvfl_v2)) /
854 				sizeof(struct virtchnl_vlan_filter);
855 			len = sizeof(*vvfl_v2) +
856 				((count - 1) *
857 				 sizeof(struct virtchnl_vlan_filter));
858 			more = true;
859 		}
860 
861 		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
862 		if (!vvfl_v2) {
863 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
864 			return;
865 		}
866 
867 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
868 		vvfl_v2->num_elements = count;
869 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
870 			if (f->remove) {
871 				struct virtchnl_vlan_supported_caps *filtering_support =
872 					&adapter->vlan_v2_caps.filtering.filtering_support;
873 				struct virtchnl_vlan *vlan;
874 
875 				/* give priority over outer if it's enabled */
876 				if (filtering_support->outer)
877 					vlan = &vvfl_v2->filters[i].outer;
878 				else
879 					vlan = &vvfl_v2->filters[i].inner;
880 
881 				vlan->tci = f->vlan.vid;
882 				vlan->tpid = f->vlan.tpid;
883 
884 				list_del(&f->list);
885 				kfree(f);
886 				i++;
887 				if (i == count)
888 					break;
889 			}
890 		}
891 
892 		if (!more)
893 			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
894 
895 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
896 
897 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
898 				 (u8 *)vvfl_v2, len);
899 		kfree(vvfl_v2);
900 	}
901 }
902 
903 /**
904  * iavf_set_promiscuous
905  * @adapter: adapter structure
906  * @flags: bitmask to control unicast/multicast promiscuous.
907  *
908  * Request that the PF enable promiscuous mode for our VSI.
909  **/
910 void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
911 {
912 	struct virtchnl_promisc_info vpi;
913 	int promisc_all;
914 
915 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
916 		/* bail because we already have a command pending */
917 		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
918 			adapter->current_op);
919 		return;
920 	}
921 
922 	promisc_all = FLAG_VF_UNICAST_PROMISC |
923 		      FLAG_VF_MULTICAST_PROMISC;
924 	if ((flags & promisc_all) == promisc_all) {
925 		adapter->flags |= IAVF_FLAG_PROMISC_ON;
926 		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
927 		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
928 	}
929 
930 	if (flags & FLAG_VF_MULTICAST_PROMISC) {
931 		adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
932 		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
933 		dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
934 			 adapter->netdev->name);
935 	}
936 
937 	if (!flags) {
938 		if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
939 			adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
940 			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
941 			dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
942 		}
943 
944 		if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
945 			adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
946 			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
947 			dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
948 				 adapter->netdev->name);
949 		}
950 	}
951 
952 	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
953 	vpi.vsi_id = adapter->vsi_res->vsi_id;
954 	vpi.flags = flags;
955 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
956 			 (u8 *)&vpi, sizeof(vpi));
957 }
958 
959 /**
960  * iavf_request_stats
961  * @adapter: adapter structure
962  *
963  * Request VSI statistics from PF.
964  **/
965 void iavf_request_stats(struct iavf_adapter *adapter)
966 {
967 	struct virtchnl_queue_select vqs;
968 
969 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
970 		/* no error message, this isn't crucial */
971 		return;
972 	}
973 
974 	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
975 	adapter->current_op = VIRTCHNL_OP_GET_STATS;
976 	vqs.vsi_id = adapter->vsi_res->vsi_id;
977 	/* queue maps are ignored for this message - only the vsi is used */
978 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
979 			     sizeof(vqs)))
980 		/* if the request failed, don't lock out others */
981 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
982 }
983 
984 /**
985  * iavf_get_hena
986  * @adapter: adapter structure
987  *
988  * Request hash enable capabilities from PF
989  **/
990 void iavf_get_hena(struct iavf_adapter *adapter)
991 {
992 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
993 		/* bail because we already have a command pending */
994 		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
995 			adapter->current_op);
996 		return;
997 	}
998 	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
999 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
1000 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
1001 }
1002 
1003 /**
1004  * iavf_set_hena
1005  * @adapter: adapter structure
1006  *
1007  * Request the PF to set our RSS hash capabilities
1008  **/
1009 void iavf_set_hena(struct iavf_adapter *adapter)
1010 {
1011 	struct virtchnl_rss_hena vrh;
1012 
1013 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1014 		/* bail because we already have a command pending */
1015 		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
1016 			adapter->current_op);
1017 		return;
1018 	}
1019 	vrh.hena = adapter->hena;
1020 	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
1021 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
1022 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
1023 			 sizeof(vrh));
1024 }
1025 
1026 /**
1027  * iavf_set_rss_key
1028  * @adapter: adapter structure
1029  *
1030  * Request the PF to set our RSS hash key
1031  **/
1032 void iavf_set_rss_key(struct iavf_adapter *adapter)
1033 {
1034 	struct virtchnl_rss_key *vrk;
1035 	int len;
1036 
1037 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1038 		/* bail because we already have a command pending */
1039 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
1040 			adapter->current_op);
1041 		return;
1042 	}
1043 	len = sizeof(struct virtchnl_rss_key) +
1044 	      (adapter->rss_key_size * sizeof(u8)) - 1;
1045 	vrk = kzalloc(len, GFP_KERNEL);
1046 	if (!vrk)
1047 		return;
1048 	vrk->vsi_id = adapter->vsi.id;
1049 	vrk->key_len = adapter->rss_key_size;
1050 	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
1051 
1052 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
1053 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
1054 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
1055 	kfree(vrk);
1056 }
1057 
1058 /**
1059  * iavf_set_rss_lut
1060  * @adapter: adapter structure
1061  *
1062  * Request the PF to set our RSS lookup table
1063  **/
1064 void iavf_set_rss_lut(struct iavf_adapter *adapter)
1065 {
1066 	struct virtchnl_rss_lut *vrl;
1067 	int len;
1068 
1069 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1070 		/* bail because we already have a command pending */
1071 		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
1072 			adapter->current_op);
1073 		return;
1074 	}
1075 	len = sizeof(struct virtchnl_rss_lut) +
1076 	      (adapter->rss_lut_size * sizeof(u8)) - 1;
1077 	vrl = kzalloc(len, GFP_KERNEL);
1078 	if (!vrl)
1079 		return;
1080 	vrl->vsi_id = adapter->vsi.id;
1081 	vrl->lut_entries = adapter->rss_lut_size;
1082 	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
1083 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
1084 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
1085 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
1086 	kfree(vrl);
1087 }
1088 
1089 /**
1090  * iavf_enable_vlan_stripping
1091  * @adapter: adapter structure
1092  *
1093  * Request VLAN header stripping to be enabled
1094  **/
1095 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
1096 {
1097 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1098 		/* bail because we already have a command pending */
1099 		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
1100 			adapter->current_op);
1101 		return;
1102 	}
1103 	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1104 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
1105 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
1106 }
1107 
1108 /**
1109  * iavf_disable_vlan_stripping
1110  * @adapter: adapter structure
1111  *
1112  * Request VLAN header stripping to be disabled
1113  **/
1114 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
1115 {
1116 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1117 		/* bail because we already have a command pending */
1118 		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
1119 			adapter->current_op);
1120 		return;
1121 	}
1122 	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1123 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
1124 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
1125 }
1126 
1127 /**
1128  * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
1129  * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1130  */
1131 static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
1132 {
1133 	switch (tpid) {
1134 	case ETH_P_8021Q:
1135 		return VIRTCHNL_VLAN_ETHERTYPE_8100;
1136 	case ETH_P_8021AD:
1137 		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
1138 	}
1139 
1140 	return 0;
1141 }
1142 
1143 /**
1144  * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
1145  * @adapter: adapter structure
1146  * @msg: message structure used for updating offloads over virtchnl to update
1147  * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1148  * @offload_op: opcode used to determine which support structure to check
1149  */
1150 static int
1151 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
1152 			      struct virtchnl_vlan_setting *msg, u16 tpid,
1153 			      enum virtchnl_ops offload_op)
1154 {
1155 	struct virtchnl_vlan_supported_caps *offload_support;
1156 	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
1157 
1158 	/* reference the correct offload support structure */
1159 	switch (offload_op) {
1160 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1161 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1162 		offload_support =
1163 			&adapter->vlan_v2_caps.offloads.stripping_support;
1164 		break;
1165 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1166 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1167 		offload_support =
1168 			&adapter->vlan_v2_caps.offloads.insertion_support;
1169 		break;
1170 	default:
1171 		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
1172 			offload_op);
1173 		return -EINVAL;
1174 	}
1175 
1176 	/* make sure ethertype is supported */
1177 	if (offload_support->outer & vc_ethertype &&
1178 	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
1179 		msg->outer_ethertype_setting = vc_ethertype;
1180 	} else if (offload_support->inner & vc_ethertype &&
1181 		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
1182 		msg->inner_ethertype_setting = vc_ethertype;
1183 	} else {
1184 		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
1185 			offload_op, tpid);
1186 		return -EINVAL;
1187 	}
1188 
1189 	return 0;
1190 }
1191 
/**
 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
 * @adapter: adapter structure
 * @tpid: VLAN TPID
 * @offload_op: opcode used to determine which AQ required bit to clear
 *
 * Each (opcode, TPID) pair has its own aq_required flag; clear exactly the
 * one matching this request so it is not re-issued. TPIDs other than
 * 0x8100/0x88a8 clear nothing; an unknown opcode only logs an error.
 */
static void
iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
				  enum virtchnl_ops offload_op)
{
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		/* CTAG flags are for 802.1Q (0x8100), STAG for 802.1ad (0x88a8) */
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
			offload_op);
	}
}
1240 
1241 /**
1242  * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
1243  * @adapter: adapter structure
1244  * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
1245  * @offload_op: offload_op used to make the request over virtchnl
1246  */
1247 static void
1248 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
1249 			  enum virtchnl_ops offload_op)
1250 {
1251 	struct virtchnl_vlan_setting *msg;
1252 	int len = sizeof(*msg);
1253 
1254 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1255 		/* bail because we already have a command pending */
1256 		dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
1257 			offload_op, adapter->current_op);
1258 		return;
1259 	}
1260 
1261 	adapter->current_op = offload_op;
1262 
1263 	msg = kzalloc(len, GFP_KERNEL);
1264 	if (!msg)
1265 		return;
1266 
1267 	msg->vport_id = adapter->vsi_res->vsi_id;
1268 
1269 	/* always clear to prevent unsupported and endless requests */
1270 	iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
1271 
1272 	/* only send valid offload requests */
1273 	if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
1274 		iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
1275 	else
1276 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1277 
1278 	kfree(msg);
1279 }
1280 
/**
 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to enable VLAN stripping
 *
 * Thin wrapper issuing VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 for @tpid.
 */
void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
}
1291 
/**
 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN stripping
 *
 * Thin wrapper issuing VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 for @tpid.
 */
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
}
1302 
/**
 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to enable VLAN insertion
 *
 * Thin wrapper issuing VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 for @tpid.
 */
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
}
1313 
/**
 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN insertion
 *
 * Thin wrapper issuing VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 for @tpid.
 */
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
1324 
1325 #define IAVF_MAX_SPEED_STRLEN	13
1326 
1327 /**
1328  * iavf_print_link_message - print link up or down
1329  * @adapter: adapter structure
1330  *
1331  * Log a message telling the world of our wonderous link status
1332  */
1333 static void iavf_print_link_message(struct iavf_adapter *adapter)
1334 {
1335 	struct net_device *netdev = adapter->netdev;
1336 	int link_speed_mbps;
1337 	char *speed;
1338 
1339 	if (!adapter->link_up) {
1340 		netdev_info(netdev, "NIC Link is Down\n");
1341 		return;
1342 	}
1343 
1344 	speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
1345 	if (!speed)
1346 		return;
1347 
1348 	if (ADV_LINK_SUPPORT(adapter)) {
1349 		link_speed_mbps = adapter->link_speed_mbps;
1350 		goto print_link_msg;
1351 	}
1352 
1353 	switch (adapter->link_speed) {
1354 	case VIRTCHNL_LINK_SPEED_40GB:
1355 		link_speed_mbps = SPEED_40000;
1356 		break;
1357 	case VIRTCHNL_LINK_SPEED_25GB:
1358 		link_speed_mbps = SPEED_25000;
1359 		break;
1360 	case VIRTCHNL_LINK_SPEED_20GB:
1361 		link_speed_mbps = SPEED_20000;
1362 		break;
1363 	case VIRTCHNL_LINK_SPEED_10GB:
1364 		link_speed_mbps = SPEED_10000;
1365 		break;
1366 	case VIRTCHNL_LINK_SPEED_5GB:
1367 		link_speed_mbps = SPEED_5000;
1368 		break;
1369 	case VIRTCHNL_LINK_SPEED_2_5GB:
1370 		link_speed_mbps = SPEED_2500;
1371 		break;
1372 	case VIRTCHNL_LINK_SPEED_1GB:
1373 		link_speed_mbps = SPEED_1000;
1374 		break;
1375 	case VIRTCHNL_LINK_SPEED_100MB:
1376 		link_speed_mbps = SPEED_100;
1377 		break;
1378 	default:
1379 		link_speed_mbps = SPEED_UNKNOWN;
1380 		break;
1381 	}
1382 
1383 print_link_msg:
1384 	if (link_speed_mbps > SPEED_1000) {
1385 		if (link_speed_mbps == SPEED_2500)
1386 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
1387 		else
1388 			/* convert to Gbps inline */
1389 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
1390 				 link_speed_mbps / 1000, "Gbps");
1391 	} else if (link_speed_mbps == SPEED_UNKNOWN) {
1392 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
1393 	} else {
1394 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
1395 			 link_speed_mbps, "Mbps");
1396 	}
1397 
1398 	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1399 	kfree(speed);
1400 }
1401 
1402 /**
1403  * iavf_get_vpe_link_status
1404  * @adapter: adapter structure
1405  * @vpe: virtchnl_pf_event structure
1406  *
1407  * Helper function for determining the link status
1408  **/
1409 static bool
1410 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1411 			 struct virtchnl_pf_event *vpe)
1412 {
1413 	if (ADV_LINK_SUPPORT(adapter))
1414 		return vpe->event_data.link_event_adv.link_status;
1415 	else
1416 		return vpe->event_data.link_event.link_status;
1417 }
1418 
1419 /**
1420  * iavf_set_adapter_link_speed_from_vpe
1421  * @adapter: adapter structure for which we are setting the link speed
1422  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1423  *
1424  * Helper function for setting iavf_adapter link speed
1425  **/
1426 static void
1427 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1428 				     struct virtchnl_pf_event *vpe)
1429 {
1430 	if (ADV_LINK_SUPPORT(adapter))
1431 		adapter->link_speed_mbps =
1432 			vpe->event_data.link_event_adv.link_speed;
1433 	else
1434 		adapter->link_speed = vpe->event_data.link_event.link_speed;
1435 }
1436 
1437 /**
1438  * iavf_enable_channels
1439  * @adapter: adapter structure
1440  *
1441  * Request that the PF enable channels as specified by
1442  * the user via tc tool.
1443  **/
1444 void iavf_enable_channels(struct iavf_adapter *adapter)
1445 {
1446 	struct virtchnl_tc_info *vti = NULL;
1447 	size_t len;
1448 	int i;
1449 
1450 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1451 		/* bail because we already have a command pending */
1452 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1453 			adapter->current_op);
1454 		return;
1455 	}
1456 
1457 	len = struct_size(vti, list, adapter->num_tc - 1);
1458 	vti = kzalloc(len, GFP_KERNEL);
1459 	if (!vti)
1460 		return;
1461 	vti->num_tc = adapter->num_tc;
1462 	for (i = 0; i < vti->num_tc; i++) {
1463 		vti->list[i].count = adapter->ch_config.ch_info[i].count;
1464 		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1465 		vti->list[i].pad = 0;
1466 		vti->list[i].max_tx_rate =
1467 				adapter->ch_config.ch_info[i].max_tx_rate;
1468 	}
1469 
1470 	adapter->ch_config.state = __IAVF_TC_RUNNING;
1471 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1472 	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1473 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1474 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1475 	kfree(vti);
1476 }
1477 
1478 /**
1479  * iavf_disable_channels
1480  * @adapter: adapter structure
1481  *
1482  * Request that the PF disable channels that are configured
1483  **/
1484 void iavf_disable_channels(struct iavf_adapter *adapter)
1485 {
1486 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1487 		/* bail because we already have a command pending */
1488 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1489 			adapter->current_op);
1490 		return;
1491 	}
1492 
1493 	adapter->ch_config.state = __IAVF_TC_INVALID;
1494 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1495 	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1496 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1497 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1498 }
1499 
/**
 * iavf_print_cloud_filter
 * @adapter: adapter structure
 * @f: cloud filter to print
 *
 * Log the MAC addresses, VLAN id, IP addresses and L4 ports of a TCP/IPv4
 * or TCP/IPv6 cloud filter. Other flow types are silently skipped.
 **/
static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
				    struct virtchnl_filter *f)
{
	switch (f->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		/* vlan_id and ports are stored in network byte order */
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip[0],
			 &f->data.tcp_spec.src_ip[0],
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip,
			 &f->data.tcp_spec.src_ip,
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	}
}
1533 
1534 /**
1535  * iavf_add_cloud_filter
1536  * @adapter: adapter structure
1537  *
1538  * Request that the PF add cloud filters as specified
1539  * by the user via tc tool.
1540  **/
1541 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1542 {
1543 	struct iavf_cloud_filter *cf;
1544 	struct virtchnl_filter *f;
1545 	int len = 0, count = 0;
1546 
1547 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1548 		/* bail because we already have a command pending */
1549 		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1550 			adapter->current_op);
1551 		return;
1552 	}
1553 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1554 		if (cf->add) {
1555 			count++;
1556 			break;
1557 		}
1558 	}
1559 	if (!count) {
1560 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1561 		return;
1562 	}
1563 	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1564 
1565 	len = sizeof(struct virtchnl_filter);
1566 	f = kzalloc(len, GFP_KERNEL);
1567 	if (!f)
1568 		return;
1569 
1570 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1571 		if (cf->add) {
1572 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1573 			cf->add = false;
1574 			cf->state = __IAVF_CF_ADD_PENDING;
1575 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1576 					 (u8 *)f, len);
1577 		}
1578 	}
1579 	kfree(f);
1580 }
1581 
1582 /**
1583  * iavf_del_cloud_filter
1584  * @adapter: adapter structure
1585  *
1586  * Request that the PF delete cloud filters as specified
1587  * by the user via tc tool.
1588  **/
1589 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1590 {
1591 	struct iavf_cloud_filter *cf, *cftmp;
1592 	struct virtchnl_filter *f;
1593 	int len = 0, count = 0;
1594 
1595 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1596 		/* bail because we already have a command pending */
1597 		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1598 			adapter->current_op);
1599 		return;
1600 	}
1601 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1602 		if (cf->del) {
1603 			count++;
1604 			break;
1605 		}
1606 	}
1607 	if (!count) {
1608 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1609 		return;
1610 	}
1611 	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1612 
1613 	len = sizeof(struct virtchnl_filter);
1614 	f = kzalloc(len, GFP_KERNEL);
1615 	if (!f)
1616 		return;
1617 
1618 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1619 		if (cf->del) {
1620 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1621 			cf->del = false;
1622 			cf->state = __IAVF_CF_DEL_PENDING;
1623 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1624 					 (u8 *)f, len);
1625 		}
1626 	}
1627 	kfree(f);
1628 }
1629 
1630 /**
1631  * iavf_add_fdir_filter
1632  * @adapter: the VF adapter structure
1633  *
1634  * Request that the PF add Flow Director filters as specified
1635  * by the user via ethtool.
1636  **/
1637 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1638 {
1639 	struct iavf_fdir_fltr *fdir;
1640 	struct virtchnl_fdir_add *f;
1641 	bool process_fltr = false;
1642 	int len;
1643 
1644 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1645 		/* bail because we already have a command pending */
1646 		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1647 			adapter->current_op);
1648 		return;
1649 	}
1650 
1651 	len = sizeof(struct virtchnl_fdir_add);
1652 	f = kzalloc(len, GFP_KERNEL);
1653 	if (!f)
1654 		return;
1655 
1656 	spin_lock_bh(&adapter->fdir_fltr_lock);
1657 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1658 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1659 			process_fltr = true;
1660 			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
1661 			memcpy(f, &fdir->vc_add_msg, len);
1662 			break;
1663 		}
1664 	}
1665 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1666 
1667 	if (!process_fltr) {
1668 		/* prevent iavf_add_fdir_filter() from being called when there
1669 		 * are no filters to add
1670 		 */
1671 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1672 		kfree(f);
1673 		return;
1674 	}
1675 	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
1676 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
1677 	kfree(f);
1678 }
1679 
1680 /**
1681  * iavf_del_fdir_filter
1682  * @adapter: the VF adapter structure
1683  *
1684  * Request that the PF delete Flow Director filters as specified
1685  * by the user via ethtool.
1686  **/
1687 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
1688 {
1689 	struct iavf_fdir_fltr *fdir;
1690 	struct virtchnl_fdir_del f;
1691 	bool process_fltr = false;
1692 	int len;
1693 
1694 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1695 		/* bail because we already have a command pending */
1696 		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
1697 			adapter->current_op);
1698 		return;
1699 	}
1700 
1701 	len = sizeof(struct virtchnl_fdir_del);
1702 
1703 	spin_lock_bh(&adapter->fdir_fltr_lock);
1704 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1705 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
1706 			process_fltr = true;
1707 			memset(&f, 0, len);
1708 			f.vsi_id = fdir->vc_add_msg.vsi_id;
1709 			f.flow_id = fdir->flow_id;
1710 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
1711 			break;
1712 		}
1713 	}
1714 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1715 
1716 	if (!process_fltr) {
1717 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1718 		return;
1719 	}
1720 
1721 	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
1722 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
1723 }
1724 
1725 /**
1726  * iavf_add_adv_rss_cfg
1727  * @adapter: the VF adapter structure
1728  *
1729  * Request that the PF add RSS configuration as specified
1730  * by the user via ethtool.
1731  **/
1732 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
1733 {
1734 	struct virtchnl_rss_cfg *rss_cfg;
1735 	struct iavf_adv_rss *rss;
1736 	bool process_rss = false;
1737 	int len;
1738 
1739 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1740 		/* bail because we already have a command pending */
1741 		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
1742 			adapter->current_op);
1743 		return;
1744 	}
1745 
1746 	len = sizeof(struct virtchnl_rss_cfg);
1747 	rss_cfg = kzalloc(len, GFP_KERNEL);
1748 	if (!rss_cfg)
1749 		return;
1750 
1751 	spin_lock_bh(&adapter->adv_rss_lock);
1752 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1753 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1754 			process_rss = true;
1755 			rss->state = IAVF_ADV_RSS_ADD_PENDING;
1756 			memcpy(rss_cfg, &rss->cfg_msg, len);
1757 			iavf_print_adv_rss_cfg(adapter, rss,
1758 					       "Input set change for",
1759 					       "is pending");
1760 			break;
1761 		}
1762 	}
1763 	spin_unlock_bh(&adapter->adv_rss_lock);
1764 
1765 	if (process_rss) {
1766 		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
1767 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
1768 				 (u8 *)rss_cfg, len);
1769 	} else {
1770 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1771 	}
1772 
1773 	kfree(rss_cfg);
1774 }
1775 
1776 /**
1777  * iavf_del_adv_rss_cfg
1778  * @adapter: the VF adapter structure
1779  *
1780  * Request that the PF delete RSS configuration as specified
1781  * by the user via ethtool.
1782  **/
1783 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
1784 {
1785 	struct virtchnl_rss_cfg *rss_cfg;
1786 	struct iavf_adv_rss *rss;
1787 	bool process_rss = false;
1788 	int len;
1789 
1790 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1791 		/* bail because we already have a command pending */
1792 		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
1793 			adapter->current_op);
1794 		return;
1795 	}
1796 
1797 	len = sizeof(struct virtchnl_rss_cfg);
1798 	rss_cfg = kzalloc(len, GFP_KERNEL);
1799 	if (!rss_cfg)
1800 		return;
1801 
1802 	spin_lock_bh(&adapter->adv_rss_lock);
1803 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1804 		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
1805 			process_rss = true;
1806 			rss->state = IAVF_ADV_RSS_DEL_PENDING;
1807 			memcpy(rss_cfg, &rss->cfg_msg, len);
1808 			break;
1809 		}
1810 	}
1811 	spin_unlock_bh(&adapter->adv_rss_lock);
1812 
1813 	if (process_rss) {
1814 		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
1815 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
1816 				 (u8 *)rss_cfg, len);
1817 	} else {
1818 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1819 	}
1820 
1821 	kfree(rss_cfg);
1822 }
1823 
/**
 * iavf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void iavf_request_reset(struct iavf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	/* drop any pending op so new commands are not blocked after reset */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
1836 
1837 /**
1838  * iavf_virtchnl_completion
1839  * @adapter: adapter structure
1840  * @v_opcode: opcode sent by PF
1841  * @v_retval: retval sent by PF
1842  * @msg: message sent by PF
1843  * @msglen: message length
1844  *
1845  * Asynchronous completion function for admin queue messages. Rather than busy
1846  * wait, we fire off our requests and assume that no errors will be returned.
1847  * This function handles the reply messages.
1848  **/
1849 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
1850 			      enum virtchnl_ops v_opcode,
1851 			      enum iavf_status v_retval, u8 *msg, u16 msglen)
1852 {
1853 	struct net_device *netdev = adapter->netdev;
1854 
1855 	if (v_opcode == VIRTCHNL_OP_EVENT) {
1856 		struct virtchnl_pf_event *vpe =
1857 			(struct virtchnl_pf_event *)msg;
1858 		bool link_up = iavf_get_vpe_link_status(adapter, vpe);
1859 
1860 		switch (vpe->event) {
1861 		case VIRTCHNL_EVENT_LINK_CHANGE:
1862 			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
1863 
1864 			/* we've already got the right link status, bail */
1865 			if (adapter->link_up == link_up)
1866 				break;
1867 
1868 			if (link_up) {
1869 				/* If we get link up message and start queues
1870 				 * before our queues are configured it will
1871 				 * trigger a TX hang. In that case, just ignore
1872 				 * the link status message,we'll get another one
1873 				 * after we enable queues and actually prepared
1874 				 * to send traffic.
1875 				 */
1876 				if (adapter->state != __IAVF_RUNNING)
1877 					break;
1878 
1879 				/* For ADq enabled VF, we reconfigure VSIs and
1880 				 * re-allocate queues. Hence wait till all
1881 				 * queues are enabled.
1882 				 */
1883 				if (adapter->flags &
1884 				    IAVF_FLAG_QUEUES_DISABLED)
1885 					break;
1886 			}
1887 
1888 			adapter->link_up = link_up;
1889 			if (link_up) {
1890 				netif_tx_start_all_queues(netdev);
1891 				netif_carrier_on(netdev);
1892 			} else {
1893 				netif_tx_stop_all_queues(netdev);
1894 				netif_carrier_off(netdev);
1895 			}
1896 			iavf_print_link_message(adapter);
1897 			break;
1898 		case VIRTCHNL_EVENT_RESET_IMPENDING:
1899 			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
1900 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
1901 				adapter->flags |= IAVF_FLAG_RESET_PENDING;
1902 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1903 				queue_work(iavf_wq, &adapter->reset_task);
1904 			}
1905 			break;
1906 		default:
1907 			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
1908 				vpe->event);
1909 			break;
1910 		}
1911 		return;
1912 	}
1913 	if (v_retval) {
1914 		switch (v_opcode) {
1915 		case VIRTCHNL_OP_ADD_VLAN:
1916 			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
1917 				iavf_stat_str(&adapter->hw, v_retval));
1918 			break;
1919 		case VIRTCHNL_OP_ADD_ETH_ADDR:
1920 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
1921 				iavf_stat_str(&adapter->hw, v_retval));
1922 			iavf_mac_add_reject(adapter);
1923 			/* restore administratively set MAC address */
1924 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1925 			break;
1926 		case VIRTCHNL_OP_DEL_VLAN:
1927 			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
1928 				iavf_stat_str(&adapter->hw, v_retval));
1929 			break;
		/* Error path: these cases run when the PF rejected our request
		 * (this switch sits inside the v_retval error branch, whose
		 * opening is above this excerpt).  Each case logs the failure
		 * and rolls back the driver state that was set when the
		 * request was posted.
		 */
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ENABLE_CHANNELS:
			/* TC setup failed: invalidate the mqprio config, strip
			 * the TC layout from the netdev and restart Tx queues.
			 */
			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_INVALID;
			netdev_reset_tc(netdev);
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_DISABLE_CHANNELS:
			/* Teardown failed: the channels are still live on the
			 * PF side, so keep reporting the TC state as running.
			 */
			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_RUNNING;
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf, *cftmp;

			/* Drop every filter whose add was pending on this
			 * reply.  NOTE(review): cloud_filter_list is walked
			 * without a dedicated lock here (unlike the spinlock-
			 * protected fdir/rss lists below) -- presumably
			 * serialized at a higher level; confirm against the
			 * full file.
			 */
			list_for_each_entry_safe(cf, cftmp,
						 &adapter->cloud_filter_list,
						 list) {
				if (cf->state == __IAVF_CF_ADD_PENDING) {
					cf->state = __IAVF_CF_INVALID;
					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
					list_del(&cf->list);
					kfree(cf);
					adapter->num_cloud_filters--;
				}
			}
			}
			break;
		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf;

			/* Delete failed: the filter is still programmed on the
			 * PF, so flip pending-delete entries back to active.
			 */
			list_for_each_entry(cf, &adapter->cloud_filter_list,
					    list) {
				if (cf->state == __IAVF_CF_DEL_PENDING) {
					cf->state = __IAVF_CF_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
				}
			}
			}
			break;
		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
			struct iavf_fdir_fltr *fdir, *fdir_tmp;

			/* Add failed: remove pending entries from the list and
			 * decrement the active-filter count they reserved.
			 */
			spin_lock_bh(&adapter->fdir_fltr_lock);
			list_for_each_entry_safe(fdir, fdir_tmp,
						 &adapter->fdir_list_head,
						 list) {
				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_fdir_fltr(adapter, fdir);
					/* PF may attach an explanatory string;
					 * NOTE(review): msg is printed with %s,
					 * presumably NUL-terminated by the PF
					 * -- confirm.
					 */
					if (msglen)
						dev_err(&adapter->pdev->dev,
							"%s\n", msg);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				}
			}
			spin_unlock_bh(&adapter->fdir_fltr_lock);
			}
			break;
		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
			struct iavf_fdir_fltr *fdir;

			/* Delete failed: filters are still in the hardware, so
			 * restore pending-delete entries to active.
			 */
			spin_lock_bh(&adapter->fdir_fltr_lock);
			list_for_each_entry(fdir, &adapter->fdir_list_head,
					    list) {
				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_fdir_fltr(adapter, fdir);
				}
			}
			spin_unlock_bh(&adapter->fdir_fltr_lock);
			}
			break;
		case VIRTCHNL_OP_ADD_RSS_CFG: {
			struct iavf_adv_rss *rss, *rss_tmp;

			/* Add failed: discard the pending advanced-RSS configs */
			spin_lock_bh(&adapter->adv_rss_lock);
			list_for_each_entry_safe(rss, rss_tmp,
						 &adapter->adv_rss_list_head,
						 list) {
				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
					iavf_print_adv_rss_cfg(adapter, rss,
							       "Failed to change the input set for",
							       NULL);
					list_del(&rss->list);
					kfree(rss);
				}
			}
			spin_unlock_bh(&adapter->adv_rss_lock);
			}
			break;
		case VIRTCHNL_OP_DEL_RSS_CFG: {
			struct iavf_adv_rss *rss;

			/* Delete failed: the config remains programmed, so
			 * flip pending-delete entries back to active.
			 */
			spin_lock_bh(&adapter->adv_rss_lock);
			list_for_each_entry(rss, &adapter->adv_rss_list_head,
					    list) {
				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
					rss->state = IAVF_ADV_RSS_ACTIVE;
					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
						iavf_stat_str(&adapter->hw,
							      v_retval));
				}
			}
			spin_unlock_bh(&adapter->adv_rss_lock);
			}
			break;
		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
			break;
		default:
			/* Any other rejected opcode: log it, no rollback needed */
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval, iavf_stat_str(&adapter->hw, v_retval),
				v_opcode);
		}
	}
	/* Common path: process the reply payload (runs for both success and,
	 * after the rollback above, failure).
	 */
	switch (v_opcode) {
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		if (!v_retval)
			iavf_mac_add_ok(adapter);
		/* Keep the netdev address in sync with what the PF accepted */
		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		break;
	case VIRTCHNL_OP_GET_STATS: {
		/* Map the PF-reported VSI stats onto the netdev counters */
		struct iavf_eth_stats *stats =
			(struct iavf_eth_stats *)msg;
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		/* Clamp the copy to the smaller of the received length and
		 * the buffer we allocated, so a short or oversized reply
		 * cannot overrun vf_res.
		 */
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  IAVF_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
		memcpy(adapter->vf_res, msg, min(msglen, len));
		iavf_validate_num_queues(adapter);
		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
			/* restore current mac address */
			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		} else {
			/* refresh current mac address if changed */
			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
			ether_addr_copy(netdev->perm_addr,
					adapter->hw.mac.addr);
		}
		spin_lock_bh(&adapter->mac_vlan_list_lock);
		iavf_add_filter(adapter, adapter->hw.mac.addr);

		if (VLAN_ALLOWED(adapter)) {
			if (!list_empty(&adapter->vlan_filter_list)) {
				struct iavf_vlan_filter *vlf;

				/* re-add all VLAN filters over virtchnl */
				list_for_each_entry(vlf,
						    &adapter->vlan_filter_list,
						    list)
					vlf->add = true;

				adapter->aq_required |=
					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
			}
		}

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_parse_vf_resource_msg(adapter);

		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
		 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
		 * configuration
		 */
		if (VLAN_V2_ALLOWED(adapter))
			break;
		/* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
		 * wasn't successfully negotiated with the PF
		 */
		}
		fallthrough;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
		/* The opcode check is needed because the GET_VF_RESOURCES case
		 * above falls through to here with no caps payload to copy.
		 */
		if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
			memcpy(&adapter->vlan_v2_caps, msg,
			       min_t(u16, msglen,
				     sizeof(adapter->vlan_v2_caps)));

		iavf_process_config(adapter);

		/* unlock crit_lock before acquiring rtnl_lock as other
		 * processes holding rtnl_lock could be waiting for the same
		 * crit_lock
		 */
		mutex_unlock(&adapter->crit_lock);
		/* VLAN capabilities can change during VFR, so make sure to
		 * update the netdev features with the new capabilities
		 */
		rtnl_lock();
		netdev_update_features(netdev);
		rtnl_unlock();
		/* NOTE(review): kernel style prefers __func__ over the GNU
		 * __FUNCTION__ extension.
		 */
		if (iavf_lock_timeout(&adapter->crit_lock, 10000))
			dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
				 __FUNCTION__);

		/* Request VLAN offload settings */
		if (VLAN_V2_ALLOWED(adapter))
			iavf_set_vlan_offload_features(adapter, 0,
						       netdev->features);

		iavf_set_queue_vlan_tag_loc(adapter);

		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		iavf_irq_enable(adapter, true);
		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		/* Queues are down: release the rings and, if we were waiting
		 * for this to finish going down, complete the transition and
		 * wake anyone blocked on it.
		 */
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
		if (adapter->state == __IAVF_DOWN_PENDING) {
			iavf_change_state(adapter, __IAVF_DOWN);
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		/* Out-of-sequence reply: bail here WITHOUT clearing
		 * current_op below (the in-flight request is still pending).
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Gobble zero-length replies from the PF. They indicate that
		 * a previous message was received OK, and the client doesn't
		 * care about that.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			iavf_notify_client_message(&adapter->vsi, msg, msglen);
		break;

	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;

		/* Reject a payload whose size doesn't match the expected struct */
		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
		}
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES: {
		struct virtchnl_vf_res_request *vfres =
			(struct virtchnl_vf_res_request *)msg;

		/* PF granted fewer/more queues than requested: drop the
		 * request so we don't loop, and skip the ITR reinit.
		 */
		if (vfres->num_queue_pairs != adapter->num_req_queues) {
			dev_info(&adapter->pdev->dev,
				 "Requested %d queues, PF can support %d\n",
				 adapter->num_req_queues,
				 vfres->num_queue_pairs);
			adapter->num_req_queues = 0;
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}
		}
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf;

		/* Success: promote pending adds to active */
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __IAVF_CF_ADD_PENDING)
				cf->state = __IAVF_CF_ACTIVE;
		}
		}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf, *cftmp;

		/* Success: free the filters that were pending deletion */
		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __IAVF_CF_DEL_PENDING) {
				cf->state = __IAVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
		}
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		/* Per-filter status from the PF: activate on success (saving
		 * the PF-assigned flow_id for later deletion), otherwise drop
		 * the entry and release its slot.
		 */
		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp,
					 &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
						 fdir->loc);
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					fdir->flow_id = add_fltr->flow_id;
				} else {
					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
						 add_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		/* Per-filter status: free the entry on success, otherwise the
		 * filter is still in hardware, so restore it to active.
		 */
		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
						 fdir->loc);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				} else {
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
						 del_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG: {
		struct iavf_adv_rss *rss;

		/* Success: promote pending advanced-RSS configs to active */
		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
				iavf_print_adv_rss_cfg(adapter, rss,
						       "Input set change for",
						       "successful");
				rss->state = IAVF_ADV_RSS_ACTIVE;
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_RSS_CFG: {
		struct iavf_adv_rss *rss, *rss_tmp;

		/* Success: free the configs that were pending deletion */
		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry_safe(rss, rss_tmp,
					 &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
				list_del(&rss->list);
				kfree(rss);
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	default:
		/* A reply that matches no case and isn't the op we sent --
		 * warn, since the PF answered something we didn't ask for.
		 */
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	/* Request/response cycle complete; ready for the next AQ request */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
2345