xref: /openbmc/linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c (revision 8b0adbe3e38dbe5aae9edf6f5159ffdca7cfbdf1)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 
8 /* busy wait delay in msec */
9 #define IAVF_BUSY_WAIT_DELAY 10
10 #define IAVF_BUSY_WAIT_COUNT 50
11 
12 /**
13  * iavf_send_pf_msg
14  * @adapter: adapter structure
15  * @op: virtual channel opcode
16  * @msg: pointer to message buffer
17  * @len: message length
18  *
19  * Send message to PF and print status if failure.
20  **/
21 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
22 			    enum virtchnl_ops op, u8 *msg, u16 len)
23 {
24 	struct iavf_hw *hw = &adapter->hw;
25 	enum iavf_status err;
26 
27 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
28 		return 0; /* nothing to see here, move along */
29 
30 	err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
31 	if (err)
32 		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
33 			op, iavf_stat_str(hw, err),
34 			iavf_aq_str(hw, hw->aq.asq_last_status));
35 	return err;
36 }
37 
38 /**
39  * iavf_send_api_ver
40  * @adapter: adapter structure
41  *
42  * Send API version admin queue message to the PF. The reply is not checked
43  * in this function. Returns 0 if the message was successfully
44  * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
45  **/
46 int iavf_send_api_ver(struct iavf_adapter *adapter)
47 {
48 	struct virtchnl_version_info vvi;
49 
50 	vvi.major = VIRTCHNL_VERSION_MAJOR;
51 	vvi.minor = VIRTCHNL_VERSION_MINOR;
52 
53 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
54 				sizeof(vvi));
55 }
56 
57 /**
58  * iavf_verify_api_ver
59  * @adapter: adapter structure
60  *
61  * Compare API versions with the PF. Must be called after admin queue is
62  * initialized. Returns 0 if API versions match, -EIO if they do not,
63  * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
64  * from the firmware are propagated.
65  **/
66 int iavf_verify_api_ver(struct iavf_adapter *adapter)
67 {
68 	struct virtchnl_version_info *pf_vvi;
69 	struct iavf_hw *hw = &adapter->hw;
70 	struct iavf_arq_event_info event;
71 	enum virtchnl_ops op;
72 	enum iavf_status err;
73 
74 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
75 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
76 	if (!event.msg_buf) {
77 		err = -ENOMEM;
78 		goto out;
79 	}
80 
81 	while (1) {
82 		err = iavf_clean_arq_element(hw, &event, NULL);
83 		/* When the AQ is empty, iavf_clean_arq_element will return
84 		 * nonzero and this loop will terminate.
85 		 */
86 		if (err)
87 			goto out_alloc;
88 		op =
89 		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
90 		if (op == VIRTCHNL_OP_VERSION)
91 			break;
92 	}
93 
94 
95 	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
96 	if (err)
97 		goto out_alloc;
98 
99 	if (op != VIRTCHNL_OP_VERSION) {
100 		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
101 			op);
102 		err = -EIO;
103 		goto out_alloc;
104 	}
105 
106 	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
107 	adapter->pf_version = *pf_vvi;
108 
109 	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
110 	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
111 	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
112 		err = -EIO;
113 
114 out_alloc:
115 	kfree(event.msg_buf);
116 out:
117 	return err;
118 }
119 
120 /**
121  * iavf_send_vf_config_msg
122  * @adapter: adapter structure
123  *
124  * Send VF configuration request admin queue message to the PF. The reply
125  * is not checked in this function. Returns 0 if the message was
126  * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
127  **/
128 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
129 {
130 	u32 caps;
131 
132 	caps = VIRTCHNL_VF_OFFLOAD_L2 |
133 	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
134 	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
135 	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
136 	       VIRTCHNL_VF_OFFLOAD_VLAN |
137 	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
138 	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
139 	       VIRTCHNL_VF_OFFLOAD_ENCAP |
140 	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
141 	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
142 	       VIRTCHNL_VF_OFFLOAD_ADQ |
143 	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
144 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
145 
146 	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
147 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
148 	if (PF_IS_V11(adapter))
149 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
150 					(u8 *)&caps, sizeof(caps));
151 	else
152 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
153 					NULL, 0);
154 }
155 
156 /**
157  * iavf_validate_num_queues
158  * @adapter: adapter structure
159  *
160  * Validate that the number of queues the PF has sent in
161  * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
162  **/
163 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
164 {
165 	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
166 		struct virtchnl_vsi_resource *vsi_res;
167 		int i;
168 
169 		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
170 			 adapter->vf_res->num_queue_pairs,
171 			 IAVF_MAX_REQ_QUEUES);
172 		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
173 			 IAVF_MAX_REQ_QUEUES);
174 		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
175 		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
176 			vsi_res = &adapter->vf_res->vsi_res[i];
177 			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
178 		}
179 	}
180 }
181 
/**
 * iavf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops op;
	enum iavf_status err;
	u16 len;

	/* buffer sized for the resource struct plus the maximum number of
	 * VSI resource entries the PF may report
	 */
	len =  sizeof(struct virtchnl_vf_resource) +
		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	/* poll the ARQ until the GET_VF_RESOURCES reply arrives; replies
	 * for other opcodes are discarded here
	 */
	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = iavf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op =
		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	/* PF's status is carried in cookie_low; the payload is copied out
	 * regardless, with min() guarding against an oversized reply
	 */
	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
235 
236 /**
237  * iavf_configure_queues
238  * @adapter: adapter structure
239  *
240  * Request that the PF set up our (previously allocated) queues.
241  **/
242 void iavf_configure_queues(struct iavf_adapter *adapter)
243 {
244 	struct virtchnl_vsi_queue_config_info *vqci;
245 	struct virtchnl_queue_pair_info *vqpi;
246 	int pairs = adapter->num_active_queues;
247 	int i, max_frame = IAVF_MAX_RXBUFFER;
248 	size_t len;
249 
250 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
251 		/* bail because we already have a command pending */
252 		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
253 			adapter->current_op);
254 		return;
255 	}
256 	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
257 	len = struct_size(vqci, qpair, pairs);
258 	vqci = kzalloc(len, GFP_KERNEL);
259 	if (!vqci)
260 		return;
261 
262 	/* Limit maximum frame size when jumbo frames is not enabled */
263 	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
264 	    (adapter->netdev->mtu <= ETH_DATA_LEN))
265 		max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
266 
267 	vqci->vsi_id = adapter->vsi_res->vsi_id;
268 	vqci->num_queue_pairs = pairs;
269 	vqpi = vqci->qpair;
270 	/* Size check is not needed here - HW max is 16 queue pairs, and we
271 	 * can fit info for 31 of them into the AQ buffer before it overflows.
272 	 */
273 	for (i = 0; i < pairs; i++) {
274 		vqpi->txq.vsi_id = vqci->vsi_id;
275 		vqpi->txq.queue_id = i;
276 		vqpi->txq.ring_len = adapter->tx_rings[i].count;
277 		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
278 		vqpi->rxq.vsi_id = vqci->vsi_id;
279 		vqpi->rxq.queue_id = i;
280 		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
281 		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
282 		vqpi->rxq.max_pkt_size = max_frame;
283 		vqpi->rxq.databuffer_size =
284 			ALIGN(adapter->rx_rings[i].rx_buf_len,
285 			      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
286 		vqpi++;
287 	}
288 
289 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
290 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
291 			 (u8 *)vqci, len);
292 	kfree(vqci);
293 }
294 
295 /**
296  * iavf_enable_queues
297  * @adapter: adapter structure
298  *
299  * Request that the PF enable all of our queues.
300  **/
301 void iavf_enable_queues(struct iavf_adapter *adapter)
302 {
303 	struct virtchnl_queue_select vqs;
304 
305 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
306 		/* bail because we already have a command pending */
307 		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
308 			adapter->current_op);
309 		return;
310 	}
311 	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
312 	vqs.vsi_id = adapter->vsi_res->vsi_id;
313 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
314 	vqs.rx_queues = vqs.tx_queues;
315 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
316 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
317 			 (u8 *)&vqs, sizeof(vqs));
318 }
319 
320 /**
321  * iavf_disable_queues
322  * @adapter: adapter structure
323  *
324  * Request that the PF disable all of our queues.
325  **/
326 void iavf_disable_queues(struct iavf_adapter *adapter)
327 {
328 	struct virtchnl_queue_select vqs;
329 
330 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
331 		/* bail because we already have a command pending */
332 		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
333 			adapter->current_op);
334 		return;
335 	}
336 	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
337 	vqs.vsi_id = adapter->vsi_res->vsi_id;
338 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
339 	vqs.rx_queues = vqs.tx_queues;
340 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
341 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
342 			 (u8 *)&vqs, sizeof(vqs));
343 }
344 
345 /**
346  * iavf_map_queues
347  * @adapter: adapter structure
348  *
349  * Request that the PF map queues to interrupt vectors. Misc causes, including
350  * admin queue, are always mapped to vector 0.
351  **/
352 void iavf_map_queues(struct iavf_adapter *adapter)
353 {
354 	struct virtchnl_irq_map_info *vimi;
355 	struct virtchnl_vector_map *vecmap;
356 	struct iavf_q_vector *q_vector;
357 	int v_idx, q_vectors;
358 	size_t len;
359 
360 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
361 		/* bail because we already have a command pending */
362 		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
363 			adapter->current_op);
364 		return;
365 	}
366 	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
367 
368 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
369 
370 	len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
371 	vimi = kzalloc(len, GFP_KERNEL);
372 	if (!vimi)
373 		return;
374 
375 	vimi->num_vectors = adapter->num_msix_vectors;
376 	/* Queue vectors first */
377 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
378 		q_vector = &adapter->q_vectors[v_idx];
379 		vecmap = &vimi->vecmap[v_idx];
380 
381 		vecmap->vsi_id = adapter->vsi_res->vsi_id;
382 		vecmap->vector_id = v_idx + NONQ_VECS;
383 		vecmap->txq_map = q_vector->ring_mask;
384 		vecmap->rxq_map = q_vector->ring_mask;
385 		vecmap->rxitr_idx = IAVF_RX_ITR;
386 		vecmap->txitr_idx = IAVF_TX_ITR;
387 	}
388 	/* Misc vector last - this is only for AdminQ messages */
389 	vecmap = &vimi->vecmap[v_idx];
390 	vecmap->vsi_id = adapter->vsi_res->vsi_id;
391 	vecmap->vector_id = 0;
392 	vecmap->txq_map = 0;
393 	vecmap->rxq_map = 0;
394 
395 	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
396 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
397 			 (u8 *)vimi, len);
398 	kfree(vimi);
399 }
400 
/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;	/* i: output index, count: filters to send */
	bool more = false;	/* true when not all pending adds fit in one msg */
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count filters flagged for addition */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		/* nothing pending; clear the request flag and bail */
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* message would overflow the AQ buffer: trim to what fits
		 * and leave IAVF_FLAG_AQ_ADD_MAC_FILTER set ("more") so the
		 * remainder is sent on a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}

	/* GFP_ATOMIC because the mac_vlan_list_lock (BH) is held */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		/* NOTE(review): current_op stays VIRTCHNL_OP_ADD_ETH_ADDR on
		 * this failure path — confirm a later reset/watchdog clears it.
		 */
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: copy up to count addresses, clearing each add flag */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
470 
/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;	/* i: output index, count: filters to send */
	bool more = false;	/* true when not all removals fit in one msg */
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count filters flagged for removal */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		/* nothing pending; clear the request flag and bail */
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* message would overflow the AQ buffer: trim to what fits
		 * and leave IAVF_FLAG_AQ_DEL_MAC_FILTER set ("more") so the
		 * remainder is sent on a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}
	/* GFP_ATOMIC because the mac_vlan_list_lock (BH) is held */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		/* NOTE(review): current_op stays VIRTCHNL_OP_DEL_ETH_ADDR on
		 * this failure path — confirm a later reset/watchdog clears it.
		 */
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: copy addresses, then unlink and free each entry */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
540 
541 /**
542  * iavf_add_vlans
543  * @adapter: adapter structure
544  *
545  * Request that the PF add one or more VLAN filters to our VSI.
546  **/
547 void iavf_add_vlans(struct iavf_adapter *adapter)
548 {
549 	struct virtchnl_vlan_filter_list *vvfl;
550 	int len, i = 0, count = 0;
551 	struct iavf_vlan_filter *f;
552 	bool more = false;
553 
554 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
555 		/* bail because we already have a command pending */
556 		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
557 			adapter->current_op);
558 		return;
559 	}
560 
561 	spin_lock_bh(&adapter->mac_vlan_list_lock);
562 
563 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
564 		if (f->add)
565 			count++;
566 	}
567 	if (!count) {
568 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
569 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
570 		return;
571 	}
572 	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
573 
574 	len = sizeof(struct virtchnl_vlan_filter_list) +
575 	      (count * sizeof(u16));
576 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
577 		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
578 		count = (IAVF_MAX_AQ_BUF_SIZE -
579 			 sizeof(struct virtchnl_vlan_filter_list)) /
580 			sizeof(u16);
581 		len = sizeof(struct virtchnl_vlan_filter_list) +
582 		      (count * sizeof(u16));
583 		more = true;
584 	}
585 	vvfl = kzalloc(len, GFP_ATOMIC);
586 	if (!vvfl) {
587 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
588 		return;
589 	}
590 
591 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
592 	vvfl->num_elements = count;
593 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
594 		if (f->add) {
595 			vvfl->vlan_id[i] = f->vlan;
596 			i++;
597 			f->add = false;
598 			if (i == count)
599 				break;
600 		}
601 	}
602 	if (!more)
603 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
604 
605 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
606 
607 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
608 	kfree(vvfl);
609 }
610 
611 /**
612  * iavf_del_vlans
613  * @adapter: adapter structure
614  *
615  * Request that the PF remove one or more VLAN filters from our VSI.
616  **/
617 void iavf_del_vlans(struct iavf_adapter *adapter)
618 {
619 	struct virtchnl_vlan_filter_list *vvfl;
620 	struct iavf_vlan_filter *f, *ftmp;
621 	int len, i = 0, count = 0;
622 	bool more = false;
623 
624 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
625 		/* bail because we already have a command pending */
626 		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
627 			adapter->current_op);
628 		return;
629 	}
630 
631 	spin_lock_bh(&adapter->mac_vlan_list_lock);
632 
633 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
634 		if (f->remove)
635 			count++;
636 	}
637 	if (!count) {
638 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
639 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
640 		return;
641 	}
642 	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
643 
644 	len = sizeof(struct virtchnl_vlan_filter_list) +
645 	      (count * sizeof(u16));
646 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
647 		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
648 		count = (IAVF_MAX_AQ_BUF_SIZE -
649 			 sizeof(struct virtchnl_vlan_filter_list)) /
650 			sizeof(u16);
651 		len = sizeof(struct virtchnl_vlan_filter_list) +
652 		      (count * sizeof(u16));
653 		more = true;
654 	}
655 	vvfl = kzalloc(len, GFP_ATOMIC);
656 	if (!vvfl) {
657 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
658 		return;
659 	}
660 
661 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
662 	vvfl->num_elements = count;
663 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
664 		if (f->remove) {
665 			vvfl->vlan_id[i] = f->vlan;
666 			i++;
667 			list_del(&f->list);
668 			kfree(f);
669 			if (i == count)
670 				break;
671 		}
672 	}
673 	if (!more)
674 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
675 
676 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
677 
678 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
679 	kfree(vvfl);
680 }
681 
/**
 * iavf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous.
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
{
	struct virtchnl_promisc_info vpi;
	int promisc_all;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* full promiscuous means both the unicast and multicast bits set */
	promisc_all = FLAG_VF_UNICAST_PROMISC |
		      FLAG_VF_MULTICAST_PROMISC;
	if ((flags & promisc_all) == promisc_all) {
		adapter->flags |= IAVF_FLAG_PROMISC_ON;
		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	if (flags & FLAG_VF_MULTICAST_PROMISC) {
		adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
	}

	/* flags == 0 drops both promiscuous and allmulti state */
	if (!flags) {
		adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
				    IAVF_FLAG_ALLMULTI_ON);
		adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
					  IAVF_FLAG_AQ_RELEASE_ALLMULTI);
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	}

	/* the flags bitmask is forwarded to the PF unchanged */
	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			 (u8 *)&vpi, sizeof(vpi));
}
729 
730 /**
731  * iavf_request_stats
732  * @adapter: adapter structure
733  *
734  * Request VSI statistics from PF.
735  **/
736 void iavf_request_stats(struct iavf_adapter *adapter)
737 {
738 	struct virtchnl_queue_select vqs;
739 
740 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
741 		/* no error message, this isn't crucial */
742 		return;
743 	}
744 	adapter->current_op = VIRTCHNL_OP_GET_STATS;
745 	vqs.vsi_id = adapter->vsi_res->vsi_id;
746 	/* queue maps are ignored for this message - only the vsi is used */
747 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
748 			     sizeof(vqs)))
749 		/* if the request failed, don't lock out others */
750 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
751 }
752 
753 /**
754  * iavf_get_hena
755  * @adapter: adapter structure
756  *
757  * Request hash enable capabilities from PF
758  **/
759 void iavf_get_hena(struct iavf_adapter *adapter)
760 {
761 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
762 		/* bail because we already have a command pending */
763 		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
764 			adapter->current_op);
765 		return;
766 	}
767 	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
768 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
769 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
770 }
771 
772 /**
773  * iavf_set_hena
774  * @adapter: adapter structure
775  *
776  * Request the PF to set our RSS hash capabilities
777  **/
778 void iavf_set_hena(struct iavf_adapter *adapter)
779 {
780 	struct virtchnl_rss_hena vrh;
781 
782 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
783 		/* bail because we already have a command pending */
784 		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
785 			adapter->current_op);
786 		return;
787 	}
788 	vrh.hena = adapter->hena;
789 	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
790 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
791 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
792 			 sizeof(vrh));
793 }
794 
795 /**
796  * iavf_set_rss_key
797  * @adapter: adapter structure
798  *
799  * Request the PF to set our RSS hash key
800  **/
801 void iavf_set_rss_key(struct iavf_adapter *adapter)
802 {
803 	struct virtchnl_rss_key *vrk;
804 	int len;
805 
806 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
807 		/* bail because we already have a command pending */
808 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
809 			adapter->current_op);
810 		return;
811 	}
812 	len = sizeof(struct virtchnl_rss_key) +
813 	      (adapter->rss_key_size * sizeof(u8)) - 1;
814 	vrk = kzalloc(len, GFP_KERNEL);
815 	if (!vrk)
816 		return;
817 	vrk->vsi_id = adapter->vsi.id;
818 	vrk->key_len = adapter->rss_key_size;
819 	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
820 
821 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
822 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
823 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
824 	kfree(vrk);
825 }
826 
827 /**
828  * iavf_set_rss_lut
829  * @adapter: adapter structure
830  *
831  * Request the PF to set our RSS lookup table
832  **/
833 void iavf_set_rss_lut(struct iavf_adapter *adapter)
834 {
835 	struct virtchnl_rss_lut *vrl;
836 	int len;
837 
838 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
839 		/* bail because we already have a command pending */
840 		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
841 			adapter->current_op);
842 		return;
843 	}
844 	len = sizeof(struct virtchnl_rss_lut) +
845 	      (adapter->rss_lut_size * sizeof(u8)) - 1;
846 	vrl = kzalloc(len, GFP_KERNEL);
847 	if (!vrl)
848 		return;
849 	vrl->vsi_id = adapter->vsi.id;
850 	vrl->lut_entries = adapter->rss_lut_size;
851 	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
852 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
853 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
854 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
855 	kfree(vrl);
856 }
857 
858 /**
859  * iavf_enable_vlan_stripping
860  * @adapter: adapter structure
861  *
862  * Request VLAN header stripping to be enabled
863  **/
864 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
865 {
866 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
867 		/* bail because we already have a command pending */
868 		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
869 			adapter->current_op);
870 		return;
871 	}
872 	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
873 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
874 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
875 }
876 
877 /**
878  * iavf_disable_vlan_stripping
879  * @adapter: adapter structure
880  *
881  * Request VLAN header stripping to be disabled
882  **/
883 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
884 {
885 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
886 		/* bail because we already have a command pending */
887 		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
888 			adapter->current_op);
889 		return;
890 	}
891 	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
892 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
893 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
894 }
895 
896 #define IAVF_MAX_SPEED_STRLEN	13
897 
898 /**
899  * iavf_print_link_message - print link up or down
900  * @adapter: adapter structure
901  *
902  * Log a message telling the world of our wonderous link status
903  */
904 static void iavf_print_link_message(struct iavf_adapter *adapter)
905 {
906 	struct net_device *netdev = adapter->netdev;
907 	int link_speed_mbps;
908 	char *speed;
909 
910 	if (!adapter->link_up) {
911 		netdev_info(netdev, "NIC Link is Down\n");
912 		return;
913 	}
914 
915 	speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
916 	if (!speed)
917 		return;
918 
919 	if (ADV_LINK_SUPPORT(adapter)) {
920 		link_speed_mbps = adapter->link_speed_mbps;
921 		goto print_link_msg;
922 	}
923 
924 	switch (adapter->link_speed) {
925 	case VIRTCHNL_LINK_SPEED_40GB:
926 		link_speed_mbps = SPEED_40000;
927 		break;
928 	case VIRTCHNL_LINK_SPEED_25GB:
929 		link_speed_mbps = SPEED_25000;
930 		break;
931 	case VIRTCHNL_LINK_SPEED_20GB:
932 		link_speed_mbps = SPEED_20000;
933 		break;
934 	case VIRTCHNL_LINK_SPEED_10GB:
935 		link_speed_mbps = SPEED_10000;
936 		break;
937 	case VIRTCHNL_LINK_SPEED_5GB:
938 		link_speed_mbps = SPEED_5000;
939 		break;
940 	case VIRTCHNL_LINK_SPEED_2_5GB:
941 		link_speed_mbps = SPEED_2500;
942 		break;
943 	case VIRTCHNL_LINK_SPEED_1GB:
944 		link_speed_mbps = SPEED_1000;
945 		break;
946 	case VIRTCHNL_LINK_SPEED_100MB:
947 		link_speed_mbps = SPEED_100;
948 		break;
949 	default:
950 		link_speed_mbps = SPEED_UNKNOWN;
951 		break;
952 	}
953 
954 print_link_msg:
955 	if (link_speed_mbps > SPEED_1000) {
956 		if (link_speed_mbps == SPEED_2500)
957 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
958 		else
959 			/* convert to Gbps inline */
960 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
961 				 link_speed_mbps / 1000, "Gbps");
962 	} else if (link_speed_mbps == SPEED_UNKNOWN) {
963 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
964 	} else {
965 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
966 			 link_speed_mbps, "Mbps");
967 	}
968 
969 	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
970 	kfree(speed);
971 }
972 
973 /**
974  * iavf_get_vpe_link_status
975  * @adapter: adapter structure
976  * @vpe: virtchnl_pf_event structure
977  *
978  * Helper function for determining the link status
979  **/
980 static bool
981 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
982 			 struct virtchnl_pf_event *vpe)
983 {
984 	if (ADV_LINK_SUPPORT(adapter))
985 		return vpe->event_data.link_event_adv.link_status;
986 	else
987 		return vpe->event_data.link_event.link_status;
988 }
989 
990 /**
991  * iavf_set_adapter_link_speed_from_vpe
992  * @adapter: adapter structure for which we are setting the link speed
993  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
994  *
995  * Helper function for setting iavf_adapter link speed
996  **/
997 static void
998 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
999 				     struct virtchnl_pf_event *vpe)
1000 {
1001 	if (ADV_LINK_SUPPORT(adapter))
1002 		adapter->link_speed_mbps =
1003 			vpe->event_data.link_event_adv.link_speed;
1004 	else
1005 		adapter->link_speed = vpe->event_data.link_event.link_speed;
1006 }
1007 
/**
 * iavf_enable_channels
 * @adapter: adapter structure
 *
 * Request that the PF enable channels as specified by
 * the user via tc tool.
 **/
void iavf_enable_channels(struct iavf_adapter *adapter)
{
	struct virtchnl_tc_info *vti = NULL;
	size_t len;
	int i;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* "num_tc - 1" because virtchnl_tc_info already carries one list
	 * element.  NOTE(review): assumes num_tc >= 1 here — the subtraction
	 * would wrap for 0; confirm callers guarantee at least one TC.
	 */
	len = struct_size(vti, list, adapter->num_tc - 1);
	vti = kzalloc(len, GFP_KERNEL);
	if (!vti)
		return;
	vti->num_tc = adapter->num_tc;
	/* copy the per-TC queue count, offset and rate limit for the PF */
	for (i = 0; i < vti->num_tc; i++) {
		vti->list[i].count = adapter->ch_config.ch_info[i].count;
		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
		vti->list[i].pad = 0;
		vti->list[i].max_tx_rate =
				adapter->ch_config.ch_info[i].max_tx_rate;
	}

	adapter->ch_config.state = __IAVF_TC_RUNNING;
	/* interrupt moderation must be re-tuned for the new queue layout */
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
	kfree(vti);
}
1048 
1049 /**
1050  * iavf_disable_channels
1051  * @adapter: adapter structure
1052  *
1053  * Request that the PF disable channels that are configured
1054  **/
1055 void iavf_disable_channels(struct iavf_adapter *adapter)
1056 {
1057 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1058 		/* bail because we already have a command pending */
1059 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1060 			adapter->current_op);
1061 		return;
1062 	}
1063 
1064 	adapter->ch_config.state = __IAVF_TC_INVALID;
1065 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1066 	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1067 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1068 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1069 }
1070 
1071 /**
1072  * iavf_print_cloud_filter
1073  * @adapter: adapter structure
1074  * @f: cloud filter to print
1075  *
1076  * Print the cloud filter
1077  **/
1078 static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
1079 				    struct virtchnl_filter *f)
1080 {
1081 	switch (f->flow_type) {
1082 	case VIRTCHNL_TCP_V4_FLOW:
1083 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1084 			 &f->data.tcp_spec.dst_mac,
1085 			 &f->data.tcp_spec.src_mac,
1086 			 ntohs(f->data.tcp_spec.vlan_id),
1087 			 &f->data.tcp_spec.dst_ip[0],
1088 			 &f->data.tcp_spec.src_ip[0],
1089 			 ntohs(f->data.tcp_spec.dst_port),
1090 			 ntohs(f->data.tcp_spec.src_port));
1091 		break;
1092 	case VIRTCHNL_TCP_V6_FLOW:
1093 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1094 			 &f->data.tcp_spec.dst_mac,
1095 			 &f->data.tcp_spec.src_mac,
1096 			 ntohs(f->data.tcp_spec.vlan_id),
1097 			 &f->data.tcp_spec.dst_ip,
1098 			 &f->data.tcp_spec.src_ip,
1099 			 ntohs(f->data.tcp_spec.dst_port),
1100 			 ntohs(f->data.tcp_spec.src_port));
1101 		break;
1102 	}
1103 }
1104 
1105 /**
1106  * iavf_add_cloud_filter
1107  * @adapter: adapter structure
1108  *
1109  * Request that the PF add cloud filters as specified
1110  * by the user via tc tool.
1111  **/
1112 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1113 {
1114 	struct iavf_cloud_filter *cf;
1115 	struct virtchnl_filter *f;
1116 	int len = 0, count = 0;
1117 
1118 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1119 		/* bail because we already have a command pending */
1120 		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1121 			adapter->current_op);
1122 		return;
1123 	}
1124 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1125 		if (cf->add) {
1126 			count++;
1127 			break;
1128 		}
1129 	}
1130 	if (!count) {
1131 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1132 		return;
1133 	}
1134 	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1135 
1136 	len = sizeof(struct virtchnl_filter);
1137 	f = kzalloc(len, GFP_KERNEL);
1138 	if (!f)
1139 		return;
1140 
1141 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1142 		if (cf->add) {
1143 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1144 			cf->add = false;
1145 			cf->state = __IAVF_CF_ADD_PENDING;
1146 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1147 					 (u8 *)f, len);
1148 		}
1149 	}
1150 	kfree(f);
1151 }
1152 
1153 /**
1154  * iavf_del_cloud_filter
1155  * @adapter: adapter structure
1156  *
1157  * Request that the PF delete cloud filters as specified
1158  * by the user via tc tool.
1159  **/
1160 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1161 {
1162 	struct iavf_cloud_filter *cf, *cftmp;
1163 	struct virtchnl_filter *f;
1164 	int len = 0, count = 0;
1165 
1166 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1167 		/* bail because we already have a command pending */
1168 		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1169 			adapter->current_op);
1170 		return;
1171 	}
1172 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1173 		if (cf->del) {
1174 			count++;
1175 			break;
1176 		}
1177 	}
1178 	if (!count) {
1179 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1180 		return;
1181 	}
1182 	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1183 
1184 	len = sizeof(struct virtchnl_filter);
1185 	f = kzalloc(len, GFP_KERNEL);
1186 	if (!f)
1187 		return;
1188 
1189 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1190 		if (cf->del) {
1191 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1192 			cf->del = false;
1193 			cf->state = __IAVF_CF_DEL_PENDING;
1194 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1195 					 (u8 *)f, len);
1196 		}
1197 	}
1198 	kfree(f);
1199 }
1200 
1201 /**
1202  * iavf_add_fdir_filter
1203  * @adapter: the VF adapter structure
1204  *
1205  * Request that the PF add Flow Director filters as specified
1206  * by the user via ethtool.
1207  **/
1208 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1209 {
1210 	struct iavf_fdir_fltr *fdir;
1211 	struct virtchnl_fdir_add *f;
1212 	bool process_fltr = false;
1213 	int len;
1214 
1215 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1216 		/* bail because we already have a command pending */
1217 		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1218 			adapter->current_op);
1219 		return;
1220 	}
1221 
1222 	len = sizeof(struct virtchnl_fdir_add);
1223 	f = kzalloc(len, GFP_KERNEL);
1224 	if (!f)
1225 		return;
1226 
1227 	spin_lock_bh(&adapter->fdir_fltr_lock);
1228 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1229 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1230 			process_fltr = true;
1231 			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
1232 			memcpy(f, &fdir->vc_add_msg, len);
1233 			break;
1234 		}
1235 	}
1236 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1237 
1238 	if (!process_fltr) {
1239 		/* prevent iavf_add_fdir_filter() from being called when there
1240 		 * are no filters to add
1241 		 */
1242 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1243 		kfree(f);
1244 		return;
1245 	}
1246 	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
1247 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
1248 	kfree(f);
1249 }
1250 
1251 /**
1252  * iavf_del_fdir_filter
1253  * @adapter: the VF adapter structure
1254  *
1255  * Request that the PF delete Flow Director filters as specified
1256  * by the user via ethtool.
1257  **/
1258 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
1259 {
1260 	struct iavf_fdir_fltr *fdir;
1261 	struct virtchnl_fdir_del f;
1262 	bool process_fltr = false;
1263 	int len;
1264 
1265 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1266 		/* bail because we already have a command pending */
1267 		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
1268 			adapter->current_op);
1269 		return;
1270 	}
1271 
1272 	len = sizeof(struct virtchnl_fdir_del);
1273 
1274 	spin_lock_bh(&adapter->fdir_fltr_lock);
1275 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1276 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
1277 			process_fltr = true;
1278 			memset(&f, 0, len);
1279 			f.vsi_id = fdir->vc_add_msg.vsi_id;
1280 			f.flow_id = fdir->flow_id;
1281 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
1282 			break;
1283 		}
1284 	}
1285 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1286 
1287 	if (!process_fltr) {
1288 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1289 		return;
1290 	}
1291 
1292 	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
1293 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
1294 }
1295 
1296 /**
1297  * iavf_request_reset
1298  * @adapter: adapter structure
1299  *
1300  * Request that the PF reset this VF. No response is expected.
1301  **/
1302 void iavf_request_reset(struct iavf_adapter *adapter)
1303 {
1304 	/* Don't check CURRENT_OP - this is always higher priority */
1305 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
1306 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1307 }
1308 
1309 /**
1310  * iavf_virtchnl_completion
1311  * @adapter: adapter structure
1312  * @v_opcode: opcode sent by PF
1313  * @v_retval: retval sent by PF
1314  * @msg: message sent by PF
1315  * @msglen: message length
1316  *
1317  * Asynchronous completion function for admin queue messages. Rather than busy
1318  * wait, we fire off our requests and assume that no errors will be returned.
1319  * This function handles the reply messages.
1320  **/
1321 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
1322 			      enum virtchnl_ops v_opcode,
1323 			      enum iavf_status v_retval, u8 *msg, u16 msglen)
1324 {
1325 	struct net_device *netdev = adapter->netdev;
1326 
1327 	if (v_opcode == VIRTCHNL_OP_EVENT) {
1328 		struct virtchnl_pf_event *vpe =
1329 			(struct virtchnl_pf_event *)msg;
1330 		bool link_up = iavf_get_vpe_link_status(adapter, vpe);
1331 
1332 		switch (vpe->event) {
1333 		case VIRTCHNL_EVENT_LINK_CHANGE:
1334 			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
1335 
1336 			/* we've already got the right link status, bail */
1337 			if (adapter->link_up == link_up)
1338 				break;
1339 
1340 			if (link_up) {
1341 				/* If we get link up message and start queues
1342 				 * before our queues are configured it will
1343 				 * trigger a TX hang. In that case, just ignore
1344 				 * the link status message,we'll get another one
1345 				 * after we enable queues and actually prepared
1346 				 * to send traffic.
1347 				 */
1348 				if (adapter->state != __IAVF_RUNNING)
1349 					break;
1350 
1351 				/* For ADq enabled VF, we reconfigure VSIs and
1352 				 * re-allocate queues. Hence wait till all
1353 				 * queues are enabled.
1354 				 */
1355 				if (adapter->flags &
1356 				    IAVF_FLAG_QUEUES_DISABLED)
1357 					break;
1358 			}
1359 
1360 			adapter->link_up = link_up;
1361 			if (link_up) {
1362 				netif_tx_start_all_queues(netdev);
1363 				netif_carrier_on(netdev);
1364 			} else {
1365 				netif_tx_stop_all_queues(netdev);
1366 				netif_carrier_off(netdev);
1367 			}
1368 			iavf_print_link_message(adapter);
1369 			break;
1370 		case VIRTCHNL_EVENT_RESET_IMPENDING:
1371 			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
1372 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
1373 				adapter->flags |= IAVF_FLAG_RESET_PENDING;
1374 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1375 				queue_work(iavf_wq, &adapter->reset_task);
1376 			}
1377 			break;
1378 		default:
1379 			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
1380 				vpe->event);
1381 			break;
1382 		}
1383 		return;
1384 	}
1385 	if (v_retval) {
1386 		switch (v_opcode) {
1387 		case VIRTCHNL_OP_ADD_VLAN:
1388 			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
1389 				iavf_stat_str(&adapter->hw, v_retval));
1390 			break;
1391 		case VIRTCHNL_OP_ADD_ETH_ADDR:
1392 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
1393 				iavf_stat_str(&adapter->hw, v_retval));
1394 			/* restore administratively set MAC address */
1395 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1396 			break;
1397 		case VIRTCHNL_OP_DEL_VLAN:
1398 			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
1399 				iavf_stat_str(&adapter->hw, v_retval));
1400 			break;
1401 		case VIRTCHNL_OP_DEL_ETH_ADDR:
1402 			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
1403 				iavf_stat_str(&adapter->hw, v_retval));
1404 			break;
1405 		case VIRTCHNL_OP_ENABLE_CHANNELS:
1406 			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
1407 				iavf_stat_str(&adapter->hw, v_retval));
1408 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1409 			adapter->ch_config.state = __IAVF_TC_INVALID;
1410 			netdev_reset_tc(netdev);
1411 			netif_tx_start_all_queues(netdev);
1412 			break;
1413 		case VIRTCHNL_OP_DISABLE_CHANNELS:
1414 			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
1415 				iavf_stat_str(&adapter->hw, v_retval));
1416 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1417 			adapter->ch_config.state = __IAVF_TC_RUNNING;
1418 			netif_tx_start_all_queues(netdev);
1419 			break;
1420 		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
1421 			struct iavf_cloud_filter *cf, *cftmp;
1422 
1423 			list_for_each_entry_safe(cf, cftmp,
1424 						 &adapter->cloud_filter_list,
1425 						 list) {
1426 				if (cf->state == __IAVF_CF_ADD_PENDING) {
1427 					cf->state = __IAVF_CF_INVALID;
1428 					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
1429 						 iavf_stat_str(&adapter->hw,
1430 							       v_retval));
1431 					iavf_print_cloud_filter(adapter,
1432 								&cf->f);
1433 					list_del(&cf->list);
1434 					kfree(cf);
1435 					adapter->num_cloud_filters--;
1436 				}
1437 			}
1438 			}
1439 			break;
1440 		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
1441 			struct iavf_cloud_filter *cf;
1442 
1443 			list_for_each_entry(cf, &adapter->cloud_filter_list,
1444 					    list) {
1445 				if (cf->state == __IAVF_CF_DEL_PENDING) {
1446 					cf->state = __IAVF_CF_ACTIVE;
1447 					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
1448 						 iavf_stat_str(&adapter->hw,
1449 							       v_retval));
1450 					iavf_print_cloud_filter(adapter,
1451 								&cf->f);
1452 				}
1453 			}
1454 			}
1455 			break;
1456 		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
1457 			struct iavf_fdir_fltr *fdir, *fdir_tmp;
1458 
1459 			spin_lock_bh(&adapter->fdir_fltr_lock);
1460 			list_for_each_entry_safe(fdir, fdir_tmp,
1461 						 &adapter->fdir_list_head,
1462 						 list) {
1463 				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
1464 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
1465 						 iavf_stat_str(&adapter->hw,
1466 							       v_retval));
1467 					iavf_print_fdir_fltr(adapter, fdir);
1468 					if (msglen)
1469 						dev_err(&adapter->pdev->dev,
1470 							"%s\n", msg);
1471 					list_del(&fdir->list);
1472 					kfree(fdir);
1473 					adapter->fdir_active_fltr--;
1474 				}
1475 			}
1476 			spin_unlock_bh(&adapter->fdir_fltr_lock);
1477 			}
1478 			break;
1479 		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
1480 			struct iavf_fdir_fltr *fdir;
1481 
1482 			spin_lock_bh(&adapter->fdir_fltr_lock);
1483 			list_for_each_entry(fdir, &adapter->fdir_list_head,
1484 					    list) {
1485 				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
1486 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
1487 					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
1488 						 iavf_stat_str(&adapter->hw,
1489 							       v_retval));
1490 					iavf_print_fdir_fltr(adapter, fdir);
1491 				}
1492 			}
1493 			spin_unlock_bh(&adapter->fdir_fltr_lock);
1494 			}
1495 			break;
1496 		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1497 		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1498 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
1499 			break;
1500 		default:
1501 			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
1502 				v_retval, iavf_stat_str(&adapter->hw, v_retval),
1503 				v_opcode);
1504 		}
1505 	}
1506 	switch (v_opcode) {
1507 	case VIRTCHNL_OP_ADD_ETH_ADDR: {
1508 		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
1509 			ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
1510 		}
1511 		break;
1512 	case VIRTCHNL_OP_GET_STATS: {
1513 		struct iavf_eth_stats *stats =
1514 			(struct iavf_eth_stats *)msg;
1515 		netdev->stats.rx_packets = stats->rx_unicast +
1516 					   stats->rx_multicast +
1517 					   stats->rx_broadcast;
1518 		netdev->stats.tx_packets = stats->tx_unicast +
1519 					   stats->tx_multicast +
1520 					   stats->tx_broadcast;
1521 		netdev->stats.rx_bytes = stats->rx_bytes;
1522 		netdev->stats.tx_bytes = stats->tx_bytes;
1523 		netdev->stats.tx_errors = stats->tx_errors;
1524 		netdev->stats.rx_dropped = stats->rx_discards;
1525 		netdev->stats.tx_dropped = stats->tx_discards;
1526 		adapter->current_stats = *stats;
1527 		}
1528 		break;
1529 	case VIRTCHNL_OP_GET_VF_RESOURCES: {
1530 		u16 len = sizeof(struct virtchnl_vf_resource) +
1531 			  IAVF_MAX_VF_VSI *
1532 			  sizeof(struct virtchnl_vsi_resource);
1533 		memcpy(adapter->vf_res, msg, min(msglen, len));
1534 		iavf_validate_num_queues(adapter);
1535 		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
1536 		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
1537 			/* restore current mac address */
1538 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1539 		} else {
1540 			/* refresh current mac address if changed */
1541 			ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
1542 			ether_addr_copy(netdev->perm_addr,
1543 					adapter->hw.mac.addr);
1544 		}
1545 		spin_lock_bh(&adapter->mac_vlan_list_lock);
1546 		iavf_add_filter(adapter, adapter->hw.mac.addr);
1547 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
1548 		iavf_process_config(adapter);
1549 		}
1550 		break;
1551 	case VIRTCHNL_OP_ENABLE_QUEUES:
1552 		/* enable transmits */
1553 		iavf_irq_enable(adapter, true);
1554 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
1555 		break;
1556 	case VIRTCHNL_OP_DISABLE_QUEUES:
1557 		iavf_free_all_tx_resources(adapter);
1558 		iavf_free_all_rx_resources(adapter);
1559 		if (adapter->state == __IAVF_DOWN_PENDING) {
1560 			adapter->state = __IAVF_DOWN;
1561 			wake_up(&adapter->down_waitqueue);
1562 		}
1563 		break;
1564 	case VIRTCHNL_OP_VERSION:
1565 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1566 		/* Don't display an error if we get these out of sequence.
1567 		 * If the firmware needed to get kicked, we'll get these and
1568 		 * it's no problem.
1569 		 */
1570 		if (v_opcode != adapter->current_op)
1571 			return;
1572 		break;
1573 	case VIRTCHNL_OP_IWARP:
1574 		/* Gobble zero-length replies from the PF. They indicate that
1575 		 * a previous message was received OK, and the client doesn't
1576 		 * care about that.
1577 		 */
1578 		if (msglen && CLIENT_ENABLED(adapter))
1579 			iavf_notify_client_message(&adapter->vsi, msg, msglen);
1580 		break;
1581 
1582 	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
1583 		adapter->client_pending &=
1584 				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
1585 		break;
1586 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
1587 		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
1588 
1589 		if (msglen == sizeof(*vrh))
1590 			adapter->hena = vrh->hena;
1591 		else
1592 			dev_warn(&adapter->pdev->dev,
1593 				 "Invalid message %d from PF\n", v_opcode);
1594 		}
1595 		break;
1596 	case VIRTCHNL_OP_REQUEST_QUEUES: {
1597 		struct virtchnl_vf_res_request *vfres =
1598 			(struct virtchnl_vf_res_request *)msg;
1599 
1600 		if (vfres->num_queue_pairs != adapter->num_req_queues) {
1601 			dev_info(&adapter->pdev->dev,
1602 				 "Requested %d queues, PF can support %d\n",
1603 				 adapter->num_req_queues,
1604 				 vfres->num_queue_pairs);
1605 			adapter->num_req_queues = 0;
1606 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1607 		}
1608 		}
1609 		break;
1610 	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
1611 		struct iavf_cloud_filter *cf;
1612 
1613 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1614 			if (cf->state == __IAVF_CF_ADD_PENDING)
1615 				cf->state = __IAVF_CF_ACTIVE;
1616 		}
1617 		}
1618 		break;
1619 	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
1620 		struct iavf_cloud_filter *cf, *cftmp;
1621 
1622 		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1623 					 list) {
1624 			if (cf->state == __IAVF_CF_DEL_PENDING) {
1625 				cf->state = __IAVF_CF_INVALID;
1626 				list_del(&cf->list);
1627 				kfree(cf);
1628 				adapter->num_cloud_filters--;
1629 			}
1630 		}
1631 		}
1632 		break;
1633 	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
1634 		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
1635 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
1636 
1637 		spin_lock_bh(&adapter->fdir_fltr_lock);
1638 		list_for_each_entry_safe(fdir, fdir_tmp,
1639 					 &adapter->fdir_list_head,
1640 					 list) {
1641 			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
1642 				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
1643 					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
1644 						 fdir->loc);
1645 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
1646 					fdir->flow_id = add_fltr->flow_id;
1647 				} else {
1648 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
1649 						 add_fltr->status);
1650 					iavf_print_fdir_fltr(adapter, fdir);
1651 					list_del(&fdir->list);
1652 					kfree(fdir);
1653 					adapter->fdir_active_fltr--;
1654 				}
1655 			}
1656 		}
1657 		spin_unlock_bh(&adapter->fdir_fltr_lock);
1658 		}
1659 		break;
1660 	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
1661 		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
1662 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
1663 
1664 		spin_lock_bh(&adapter->fdir_fltr_lock);
1665 		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
1666 					 list) {
1667 			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
1668 				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
1669 					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
1670 						 fdir->loc);
1671 					list_del(&fdir->list);
1672 					kfree(fdir);
1673 					adapter->fdir_active_fltr--;
1674 				} else {
1675 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
1676 					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
1677 						 del_fltr->status);
1678 					iavf_print_fdir_fltr(adapter, fdir);
1679 				}
1680 			}
1681 		}
1682 		spin_unlock_bh(&adapter->fdir_fltr_lock);
1683 		}
1684 		break;
1685 	default:
1686 		if (adapter->current_op && (v_opcode != adapter->current_op))
1687 			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
1688 				 adapter->current_op, v_opcode);
1689 		break;
1690 	} /* switch v_opcode */
1691 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1692 }
1693