1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "i40e.h"
5 
6 /*********************notification routines***********************/
7 
8 /**
9  * i40e_vc_vf_broadcast
10  * @pf: pointer to the PF structure
11  * @v_opcode: operation code
12  * @v_retval: return value
13  * @msg: pointer to the msg buffer
14  * @msglen: msg length
15  *
16  * send a message to all VFs on a given PF
17  **/
18 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
19 				 enum virtchnl_ops v_opcode,
20 				 i40e_status v_retval, u8 *msg,
21 				 u16 msglen)
22 {
23 	struct i40e_hw *hw = &pf->hw;
24 	struct i40e_vf *vf = pf->vf;
25 	int i;
26 
27 	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
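		/* The admin queue addresses a VF by its absolute ID, i.e. the
		 * PF's vf_base_id plus the VF-relative ID.
		 */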
28 		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
29 		/* Not all vfs are enabled so skip the ones that are not */
30 		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
31 		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
32 			continue;
33 
34 		/* Ignore return value on purpose - a given VF may fail, but
35 		 * we need to keep going and send to all of them
36 		 */
37 		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
38 				       msg, msglen, NULL);
39 	}
40 }
41 
42 /**
43  * i40e_vc_link_speed2mbps
44  * @link_speed: the speed to convert
45  *
46  * Convert i40e_aq_link_speed to an integer value of Mbps and return
47  * the speed as a direct value in Mbps.
48  **/
49 static u32
50 i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
51 {
52 	switch (link_speed) {
53 	case I40E_LINK_SPEED_100MB:
54 		return SPEED_100;
55 	case I40E_LINK_SPEED_1GB:
56 		return SPEED_1000;
57 	case I40E_LINK_SPEED_2_5GB:
58 		return SPEED_2500;
59 	case I40E_LINK_SPEED_5GB:
60 		return SPEED_5000;
61 	case I40E_LINK_SPEED_10GB:
62 		return SPEED_10000;
63 	case I40E_LINK_SPEED_20GB:
64 		return SPEED_20000;
65 	case I40E_LINK_SPEED_25GB:
66 		return SPEED_25000;
67 	case I40E_LINK_SPEED_40GB:
68 		return SPEED_40000;
69 	case I40E_LINK_SPEED_UNKNOWN:
70 		return SPEED_UNKNOWN;
71 	}
72 	return SPEED_UNKNOWN;
73 }
74 
75 /**
76  * i40e_set_vf_link_state
77  * @vf: pointer to the VF structure
78  * @pfe: pointer to PF event structure
79  * @ls: pointer to link status structure
80  *
81  * set a link state on a single vf
82  **/
83 static void i40e_set_vf_link_state(struct i40e_vf *vf,
84 				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
85 {
86 	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
87 
88 	if (vf->link_forced)
89 		link_status = vf->link_up;
90 
91 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
92 		pfe->event_data.link_event_adv.link_speed = link_status ?
93 			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
94 		pfe->event_data.link_event_adv.link_status = link_status;
95 	} else {
96 		pfe->event_data.link_event.link_speed = link_status ?
97 			i40e_virtchnl_link_speed(ls->link_speed) : 0;
98 		pfe->event_data.link_event.link_status = link_status;
99 	}
100 }
101 
102 /**
103  * i40e_vc_notify_vf_link_state
104  * @vf: pointer to the VF structure
105  *
106  * send a link status message to a single VF
107  **/
108 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
109 {
110 	struct virtchnl_pf_event pfe;
111 	struct i40e_pf *pf = vf->pf;
112 	struct i40e_hw *hw = &pf->hw;
113 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
114 	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
115 
116 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
117 	pfe.severity = PF_EVENT_SEVERITY_INFO;
118 
119 	i40e_set_vf_link_state(vf, &pfe, ls);
120 
121 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
122 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
123 }
124 
125 /**
126  * i40e_vc_notify_link_state
127  * @pf: pointer to the PF structure
128  *
129  * send a link status message to all VFs on a given PF
130  **/
131 void i40e_vc_notify_link_state(struct i40e_pf *pf)
132 {
133 	int i;
134 
135 	for (i = 0; i < pf->num_alloc_vfs; i++)
136 		i40e_vc_notify_vf_link_state(&pf->vf[i]);
137 }
138 
139 /**
140  * i40e_vc_notify_reset
141  * @pf: pointer to the PF structure
142  *
143  * indicate a pending reset to all VFs on a given PF
144  **/
145 void i40e_vc_notify_reset(struct i40e_pf *pf)
146 {
147 	struct virtchnl_pf_event pfe;
148 
149 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
150 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
151 	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
152 			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
153 }
154 
155 /**
156  * i40e_vc_notify_vf_reset
157  * @vf: pointer to the VF structure
158  *
159  * indicate a pending reset to the given VF
160  **/
161 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
162 {
163 	struct virtchnl_pf_event pfe;
164 	int abs_vf_id;
165 
166 	/* validate the request */
167 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
168 		return;
169 
170 	/* verify the VF is in either the init or active state before proceeding */
171 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
172 	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
173 		return;
174 
175 	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
176 
177 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
178 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
179 	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
180 			       0, (u8 *)&pfe,
181 			       sizeof(struct virtchnl_pf_event), NULL);
182 }
183 /***********************misc routines*****************************/
184 
185 /**
186  * i40e_vc_reset_vf
187  * @vf: pointer to the VF info
188  * @notify_vf: notify vf about reset or not
189  * Reset VF handler.
190  **/
191 static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
192 {
193 	struct i40e_pf *pf = vf->pf;
194 	int i;
195 
196 	if (notify_vf)
197 		i40e_vc_notify_vf_reset(vf);
198 
199 	/* We want to ensure that an actual reset is initiated after this
200 	 * function is called. However, we do not want to wait forever, so
201 	 * we'll give a reasonable time and print a message if we failed to
202 	 * ensure a reset.
203 	 */
204 	for (i = 0; i < 20; i++) {
205 		/* If the PF is releasing its VFs, resetting this VF is
206 		 * impossible, so bail out.
207 		 */
208 		if (test_bit(__I40E_VFS_RELEASING, pf->state))
209 			return;
210 		if (i40e_reset_vf(vf, false))
211 			return;
212 		usleep_range(10000, 20000);
213 	}
214 
215 	if (notify_vf)
216 		dev_warn(&vf->pf->pdev->dev,
217 			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
218 			 vf->vf_id);
219 	else
220 		dev_dbg(&vf->pf->pdev->dev,
221 			"Failed to initiate reset for VF %d after 200 milliseconds\n",
222 			vf->vf_id);
223 }
224 
225 /**
226  * i40e_vc_isvalid_vsi_id
227  * @vf: pointer to the VF info
228  * @vsi_id: VF relative VSI id
229  *
230  * check for the valid VSI id
231  **/
232 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
233 {
234 	struct i40e_pf *pf = vf->pf;
235 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
236 
237 	return (vsi && (vsi->vf_id == vf->vf_id));
238 }
239 
240 /**
241  * i40e_vc_isvalid_queue_id
242  * @vf: pointer to the VF info
243  * @vsi_id: vsi id
244  * @qid: vsi relative queue id
245  *
246  * check for the valid queue id
247  **/
248 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
249 					    u16 qid)
250 {
251 	struct i40e_pf *pf = vf->pf;
252 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
253 
254 	return (vsi && (qid < vsi->alloc_queue_pairs));
255 }
256 
257 /**
258  * i40e_vc_isvalid_vector_id
259  * @vf: pointer to the VF info
260  * @vector_id: VF relative vector id
261  *
262  * check for the valid vector id
263  **/
264 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
265 {
266 	struct i40e_pf *pf = vf->pf;
267 
268 	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
269 }
270 
271 /***********************vf resource mgmt routines*****************/
272 
273 /**
274  * i40e_vc_get_pf_queue_id
275  * @vf: pointer to the VF info
276  * @vsi_id: id of VSI as provided by the FW
277  * @vsi_queue_id: vsi relative queue id
278  *
279  * return PF relative queue id
280  **/
281 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
282 				   u8 vsi_queue_id)
283 {
284 	struct i40e_pf *pf = vf->pf;
285 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
286 	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
287 
288 	if (!vsi)
289 		return pf_queue_id;
290 
291 	if (le16_to_cpu(vsi->info.mapping_flags) &
292 	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
293 		pf_queue_id =
294 			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
295 	else
296 		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
297 			      vsi_queue_id;
298 
299 	return pf_queue_id;
300 }
301 
302 /**
303  * i40e_get_real_pf_qid
304  * @vf: pointer to the VF info
305  * @vsi_id: vsi id
306  * @queue_id: queue number
307  *
308  * wrapper function to get pf_queue_id handling ADq code as well
309  **/
310 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
311 {
312 	int i;
313 
314 	if (vf->adq_enabled) {
315 		/* Although the VF considers all of its queues (1 to 16) as
316 		 * its own, they may actually belong to different VSIs (up to
317 		 * 4). We need to find which queue belongs to which VSI.
318 		 */
319 		for (i = 0; i < vf->num_tc; i++) {
320 			if (queue_id < vf->ch[i].num_qps) {
321 				vsi_id = vf->ch[i].vsi_id;
322 				break;
323 			}
324 			/* find right queue id which is relative to a
325 			 * given VSI.
326 			 */
327 			queue_id -= vf->ch[i].num_qps;
328 		}
329 	}
330 
331 	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
332 }
333 
334 /**
335  * i40e_config_irq_link_list
336  * @vf: pointer to the VF info
337  * @vsi_id: id of VSI as given by the FW
338  * @vecmap: irq map info
339  *
340  * configure irq link list from the map
341  **/
342 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
343 				      struct virtchnl_vector_map *vecmap)
344 {
345 	unsigned long linklistmap = 0, tempmap;
346 	struct i40e_pf *pf = vf->pf;
347 	struct i40e_hw *hw = &pf->hw;
348 	u16 vsi_queue_id, pf_queue_id;
349 	enum i40e_queue_type qtype;
350 	u16 next_q, vector_id, size;
351 	u32 reg, reg_idx;
352 	u16 itr_idx = 0;
353 
354 	vector_id = vecmap->vector_id;
355 	/* setup the head */
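	/* Vector 0 uses the dedicated per-VF LNKLST0 register; the remaining
	 * vectors use the LNKLSTN block, which holds
	 * (num_msix_vectors_vf - 1) entries per VF.
	 */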
356 	if (0 == vector_id)
357 		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
358 	else
359 		reg_idx = I40E_VPINT_LNKLSTN(
360 		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
361 		     (vector_id - 1));
362 
363 	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
364 		/* Special case - No queues mapped on this vector */
365 		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
366 		goto irq_list_done;
367 	}
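	/* Build a combined queue bitmap: each VSI queue takes
	 * I40E_VIRTCHNL_SUPPORTED_QTYPES bit positions, Rx in the even bit
	 * and Tx in the odd bit, so the link list can be walked in queue
	 * order while alternating queue types.
	 */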
368 	tempmap = vecmap->rxq_map;
369 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
370 		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
371 				    vsi_queue_id));
372 	}
373 
374 	tempmap = vecmap->txq_map;
375 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
376 		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
377 				     vsi_queue_id + 1));
378 	}
379 
380 	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
381 	next_q = find_first_bit(&linklistmap, size);
382 	if (unlikely(next_q == size))
383 		goto irq_list_done;
384 
385 	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
386 	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
387 	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
388 	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
389 
390 	wr32(hw, reg_idx, reg);
391 
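	/* Walk the remaining bits of the bitmap, chaining each queue to the
	 * next one found via the NEXTQ fields of its RQCTL/TQCTL register;
	 * the final queue points at I40E_QUEUE_END_OF_LIST.
	 */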
392 	while (next_q < size) {
393 		switch (qtype) {
394 		case I40E_QUEUE_TYPE_RX:
395 			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
396 			itr_idx = vecmap->rxitr_idx;
397 			break;
398 		case I40E_QUEUE_TYPE_TX:
399 			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
400 			itr_idx = vecmap->txitr_idx;
401 			break;
402 		default:
403 			break;
404 		}
405 
406 		next_q = find_next_bit(&linklistmap, size, next_q + 1);
407 		if (next_q < size) {
408 			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
409 			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
410 			pf_queue_id = i40e_get_real_pf_qid(vf,
411 							   vsi_id,
412 							   vsi_queue_id);
413 		} else {
414 			pf_queue_id = I40E_QUEUE_END_OF_LIST;
415 			qtype = 0;
416 		}
417 
418 		/* format for the RQCTL & TQCTL regs is the same */
419 		reg = (vector_id) |
420 		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
421 		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
422 		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
423 		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
424 		wr32(hw, reg_idx, reg);
425 	}
426 
427 	/* If the VF is running in polling mode and using interrupt zero,
428 	 * we need to disable auto-mask when enabling interrupt zero for VFs.
429 	 */
430 	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
431 	    (vector_id == 0)) {
432 		reg = rd32(hw, I40E_GLINT_CTL);
433 		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
434 			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
435 			wr32(hw, I40E_GLINT_CTL, reg);
436 		}
437 	}
438 
439 irq_list_done:
440 	i40e_flush(hw);
441 }
442 
443 /**
444  * i40e_release_iwarp_qvlist
445  * @vf: pointer to the VF.
446  *
447  **/
448 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
449 {
450 	struct i40e_pf *pf = vf->pf;
451 	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
452 	u32 msix_vf;
453 	u32 i;
454 
455 	if (!vf->qvlist_info)
456 		return;
457 
458 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
459 	for (i = 0; i < qvlist_info->num_vectors; i++) {
460 		struct virtchnl_iwarp_qv_info *qv_info;
461 		u32 next_q_index, next_q_type;
462 		struct i40e_hw *hw = &pf->hw;
463 		u32 v_idx, reg_idx, reg;
464 
465 		qv_info = &qvlist_info->qv_info[i];
466 		if (!qv_info)
467 			continue;
468 		v_idx = qv_info->v_idx;
469 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
470 			/* Figure out the queue after CEQ and make that the
471 			 * first queue.
472 			 */
473 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
474 			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
475 			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
476 					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
477 			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
478 					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
479 
480 			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
481 			reg = (next_q_index &
482 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
483 			       (next_q_type <<
484 			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
485 
486 			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
487 		}
488 	}
489 	kfree(vf->qvlist_info);
490 	vf->qvlist_info = NULL;
491 }
492 
493 /**
494  * i40e_config_iwarp_qvlist
495  * @vf: pointer to the VF info
496  * @qvlist_info: queue and vector list
497  *
498  * Return 0 on success or < 0 on error
499  **/
500 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
501 				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
502 {
503 	struct i40e_pf *pf = vf->pf;
504 	struct i40e_hw *hw = &pf->hw;
505 	struct virtchnl_iwarp_qv_info *qv_info;
506 	u32 v_idx, i, reg_idx, reg;
507 	u32 next_q_idx, next_q_type;
508 	u32 msix_vf;
509 	int ret = 0;
510 
511 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
512 
513 	if (qvlist_info->num_vectors > msix_vf) {
514 		dev_warn(&pf->pdev->dev,
515 			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
516 			 qvlist_info->num_vectors,
517 			 msix_vf);
518 		ret = -EINVAL;
519 		goto err_out;
520 	}
521 
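	/* Keep a shadow copy of the qvlist. The virtchnl struct is assumed to
	 * already contain one qv_info element, hence the num_vectors - 1
	 * extra elements passed to struct_size().
	 */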
522 	kfree(vf->qvlist_info);
523 	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
524 					      qvlist_info->num_vectors - 1),
525 				  GFP_KERNEL);
526 	if (!vf->qvlist_info) {
527 		ret = -ENOMEM;
528 		goto err_out;
529 	}
530 	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
531 
532 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
533 	for (i = 0; i < qvlist_info->num_vectors; i++) {
534 		qv_info = &qvlist_info->qv_info[i];
535 		if (!qv_info)
536 			continue;
537 
538 		/* Validate that the vector ID belongs to this VF */
539 		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
540 			ret = -EINVAL;
541 			goto err_free;
542 		}
543 
544 		v_idx = qv_info->v_idx;
545 
546 		vf->qvlist_info->qv_info[i] = *qv_info;
547 
548 		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
549 		/* We might be sharing the interrupt, so get the first queue
550 		 * index and type, push it down the list by adding the new
551 		 * queue on top. Also link it with the new queue in CEQCTL.
552 		 */
553 		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
554 		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
555 				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
556 		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
557 				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
558 
559 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
560 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
561 			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
562 			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
563 			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
564 			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
565 			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
566 			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
567 
568 			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
569 			reg = (qv_info->ceq_idx &
570 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
571 			       (I40E_QUEUE_TYPE_PE_CEQ <<
572 			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
573 			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
574 		}
575 
576 		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
577 			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
578 			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
579 			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
580 
581 			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
582 		}
583 	}
584 
585 	return 0;
586 err_free:
587 	kfree(vf->qvlist_info);
588 	vf->qvlist_info = NULL;
589 err_out:
590 	return ret;
591 }
592 
593 /**
594  * i40e_config_vsi_tx_queue
595  * @vf: pointer to the VF info
596  * @vsi_id: id of VSI as provided by the FW
597  * @vsi_queue_id: vsi relative queue index
598  * @info: config. info
599  *
600  * configure tx queue
601  **/
602 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
603 				    u16 vsi_queue_id,
604 				    struct virtchnl_txq_info *info)
605 {
606 	struct i40e_pf *pf = vf->pf;
607 	struct i40e_hw *hw = &pf->hw;
608 	struct i40e_hmc_obj_txq tx_ctx;
609 	struct i40e_vsi *vsi;
610 	u16 pf_queue_id;
611 	u32 qtx_ctl;
612 	int ret = 0;
613 
614 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
615 		ret = -ENOENT;
616 		goto error_context;
617 	}
618 	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
619 	vsi = i40e_find_vsi_from_id(pf, vsi_id);
620 	if (!vsi) {
621 		ret = -ENOENT;
622 		goto error_context;
623 	}
624 
625 	/* clear the context structure first */
626 	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
627 
628 	/* only set the required fields */
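	/* the HMC context stores the ring base address in 128-byte units */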
629 	tx_ctx.base = info->dma_ring_addr / 128;
630 	tx_ctx.qlen = info->ring_len;
631 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
632 	tx_ctx.rdylist_act = 0;
633 	tx_ctx.head_wb_ena = info->headwb_enabled;
634 	tx_ctx.head_wb_addr = info->dma_headwb_addr;
635 
636 	/* clear the context in the HMC */
637 	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
638 	if (ret) {
639 		dev_err(&pf->pdev->dev,
640 			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
641 			pf_queue_id, ret);
642 		ret = -ENOENT;
643 		goto error_context;
644 	}
645 
646 	/* set the context in the HMC */
647 	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
648 	if (ret) {
649 		dev_err(&pf->pdev->dev,
650 			"Failed to set VF LAN Tx queue context %d error: %d\n",
651 			pf_queue_id, ret);
652 		ret = -ENOENT;
653 		goto error_context;
654 	}
655 
656 	/* associate this queue with the PCI VF function */
657 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
658 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
659 		    & I40E_QTX_CTL_PF_INDX_MASK);
660 	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
661 		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
662 		    & I40E_QTX_CTL_VFVM_INDX_MASK);
663 	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
664 	i40e_flush(hw);
665 
666 error_context:
667 	return ret;
668 }
669 
670 /**
671  * i40e_config_vsi_rx_queue
672  * @vf: pointer to the VF info
673  * @vsi_id: id of VSI  as provided by the FW
674  * @vsi_queue_id: vsi relative queue index
675  * @info: config. info
676  *
677  * configure rx queue
678  **/
679 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
680 				    u16 vsi_queue_id,
681 				    struct virtchnl_rxq_info *info)
682 {
683 	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
684 	struct i40e_pf *pf = vf->pf;
685 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
686 	struct i40e_hw *hw = &pf->hw;
687 	struct i40e_hmc_obj_rxq rx_ctx;
688 	int ret = 0;
689 
690 	/* clear the context structure first */
691 	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
692 
693 	/* only set the required fields */
694 	rx_ctx.base = info->dma_ring_addr / 128;
695 	rx_ctx.qlen = info->ring_len;
696 
697 	if (info->splithdr_enabled) {
698 		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
699 				  I40E_RX_SPLIT_IP      |
700 				  I40E_RX_SPLIT_TCP_UDP |
701 				  I40E_RX_SPLIT_SCTP;
702 		/* header length validation */
703 		if (info->hdr_size > ((2 * 1024) - 64)) {
704 			ret = -EINVAL;
705 			goto error_param;
706 		}
707 		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
708 
709 		/* set split mode 10b */
710 		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
711 	}
712 
713 	/* databuffer length validation */
714 	if (info->databuffer_size > ((16 * 1024) - 128)) {
715 		ret = -EINVAL;
716 		goto error_param;
717 	}
718 	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
719 
720 	/* max pkt. length validation */
721 	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
722 		ret = -EINVAL;
723 		goto error_param;
724 	}
725 	rx_ctx.rxmax = info->max_pkt_size;
726 
727 	/* if port VLAN is configured increase the max packet size */
728 	if (vsi->info.pvid)
729 		rx_ctx.rxmax += VLAN_HLEN;
730 
731 	/* enable 32bytes desc always */
732 	rx_ctx.dsize = 1;
733 
734 	/* default values */
735 	rx_ctx.lrxqthresh = 1;
736 	rx_ctx.crcstrip = 1;
737 	rx_ctx.prefena = 1;
738 	rx_ctx.l2tsel = 1;
739 
740 	/* clear the context in the HMC */
741 	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
742 	if (ret) {
743 		dev_err(&pf->pdev->dev,
744 			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
745 			pf_queue_id, ret);
746 		ret = -ENOENT;
747 		goto error_param;
748 	}
749 
750 	/* set the context in the HMC */
751 	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
752 	if (ret) {
753 		dev_err(&pf->pdev->dev,
754 			"Failed to set VF LAN Rx queue context %d error: %d\n",
755 			pf_queue_id, ret);
756 		ret = -ENOENT;
757 		goto error_param;
758 	}
759 
760 error_param:
761 	return ret;
762 }
763 
764 /**
765  * i40e_alloc_vsi_res
766  * @vf: pointer to the VF info
767  * @idx: VSI index, applies only for ADq mode, zero otherwise
768  *
769  * alloc VF vsi context & resources
770  **/
771 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
772 {
773 	struct i40e_mac_filter *f = NULL;
774 	struct i40e_pf *pf = vf->pf;
775 	struct i40e_vsi *vsi;
776 	u64 max_tx_rate = 0;
777 	int ret = 0;
778 
779 	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
780 			     vf->vf_id);
781 
782 	if (!vsi) {
783 		dev_err(&pf->pdev->dev,
784 			"add vsi failed for VF %d, aq_err %d\n",
785 			vf->vf_id, pf->hw.aq.asq_last_status);
786 		ret = -ENOENT;
787 		goto error_alloc_vsi_res;
788 	}
789 
790 	if (!idx) {
791 		u64 hena = i40e_pf_get_default_rss_hena(pf);
792 		u8 broadcast[ETH_ALEN];
793 
794 		vf->lan_vsi_idx = vsi->idx;
795 		vf->lan_vsi_id = vsi->id;
796 		/* If the port VLAN has been configured and then the
797 		 * VF driver was removed then the VSI port VLAN
798 		 * configuration was destroyed.  Check if there is
799 		 * a port VLAN and restore the VSI configuration if
800 		 * needed.
801 		 */
802 		if (vf->port_vlan_id)
803 			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
804 
805 		spin_lock_bh(&vsi->mac_filter_hash_lock);
806 		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
807 			f = i40e_add_mac_filter(vsi,
808 						vf->default_lan_addr.addr);
809 			if (!f)
810 				dev_info(&pf->pdev->dev,
811 					 "Could not add MAC filter %pM for VF %d\n",
812 					vf->default_lan_addr.addr, vf->vf_id);
813 		}
814 		eth_broadcast_addr(broadcast);
815 		f = i40e_add_mac_filter(vsi, broadcast);
816 		if (!f)
817 			dev_info(&pf->pdev->dev,
818 				 "Could not allocate VF broadcast filter\n");
819 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
820 		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
821 		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
822 		/* program mac filter only for VF VSI */
823 		ret = i40e_sync_vsi_filters(vsi);
824 		if (ret)
825 			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
826 	}
827 
828 	/* for ADq, store the VSI index and ID; don't apply the MAC filter */
829 	if (vf->adq_enabled) {
830 		vf->ch[idx].vsi_idx = vsi->idx;
831 		vf->ch[idx].vsi_id = vsi->id;
832 	}
833 
834 	/* Set VF bandwidth if specified */
835 	if (vf->tx_rate) {
836 		max_tx_rate = vf->tx_rate;
837 	} else if (vf->ch[idx].max_tx_rate) {
838 		max_tx_rate = vf->ch[idx].max_tx_rate;
839 	}
840 
841 	if (max_tx_rate) {
842 		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
843 		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
844 						  max_tx_rate, 0, NULL);
845 		if (ret)
846 			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
847 				vf->vf_id, ret);
848 	}
849 
850 error_alloc_vsi_res:
851 	return ret;
852 }
853 
854 /**
855  * i40e_map_pf_queues_to_vsi
856  * @vf: pointer to the VF info
857  *
858  * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
859  * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
860  **/
861 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
862 {
863 	struct i40e_pf *pf = vf->pf;
864 	struct i40e_hw *hw = &pf->hw;
865 	u32 reg, num_tc = 1; /* VF has at least one traffic class */
866 	u16 vsi_id, qps;
867 	int i, j;
868 
869 	if (vf->adq_enabled)
870 		num_tc = vf->num_tc;
871 
872 	for (i = 0; i < num_tc; i++) {
873 		if (vf->adq_enabled) {
874 			qps = vf->ch[i].num_qps;
875 			vsi_id = vf->ch[i].vsi_id;
876 		} else {
877 			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
878 			vsi_id = vf->lan_vsi_id;
879 		}
880 
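		/* Each VSILAN_QTABLE register carries two PF queue IDs, one in
		 * the low and one in the high 16 bits; 0x7FF marks an unused
		 * (end-of-list) entry.
		 */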
881 		for (j = 0; j < 7; j++) {
882 			if (j * 2 >= qps) {
883 				/* end of list */
884 				reg = 0x07FF07FF;
885 			} else {
886 				u16 qid = i40e_vc_get_pf_queue_id(vf,
887 								  vsi_id,
888 								  j * 2);
889 				reg = qid;
890 				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
891 							      (j * 2) + 1);
892 				reg |= qid << 16;
893 			}
894 			i40e_write_rx_ctl(hw,
895 					  I40E_VSILAN_QTABLE(j, vsi_id),
896 					  reg);
897 		}
898 	}
899 }
900 
901 /**
902  * i40e_map_pf_to_vf_queues
903  * @vf: pointer to the VF info
904  *
905  * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
906  * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
907  **/
908 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
909 {
910 	struct i40e_pf *pf = vf->pf;
911 	struct i40e_hw *hw = &pf->hw;
912 	u32 reg, total_qps = 0;
913 	u32 qps, num_tc = 1; /* VF has at least one traffic class */
914 	u16 vsi_id, qid;
915 	int i, j;
916 
917 	if (vf->adq_enabled)
918 		num_tc = vf->num_tc;
919 
920 	for (i = 0; i < num_tc; i++) {
921 		if (vf->adq_enabled) {
922 			qps = vf->ch[i].num_qps;
923 			vsi_id = vf->ch[i].vsi_id;
924 		} else {
925 			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
926 			vsi_id = vf->lan_vsi_id;
927 		}
928 
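		/* Program one VPLAN_QTABLE entry per VF-relative queue,
		 * numbering the entries consecutively across all traffic
		 * classes so each resolves to the matching PF queue.
		 */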
929 		for (j = 0; j < qps; j++) {
930 			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
931 
932 			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
933 			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
934 			     reg);
935 			total_qps++;
936 		}
937 	}
938 }
939 
940 /**
941  * i40e_enable_vf_mappings
942  * @vf: pointer to the VF info
943  *
944  * enable VF mappings
945  **/
946 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
947 {
948 	struct i40e_pf *pf = vf->pf;
949 	struct i40e_hw *hw = &pf->hw;
950 	u32 reg;
951 
952 	/* Tell the hardware we're using noncontiguous mapping. HW requires
953 	 * that VF queues be mapped using this method, even when they are
954 	 * contiguous in real life
955 	 */
956 	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
957 			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
958 
959 	/* enable VF vplan_qtable mappings */
960 	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
961 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
962 
963 	i40e_map_pf_to_vf_queues(vf);
964 	i40e_map_pf_queues_to_vsi(vf);
965 
966 	i40e_flush(hw);
967 }
968 
969 /**
970  * i40e_disable_vf_mappings
971  * @vf: pointer to the VF info
972  *
973  * disable VF mappings
974  **/
975 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
976 {
977 	struct i40e_pf *pf = vf->pf;
978 	struct i40e_hw *hw = &pf->hw;
979 	int i;
980 
981 	/* disable qp mappings */
982 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
983 	for (i = 0; i < I40E_MAX_VSI_QP; i++)
984 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
985 		     I40E_QUEUE_END_OF_LIST);
986 	i40e_flush(hw);
987 }
988 
989 /**
990  * i40e_free_vf_res
991  * @vf: pointer to the VF info
992  *
993  * free VF resources
994  **/
995 static void i40e_free_vf_res(struct i40e_vf *vf)
996 {
997 	struct i40e_pf *pf = vf->pf;
998 	struct i40e_hw *hw = &pf->hw;
999 	u32 reg_idx, reg;
1000 	int i, j, msix_vf;
1001 
1002 	/* Start by disabling VF's configuration API to prevent the OS from
1003 	 * accessing the VF's VSI after it's freed / invalidated.
1004 	 */
1005 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1006 
1007 	/* It's possible the VF had requested more queues than the default so
1008 	 * do the accounting here when we're about to free them.
1009 	 */
1010 	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
1011 		pf->queues_left += vf->num_queue_pairs -
1012 				   I40E_DEFAULT_QUEUES_PER_VF;
1013 	}
1014 
1015 	/* free vsi & disconnect it from the parent uplink */
1016 	if (vf->lan_vsi_idx) {
1017 		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
1018 		vf->lan_vsi_idx = 0;
1019 		vf->lan_vsi_id = 0;
1020 	}
1021 
1022 	/* do the accounting and remove additional ADq VSI's */
1023 	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
1024 		for (j = 0; j < vf->num_tc; j++) {
1025 			/* At this point VSI0 is already released, so don't
1026 			 * release it again; only clear its values in the
1027 			 * structure variables.
1028 			 */
1029 			if (j)
1030 				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
1031 			vf->ch[j].vsi_idx = 0;
1032 			vf->ch[j].vsi_id = 0;
1033 		}
1034 	}
1035 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
1036 
1037 	/* disable interrupts so the VF starts in a known state */
1038 	for (i = 0; i < msix_vf; i++) {
1039 		/* format is the same for both registers */
1040 		if (0 == i)
1041 			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
1042 		else
1043 			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
1044 						      (vf->vf_id))
1045 						     + (i - 1));
1046 		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1047 		i40e_flush(hw);
1048 	}
1049 
1050 	/* clear the irq settings */
1051 	for (i = 0; i < msix_vf; i++) {
1052 		/* format is the same for both registers */
1053 		if (0 == i)
1054 			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
1055 		else
1056 			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
1057 						      (vf->vf_id))
1058 						     + (i - 1));
1059 		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1060 		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1061 		wr32(hw, reg_idx, reg);
1062 		i40e_flush(hw);
1063 	}
1064 	/* reset some of the state variables keeping track of the resources */
1065 	vf->num_queue_pairs = 0;
1066 	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1067 	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1068 }
1069 
1070 /**
1071  * i40e_alloc_vf_res
1072  * @vf: pointer to the VF info
1073  *
1074  * allocate VF resources
1075  **/
1076 static int i40e_alloc_vf_res(struct i40e_vf *vf)
1077 {
1078 	struct i40e_pf *pf = vf->pf;
1079 	int total_queue_pairs = 0;
1080 	int ret, idx;
1081 
1082 	if (vf->num_req_queues &&
1083 	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1084 		pf->num_vf_qps = vf->num_req_queues;
1085 	else
1086 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1087 
1088 	/* allocate hw vsi context & associated resources */
1089 	ret = i40e_alloc_vsi_res(vf, 0);
1090 	if (ret)
1091 		goto error_alloc;
1092 	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1093 
1094 	/* allocate additional VSIs based on tc information for ADq */
1095 	if (vf->adq_enabled) {
1096 		if (pf->queues_left >=
1097 		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1098 			/* TC 0 always belongs to VF VSI */
1099 			for (idx = 1; idx < vf->num_tc; idx++) {
1100 				ret = i40e_alloc_vsi_res(vf, idx);
1101 				if (ret)
1102 					goto error_alloc;
1103 			}
1104 			/* send correct number of queues */
1105 			total_queue_pairs = I40E_MAX_VF_QUEUES;
1106 		} else {
1107 			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1108 				 vf->vf_id);
1109 			vf->adq_enabled = false;
1110 		}
1111 	}
1112 
1113 	/* We account for each VF to get a default number of queue pairs.  If
1114 	 * the VF has now requested more, we need to account for that to make
1115 	 * certain we never request more queues than we actually have left in
1116 	 * HW.
1117 	 */
1118 	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1119 		pf->queues_left -=
1120 			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1121 
1122 	if (vf->trusted)
1123 		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1124 	else
1125 		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1126 
1127 	/* store the total number of queue pairs for run-time
1128 	 * VF request validation
1129 	 */
1130 	vf->num_queue_pairs = total_queue_pairs;
1131 
1132 	/* VF is now completely initialized */
1133 	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1134 
1135 error_alloc:
1136 	if (ret)
1137 		i40e_free_vf_res(vf);
1138 
1139 	return ret;
1140 }
1141 
1142 #define VF_DEVICE_STATUS 0xAA
1143 #define VF_TRANS_PENDING_MASK 0x20
1144 /**
1145  * i40e_quiesce_vf_pci
1146  * @vf: pointer to the VF structure
1147  *
1148  * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1149  * if the transactions never clear.
1150  **/
1151 static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1152 {
1153 	struct i40e_pf *pf = vf->pf;
1154 	struct i40e_hw *hw = &pf->hw;
1155 	int vf_abs_id, i;
1156 	u32 reg;
1157 
1158 	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1159 
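	/* Point PF_PCI_CIAA at what the macros above identify as the VF
	 * device status register, then poll PF_PCI_CIAD until the
	 * transactions-pending bit clears, giving up after roughly 100us.
	 */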
1160 	wr32(hw, I40E_PF_PCI_CIAA,
1161 	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1162 	for (i = 0; i < 100; i++) {
1163 		reg = rd32(hw, I40E_PF_PCI_CIAD);
1164 		if ((reg & VF_TRANS_PENDING_MASK) == 0)
1165 			return 0;
1166 		udelay(1);
1167 	}
1168 	return -EIO;
1169 }
1170 
1171 /**
1172  * __i40e_getnum_vf_vsi_vlan_filters
1173  * @vsi: pointer to the vsi
1174  *
1175  * called to get the number of VLANs offloaded on this VF
1176  **/
1177 static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1178 {
1179 	struct i40e_mac_filter *f;
1180 	u16 num_vlans = 0, bkt;
1181 
1182 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1183 		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1184 			num_vlans++;
1185 	}
1186 
1187 	return num_vlans;
1188 }
1189 
1190 /**
1191  * i40e_getnum_vf_vsi_vlan_filters
1192  * @vsi: pointer to the vsi
1193  *
1194  * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
1195  **/
1196 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1197 {
1198 	int num_vlans;
1199 
1200 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1201 	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1202 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1203 
1204 	return num_vlans;
1205 }
1206 
1207 /**
1208  * i40e_get_vlan_list_sync
1209  * @vsi: pointer to the VSI
1210  * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1211  * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
1212  *             This array is allocated here, but has to be freed in caller.
1213  *
1214  * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1215  **/
1216 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1217 				    s16 **vlan_list)
1218 {
1219 	struct i40e_mac_filter *f;
1220 	int i = 0;
1221 	int bkt;
1222 
1223 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1224 	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1225 	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1226 	if (!(*vlan_list))
1227 		goto err;
1228 
1229 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1230 		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1231 			continue;
1232 		(*vlan_list)[i++] = f->vlan;
1233 	}
1234 err:
1235 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1236 }
1237 
1238 /**
1239  * i40e_set_vsi_promisc
1240  * @vf: pointer to the VF struct
1241  * @seid: VSI number
1242  * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1243  *                for a given VLAN
1244  * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1245  *                  for a given VLAN
1246  * @vl: List of VLANs - apply filter for given VLANs
1247  * @num_vlans: Number of elements in @vl
1248  **/
1249 static i40e_status
1250 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1251 		     bool unicast_enable, s16 *vl, u16 num_vlans)
1252 {
1253 	i40e_status aq_ret, aq_tmp = 0;
1254 	struct i40e_pf *pf = vf->pf;
1255 	struct i40e_hw *hw = &pf->hw;
1256 	int i;
1257 
1258 	/* No VLAN to set promisc on, set on VSI */
1259 	if (!num_vlans || !vl) {
1260 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1261 							       multi_enable,
1262 							       NULL);
1263 		if (aq_ret) {
1264 			int aq_err = pf->hw.aq.asq_last_status;
1265 
1266 			dev_err(&pf->pdev->dev,
1267 				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1268 				vf->vf_id,
1269 				i40e_stat_str(&pf->hw, aq_ret),
1270 				i40e_aq_str(&pf->hw, aq_err));
1271 
1272 			return aq_ret;
1273 		}
1274 
1275 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1276 							     unicast_enable,
1277 							     NULL, true);
1278 
1279 		if (aq_ret) {
1280 			int aq_err = pf->hw.aq.asq_last_status;
1281 
1282 			dev_err(&pf->pdev->dev,
1283 				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1284 				vf->vf_id,
1285 				i40e_stat_str(&pf->hw, aq_ret),
1286 				i40e_aq_str(&pf->hw, aq_err));
1287 		}
1288 
1289 		return aq_ret;
1290 	}
1291 
1292 	for (i = 0; i < num_vlans; i++) {
1293 		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1294 							    multi_enable,
1295 							    vl[i], NULL);
1296 		if (aq_ret) {
1297 			int aq_err = pf->hw.aq.asq_last_status;
1298 
1299 			dev_err(&pf->pdev->dev,
1300 				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1301 				vf->vf_id,
1302 				i40e_stat_str(&pf->hw, aq_ret),
1303 				i40e_aq_str(&pf->hw, aq_err));
1304 
1305 			if (!aq_tmp)
1306 				aq_tmp = aq_ret;
1307 		}
1308 
1309 		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1310 							    unicast_enable,
1311 							    vl[i], NULL);
1312 		if (aq_ret) {
1313 			int aq_err = pf->hw.aq.asq_last_status;
1314 
1315 			dev_err(&pf->pdev->dev,
1316 				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1317 				vf->vf_id,
1318 				i40e_stat_str(&pf->hw, aq_ret),
1319 				i40e_aq_str(&pf->hw, aq_err));
1320 
1321 			if (!aq_tmp)
1322 				aq_tmp = aq_ret;
1323 		}
1324 	}
1325 
1326 	if (aq_tmp)
1327 		aq_ret = aq_tmp;
1328 
1329 	return aq_ret;
1330 }
1331 
1332 /**
1333  * i40e_config_vf_promiscuous_mode
1334  * @vf: pointer to the VF info
1335  * @vsi_id: VSI id
1336  * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1337  * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1338  *
1339  * Called from the VF to configure the promiscuous mode of
1340  * VF vsis and from the VF reset path to reset promiscuous mode.
1341  **/
1342 static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1343 						   u16 vsi_id,
1344 						   bool allmulti,
1345 						   bool alluni)
1346 {
1347 	i40e_status aq_ret = I40E_SUCCESS;
1348 	struct i40e_pf *pf = vf->pf;
1349 	struct i40e_vsi *vsi;
1350 	u16 num_vlans;
1351 	s16 *vl;
1352 
1353 	vsi = i40e_find_vsi_from_id(pf, vsi_id);
1354 	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1355 		return I40E_ERR_PARAM;
1356 
1357 	if (vf->port_vlan_id) {
1358 		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1359 					      alluni, &vf->port_vlan_id, 1);
1360 		return aq_ret;
1361 	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1362 		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1363 
1364 		if (!vl)
1365 			return I40E_ERR_NO_MEMORY;
1366 
1367 		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1368 					      vl, num_vlans);
1369 		kfree(vl);
1370 		return aq_ret;
1371 	}
1372 
1373 	/* no VLANs to set on, set on VSI */
1374 	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1375 				      NULL, 0);
1376 	return aq_ret;
1377 }
1378 
1379 /**
1380  * i40e_trigger_vf_reset
1381  * @vf: pointer to the VF structure
1382  * @flr: VFLR was issued or not
1383  *
1384  * Trigger hardware to start a reset for a particular VF. Expects the caller
1385  * to wait the proper amount of time to allow hardware to reset the VF before
1386  * it cleans up and restores VF functionality.
1387  **/
1388 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1389 {
1390 	struct i40e_pf *pf = vf->pf;
1391 	struct i40e_hw *hw = &pf->hw;
1392 	u32 reg, reg_idx, bit_idx;
1393 
1394 	/* warn the VF */
1395 	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1396 
1397 	/* Disable VF's configuration API during reset. The flag is re-enabled
1398 	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1399 	 * It's normally disabled in i40e_free_vf_res(), but it's safer
1400 	 * to do it earlier to give some time to finish to any VF config
1401 	 * functions that may still be running at this point.
1402 	 */
1403 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1404 
1405 	/* In the case of a VFLR, the HW has already reset the VF and we
1406 	 * just need to clean up, so don't hit the VFRTRIG register.
1407 	 */
1408 	if (!flr) {
1409 		/* reset VF using VPGEN_VFRTRIG reg */
1410 		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1411 		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1412 		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1413 		i40e_flush(hw);
1414 	}
1415 	/* clear the VFLR bit in GLGEN_VFLRSTAT */
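	/* each GLGEN_VFLRSTAT register tracks 32 VFs, indexed by absolute
	 * VF ID
	 */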
1416 	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1417 	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1418 	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1419 	i40e_flush(hw);
1420 
1421 	if (i40e_quiesce_vf_pci(vf))
1422 		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1423 			vf->vf_id);
1424 }
1425 
1426 /**
1427  * i40e_cleanup_reset_vf
1428  * @vf: pointer to the VF structure
1429  *
1430  * Cleanup a VF after the hardware reset is finished. Expects the caller to
1431  * have verified whether the reset is finished properly, and ensure the
1432  * minimum amount of wait time has passed.
1433  **/
1434 static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1435 {
1436 	struct i40e_pf *pf = vf->pf;
1437 	struct i40e_hw *hw = &pf->hw;
1438 	u32 reg;
1439 
1440 	/* disable promisc modes in case they were enabled */
1441 	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1442 
1443 	/* free VF resources to begin resetting the VSI state */
1444 	i40e_free_vf_res(vf);
1445 
1446 	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1447 	 * By doing this we allow HW to access VF memory at any point. If we
1448 	 * did it any sooner, HW could access memory while it was being freed
1449 	 * in i40e_free_vf_res(), causing an IOMMU fault.
1450 	 *
1451 	 * On the other hand, this needs to be done ASAP, because the VF driver
1452 	 * is waiting for this to happen and may report a timeout. It's
1453 	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1454 	 * it.
1455 	 */
1456 	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1457 	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1458 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1459 
1460 	/* reallocate VF resources to finish resetting the VSI state */
1461 	if (!i40e_alloc_vf_res(vf)) {
1462 		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1463 		i40e_enable_vf_mappings(vf);
1464 		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1465 		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1466 		/* Do not notify the client during VF init */
1467 		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1468 					&vf->vf_states))
1469 			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1470 		vf->num_vlan = 0;
1471 	}
1472 
1473 	/* Tell the VF driver the reset is done. This needs to be done only
1474 	 * after VF has been fully initialized, because the VF driver may
1475 	 * request resources immediately after setting this flag.
1476 	 */
1477 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1478 }
1479 
1480 /**
1481  * i40e_reset_vf
1482  * @vf: pointer to the VF structure
1483  * @flr: VFLR was issued or not
1484  *
1485  * Returns true if the VF is in reset, resets successfully, or resets
1486  * are disabled and false otherwise.
1487  **/
1488 bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1489 {
1490 	struct i40e_pf *pf = vf->pf;
1491 	struct i40e_hw *hw = &pf->hw;
1492 	bool rsd = false;
1493 	u32 reg;
1494 	int i;
1495 
1496 	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1497 		return true;
1498 
1499 	/* If the VFs have been disabled, this means something else is
1500 	 * resetting the VF, so we shouldn't continue.
1501 	 */
1502 	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1503 		return true;
1504 
1505 	i40e_trigger_vf_reset(vf, flr);
1506 
1507 	/* poll VPGEN_VFRSTAT reg to make sure
1508 	 * that reset is complete
1509 	 */
1510 	for (i = 0; i < 10; i++) {
1511 		/* VF reset requires driver to first reset the VF and then
1512 		 * poll the status register to make sure that the reset
1513 		 * completed successfully. Due to internal HW FIFO flushes,
1514 		 * we must wait 10ms before the register will be valid.
1515 		 */
1516 		usleep_range(10000, 20000);
1517 		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1518 		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1519 			rsd = true;
1520 			break;
1521 		}
1522 	}
1523 
1524 	if (flr)
1525 		usleep_range(10000, 20000);
1526 
1527 	if (!rsd)
1528 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1529 			vf->vf_id);
1530 	usleep_range(10000, 20000);
1531 
1532 	/* On initial reset, we don't have any queues to disable */
1533 	if (vf->lan_vsi_idx != 0)
1534 		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1535 
1536 	i40e_cleanup_reset_vf(vf);
1537 
1538 	i40e_flush(hw);
1539 	clear_bit(__I40E_VF_DISABLE, pf->state);
1540 
1541 	return true;
1542 }
1543 
1544 /**
1545  * i40e_reset_all_vfs
1546  * @pf: pointer to the PF structure
1547  * @flr: VFLR was issued or not
1548  *
1549  * Reset all allocated VFs in one go. First, tell the hardware to reset each
1550  * VF, then do all the waiting in one chunk, and finally finish restoring each
1551  * VF after the wait. This is useful during PF routines which need to reset
1552  * all VFs, as otherwise it must perform these resets in a serialized fashion.
1553  *
1554  * Returns true if any VFs were reset, and false otherwise.
1555  **/
1556 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1557 {
1558 	struct i40e_hw *hw = &pf->hw;
1559 	struct i40e_vf *vf;
1560 	int i, v;
1561 	u32 reg;
1562 
1563 	/* If we don't have any VFs, then there is nothing to reset */
1564 	if (!pf->num_alloc_vfs)
1565 		return false;
1566 
1567 	/* If VFs have been disabled, there is no need to reset */
1568 	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1569 		return false;
1570 
1571 	/* Begin reset on all VFs at once */
1572 	for (v = 0; v < pf->num_alloc_vfs; v++)
1573 		i40e_trigger_vf_reset(&pf->vf[v], flr);
1574 
1575 	/* HW requires some time to make sure it can flush the FIFO for a VF
1576 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1577 	 * sequence to make sure that it has completed. We'll keep track of
1578 	 * the VFs using a simple iterator that increments once that VF has
1579 	 * finished resetting.
1580 	 */
1581 	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1582 		usleep_range(10000, 20000);
1583 
1584 		/* Check each VF in sequence, beginning with the VF that
1585 		 * failed the previous check.
1586 		 */
1587 		while (v < pf->num_alloc_vfs) {
1588 			vf = &pf->vf[v];
1589 			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1590 			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1591 				break;
1592 
1593 			/* If the current VF has finished resetting, move on
1594 			 * to the next VF in sequence.
1595 			 */
1596 			v++;
1597 		}
1598 	}
1599 
1600 	if (flr)
1601 		usleep_range(10000, 20000);
1602 
1603 	/* Display a warning if at least one VF didn't manage to reset in
1604 	 * time, but continue on with the operation.
1605 	 */
1606 	if (v < pf->num_alloc_vfs)
1607 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1608 			pf->vf[v].vf_id);
1609 	usleep_range(10000, 20000);
1610 
1611 	/* Begin disabling all the rings associated with VFs, but do not wait
1612 	 * between each VF.
1613 	 */
1614 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1615 		/* On initial reset, we don't have any queues to disable */
1616 		if (pf->vf[v].lan_vsi_idx == 0)
1617 			continue;
1618 
1619 		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1620 	}
1621 
1622 	/* Now that we've notified HW to disable all of the VF rings, wait
1623 	 * until they finish.
1624 	 */
1625 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1626 		/* On initial reset, we don't have any queues to disable */
1627 		if (pf->vf[v].lan_vsi_idx == 0)
1628 			continue;
1629 
1630 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1631 	}
1632 
1633 	/* Hw may need up to 50ms to finish disabling the RX queues. We
1634 	 * minimize the wait by delaying only once for all VFs.
1635 	 */
1636 	mdelay(50);
1637 
1638 	/* Finish the reset on each VF */
1639 	for (v = 0; v < pf->num_alloc_vfs; v++)
1640 		i40e_cleanup_reset_vf(&pf->vf[v]);
1641 
1642 	i40e_flush(hw);
1643 	clear_bit(__I40E_VF_DISABLE, pf->state);
1644 
1645 	return true;
1646 }
1647 
1648 /**
1649  * i40e_free_vfs
1650  * @pf: pointer to the PF structure
1651  *
1652  * free VF resources
1653  **/
1654 void i40e_free_vfs(struct i40e_pf *pf)
1655 {
1656 	struct i40e_hw *hw = &pf->hw;
1657 	u32 reg_idx, bit_idx;
1658 	int i, tmp, vf_id;
1659 
1660 	if (!pf->vf)
1661 		return;
1662 
1663 	set_bit(__I40E_VFS_RELEASING, pf->state);
1664 	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1665 		usleep_range(1000, 2000);
1666 
1667 	i40e_notify_client_of_vf_enable(pf, 0);
1668 
1669 	/* Disable IOV before freeing resources. This lets any VF drivers
1670 	 * running in the host get themselves cleaned up before we yank
1671 	 * the carpet out from underneath their feet.
1672 	 */
1673 	if (!pci_vfs_assigned(pf->pdev))
1674 		pci_disable_sriov(pf->pdev);
1675 	else
1676 		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1677 
1678 	/* Amortize wait time by stopping all VFs at the same time */
1679 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1680 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1681 			continue;
1682 
1683 		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1684 	}
1685 
1686 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1687 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1688 			continue;
1689 
1690 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1691 	}
1692 
1693 	/* free up VF resources */
1694 	tmp = pf->num_alloc_vfs;
1695 	pf->num_alloc_vfs = 0;
1696 	for (i = 0; i < tmp; i++) {
1697 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1698 			i40e_free_vf_res(&pf->vf[i]);
1699 		/* disable qp mappings */
1700 		i40e_disable_vf_mappings(&pf->vf[i]);
1701 	}
1702 
1703 	kfree(pf->vf);
1704 	pf->vf = NULL;
1705 
1706 	/* This check is for when the driver is unloaded while VFs are
1707 	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1708 	 * before this function ever gets called.
1709 	 */
1710 	if (!pci_vfs_assigned(pf->pdev)) {
1711 		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1712 		 * work correctly when SR-IOV gets re-enabled.
1713 		 */
1714 		for (vf_id = 0; vf_id < tmp; vf_id++) {
1715 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1716 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1717 			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1718 		}
1719 	}
1720 	clear_bit(__I40E_VF_DISABLE, pf->state);
1721 	clear_bit(__I40E_VFS_RELEASING, pf->state);
1722 }
1723 
1724 #ifdef CONFIG_PCI_IOV
1725 /**
1726  * i40e_alloc_vfs
1727  * @pf: pointer to the PF structure
1728  * @num_alloc_vfs: number of VFs to allocate
1729  *
1730  * allocate VF resources
1731  **/
1732 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1733 {
1734 	struct i40e_vf *vfs;
1735 	int i, ret = 0;
1736 
1737 	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1738 	i40e_irq_dynamic_disable_icr0(pf);
1739 
1740 	/* Check to see if we're just allocating resources for extant VFs */
1741 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1742 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1743 		if (ret) {
1744 			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1745 			pf->num_alloc_vfs = 0;
1746 			goto err_iov;
1747 		}
1748 	}
1749 	/* allocate memory */
1750 	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1751 	if (!vfs) {
1752 		ret = -ENOMEM;
1753 		goto err_alloc;
1754 	}
1755 	pf->vf = vfs;
1756 
1757 	/* apply default profile */
1758 	for (i = 0; i < num_alloc_vfs; i++) {
1759 		vfs[i].pf = pf;
1760 		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1761 		vfs[i].vf_id = i;
1762 
1763 		/* assign default capabilities */
1764 		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1765 		vfs[i].spoofchk = true;
1766 
1767 		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1769 	}
1770 	pf->num_alloc_vfs = num_alloc_vfs;
1771 
1772 	/* VF resources get allocated during reset */
1773 	i40e_reset_all_vfs(pf, false);
1774 
1775 	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1776 
1777 err_alloc:
1778 	if (ret)
1779 		i40e_free_vfs(pf);
1780 err_iov:
1781 	/* Re-enable interrupt 0. */
1782 	i40e_irq_dynamic_enable_icr0(pf);
1783 	return ret;
1784 }
1785 
1786 #endif
1787 /**
1788  * i40e_pci_sriov_enable
1789  * @pdev: pointer to a pci_dev structure
1790  * @num_vfs: number of VFs to allocate
1791  *
1792  * Enable or change the number of VFs
1793  **/
1794 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1795 {
1796 #ifdef CONFIG_PCI_IOV
1797 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1798 	int pre_existing_vfs = pci_num_vf(pdev);
1799 	int err = 0;
1800 
1801 	if (test_bit(__I40E_TESTING, pf->state)) {
1802 		dev_warn(&pdev->dev,
1803 			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1804 		err = -EPERM;
1805 		goto err_out;
1806 	}
1807 
1808 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1809 		i40e_free_vfs(pf);
1810 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1811 		goto out;
1812 
1813 	if (num_vfs > pf->num_req_vfs) {
1814 		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1815 			 num_vfs, pf->num_req_vfs);
1816 		err = -EPERM;
1817 		goto err_out;
1818 	}
1819 
1820 	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1821 	err = i40e_alloc_vfs(pf, num_vfs);
1822 	if (err) {
1823 		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1824 		goto err_out;
1825 	}
1826 
1827 out:
1828 	return num_vfs;
1829 
1830 err_out:
1831 	return err;
1832 #endif
1833 	return 0;
1834 }
1835 
1836 /**
1837  * i40e_pci_sriov_configure
1838  * @pdev: pointer to a pci_dev structure
1839  * @num_vfs: number of VFs to allocate
1840  *
1841  * Enable or change the number of VFs. Called when the user updates the number
1842  * of VFs in sysfs.
1843  **/
1844 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1845 {
1846 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1847 	int ret = 0;
1848 
1849 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1850 		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1851 		return -EAGAIN;
1852 	}
1853 
1854 	if (num_vfs) {
1855 		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1856 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1857 			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1858 		}
1859 		ret = i40e_pci_sriov_enable(pdev, num_vfs);
1860 		goto sriov_configure_out;
1861 	}
1862 
1863 	if (!pci_vfs_assigned(pf->pdev)) {
1864 		i40e_free_vfs(pf);
1865 		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1866 		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1867 	} else {
1868 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1869 		ret = -EINVAL;
1870 		goto sriov_configure_out;
1871 	}
1872 sriov_configure_out:
1873 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1874 	return ret;
1875 }
1876 
1877 /***********************virtual channel routines******************/
1878 
1879 /**
1880  * i40e_vc_send_msg_to_vf
1881  * @vf: pointer to the VF info
1882  * @v_opcode: virtual channel opcode
1883  * @v_retval: virtual channel return value
1884  * @msg: pointer to the msg buffer
1885  * @msglen: msg length
1886  *
1887  * send msg to VF
1888  **/
1889 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1890 				  u32 v_retval, u8 *msg, u16 msglen)
1891 {
1892 	struct i40e_pf *pf;
1893 	struct i40e_hw *hw;
1894 	int abs_vf_id;
1895 	i40e_status aq_ret;
1896 
1897 	/* validate the request */
1898 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1899 		return -EINVAL;
1900 
1901 	pf = vf->pf;
1902 	hw = &pf->hw;
1903 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1904 
1905 	/* single place to detect unsuccessful return values */
1906 	if (v_retval) {
1907 		vf->num_invalid_msgs++;
1908 		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1909 			 vf->vf_id, v_opcode, v_retval);
1910 		if (vf->num_invalid_msgs >
1911 		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1912 			dev_err(&pf->pdev->dev,
1913 				"Number of invalid messages exceeded for VF %d\n",
1914 				vf->vf_id);
1915 			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1916 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1917 		}
1918 	} else {
1919 		vf->num_valid_msgs++;
1920 		/* reset the invalid counter, if a valid message is received. */
1921 		vf->num_invalid_msgs = 0;
1922 	}
1923 
1924 	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1925 					msg, msglen, NULL);
1926 	if (aq_ret) {
1927 		dev_info(&pf->pdev->dev,
1928 			 "Unable to send the message to VF %d aq_err %d\n",
1929 			 vf->vf_id, pf->hw.aq.asq_last_status);
1930 		return -EIO;
1931 	}
1932 
1933 	return 0;
1934 }
1935 
1936 /**
1937  * i40e_vc_send_resp_to_vf
1938  * @vf: pointer to the VF info
1939  * @opcode: operation code
1940  * @retval: return value
1941  *
1942  * send resp msg to VF
1943  **/
1944 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1945 				   enum virtchnl_ops opcode,
1946 				   i40e_status retval)
1947 {
1948 	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1949 }
1950 
1951 /**
1952  * i40e_vc_get_version_msg
1953  * @vf: pointer to the VF info
1954  * @msg: pointer to the msg buffer
1955  *
1956  * called from the VF to request the API version used by the PF
1957  **/
1958 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1959 {
1960 	struct virtchnl_version_info info = {
1961 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1962 	};
1963 
1964 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
1965 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1966 	if (VF_IS_V10(&vf->vf_ver))
1967 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1968 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1969 				      I40E_SUCCESS, (u8 *)&info,
1970 				      sizeof(struct virtchnl_version_info));
1971 }
1972 
1973 /**
1974  * i40e_del_qch - delete all the additional VSIs created as a part of ADq
1975  * @vf: pointer to VF structure
1976  **/
1977 static void i40e_del_qch(struct i40e_vf *vf)
1978 {
1979 	struct i40e_pf *pf = vf->pf;
1980 	int i;
1981 
1982 	/* The first element in the array belongs to the primary VF VSI, which
1983 	 * we shouldn't delete; the remaining ADq VSIs, however, must be freed.
1984 	 */
1985 	for (i = 1; i < vf->num_tc; i++) {
1986 		if (vf->ch[i].vsi_idx) {
1987 			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
1988 			vf->ch[i].vsi_idx = 0;
1989 			vf->ch[i].vsi_id = 0;
1990 		}
1991 	}
1992 }
1993 
1994 /**
1995  * i40e_vc_get_vf_resources_msg
1996  * @vf: pointer to the VF info
1997  * @msg: pointer to the msg buffer
1998  *
1999  * called from the VF to request its resources
2000  **/
2001 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2002 {
2003 	struct virtchnl_vf_resource *vfres = NULL;
2004 	struct i40e_pf *pf = vf->pf;
2005 	i40e_status aq_ret = 0;
2006 	struct i40e_vsi *vsi;
2007 	int num_vsis = 1;
2008 	size_t len = 0;
2009 	int ret;
2010 
2011 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
2012 		aq_ret = I40E_ERR_PARAM;
2013 		goto err;
2014 	}
2015 
2016 	len = struct_size(vfres, vsi_res, num_vsis);
2017 	vfres = kzalloc(len, GFP_KERNEL);
2018 	if (!vfres) {
2019 		aq_ret = I40E_ERR_NO_MEMORY;
2020 		len = 0;
2021 		goto err;
2022 	}
2023 	if (VF_IS_V11(&vf->vf_ver))
2024 		vf->driver_caps = *(u32 *)msg;
2025 	else
2026 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2027 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2028 				  VIRTCHNL_VF_OFFLOAD_VLAN;
2029 
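	/* Build the set of capability flags to advertise back to the VF,
	 * based on what the VF driver negotiated and what the PF supports.
	 */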
2030 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2031 	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2032 	vsi = pf->vsi[vf->lan_vsi_idx];
2033 	if (!vsi->info.pvid)
2034 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2035 
2036 	if (i40e_vf_client_capable(pf, vf->vf_id) &&
2037 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
2038 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
2039 		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2040 	} else {
2041 		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2042 	}
2043 
2044 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2045 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2046 	} else {
2047 		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2048 		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2049 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2050 		else
2051 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2052 	}
2053 
2054 	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2055 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2056 			vfres->vf_cap_flags |=
2057 				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2058 	}
2059 
2060 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2061 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2062 
2063 	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2064 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2065 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2066 
2067 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2068 		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2069 			dev_err(&pf->pdev->dev,
2070 				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2071 				 vf->vf_id);
2072 			aq_ret = I40E_ERR_PARAM;
2073 			goto err;
2074 		}
2075 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2076 	}
2077 
2078 	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2079 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2080 			vfres->vf_cap_flags |=
2081 					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2082 	}
2083 
2084 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2085 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2086 
2087 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2088 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2089 
2090 	vfres->num_vsis = num_vsis;
2091 	vfres->num_queue_pairs = vf->num_queue_pairs;
2092 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2093 	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2094 	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2095 
2096 	if (vf->lan_vsi_idx) {
2097 		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2098 		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2099 		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2100 		/* VFs only use TC 0 */
2101 		vfres->vsi_res[0].qset_handle
2102 					  = le16_to_cpu(vsi->info.qs_handle[0]);
2103 		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2104 				vf->default_lan_addr.addr);
2105 	}
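	/* resources were reported successfully, so mark the VF active */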
2106 	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2107 
2108 err:
2109 	/* send the response back to the VF */
2110 	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2111 				     aq_ret, (u8 *)vfres, len);
2112 
2113 	kfree(vfres);
2114 	return ret;
2115 }
2116 
2117 /**
2118  * i40e_vc_config_promiscuous_mode_msg
2119  * @vf: pointer to the VF info
2120  * @msg: pointer to the msg buffer
2121  *
2122  * called from the VF to configure the promiscuous mode of
2123  * VF vsis
2124  **/
2125 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2126 {
2127 	struct virtchnl_promisc_info *info =
2128 	    (struct virtchnl_promisc_info *)msg;
2129 	struct i40e_pf *pf = vf->pf;
2130 	i40e_status aq_ret = 0;
2131 	bool allmulti = false;
2132 	bool alluni = false;
2133 
2134 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2135 		aq_ret = I40E_ERR_PARAM;
2136 		goto err_out;
2137 	}
2138 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2139 		dev_err(&pf->pdev->dev,
2140 			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
2141 			vf->vf_id);
2142 
2143 		/* Lie to the VF on purpose, because this is an error we can
2144 		 * ignore. Unprivileged VF is not a virtual channel error.
2145 		 */
2146 		aq_ret = 0;
2147 		goto err_out;
2148 	}
2149 
2150 	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2151 		aq_ret = I40E_ERR_PARAM;
2152 		goto err_out;
2153 	}
2154 
2155 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2156 		aq_ret = I40E_ERR_PARAM;
2157 		goto err_out;
2158 	}
2159 
2160 	/* Multicast promiscuous handling */
2161 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2162 		allmulti = true;
2163 
2164 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2165 		alluni = true;
2166 	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2167 						 alluni);
2168 	if (aq_ret)
2169 		goto err_out;
2170 
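	/* Record the new promiscuous state in vf_states and log only when
	 * the state actually changes.
	 */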
2171 	if (allmulti) {
2172 		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2173 				      &vf->vf_states))
2174 			dev_info(&pf->pdev->dev,
2175 				 "VF %d successfully set multicast promiscuous mode\n",
2176 				 vf->vf_id);
2177 	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2178 				      &vf->vf_states))
2179 		dev_info(&pf->pdev->dev,
2180 			 "VF %d successfully unset multicast promiscuous mode\n",
2181 			 vf->vf_id);
2182 
2183 	if (alluni) {
2184 		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2185 				      &vf->vf_states))
2186 			dev_info(&pf->pdev->dev,
2187 				 "VF %d successfully set unicast promiscuous mode\n",
2188 				 vf->vf_id);
2189 	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2190 				      &vf->vf_states))
2191 		dev_info(&pf->pdev->dev,
2192 			 "VF %d successfully unset unicast promiscuous mode\n",
2193 			 vf->vf_id);
2194 
2195 err_out:
2196 	/* send the response to the VF */
2197 	return i40e_vc_send_resp_to_vf(vf,
2198 				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2199 				       aq_ret);
2200 }
2201 
2202 /**
2203  * i40e_vc_config_queues_msg
2204  * @vf: pointer to the VF info
2205  * @msg: pointer to the msg buffer
2206  *
2207  * called from the VF to configure the rx/tx
2208  * queues
2209  **/
2210 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2211 {
2212 	struct virtchnl_vsi_queue_config_info *qci =
2213 	    (struct virtchnl_vsi_queue_config_info *)msg;
2214 	struct virtchnl_queue_pair_info *qpi;
2215 	u16 vsi_id, vsi_queue_id = 0;
2216 	struct i40e_pf *pf = vf->pf;
2217 	i40e_status aq_ret = 0;
2218 	int i, j = 0, idx = 0;
2219 	struct i40e_vsi *vsi;
2220 	u16 num_qps_all = 0;
2221 
2222 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2223 		aq_ret = I40E_ERR_PARAM;
2224 		goto error_param;
2225 	}
2226 
2227 	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2228 		aq_ret = I40E_ERR_PARAM;
2229 		goto error_param;
2230 	}
2231 
2232 	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2233 		aq_ret = I40E_ERR_PARAM;
2234 		goto error_param;
2235 	}
2236 
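	/* For ADq, the total queue pairs across all TC VSIs must match the
	 * number of queue pairs the VF is configuring.
	 */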
2237 	if (vf->adq_enabled) {
2238 		for (i = 0; i < I40E_MAX_VF_VSI; i++)
2239 			num_qps_all += vf->ch[i].num_qps;
2240 		if (num_qps_all != qci->num_queue_pairs) {
2241 			aq_ret = I40E_ERR_PARAM;
2242 			goto error_param;
2243 		}
2244 	}
2245 
2246 	vsi_id = qci->vsi_id;
2247 
2248 	for (i = 0; i < qci->num_queue_pairs; i++) {
2249 		qpi = &qci->qpair[i];
2250 
2251 		if (!vf->adq_enabled) {
2252 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2253 						      qpi->txq.queue_id)) {
2254 				aq_ret = I40E_ERR_PARAM;
2255 				goto error_param;
2256 			}
2257 
2258 			vsi_queue_id = qpi->txq.queue_id;
2259 
2260 			if (qpi->txq.vsi_id != qci->vsi_id ||
2261 			    qpi->rxq.vsi_id != qci->vsi_id ||
2262 			    qpi->rxq.queue_id != vsi_queue_id) {
2263 				aq_ret = I40E_ERR_PARAM;
2264 				goto error_param;
2265 			}
2266 		}
2267 
2268 		if (vf->adq_enabled) {
2269 			if (idx >= ARRAY_SIZE(vf->ch)) {
2270 				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2271 				goto error_param;
2272 			}
2273 			vsi_id = vf->ch[idx].vsi_id;
2274 		}
2275 
2276 		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2277 					     &qpi->rxq) ||
2278 		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2279 					     &qpi->txq)) {
2280 			aq_ret = I40E_ERR_PARAM;
2281 			goto error_param;
2282 		}
2283 
2284 		/* For ADq there can be up to 4 VSIs with a max of 4 queues each.
2285 		 * The VF does not know about these additional VSIs; it only
2286 		 * cares about its own queues. The PF configures these queues
2287 		 * onto the appropriate VSIs based on the TC mapping.
2288 		 */
2289 		if (vf->adq_enabled) {
2290 			if (idx >= ARRAY_SIZE(vf->ch)) {
2291 				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2292 				goto error_param;
2293 			}
2294 			if (j == (vf->ch[idx].num_qps - 1)) {
2295 				idx++;
2296 				j = 0; /* resetting the queue count */
2297 				vsi_queue_id = 0;
2298 			} else {
2299 				j++;
2300 				vsi_queue_id++;
2301 			}
2302 		}
2303 	}
2304 	/* set vsi num_queue_pairs in use to num configured by VF */
2305 	if (!vf->adq_enabled) {
2306 		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2307 			qci->num_queue_pairs;
2308 	} else {
2309 		for (i = 0; i < vf->num_tc; i++) {
2310 			vsi = pf->vsi[vf->ch[i].vsi_idx];
2311 			vsi->num_queue_pairs = vf->ch[i].num_qps;
2312 
2313 			if (i40e_update_adq_vsi_queues(vsi, i)) {
2314 				aq_ret = I40E_ERR_CONFIG;
2315 				goto error_param;
2316 			}
2317 		}
2318 	}
2319 
2320 error_param:
2321 	/* send the response to the VF */
2322 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2323 				       aq_ret);
2324 }
2325 
2326 /**
2327  * i40e_validate_queue_map - check queue map is valid
2328  * @vf: the VF structure pointer
2329  * @vsi_id: vsi id
2330  * @queuemap: Tx or Rx queue map
2331  *
2332  * check if Tx or Rx queue map is valid
2333  **/
2334 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2335 				   unsigned long queuemap)
2336 {
2337 	u16 vsi_queue_id, queue_id;
2338 
2339 	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2340 		if (vf->adq_enabled) {
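			/* With ADq, map the VF-relative queue id onto the
			 * corresponding TC VSI and its local queue id.
			 */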
2341 			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2342 			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2343 		} else {
2344 			queue_id = vsi_queue_id;
2345 		}
2346 
2347 		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2348 			return -EINVAL;
2349 	}
2350 
2351 	return 0;
2352 }
2353 
2354 /**
2355  * i40e_vc_config_irq_map_msg
2356  * @vf: pointer to the VF info
2357  * @msg: pointer to the msg buffer
2358  *
2359  * called from the VF to configure the irq to
2360  * queue map
2361  **/
2362 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2363 {
2364 	struct virtchnl_irq_map_info *irqmap_info =
2365 	    (struct virtchnl_irq_map_info *)msg;
2366 	struct virtchnl_vector_map *map;
2367 	u16 vsi_id;
2368 	i40e_status aq_ret = 0;
2369 	int i;
2370 
2371 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2372 		aq_ret = I40E_ERR_PARAM;
2373 		goto error_param;
2374 	}
2375 
2376 	if (irqmap_info->num_vectors >
2377 	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
2378 		aq_ret = I40E_ERR_PARAM;
2379 		goto error_param;
2380 	}
2381 
2382 	for (i = 0; i < irqmap_info->num_vectors; i++) {
2383 		map = &irqmap_info->vecmap[i];
2384 		/* validate msg params */
2385 		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2386 		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2387 			aq_ret = I40E_ERR_PARAM;
2388 			goto error_param;
2389 		}
2390 		vsi_id = map->vsi_id;
2391 
2392 		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2393 			aq_ret = I40E_ERR_PARAM;
2394 			goto error_param;
2395 		}
2396 
2397 		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2398 			aq_ret = I40E_ERR_PARAM;
2399 			goto error_param;
2400 		}
2401 
2402 		i40e_config_irq_link_list(vf, vsi_id, map);
2403 	}
2404 error_param:
2405 	/* send the response to the VF */
2406 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2407 				       aq_ret);
2408 }
2409 
2410 /**
2411  * i40e_ctrl_vf_tx_rings
2412  * @vsi: the SRIOV VSI being configured
2413  * @q_map: bit map of the queues to be enabled
2414  * @enable: start or stop the queue
2415  **/
2416 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2417 				 bool enable)
2418 {
2419 	struct i40e_pf *pf = vsi->back;
2420 	int ret = 0;
2421 	u16 q_id;
2422 
2423 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2424 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
2425 					     vsi->base_queue + q_id,
2426 					     false /*is xdp*/, enable);
2427 		if (ret)
2428 			break;
2429 	}
2430 	return ret;
2431 }
2432 
2433 /**
2434  * i40e_ctrl_vf_rx_rings
2435  * @vsi: the SRIOV VSI being configured
2436  * @q_map: bit map of the queues to be enabled
2437  * @enable: start or stop the queue
2438  **/
2439 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2440 				 bool enable)
2441 {
2442 	struct i40e_pf *pf = vsi->back;
2443 	int ret = 0;
2444 	u16 q_id;
2445 
2446 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2447 		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2448 					     enable);
2449 		if (ret)
2450 			break;
2451 	}
2452 	return ret;
2453 }
2454 
2455 /**
2456  * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2457  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2458  *
2459  * Returns true if validation was successful, else false.
2460  **/
2461 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2462 {
2463 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2464 	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2465 	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2466 		return false;
2467 
2468 	return true;
2469 }
2470 
2471 /**
2472  * i40e_vc_enable_queues_msg
2473  * @vf: pointer to the VF info
2474  * @msg: pointer to the msg buffer
2475  *
2476  * called from the VF to enable all or specific queue(s)
2477  **/
2478 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2479 {
2480 	struct virtchnl_queue_select *vqs =
2481 	    (struct virtchnl_queue_select *)msg;
2482 	struct i40e_pf *pf = vf->pf;
2483 	i40e_status aq_ret = 0;
2484 	int i;
2485 
2486 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2487 		aq_ret = I40E_ERR_PARAM;
2488 		goto error_param;
2489 	}
2490 
2491 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2492 		aq_ret = I40E_ERR_PARAM;
2493 		goto error_param;
2494 	}
2495 
2496 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2497 		aq_ret = I40E_ERR_PARAM;
2498 		goto error_param;
2499 	}
2500 
2501 	/* Use the queue bit map sent by the VF */
2502 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2503 				  true)) {
2504 		aq_ret = I40E_ERR_TIMEOUT;
2505 		goto error_param;
2506 	}
2507 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2508 				  true)) {
2509 		aq_ret = I40E_ERR_TIMEOUT;
2510 		goto error_param;
2511 	}
2512 
2513 	/* need to start the rings for the additional ADq VSIs as well */
2514 	if (vf->adq_enabled) {
2515 		/* zero belongs to LAN VSI */
2516 		for (i = 1; i < vf->num_tc; i++) {
2517 			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2518 				aq_ret = I40E_ERR_TIMEOUT;
2519 		}
2520 	}
2521 
2522 error_param:
2523 	/* send the response to the VF */
2524 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2525 				       aq_ret);
2526 }
2527 
2528 /**
2529  * i40e_vc_disable_queues_msg
2530  * @vf: pointer to the VF info
2531  * @msg: pointer to the msg buffer
2532  *
2533  * called from the VF to disable all or specific
2534  * queue(s)
2535  **/
2536 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2537 {
2538 	struct virtchnl_queue_select *vqs =
2539 	    (struct virtchnl_queue_select *)msg;
2540 	struct i40e_pf *pf = vf->pf;
2541 	i40e_status aq_ret = 0;
2542 
2543 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2544 		aq_ret = I40E_ERR_PARAM;
2545 		goto error_param;
2546 	}
2547 
2548 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2549 		aq_ret = I40E_ERR_PARAM;
2550 		goto error_param;
2551 	}
2552 
2553 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2554 		aq_ret = I40E_ERR_PARAM;
2555 		goto error_param;
2556 	}
2557 
2558 	/* Use the queue bit map sent by the VF */
2559 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2560 				  false)) {
2561 		aq_ret = I40E_ERR_TIMEOUT;
2562 		goto error_param;
2563 	}
2564 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2565 				  false)) {
2566 		aq_ret = I40E_ERR_TIMEOUT;
2567 		goto error_param;
2568 	}
2569 error_param:
2570 	/* send the response to the VF */
2571 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2572 				       aq_ret);
2573 }
2574 
2575 /**
2576  * i40e_vc_request_queues_msg
2577  * @vf: pointer to the VF info
2578  * @msg: pointer to the msg buffer
2579  *
2580  * VFs get a default number of queues but can use this message to request a
2581  * different number. If the request is successful, the PF resets the VF and
2582  * returns 0. If unsuccessful, the PF sends a message informing the VF of the
2583  * number of available queues, and returns the result of sending that message.
2584  **/
2585 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2586 {
2587 	struct virtchnl_vf_res_request *vfres =
2588 		(struct virtchnl_vf_res_request *)msg;
2589 	u16 req_pairs = vfres->num_queue_pairs;
2590 	u8 cur_pairs = vf->num_queue_pairs;
2591 	struct i40e_pf *pf = vf->pf;
2592 
2593 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2594 		return -EINVAL;
2595 
2596 	if (req_pairs > I40E_MAX_VF_QUEUES) {
2597 		dev_err(&pf->pdev->dev,
2598 			"VF %d tried to request more than %d queues.\n",
2599 			vf->vf_id,
2600 			I40E_MAX_VF_QUEUES);
2601 		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2602 	} else if (req_pairs - cur_pairs > pf->queues_left) {
2603 		dev_warn(&pf->pdev->dev,
2604 			 "VF %d requested %d more queues, but only %d left.\n",
2605 			 vf->vf_id,
2606 			 req_pairs - cur_pairs,
2607 			 pf->queues_left);
2608 		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2609 	} else {
2610 		/* successful request */
2611 		vf->num_req_queues = req_pairs;
2612 		i40e_vc_reset_vf(vf, true);
2613 		return 0;
2614 	}
2615 
2616 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2617 				      (u8 *)vfres, sizeof(*vfres));
2618 }
2619 
2620 /**
2621  * i40e_vc_get_stats_msg
2622  * @vf: pointer to the VF info
2623  * @msg: pointer to the msg buffer
2624  *
2625  * called from the VF to get vsi stats
2626  **/
2627 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2628 {
2629 	struct virtchnl_queue_select *vqs =
2630 	    (struct virtchnl_queue_select *)msg;
2631 	struct i40e_pf *pf = vf->pf;
2632 	struct i40e_eth_stats stats;
2633 	i40e_status aq_ret = 0;
2634 	struct i40e_vsi *vsi;
2635 
2636 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2637 
2638 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2639 		aq_ret = I40E_ERR_PARAM;
2640 		goto error_param;
2641 	}
2642 
2643 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2644 		aq_ret = I40E_ERR_PARAM;
2645 		goto error_param;
2646 	}
2647 
2648 	vsi = pf->vsi[vf->lan_vsi_idx];
2649 	if (!vsi) {
2650 		aq_ret = I40E_ERR_PARAM;
2651 		goto error_param;
2652 	}
2653 	i40e_update_eth_stats(vsi);
2654 	stats = vsi->eth_stats;
2655 
2656 error_param:
2657 	/* send the response back to the VF */
2658 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2659 				      (u8 *)&stats, sizeof(stats));
2660 }
2661 
2662 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2663  * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
2664  */
2665 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2666 #define I40E_VC_MAX_VLAN_PER_VF 16
2667 
2668 /**
2669  * i40e_check_vf_permission
2670  * @vf: pointer to the VF info
2671  * @al: MAC address list from virtchnl
2672  *
2673  * Check that the given list of MAC addresses is allowed. Will return -EPERM
2674  * if any address in the list is not valid. Checks the following conditions:
2675  *
2676  * 1) broadcast and zero addresses are never valid
2677  * 2) unicast addresses are not allowed if the VMM has administratively set
2678  *    the VF MAC address, unless the VF is marked as privileged.
2679  * 3) There is enough space to add all the addresses.
2680  *
2681  * Note that to guarantee consistency, it is expected this function be called
2682  * while holding the mac_filter_hash_lock, as otherwise the current number of
2683  * addresses might not be accurate.
2684  **/
2685 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2686 					   struct virtchnl_ether_addr_list *al)
2687 {
2688 	struct i40e_pf *pf = vf->pf;
2689 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2690 	int mac2add_cnt = 0;
2691 	int i;
2692 
2693 	for (i = 0; i < al->num_elements; i++) {
2694 		struct i40e_mac_filter *f;
2695 		u8 *addr = al->list[i].addr;
2696 
2697 		if (is_broadcast_ether_addr(addr) ||
2698 		    is_zero_ether_addr(addr)) {
2699 			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2700 				addr);
2701 			return I40E_ERR_INVALID_MAC_ADDR;
2702 		}
2703 
2704 		/* If the host VMM administrator has set the VF MAC address
2705 		 * administratively via the ndo_set_vf_mac command then deny
2706 		 * permission to the VF to add or delete unicast MAC addresses.
2707 		 * Unless the VF is privileged and then it can do whatever.
2708 		 * The VF may request to set the MAC address filter already
2709 		 * assigned to it so do not return an error in that case.
2710 		 */
2711 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2712 		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2713 		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2714 			dev_err(&pf->pdev->dev,
2715 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2716 			return -EPERM;
2717 		}
2718 
2719 		/* count filters that really will be added */
2720 		f = i40e_find_mac(vsi, addr);
2721 		if (!f)
2722 			++mac2add_cnt;
2723 	}
2724 
2725 	/* If this VF is not privileged, then we can't add more than a limited
2726 	 * number of addresses. Check to make sure that the additions do not
2727 	 * push us over the limit.
2728 	 */
2729 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2730 	    (i40e_count_filters(vsi) + mac2add_cnt) >
2731 		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2732 		dev_err(&pf->pdev->dev,
2733 			"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2734 		return -EPERM;
2735 	}
2736 	return 0;
2737 }
2738 
2739 /**
2740  * i40e_vc_add_mac_addr_msg
2741  * @vf: pointer to the VF info
2742  * @msg: pointer to the msg buffer
2743  *
2744  * add guest mac address filter
2745  **/
2746 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2747 {
2748 	struct virtchnl_ether_addr_list *al =
2749 	    (struct virtchnl_ether_addr_list *)msg;
2750 	struct i40e_pf *pf = vf->pf;
2751 	struct i40e_vsi *vsi = NULL;
2752 	i40e_status ret = 0;
2753 	int i;
2754 
2755 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2756 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2757 		ret = I40E_ERR_PARAM;
2758 		goto error_param;
2759 	}
2760 
2761 	vsi = pf->vsi[vf->lan_vsi_idx];
2762 
2763 	/* Lock once, because all functions inside the for loop access the VSI's
2764 	 * MAC filter list, which needs to be protected by the same lock.
2765 	 */
2766 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2767 
2768 	ret = i40e_check_vf_permission(vf, al);
2769 	if (ret) {
2770 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2771 		goto error_param;
2772 	}
2773 
2774 	/* add new addresses to the list */
2775 	for (i = 0; i < al->num_elements; i++) {
2776 		struct i40e_mac_filter *f;
2777 
2778 		f = i40e_find_mac(vsi, al->list[i].addr);
2779 		if (!f) {
2780 			f = i40e_add_mac_filter(vsi, al->list[i].addr);
2781 
2782 			if (!f) {
2783 				dev_err(&pf->pdev->dev,
2784 					"Unable to add MAC filter %pM for VF %d\n",
2785 					al->list[i].addr, vf->vf_id);
2786 				ret = I40E_ERR_PARAM;
2787 				spin_unlock_bh(&vsi->mac_filter_hash_lock);
2788 				goto error_param;
2789 			}
2790 			if (is_valid_ether_addr(al->list[i].addr) &&
2791 			    is_zero_ether_addr(vf->default_lan_addr.addr))
2792 				ether_addr_copy(vf->default_lan_addr.addr,
2793 						al->list[i].addr);
2794 		}
2795 	}
2796 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2797 
2798 	/* program the updated filter list */
2799 	ret = i40e_sync_vsi_filters(vsi);
2800 	if (ret)
2801 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2802 			vf->vf_id, ret);
2803 
2804 error_param:
2805 	/* send the response to the VF */
2806 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2807 				       ret);
2808 }
2809 
2810 /**
2811  * i40e_vc_del_mac_addr_msg
2812  * @vf: pointer to the VF info
2813  * @msg: pointer to the msg buffer
2814  *
2815  * remove guest mac address filter
2816  **/
2817 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2818 {
2819 	struct virtchnl_ether_addr_list *al =
2820 	    (struct virtchnl_ether_addr_list *)msg;
2821 	bool was_unimac_deleted = false;
2822 	struct i40e_pf *pf = vf->pf;
2823 	struct i40e_vsi *vsi = NULL;
2824 	i40e_status ret = 0;
2825 	int i;
2826 
2827 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2828 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2829 		ret = I40E_ERR_PARAM;
2830 		goto error_param;
2831 	}
2832 
2833 	for (i = 0; i < al->num_elements; i++) {
2834 		if (is_broadcast_ether_addr(al->list[i].addr) ||
2835 		    is_zero_ether_addr(al->list[i].addr)) {
2836 			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2837 				al->list[i].addr, vf->vf_id);
2838 			ret = I40E_ERR_INVALID_MAC_ADDR;
2839 			goto error_param;
2840 		}
2841 		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
2842 			was_unimac_deleted = true;
2843 	}
2844 	vsi = pf->vsi[vf->lan_vsi_idx];
2845 
2846 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2847 	/* delete addresses from the list */
2848 	for (i = 0; i < al->num_elements; i++)
2849 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2850 			ret = I40E_ERR_INVALID_MAC_ADDR;
2851 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
2852 			goto error_param;
2853 		}
2854 
2855 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2856 
2857 	/* program the updated filter list */
2858 	ret = i40e_sync_vsi_filters(vsi);
2859 	if (ret)
2860 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2861 			vf->vf_id, ret);
2862 
2863 	if (vf->trusted && was_unimac_deleted) {
2864 		struct i40e_mac_filter *f;
2865 		struct hlist_node *h;
2866 		u8 *macaddr = NULL;
2867 		int bkt;
2868 
2869 		/* set last unicast mac address as default */
2870 		spin_lock_bh(&vsi->mac_filter_hash_lock);
2871 		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2872 			if (is_valid_ether_addr(f->macaddr))
2873 				macaddr = f->macaddr;
2874 		}
2875 		if (macaddr)
2876 			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
2877 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
2878 	}
2879 error_param:
2880 	/* send the response to the VF */
2881 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
2882 }
2883 
2884 /**
2885  * i40e_vc_add_vlan_msg
2886  * @vf: pointer to the VF info
2887  * @msg: pointer to the msg buffer
2888  *
2889  * program guest vlan id
2890  **/
2891 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2892 {
2893 	struct virtchnl_vlan_filter_list *vfl =
2894 	    (struct virtchnl_vlan_filter_list *)msg;
2895 	struct i40e_pf *pf = vf->pf;
2896 	struct i40e_vsi *vsi = NULL;
2897 	i40e_status aq_ret = 0;
2898 	int i;
2899 
2900 	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2901 	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2902 		dev_err(&pf->pdev->dev,
2903 			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2904 		goto error_param;
2905 	}
2906 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2907 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2908 		aq_ret = I40E_ERR_PARAM;
2909 		goto error_param;
2910 	}
2911 
2912 	for (i = 0; i < vfl->num_elements; i++) {
2913 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2914 			aq_ret = I40E_ERR_PARAM;
2915 			dev_err(&pf->pdev->dev,
2916 				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2917 			goto error_param;
2918 		}
2919 	}
2920 	vsi = pf->vsi[vf->lan_vsi_idx];
2921 	if (vsi->info.pvid) {
2922 		aq_ret = I40E_ERR_PARAM;
2923 		goto error_param;
2924 	}
2925 
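	/* The VF is adding its own VLAN filters, so make sure VLAN stripping
	 * is enabled on its VSI.
	 */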
2926 	i40e_vlan_stripping_enable(vsi);
2927 	for (i = 0; i < vfl->num_elements; i++) {
2928 		/* add new VLAN filter */
2929 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2930 		if (!ret)
2931 			vf->num_vlan++;
2932 
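		/* If the VF is already in promiscuous mode, extend that
		 * setting to the newly added VLAN as well.
		 */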
2933 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2934 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2935 							   true,
2936 							   vfl->vlan_id[i],
2937 							   NULL);
2938 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2939 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2940 							   true,
2941 							   vfl->vlan_id[i],
2942 							   NULL);
2943 
2944 		if (ret)
2945 			dev_err(&pf->pdev->dev,
2946 				"Unable to add VLAN filter %d for VF %d, error %d\n",
2947 				vfl->vlan_id[i], vf->vf_id, ret);
2948 	}
2949 
2950 error_param:
2951 	/* send the response to the VF */
2952 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2953 }
2954 
2955 /**
2956  * i40e_vc_remove_vlan_msg
2957  * @vf: pointer to the VF info
2958  * @msg: pointer to the msg buffer
2959  *
2960  * remove programmed guest vlan id
2961  **/
2962 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2963 {
2964 	struct virtchnl_vlan_filter_list *vfl =
2965 	    (struct virtchnl_vlan_filter_list *)msg;
2966 	struct i40e_pf *pf = vf->pf;
2967 	struct i40e_vsi *vsi = NULL;
2968 	i40e_status aq_ret = 0;
2969 	int i;
2970 
2971 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2972 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2973 		aq_ret = I40E_ERR_PARAM;
2974 		goto error_param;
2975 	}
2976 
2977 	for (i = 0; i < vfl->num_elements; i++) {
2978 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2979 			aq_ret = I40E_ERR_PARAM;
2980 			goto error_param;
2981 		}
2982 	}
2983 
2984 	vsi = pf->vsi[vf->lan_vsi_idx];
2985 	if (vsi->info.pvid) {
2986 		if (vfl->num_elements > 1 || vfl->vlan_id[0])
2987 			aq_ret = I40E_ERR_PARAM;
2988 		goto error_param;
2989 	}
2990 
2991 	for (i = 0; i < vfl->num_elements; i++) {
2992 		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2993 		vf->num_vlan--;
2994 
2995 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2996 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2997 							   false,
2998 							   vfl->vlan_id[i],
2999 							   NULL);
3000 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3001 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3002 							   false,
3003 							   vfl->vlan_id[i],
3004 							   NULL);
3005 	}
3006 
3007 error_param:
3008 	/* send the response to the VF */
3009 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3010 }
3011 
3012 /**
3013  * i40e_vc_iwarp_msg
3014  * @vf: pointer to the VF info
3015  * @msg: pointer to the msg buffer
3016  * @msglen: msg length
3017  *
3018  * called from the VF for the iwarp msgs
3019  **/
3020 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3021 {
3022 	struct i40e_pf *pf = vf->pf;
3023 	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3024 	i40e_status aq_ret = 0;
3025 
3026 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3027 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3028 		aq_ret = I40E_ERR_PARAM;
3029 		goto error_param;
3030 	}
3031 
3032 	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3033 				     msg, msglen);
3034 
3035 error_param:
3036 	/* send the response to the VF */
3037 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
3038 				       aq_ret);
3039 }
3040 
3041 /**
3042  * i40e_vc_iwarp_qvmap_msg
3043  * @vf: pointer to the VF info
3044  * @msg: pointer to the msg buffer
3045  * @config: config qvmap or release it
3046  *
3047  * called from the VF for the iwarp msgs
3048  **/
3049 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3050 {
3051 	struct virtchnl_iwarp_qvlist_info *qvlist_info =
3052 				(struct virtchnl_iwarp_qvlist_info *)msg;
3053 	i40e_status aq_ret = 0;
3054 
3055 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3056 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3057 		aq_ret = I40E_ERR_PARAM;
3058 		goto error_param;
3059 	}
3060 
3061 	if (config) {
3062 		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
3063 			aq_ret = I40E_ERR_PARAM;
3064 	} else {
3065 		i40e_release_iwarp_qvlist(vf);
3066 	}
3067 
3068 error_param:
3069 	/* send the response to the VF */
3070 	return i40e_vc_send_resp_to_vf(vf,
3071 			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3072 			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3073 			       aq_ret);
3074 }
3075 
3076 /**
3077  * i40e_vc_config_rss_key
3078  * @vf: pointer to the VF info
3079  * @msg: pointer to the msg buffer
3080  *
3081  * Configure the VF's RSS key
3082  **/
3083 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3084 {
3085 	struct virtchnl_rss_key *vrk =
3086 		(struct virtchnl_rss_key *)msg;
3087 	struct i40e_pf *pf = vf->pf;
3088 	struct i40e_vsi *vsi = NULL;
3089 	i40e_status aq_ret = 0;
3090 
3091 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3092 	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3093 	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
3094 		aq_ret = I40E_ERR_PARAM;
3095 		goto err;
3096 	}
3097 
3098 	vsi = pf->vsi[vf->lan_vsi_idx];
3099 	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3100 err:
3101 	/* send the response to the VF */
3102 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3103 				       aq_ret);
3104 }
3105 
3106 /**
3107  * i40e_vc_config_rss_lut
3108  * @vf: pointer to the VF info
3109  * @msg: pointer to the msg buffer
3110  *
3111  * Configure the VF's RSS LUT
3112  **/
3113 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3114 {
3115 	struct virtchnl_rss_lut *vrl =
3116 		(struct virtchnl_rss_lut *)msg;
3117 	struct i40e_pf *pf = vf->pf;
3118 	struct i40e_vsi *vsi = NULL;
3119 	i40e_status aq_ret = 0;
3120 	u16 i;
3121 
3122 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3123 	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3124 	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
3125 		aq_ret = I40E_ERR_PARAM;
3126 		goto err;
3127 	}
3128 
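	/* Every LUT entry must reference a queue that the VF actually owns */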
3129 	for (i = 0; i < vrl->lut_entries; i++)
3130 		if (vrl->lut[i] >= vf->num_queue_pairs) {
3131 			aq_ret = I40E_ERR_PARAM;
3132 			goto err;
3133 		}
3134 
3135 	vsi = pf->vsi[vf->lan_vsi_idx];
3136 	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3137 	/* send the response to the VF */
3138 err:
3139 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3140 				       aq_ret);
3141 }
3142 
3143 /**
3144  * i40e_vc_get_rss_hena
3145  * @vf: pointer to the VF info
3146  * @msg: pointer to the msg buffer
3147  *
3148  * Return the RSS HENA bits allowed by the hardware
3149  **/
3150 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3151 {
3152 	struct virtchnl_rss_hena *vrh = NULL;
3153 	struct i40e_pf *pf = vf->pf;
3154 	i40e_status aq_ret = 0;
3155 	int len = 0;
3156 
3157 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3158 		aq_ret = I40E_ERR_PARAM;
3159 		goto err;
3160 	}
3161 	len = sizeof(struct virtchnl_rss_hena);
3162 
3163 	vrh = kzalloc(len, GFP_KERNEL);
3164 	if (!vrh) {
3165 		aq_ret = I40E_ERR_NO_MEMORY;
3166 		len = 0;
3167 		goto err;
3168 	}
3169 	vrh->hena = i40e_pf_get_default_rss_hena(pf);
3170 err:
3171 	/* send the response back to the VF */
3172 	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3173 					aq_ret, (u8 *)vrh, len);
3174 	kfree(vrh);
3175 	return aq_ret;
3176 }
3177 
3178 /**
3179  * i40e_vc_set_rss_hena
3180  * @vf: pointer to the VF info
3181  * @msg: pointer to the msg buffer
3182  *
3183  * Set the RSS HENA bits for the VF
3184  **/
3185 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3186 {
3187 	struct virtchnl_rss_hena *vrh =
3188 		(struct virtchnl_rss_hena *)msg;
3189 	struct i40e_pf *pf = vf->pf;
3190 	struct i40e_hw *hw = &pf->hw;
3191 	i40e_status aq_ret = 0;
3192 
3193 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3194 		aq_ret = I40E_ERR_PARAM;
3195 		goto err;
3196 	}
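	/* The 64-bit HENA value is split across two 32-bit registers */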
3197 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3198 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3199 			  (u32)(vrh->hena >> 32));
3200 
3201 	/* send the response to the VF */
3202 err:
3203 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3204 }
3205 
3206 /**
3207  * i40e_vc_enable_vlan_stripping
3208  * @vf: pointer to the VF info
3209  * @msg: pointer to the msg buffer
3210  *
3211  * Enable vlan header stripping for the VF
3212  **/
3213 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3214 {
3215 	i40e_status aq_ret = 0;
3216 	struct i40e_vsi *vsi;
3217 
3218 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3219 		aq_ret = I40E_ERR_PARAM;
3220 		goto err;
3221 	}
3222 
3223 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3224 	i40e_vlan_stripping_enable(vsi);
3225 
3226 	/* send the response to the VF */
3227 err:
3228 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3229 				       aq_ret);
3230 }
3231 
3232 /**
3233  * i40e_vc_disable_vlan_stripping
3234  * @vf: pointer to the VF info
3235  * @msg: pointer to the msg buffer
3236  *
3237  * Disable vlan header stripping for the VF
3238  **/
3239 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3240 {
3241 	i40e_status aq_ret = 0;
3242 	struct i40e_vsi *vsi;
3243 
3244 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3245 		aq_ret = I40E_ERR_PARAM;
3246 		goto err;
3247 	}
3248 
3249 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3250 	i40e_vlan_stripping_disable(vsi);
3251 
3252 	/* send the response to the VF */
3253 err:
3254 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3255 				       aq_ret);
3256 }
3257 
3258 /**
3259  * i40e_validate_cloud_filter
3260  * @vf: pointer to VF structure
3261  * @tc_filter: pointer to filter requested
3262  *
3263  * This function validates cloud filter programmed as TC filter for ADq
3264  **/
3265 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3266 				      struct virtchnl_filter *tc_filter)
3267 {
3268 	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3269 	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3270 	struct i40e_pf *pf = vf->pf;
3271 	struct i40e_vsi *vsi = NULL;
3272 	struct i40e_mac_filter *f;
3273 	struct hlist_node *h;
3274 	bool found = false;
3275 	int bkt;
3276 
3277 	if (!tc_filter->action) {
3278 		dev_info(&pf->pdev->dev,
3279 			 "VF %d: Currently ADq doesn't support Drop Action\n",
3280 			 vf->vf_id);
3281 		goto err;
3282 	}
3283 
3284 	/* action_meta is TC number here to which the filter is applied */
3285 	if (!tc_filter->action_meta ||
3286 	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
3287 		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3288 			 vf->vf_id, tc_filter->action_meta);
3289 		goto err;
3290 	}
3291 
3292 	/* Check whether the filter is programmed for advanced or basic mode.
3293 	 * There are two ADq modes (for VF only):
3294 	 * 1. Basic mode: intended to allow as many filter options as possible
3295 	 *		  to be added to a VF in Non-trusted mode. Main goal is
3296 	 *		  to add filters to its own MAC and VLAN id.
3297 	 * 2. Advanced mode: is for allowing filters to be applied other than
3298 	 *		  its own MAC or VLAN. This mode requires the VF to be
3299 	 *		  Trusted.
3300 	 */
3301 	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3302 		vsi = pf->vsi[vf->lan_vsi_idx];
3303 		f = i40e_find_mac(vsi, data.dst_mac);
3304 
3305 		if (!f) {
3306 			dev_info(&pf->pdev->dev,
3307 				 "Destination MAC %pM doesn't belong to VF %d\n",
3308 				 data.dst_mac, vf->vf_id);
3309 			goto err;
3310 		}
3311 
3312 		if (mask.vlan_id) {
3313 			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3314 					   hlist) {
3315 				if (f->vlan == ntohs(data.vlan_id)) {
3316 					found = true;
3317 					break;
3318 				}
3319 			}
3320 			if (!found) {
3321 				dev_info(&pf->pdev->dev,
3322 					 "VF %d doesn't have any VLAN id %u\n",
3323 					 vf->vf_id, ntohs(data.vlan_id));
3324 				goto err;
3325 			}
3326 		}
3327 	} else {
3328 		/* Check if VF is trusted */
3329 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3330 			dev_err(&pf->pdev->dev,
3331 				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3332 				vf->vf_id);
3333 			return I40E_ERR_CONFIG;
3334 		}
3335 	}
3336 
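	/* A field is validated only when both its mask and its data are set */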
3337 	if (mask.dst_mac[0] & data.dst_mac[0]) {
3338 		if (is_broadcast_ether_addr(data.dst_mac) ||
3339 		    is_zero_ether_addr(data.dst_mac)) {
3340 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3341 				 vf->vf_id, data.dst_mac);
3342 			goto err;
3343 		}
3344 	}
3345 
3346 	if (mask.src_mac[0] & data.src_mac[0]) {
3347 		if (is_broadcast_ether_addr(data.src_mac) ||
3348 		    is_zero_ether_addr(data.src_mac)) {
3349 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3350 				 vf->vf_id, data.src_mac);
3351 			goto err;
3352 		}
3353 	}
3354 
3355 	if (mask.dst_port & data.dst_port) {
3356 		if (!data.dst_port) {
3357 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3358 				 vf->vf_id);
3359 			goto err;
3360 		}
3361 	}
3362 
3363 	if (mask.src_port & data.src_port) {
3364 		if (!data.src_port) {
3365 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3366 				 vf->vf_id);
3367 			goto err;
3368 		}
3369 	}
3370 
3371 	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3372 	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3373 		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3374 			 vf->vf_id);
3375 		goto err;
3376 	}
3377 
3378 	if (mask.vlan_id & data.vlan_id) {
3379 		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3380 			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3381 				 vf->vf_id);
3382 			goto err;
3383 		}
3384 	}
3385 
3386 	return I40E_SUCCESS;
3387 err:
3388 	return I40E_ERR_CONFIG;
3389 }
3390 
3391 /**
3392  * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3393  * @vf: pointer to the VF info
3394  * @seid: seid of the vsi it is searching for
3395  **/
3396 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3397 {
3398 	struct i40e_pf *pf = vf->pf;
3399 	struct i40e_vsi *vsi = NULL;
3400 	int i;
3401 
3402 	for (i = 0; i < vf->num_tc; i++) {
3403 		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3404 		if (vsi && vsi->seid == seid)
3405 			return vsi;
3406 	}
3407 	return NULL;
3408 }
3409 
3410 /**
3411  * i40e_del_all_cloud_filters
3412  * @vf: pointer to the VF info
3413  *
3414  * This function deletes all cloud filters
3415  **/
3416 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3417 {
3418 	struct i40e_cloud_filter *cfilter = NULL;
3419 	struct i40e_pf *pf = vf->pf;
3420 	struct i40e_vsi *vsi = NULL;
3421 	struct hlist_node *node;
3422 	int ret;
3423 
3424 	hlist_for_each_entry_safe(cfilter, node,
3425 				  &vf->cloud_filter_list, cloud_node) {
3426 		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3427 
3428 		if (!vsi) {
3429 			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3430 				vf->vf_id, cfilter->seid);
3431 			continue;
3432 		}
3433 
3434 		if (cfilter->dst_port)
3435 			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3436 								false);
3437 		else
3438 			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3439 		if (ret)
3440 			dev_err(&pf->pdev->dev,
3441 				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3442 				vf->vf_id, i40e_stat_str(&pf->hw, ret),
3443 				i40e_aq_str(&pf->hw,
3444 					    pf->hw.aq.asq_last_status));
3445 
3446 		hlist_del(&cfilter->cloud_node);
3447 		kfree(cfilter);
3448 		vf->num_cloud_filters--;
3449 	}
3450 }
3451 
3452 /**
3453  * i40e_vc_del_cloud_filter
3454  * @vf: pointer to the VF info
3455  * @msg: pointer to the msg buffer
3456  *
3457  * This function deletes a cloud filter programmed as TC filter for ADq
3458  **/
3459 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3460 {
3461 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3462 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3463 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3464 	struct i40e_cloud_filter cfilter, *cf = NULL;
3465 	struct i40e_pf *pf = vf->pf;
3466 	struct i40e_vsi *vsi = NULL;
3467 	struct hlist_node *node;
3468 	i40e_status aq_ret = 0;
3469 	int i, ret;
3470 
3471 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3472 		aq_ret = I40E_ERR_PARAM;
3473 		goto err;
3474 	}
3475 
3476 	if (!vf->adq_enabled) {
3477 		dev_info(&pf->pdev->dev,
3478 			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3479 			 vf->vf_id);
3480 		aq_ret = I40E_ERR_PARAM;
3481 		goto err;
3482 	}
3483 
3484 	if (i40e_validate_cloud_filter(vf, vcf)) {
3485 		dev_info(&pf->pdev->dev,
3486 			 "VF %d: Invalid input, can't apply cloud filter\n",
3487 			 vf->vf_id);
3488 		aq_ret = I40E_ERR_PARAM;
3489 		goto err;
3490 	}
3491 
3492 	memset(&cfilter, 0, sizeof(cfilter));
3493 	/* parse destination mac address */
3494 	for (i = 0; i < ETH_ALEN; i++)
3495 		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3496 
3497 	/* parse source mac address */
3498 	for (i = 0; i < ETH_ALEN; i++)
3499 		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3500 
3501 	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3502 	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3503 	cfilter.src_port = mask.src_port & tcf.src_port;
3504 
3505 	switch (vcf->flow_type) {
3506 	case VIRTCHNL_TCP_V4_FLOW:
3507 		cfilter.n_proto = ETH_P_IP;
3508 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3509 			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3510 			       ARRAY_SIZE(tcf.dst_ip));
3511 		else if (mask.src_ip[0] & tcf.dst_ip[0])
3512 			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3513 			       ARRAY_SIZE(tcf.dst_ip));
3514 		break;
3515 	case VIRTCHNL_TCP_V6_FLOW:
3516 		cfilter.n_proto = ETH_P_IPV6;
3517 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3518 			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3519 			       sizeof(cfilter.ip.v6.dst_ip6));
3520 		if (mask.src_ip[3] & tcf.src_ip[3])
3521 			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3522 			       sizeof(cfilter.ip.v6.src_ip6));
3523 		break;
3524 	default:
3525 		/* TC filter can be configured based on different combinations
3526 		 * and in this case IP is not a part of filter config
3527 		 */
3528 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3529 			 vf->vf_id);
3530 	}
3531 
3532 	/* get the VSI to which the TC belongs */
3533 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3534 	cfilter.seid = vsi->seid;
3535 	cfilter.flags = vcf->field_flags;
3536 
3537 	/* Deleting TC filter */
3538 	if (tcf.dst_port)
3539 		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3540 	else
3541 		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3542 	if (ret) {
3543 		dev_err(&pf->pdev->dev,
3544 			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3545 			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3546 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3547 		goto err;
3548 	}
3549 
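	/* The filter was removed from hardware; now drop the matching entry
	 * from the VF's cloud filter tracking list.
	 */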
3550 	hlist_for_each_entry_safe(cf, node,
3551 				  &vf->cloud_filter_list, cloud_node) {
3552 		if (cf->seid != cfilter.seid)
3553 			continue;
3554 		if (mask.dst_port)
3555 			if (cfilter.dst_port != cf->dst_port)
3556 				continue;
3557 		if (mask.dst_mac[0])
3558 			if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3559 				continue;
3560 		/* for ipv4 data to be valid, only first byte of mask is set */
3561 		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3562 			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3563 				   ARRAY_SIZE(tcf.dst_ip)))
3564 				continue;
3565 		/* for ipv6, mask is set for all sixteen bytes (4 words) */
3566 		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3567 			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3568 				   sizeof(cfilter.ip.v6.src_ip6)))
3569 				continue;
3570 		if (mask.vlan_id)
3571 			if (cfilter.vlan_id != cf->vlan_id)
3572 				continue;
3573 
3574 		hlist_del(&cf->cloud_node);
3575 		kfree(cf);
3576 		vf->num_cloud_filters--;
3577 	}
3578 
3579 err:
3580 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3581 				       aq_ret);
3582 }
3583 
3584 /**
3585  * i40e_vc_add_cloud_filter
3586  * @vf: pointer to the VF info
3587  * @msg: pointer to the msg buffer
3588  *
3589  * This function adds a cloud filter programmed as TC filter for ADq
3590  **/
3591 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3592 {
3593 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3594 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3595 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3596 	struct i40e_cloud_filter *cfilter = NULL;
3597 	struct i40e_pf *pf = vf->pf;
3598 	struct i40e_vsi *vsi = NULL;
3599 	i40e_status aq_ret = 0;
3600 	int i, ret;
3601 
3602 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3603 		aq_ret = I40E_ERR_PARAM;
3604 		goto err_out;
3605 	}
3606 
3607 	if (!vf->adq_enabled) {
3608 		dev_info(&pf->pdev->dev,
3609 			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3610 			 vf->vf_id);
3611 		aq_ret = I40E_ERR_PARAM;
3612 		goto err_out;
3613 	}
3614 
3615 	if (i40e_validate_cloud_filter(vf, vcf)) {
3616 		dev_info(&pf->pdev->dev,
3617 			 "VF %d: Invalid input/s, can't apply cloud filter\n",
3618 			 vf->vf_id);
3619 		aq_ret = I40E_ERR_PARAM;
3620 		goto err_out;
3621 	}
3622 
3623 	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3624 	if (!cfilter)
3625 		return -ENOMEM;
3626 
3627 	/* parse destination mac address */
3628 	for (i = 0; i < ETH_ALEN; i++)
3629 		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3630 
3631 	/* parse source mac address */
3632 	for (i = 0; i < ETH_ALEN; i++)
3633 		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3634 
3635 	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3636 	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3637 	cfilter->src_port = mask.src_port & tcf.src_port;
3638 
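	/* Copy L3 addresses only for flow types that carry them; a field is
	 * taken from the request only when both its mask and value are set.
	 */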
3639 	switch (vcf->flow_type) {
3640 	case VIRTCHNL_TCP_V4_FLOW:
3641 		cfilter->n_proto = ETH_P_IP;
3642 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3643 			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3644 			       ARRAY_SIZE(tcf.dst_ip));
3645 		else if (mask.src_ip[0] & tcf.dst_ip[0])
3646 			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3647 			       ARRAY_SIZE(tcf.dst_ip));
3648 		break;
3649 	case VIRTCHNL_TCP_V6_FLOW:
3650 		cfilter->n_proto = ETH_P_IPV6;
3651 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3652 			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3653 			       sizeof(cfilter->ip.v6.dst_ip6));
3654 		if (mask.src_ip[3] & tcf.src_ip[3])
3655 			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3656 			       sizeof(cfilter->ip.v6.src_ip6));
3657 		break;
3658 	default:
		/* A TC filter can be built from several field combinations;
		 * in this case the IP addresses are not part of the filter
		 * configuration.
		 */
3662 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3663 			 vf->vf_id);
3664 	}
3665 
	/* get the VSI to which the TC belongs */
3667 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3668 	cfilter->seid = vsi->seid;
3669 	cfilter->flags = vcf->field_flags;
3670 
3671 	/* Adding cloud filter programmed as TC filter */
3672 	if (tcf.dst_port)
3673 		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3674 	else
3675 		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3676 	if (ret) {
3677 		dev_err(&pf->pdev->dev,
3678 			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3679 			vf->vf_id, i40e_stat_str(&pf->hw, ret),
3680 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3681 		goto err_free;
3682 	}
3683 
3684 	INIT_HLIST_NODE(&cfilter->cloud_node);
3685 	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
	/* ownership has passed to the list; clear the local pointer so the
	 * error path below does not free it
	 */
3687 	cfilter = NULL;
3688 	vf->num_cloud_filters++;
3689 err_free:
3690 	kfree(cfilter);
3691 err_out:
3692 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3693 				       aq_ret);
3694 }
3695 
3696 /**
3697  * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3698  * @vf: pointer to the VF info
3699  * @msg: pointer to the msg buffer
3700  **/
3701 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3702 {
3703 	struct virtchnl_tc_info *tci =
3704 		(struct virtchnl_tc_info *)msg;
3705 	struct i40e_pf *pf = vf->pf;
3706 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
3707 	int i, adq_request_qps = 0;
3708 	i40e_status aq_ret = 0;
3709 	u64 speed = 0;
3710 
3711 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3712 		aq_ret = I40E_ERR_PARAM;
3713 		goto err;
3714 	}
3715 
3716 	/* ADq cannot be applied if spoof check is ON */
3717 	if (vf->spoofchk) {
3718 		dev_err(&pf->pdev->dev,
3719 			"Spoof check is ON, turn it OFF to enable ADq\n");
3720 		aq_ret = I40E_ERR_PARAM;
3721 		goto err;
3722 	}
3723 
3724 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3725 		dev_err(&pf->pdev->dev,
3726 			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3727 			vf->vf_id);
3728 		aq_ret = I40E_ERR_PARAM;
3729 		goto err;
3730 	}
3731 
3732 	/* max number of traffic classes for VF currently capped at 4 */
3733 	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3734 		dev_err(&pf->pdev->dev,
3735 			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3736 			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3737 		aq_ret = I40E_ERR_PARAM;
3738 		goto err;
3739 	}
3740 
3741 	/* validate queues for each TC */
3742 	for (i = 0; i < tci->num_tc; i++)
3743 		if (!tci->list[i].count ||
3744 		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3745 			dev_err(&pf->pdev->dev,
3746 				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3747 				vf->vf_id, i, tci->list[i].count,
3748 				I40E_DEFAULT_QUEUES_PER_VF);
3749 			aq_ret = I40E_ERR_PARAM;
3750 			goto err;
3751 		}
3752 
	/* ADq needs the maximum VF queue count; the VF already holds the
	 * default allocation, so only the difference must still be free.
	 */
3754 	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3755 
3756 	if (pf->queues_left < adq_request_qps) {
3757 		dev_err(&pf->pdev->dev,
3758 			"No queues left to allocate to VF %d\n",
3759 			vf->vf_id);
3760 		aq_ret = I40E_ERR_PARAM;
3761 		goto err;
3762 	} else {
3763 		/* we need to allocate max VF queues to enable ADq so as to
3764 		 * make sure ADq enabled VF always gets back queues when it
3765 		 * goes through a reset.
3766 		 */
3767 		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3768 	}
3769 
	/* get link speed in Mbps to validate the rate limit */
3771 	speed = i40e_vc_link_speed2mbps(ls->link_speed);
3772 	if (speed == SPEED_UNKNOWN) {
3773 		dev_err(&pf->pdev->dev,
3774 			"Cannot detect link speed\n");
3775 		aq_ret = I40E_ERR_PARAM;
3776 		goto err;
3777 	}
3778 
3779 	/* parse data from the queue channel info */
3780 	vf->num_tc = tci->num_tc;
3781 	for (i = 0; i < vf->num_tc; i++) {
3782 		if (tci->list[i].max_tx_rate) {
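			/* max_tx_rate is expressed in Mbps and must not
			 * exceed the current link speed obtained above
			 */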
3783 			if (tci->list[i].max_tx_rate > speed) {
3784 				dev_err(&pf->pdev->dev,
3785 					"Invalid max tx rate %llu specified for VF %d.",
3786 					tci->list[i].max_tx_rate,
3787 					vf->vf_id);
3788 				aq_ret = I40E_ERR_PARAM;
3789 				goto err;
3790 			} else {
3791 				vf->ch[i].max_tx_rate =
3792 					tci->list[i].max_tx_rate;
3793 			}
3794 		}
3795 		vf->ch[i].num_qps = tci->list[i].count;
3796 	}
3797 
3798 	/* set this flag only after making sure all inputs are sane */
3799 	vf->adq_enabled = true;
	/* num_req_queues is set when the user changes the number of queues
	 * via ethtool, and it conflicts with the default VSI (which depends
	 * on this variable) once ADq is enabled, so reset it here.
	 */
3804 	vf->num_req_queues = 0;
3805 
3806 	/* reset the VF in order to allocate resources */
3807 	i40e_vc_reset_vf(vf, true);
3808 
3809 	return I40E_SUCCESS;
3810 
3811 	/* send the response to the VF */
3812 err:
3813 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3814 				       aq_ret);
3815 }
3816 
3817 /**
3818  * i40e_vc_del_qch_msg
3819  * @vf: pointer to the VF info
3820  * @msg: pointer to the msg buffer
3821  **/
3822 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3823 {
3824 	struct i40e_pf *pf = vf->pf;
3825 	i40e_status aq_ret = 0;
3826 
3827 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3828 		aq_ret = I40E_ERR_PARAM;
3829 		goto err;
3830 	}
3831 
3832 	if (vf->adq_enabled) {
3833 		i40e_del_all_cloud_filters(vf);
3834 		i40e_del_qch(vf);
3835 		vf->adq_enabled = false;
3836 		vf->num_tc = 0;
3837 		dev_info(&pf->pdev->dev,
3838 			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3839 			 vf->vf_id);
3840 	} else {
3841 		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3842 			 vf->vf_id);
3843 		aq_ret = I40E_ERR_PARAM;
3844 	}
3845 
3846 	/* reset the VF in order to allocate resources */
3847 	i40e_vc_reset_vf(vf, true);
3848 
3849 	return I40E_SUCCESS;
3850 
3851 err:
3852 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3853 				       aq_ret);
3854 }
3855 
3856 /**
3857  * i40e_vc_process_vf_msg
3858  * @pf: pointer to the PF structure
3859  * @vf_id: source VF id
3860  * @v_opcode: operation code
3861  * @v_retval: unused return value code
3862  * @msg: pointer to the msg buffer
3863  * @msglen: msg length
3864  *
 * called from the common AEQ/ARQ handler to
 * process a request from a VF
3867  **/
3868 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3869 			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
3870 {
3871 	struct i40e_hw *hw = &pf->hw;
3872 	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3873 	struct i40e_vf *vf;
3874 	int ret;
3875 
3876 	pf->vf_aq_requests++;
3877 	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3878 		return -EINVAL;
3879 	vf = &(pf->vf[local_vf_id]);
3880 
3881 	/* Check if VF is disabled. */
3882 	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3883 		return I40E_ERR_PARAM;
3884 
3885 	/* perform basic checks on the msg */
3886 	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3887 
3888 	if (ret) {
3889 		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3890 		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3891 			local_vf_id, v_opcode, msglen);
3892 		switch (ret) {
3893 		case VIRTCHNL_STATUS_ERR_PARAM:
3894 			return -EPERM;
3895 		default:
3896 			return -EINVAL;
3897 		}
3898 	}
3899 
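	/* Message validated; dispatch to the per-opcode handler. Handlers
	 * send their own completion back to the VF where one is expected.
	 */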
3900 	switch (v_opcode) {
3901 	case VIRTCHNL_OP_VERSION:
3902 		ret = i40e_vc_get_version_msg(vf, msg);
3903 		break;
3904 	case VIRTCHNL_OP_GET_VF_RESOURCES:
3905 		ret = i40e_vc_get_vf_resources_msg(vf, msg);
3906 		i40e_vc_notify_vf_link_state(vf);
3907 		break;
3908 	case VIRTCHNL_OP_RESET_VF:
3909 		i40e_vc_reset_vf(vf, false);
3910 		ret = 0;
3911 		break;
3912 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3913 		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3914 		break;
3915 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3916 		ret = i40e_vc_config_queues_msg(vf, msg);
3917 		break;
3918 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3919 		ret = i40e_vc_config_irq_map_msg(vf, msg);
3920 		break;
3921 	case VIRTCHNL_OP_ENABLE_QUEUES:
3922 		ret = i40e_vc_enable_queues_msg(vf, msg);
3923 		i40e_vc_notify_vf_link_state(vf);
3924 		break;
3925 	case VIRTCHNL_OP_DISABLE_QUEUES:
3926 		ret = i40e_vc_disable_queues_msg(vf, msg);
3927 		break;
3928 	case VIRTCHNL_OP_ADD_ETH_ADDR:
3929 		ret = i40e_vc_add_mac_addr_msg(vf, msg);
3930 		break;
3931 	case VIRTCHNL_OP_DEL_ETH_ADDR:
3932 		ret = i40e_vc_del_mac_addr_msg(vf, msg);
3933 		break;
3934 	case VIRTCHNL_OP_ADD_VLAN:
3935 		ret = i40e_vc_add_vlan_msg(vf, msg);
3936 		break;
3937 	case VIRTCHNL_OP_DEL_VLAN:
3938 		ret = i40e_vc_remove_vlan_msg(vf, msg);
3939 		break;
3940 	case VIRTCHNL_OP_GET_STATS:
3941 		ret = i40e_vc_get_stats_msg(vf, msg);
3942 		break;
3943 	case VIRTCHNL_OP_IWARP:
3944 		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3945 		break;
3946 	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3947 		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3948 		break;
3949 	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3950 		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3951 		break;
3952 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3953 		ret = i40e_vc_config_rss_key(vf, msg);
3954 		break;
3955 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3956 		ret = i40e_vc_config_rss_lut(vf, msg);
3957 		break;
3958 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3959 		ret = i40e_vc_get_rss_hena(vf, msg);
3960 		break;
3961 	case VIRTCHNL_OP_SET_RSS_HENA:
3962 		ret = i40e_vc_set_rss_hena(vf, msg);
3963 		break;
3964 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3965 		ret = i40e_vc_enable_vlan_stripping(vf, msg);
3966 		break;
3967 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3968 		ret = i40e_vc_disable_vlan_stripping(vf, msg);
3969 		break;
3970 	case VIRTCHNL_OP_REQUEST_QUEUES:
3971 		ret = i40e_vc_request_queues_msg(vf, msg);
3972 		break;
3973 	case VIRTCHNL_OP_ENABLE_CHANNELS:
3974 		ret = i40e_vc_add_qch_msg(vf, msg);
3975 		break;
3976 	case VIRTCHNL_OP_DISABLE_CHANNELS:
3977 		ret = i40e_vc_del_qch_msg(vf, msg);
3978 		break;
3979 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3980 		ret = i40e_vc_add_cloud_filter(vf, msg);
3981 		break;
3982 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3983 		ret = i40e_vc_del_cloud_filter(vf, msg);
3984 		break;
3985 	case VIRTCHNL_OP_UNKNOWN:
3986 	default:
3987 		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3988 			v_opcode, local_vf_id);
3989 		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3990 					      I40E_ERR_NOT_IMPLEMENTED);
3991 		break;
3992 	}
3993 
3994 	return ret;
3995 }
3996 
3997 /**
3998  * i40e_vc_process_vflr_event
3999  * @pf: pointer to the PF structure
4000  *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
4003  **/
4004 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4005 {
4006 	struct i40e_hw *hw = &pf->hw;
4007 	u32 reg, reg_idx, bit_idx;
4008 	struct i40e_vf *vf;
4009 	int vf_id;
4010 
4011 	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4012 		return 0;
4013 
4014 	/* Re-enable the VFLR interrupt cause here, before looking for which
4015 	 * VF got reset. Otherwise, if another VF gets a reset while the
4016 	 * first one is being processed, that interrupt will be lost, and
4017 	 * that VF will be stuck in reset forever.
4018 	 */
4019 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4020 	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4021 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4022 	i40e_flush(hw);
4023 
4024 	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
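	/* Each GLGEN_VFLRSTAT register covers 32 VFs and is indexed by the
	 * absolute VF id, so split the id into a register index and a bit.
	 */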
4025 	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4026 		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4027 		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT to see whether this VF triggered an FLR */
4029 		vf = &pf->vf[vf_id];
4030 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4031 		if (reg & BIT(bit_idx))
4032 			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4033 			i40e_reset_vf(vf, true);
4034 	}
4035 
4036 	return 0;
4037 }
4038 
4039 /**
4040  * i40e_validate_vf
4041  * @pf: the physical function
4042  * @vf_id: VF identifier
4043  *
4044  * Check that the VF is enabled and the VSI exists.
4045  *
4046  * Returns 0 on success, negative on failure
4047  **/
4048 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4049 {
4050 	struct i40e_vsi *vsi;
4051 	struct i40e_vf *vf;
4052 	int ret = 0;
4053 
4054 	if (vf_id >= pf->num_alloc_vfs) {
4055 		dev_err(&pf->pdev->dev,
4056 			"Invalid VF Identifier %d\n", vf_id);
4057 		ret = -EINVAL;
4058 		goto err_out;
4059 	}
4060 	vf = &pf->vf[vf_id];
4061 	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4062 	if (!vsi)
4063 		ret = -EINVAL;
4064 err_out:
4065 	return ret;
4066 }
4067 
4068 /**
4069  * i40e_ndo_set_vf_mac
4070  * @netdev: network interface device structure
4071  * @vf_id: VF identifier
4072  * @mac: mac address
4073  *
4074  * program VF mac address
4075  **/
4076 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4077 {
4078 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4079 	struct i40e_vsi *vsi = np->vsi;
4080 	struct i40e_pf *pf = vsi->back;
4081 	struct i40e_mac_filter *f;
4082 	struct i40e_vf *vf;
4083 	int ret = 0;
4084 	struct hlist_node *h;
4085 	int bkt;
4086 	u8 i;
4087 
4088 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4089 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4090 		return -EAGAIN;
4091 	}
4092 
4093 	/* validate the request */
4094 	ret = i40e_validate_vf(pf, vf_id);
4095 	if (ret)
4096 		goto error_param;
4097 
4098 	vf = &pf->vf[vf_id];
4099 
4100 	/* When the VF is resetting wait until it is done.
4101 	 * It can take up to 200 milliseconds,
4102 	 * but wait for up to 300 milliseconds to be safe.
4103 	 * Acquire the VSI pointer only after the VF has been
4104 	 * properly initialized.
4105 	 */
4106 	for (i = 0; i < 15; i++) {
4107 		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4108 			break;
4109 		msleep(20);
4110 	}
4111 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4112 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4113 			vf_id);
4114 		ret = -EAGAIN;
4115 		goto error_param;
4116 	}
4117 	vsi = pf->vsi[vf->lan_vsi_idx];
4118 
4119 	if (is_multicast_ether_addr(mac)) {
4120 		dev_err(&pf->pdev->dev,
4121 			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4122 		ret = -EINVAL;
4123 		goto error_param;
4124 	}
4125 
4126 	/* Lock once because below invoked function add/del_filter requires
4127 	 * mac_filter_hash_lock to be held
4128 	 */
4129 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4130 
4131 	/* delete the temporary mac address */
4132 	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4133 		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4134 
4135 	/* Delete all the filters for this VSI - we're going to kill it
4136 	 * anyway.
4137 	 */
4138 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4139 		__i40e_del_filter(vsi, f);
4140 
4141 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4142 
4143 	/* program mac filter */
4144 	if (i40e_sync_vsi_filters(vsi)) {
4145 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4146 		ret = -EIO;
4147 		goto error_param;
4148 	}
4149 	ether_addr_copy(vf->default_lan_addr.addr, mac);
4150 
4151 	if (is_zero_ether_addr(mac)) {
4152 		vf->pf_set_mac = false;
4153 		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4154 	} else {
4155 		vf->pf_set_mac = true;
4156 		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4157 			 mac, vf_id);
4158 	}
4159 
4160 	/* Force the VF interface down so it has to bring up with new MAC
4161 	 * address
4162 	 */
4163 	i40e_vc_reset_vf(vf, true);
4164 	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4165 
4166 error_param:
4167 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4168 	return ret;
4169 }
4170 
4171 /**
4172  * i40e_ndo_set_vf_port_vlan
4173  * @netdev: network interface device structure
4174  * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
4176  * @qos: priority setting
4177  * @vlan_proto: vlan protocol
4178  *
4179  * program VF vlan id and/or qos
4180  **/
4181 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4182 			      u16 vlan_id, u8 qos, __be16 vlan_proto)
4183 {
4184 	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4185 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4186 	bool allmulti = false, alluni = false;
4187 	struct i40e_pf *pf = np->vsi->back;
4188 	struct i40e_vsi *vsi;
4189 	struct i40e_vf *vf;
4190 	int ret = 0;
4191 
4192 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4193 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4194 		return -EAGAIN;
4195 	}
4196 
4197 	/* validate the request */
4198 	ret = i40e_validate_vf(pf, vf_id);
4199 	if (ret)
4200 		goto error_pvid;
4201 
4202 	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4203 		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4204 		ret = -EINVAL;
4205 		goto error_pvid;
4206 	}
4207 
4208 	if (vlan_proto != htons(ETH_P_8021Q)) {
4209 		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4210 		ret = -EPROTONOSUPPORT;
4211 		goto error_pvid;
4212 	}
4213 
4214 	vf = &pf->vf[vf_id];
4215 	vsi = pf->vsi[vf->lan_vsi_idx];
4216 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4217 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4218 			vf_id);
4219 		ret = -EAGAIN;
4220 		goto error_pvid;
4221 	}
4222 
4223 	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4224 		/* duplicate request, so just return success */
4225 		goto error_pvid;
4226 
4227 	i40e_vc_reset_vf(vf, true);
4228 	/* During reset the VF got a new VSI, so refresh a pointer. */
4229 	vsi = pf->vsi[vf->lan_vsi_idx];
4230 	/* Locked once because multiple functions below iterate list */
4231 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4232 
4233 	/* Check for condition where there was already a port VLAN ID
4234 	 * filter set and now it is being deleted by setting it to zero.
4235 	 * Additionally check for the condition where there was a port
4236 	 * VLAN but now there is a new and different port VLAN being set.
4237 	 * Before deleting all the old VLAN filters we must add new ones
4238 	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4239 	 * MAC addresses deleted.
4240 	 */
4241 	if ((!(vlan_id || qos) ||
4242 	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4243 	    vsi->info.pvid) {
4244 		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4245 		if (ret) {
4246 			dev_info(&vsi->back->pdev->dev,
4247 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4248 				 vsi->back->hw.aq.asq_last_status);
4249 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4250 			goto error_pvid;
4251 		}
4252 	}
4253 
4254 	if (vsi->info.pvid) {
4255 		/* remove all filters on the old VLAN */
4256 		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4257 					   VLAN_VID_MASK));
4258 	}
4259 
4260 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4261 
4262 	/* disable promisc modes in case they were enabled */
4263 	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4264 					      allmulti, alluni);
4265 	if (ret) {
4266 		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4267 		goto error_pvid;
4268 	}
4269 
4270 	if (vlan_id || qos)
4271 		ret = i40e_vsi_add_pvid(vsi, vlanprio);
4272 	else
4273 		i40e_vsi_remove_pvid(vsi);
4274 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4275 
4276 	if (vlan_id) {
4277 		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4278 			 vlan_id, qos, vf_id);
4279 
4280 		/* add new VLAN filter for each MAC */
4281 		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4282 		if (ret) {
4283 			dev_info(&vsi->back->pdev->dev,
4284 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4285 				 vsi->back->hw.aq.asq_last_status);
4286 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4287 			goto error_pvid;
4288 		}
4289 
4290 		/* remove the previously added non-VLAN MAC filters */
4291 		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4292 	}
4293 
4294 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4295 
4296 	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4297 		alluni = true;
4298 
4299 	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4300 		allmulti = true;
4301 
4302 	/* Schedule the worker thread to take care of applying changes */
4303 	i40e_service_event_schedule(vsi->back);
4304 
4305 	if (ret) {
4306 		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4307 		goto error_pvid;
4308 	}
4309 
4310 	/* The Port VLAN needs to be saved across resets the same as the
4311 	 * default LAN MAC address.
4312 	 */
4313 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4314 
4315 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4316 	if (ret) {
4317 		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4318 		goto error_pvid;
4319 	}
4320 
4321 	ret = 0;
4322 
4323 error_pvid:
4324 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4325 	return ret;
4326 }
4327 
4328 /**
4329  * i40e_ndo_set_vf_bw
4330  * @netdev: network interface device structure
4331  * @vf_id: VF identifier
4332  * @min_tx_rate: Minimum Tx rate
4333  * @max_tx_rate: Maximum Tx rate
4334  *
4335  * configure VF Tx rate
4336  **/
4337 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4338 		       int max_tx_rate)
4339 {
4340 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4341 	struct i40e_pf *pf = np->vsi->back;
4342 	struct i40e_vsi *vsi;
4343 	struct i40e_vf *vf;
4344 	int ret = 0;
4345 
4346 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4347 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4348 		return -EAGAIN;
4349 	}
4350 
4351 	/* validate the request */
4352 	ret = i40e_validate_vf(pf, vf_id);
4353 	if (ret)
4354 		goto error;
4355 
4356 	if (min_tx_rate) {
4357 		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4358 			min_tx_rate, vf_id);
4359 		ret = -EINVAL;
4360 		goto error;
4361 	}
4362 
4363 	vf = &pf->vf[vf_id];
4364 	vsi = pf->vsi[vf->lan_vsi_idx];
4365 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4366 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4367 			vf_id);
4368 		ret = -EAGAIN;
4369 		goto error;
4370 	}
4371 
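	/* max_tx_rate from the ndo_set_vf_rate interface is in Mbps */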
4372 	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4373 	if (ret)
4374 		goto error;
4375 
4376 	vf->tx_rate = max_tx_rate;
4377 error:
4378 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4379 	return ret;
4380 }
4381 
4382 /**
4383  * i40e_ndo_get_vf_config
4384  * @netdev: network interface device structure
4385  * @vf_id: VF identifier
4386  * @ivi: VF configuration structure
4387  *
4388  * return VF configuration
4389  **/
4390 int i40e_ndo_get_vf_config(struct net_device *netdev,
4391 			   int vf_id, struct ifla_vf_info *ivi)
4392 {
4393 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4394 	struct i40e_vsi *vsi = np->vsi;
4395 	struct i40e_pf *pf = vsi->back;
4396 	struct i40e_vf *vf;
4397 	int ret = 0;
4398 
4399 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4400 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4401 		return -EAGAIN;
4402 	}
4403 
4404 	/* validate the request */
4405 	ret = i40e_validate_vf(pf, vf_id);
4406 	if (ret)
4407 		goto error_param;
4408 
4409 	vf = &pf->vf[vf_id];
4410 	/* first vsi is always the LAN vsi */
4411 	vsi = pf->vsi[vf->lan_vsi_idx];
4412 	if (!vsi) {
4413 		ret = -ENOENT;
4414 		goto error_param;
4415 	}
4416 
4417 	ivi->vf = vf_id;
4418 
4419 	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4420 
4421 	ivi->max_tx_rate = vf->tx_rate;
4422 	ivi->min_tx_rate = 0;
4423 	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4424 	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4425 		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4432 	ivi->spoofchk = vf->spoofchk;
4433 	ivi->trusted = vf->trusted;
4434 	ret = 0;
4435 
4436 error_param:
4437 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4438 	return ret;
4439 }
4440 
4441 /**
4442  * i40e_ndo_set_vf_link_state
4443  * @netdev: network interface device structure
4444  * @vf_id: VF identifier
4445  * @link: required link state
4446  *
4447  * Set the link state of a specified VF, regardless of physical link state
4448  **/
4449 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4450 {
4451 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4452 	struct i40e_pf *pf = np->vsi->back;
4453 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4454 	struct virtchnl_pf_event pfe;
4455 	struct i40e_hw *hw = &pf->hw;
4456 	struct i40e_vf *vf;
4457 	int abs_vf_id;
4458 	int ret = 0;
4459 
4460 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4461 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4462 		return -EAGAIN;
4463 	}
4464 
4465 	/* validate the request */
4466 	if (vf_id >= pf->num_alloc_vfs) {
4467 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4468 		ret = -EINVAL;
4469 		goto error_out;
4470 	}
4471 
4472 	vf = &pf->vf[vf_id];
4473 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4474 
4475 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4476 	pfe.severity = PF_EVENT_SEVERITY_INFO;
4477 
4478 	switch (link) {
4479 	case IFLA_VF_LINK_STATE_AUTO:
4480 		vf->link_forced = false;
4481 		i40e_set_vf_link_state(vf, &pfe, ls);
4482 		break;
4483 	case IFLA_VF_LINK_STATE_ENABLE:
4484 		vf->link_forced = true;
4485 		vf->link_up = true;
4486 		i40e_set_vf_link_state(vf, &pfe, ls);
4487 		break;
4488 	case IFLA_VF_LINK_STATE_DISABLE:
4489 		vf->link_forced = true;
4490 		vf->link_up = false;
4491 		i40e_set_vf_link_state(vf, &pfe, ls);
4492 		break;
4493 	default:
4494 		ret = -EINVAL;
4495 		goto error_out;
4496 	}
4497 	/* Notify the VF of its new link state */
4498 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4499 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4500 
4501 error_out:
4502 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4503 	return ret;
4504 }
4505 
4506 /**
4507  * i40e_ndo_set_vf_spoofchk
4508  * @netdev: network interface device structure
4509  * @vf_id: VF identifier
4510  * @enable: flag to enable or disable feature
4511  *
4512  * Enable or disable VF spoof checking
4513  **/
4514 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4515 {
4516 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4517 	struct i40e_vsi *vsi = np->vsi;
4518 	struct i40e_pf *pf = vsi->back;
4519 	struct i40e_vsi_context ctxt;
4520 	struct i40e_hw *hw = &pf->hw;
4521 	struct i40e_vf *vf;
4522 	int ret = 0;
4523 
4524 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4525 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4526 		return -EAGAIN;
4527 	}
4528 
4529 	/* validate the request */
4530 	if (vf_id >= pf->num_alloc_vfs) {
4531 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4532 		ret = -EINVAL;
4533 		goto out;
4534 	}
4535 
4536 	vf = &(pf->vf[vf_id]);
4537 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4538 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4539 			vf_id);
4540 		ret = -EAGAIN;
4541 		goto out;
4542 	}
4543 
4544 	if (enable == vf->spoofchk)
4545 		goto out;
4546 
4547 	vf->spoofchk = enable;
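	/* Apply the new setting by updating the VSI security section via an
	 * admin queue command; MAC and VLAN anti-spoof checks toggle together.
	 */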
4548 	memset(&ctxt, 0, sizeof(ctxt));
4549 	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4550 	ctxt.pf_num = pf->hw.pf_id;
4551 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4552 	if (enable)
4553 		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4554 					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4555 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4556 	if (ret) {
4557 		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4558 			ret);
4559 		ret = -EIO;
4560 	}
4561 out:
4562 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4563 	return ret;
4564 }
4565 
4566 /**
4567  * i40e_ndo_set_vf_trust
4568  * @netdev: network interface device structure of the pf
4569  * @vf_id: VF identifier
4570  * @setting: trust setting
4571  *
4572  * Enable or disable VF trust setting
4573  **/
4574 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4575 {
4576 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4577 	struct i40e_pf *pf = np->vsi->back;
4578 	struct i40e_vf *vf;
4579 	int ret = 0;
4580 
4581 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4582 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4583 		return -EAGAIN;
4584 	}
4585 
4586 	/* validate the request */
4587 	if (vf_id >= pf->num_alloc_vfs) {
4588 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4589 		ret = -EINVAL;
4590 		goto out;
4591 	}
4592 
4593 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4594 		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4595 		ret = -EINVAL;
4596 		goto out;
4597 	}
4598 
4599 	vf = &pf->vf[vf_id];
4600 
4601 	if (setting == vf->trusted)
4602 		goto out;
4603 
4604 	vf->trusted = setting;
4605 	i40e_vc_reset_vf(vf, true);
4606 	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4607 		 vf_id, setting ? "" : "un");
4608 
	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}
4617 
4618 out:
4619 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4620 	return ret;
4621 }
4622 
4623 /**
4624  * i40e_get_vf_stats - populate some stats for the VF
4625  * @netdev: the netdev of the PF
4626  * @vf_id: the host OS identifier (0-127)
4627  * @vf_stats: pointer to the OS memory to be initialized
4628  */
4629 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4630 		      struct ifla_vf_stats *vf_stats)
4631 {
4632 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4633 	struct i40e_pf *pf = np->vsi->back;
4634 	struct i40e_eth_stats *stats;
4635 	struct i40e_vsi *vsi;
4636 	struct i40e_vf *vf;
4637 
4638 	/* validate the request */
4639 	if (i40e_validate_vf(pf, vf_id))
4640 		return -EINVAL;
4641 
4642 	vf = &pf->vf[vf_id];
4643 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4644 		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4645 		return -EBUSY;
4646 	}
4647 
4648 	vsi = pf->vsi[vf->lan_vsi_idx];
4649 	if (!vsi)
4650 		return -EINVAL;
4651 
4652 	i40e_update_eth_stats(vsi);
4653 	stats = &vsi->eth_stats;
4654 
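	/* translate the VSI Ethernet counters into the ifla_vf_stats layout */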
4655 	memset(vf_stats, 0, sizeof(*vf_stats));
4656 
4657 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4658 		stats->rx_multicast;
4659 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4660 		stats->tx_multicast;
4661 	vf_stats->rx_bytes   = stats->rx_bytes;
4662 	vf_stats->tx_bytes   = stats->tx_bytes;
4663 	vf_stats->broadcast  = stats->rx_broadcast;
4664 	vf_stats->multicast  = stats->rx_multicast;
4665 	vf_stats->rx_dropped = stats->rx_discards;
4666 	vf_stats->tx_dropped = stats->tx_discards;
4667 
4668 	return 0;
4669 }
4670