xref: /openbmc/linux/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c (revision 7b7fd0ac7dc1ffcaf24d9bca0f051b0168e43cd4)
1  // SPDX-License-Identifier: GPL-2.0
2  /* Copyright(c) 2013 - 2018 Intel Corporation. */
3  
4  #include "i40e.h"
5  #include "i40e_lan_hmc.h"
6  #include "i40e_virtchnl_pf.h"
7  
8  /*********************notification routines***********************/
9  
10  /**
11   * i40e_vc_vf_broadcast
12   * @pf: pointer to the PF structure
13   * @v_opcode: operation code
14   * @v_retval: return value
15   * @msg: pointer to the msg buffer
16   * @msglen: msg length
17   *
18   * send a message to all VFs on a given PF
19   **/
i40e_vc_vf_broadcast(struct i40e_pf * pf,enum virtchnl_ops v_opcode,int v_retval,u8 * msg,u16 msglen)20  static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
21  				 enum virtchnl_ops v_opcode,
22  				 int v_retval, u8 *msg,
23  				 u16 msglen)
24  {
25  	struct i40e_hw *hw = &pf->hw;
26  	struct i40e_vf *vf = pf->vf;
27  	int i;
28  
29  	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
30  		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
31  		/* Not all vfs are enabled so skip the ones that are not */
32  		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
33  		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
34  			continue;
35  
36  		/* Ignore return value on purpose - a given VF may fail, but
37  		 * we need to keep going and send to all of them
38  		 */
39  		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
40  				       msg, msglen, NULL);
41  	}
42  }
43  
44  /**
45   * i40e_vc_link_speed2mbps
46   * converts i40e_aq_link_speed to integer value of Mbps
47   * @link_speed: the speed to convert
48   *
49   * return the speed as direct value of Mbps.
50   **/
51  static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)52  i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
53  {
54  	switch (link_speed) {
55  	case I40E_LINK_SPEED_100MB:
56  		return SPEED_100;
57  	case I40E_LINK_SPEED_1GB:
58  		return SPEED_1000;
59  	case I40E_LINK_SPEED_2_5GB:
60  		return SPEED_2500;
61  	case I40E_LINK_SPEED_5GB:
62  		return SPEED_5000;
63  	case I40E_LINK_SPEED_10GB:
64  		return SPEED_10000;
65  	case I40E_LINK_SPEED_20GB:
66  		return SPEED_20000;
67  	case I40E_LINK_SPEED_25GB:
68  		return SPEED_25000;
69  	case I40E_LINK_SPEED_40GB:
70  		return SPEED_40000;
71  	case I40E_LINK_SPEED_UNKNOWN:
72  		return SPEED_UNKNOWN;
73  	}
74  	return SPEED_UNKNOWN;
75  }
76  
77  /**
78   * i40e_set_vf_link_state
79   * @vf: pointer to the VF structure
80   * @pfe: pointer to PF event structure
81   * @ls: pointer to link status structure
82   *
83   * set a link state on a single vf
84   **/
i40e_set_vf_link_state(struct i40e_vf * vf,struct virtchnl_pf_event * pfe,struct i40e_link_status * ls)85  static void i40e_set_vf_link_state(struct i40e_vf *vf,
86  				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
87  {
88  	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
89  
90  	if (vf->link_forced)
91  		link_status = vf->link_up;
92  
93  	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
94  		pfe->event_data.link_event_adv.link_speed = link_status ?
95  			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
96  		pfe->event_data.link_event_adv.link_status = link_status;
97  	} else {
98  		pfe->event_data.link_event.link_speed = link_status ?
99  			i40e_virtchnl_link_speed(ls->link_speed) : 0;
100  		pfe->event_data.link_event.link_status = link_status;
101  	}
102  }
103  
104  /**
105   * i40e_vc_notify_vf_link_state
106   * @vf: pointer to the VF structure
107   *
108   * send a link status message to a single VF
109   **/
i40e_vc_notify_vf_link_state(struct i40e_vf * vf)110  static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
111  {
112  	struct virtchnl_pf_event pfe;
113  	struct i40e_pf *pf = vf->pf;
114  	struct i40e_hw *hw = &pf->hw;
115  	struct i40e_link_status *ls = &pf->hw.phy.link_info;
116  	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
117  
118  	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
119  	pfe.severity = PF_EVENT_SEVERITY_INFO;
120  
121  	i40e_set_vf_link_state(vf, &pfe, ls);
122  
123  	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
124  			       0, (u8 *)&pfe, sizeof(pfe), NULL);
125  }
126  
127  /**
128   * i40e_vc_notify_link_state
129   * @pf: pointer to the PF structure
130   *
131   * send a link status message to all VFs on a given PF
132   **/
i40e_vc_notify_link_state(struct i40e_pf * pf)133  void i40e_vc_notify_link_state(struct i40e_pf *pf)
134  {
135  	int i;
136  
137  	for (i = 0; i < pf->num_alloc_vfs; i++)
138  		i40e_vc_notify_vf_link_state(&pf->vf[i]);
139  }
140  
141  /**
142   * i40e_vc_notify_reset
143   * @pf: pointer to the PF structure
144   *
145   * indicate a pending reset to all VFs on a given PF
146   **/
i40e_vc_notify_reset(struct i40e_pf * pf)147  void i40e_vc_notify_reset(struct i40e_pf *pf)
148  {
149  	struct virtchnl_pf_event pfe;
150  
151  	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
152  	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
153  	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
154  			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
155  }
156  
157  #ifdef CONFIG_PCI_IOV
i40e_restore_all_vfs_msi_state(struct pci_dev * pdev)158  void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
159  {
160  	u16 vf_id;
161  	u16 pos;
162  
163  	/* Continue only if this is a PF */
164  	if (!pdev->is_physfn)
165  		return;
166  
167  	if (!pci_num_vf(pdev))
168  		return;
169  
170  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
171  	if (pos) {
172  		struct pci_dev *vf_dev = NULL;
173  
174  		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
175  		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
176  			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
177  				pci_restore_msi_state(vf_dev);
178  		}
179  	}
180  }
181  #endif /* CONFIG_PCI_IOV */
182  
183  /**
184   * i40e_vc_notify_vf_reset
185   * @vf: pointer to the VF structure
186   *
187   * indicate a pending reset to the given VF
188   **/
i40e_vc_notify_vf_reset(struct i40e_vf * vf)189  void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
190  {
191  	struct virtchnl_pf_event pfe;
192  	int abs_vf_id;
193  
194  	/* validate the request */
195  	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
196  		return;
197  
198  	/* verify if the VF is in either init or active before proceeding */
199  	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
200  	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
201  		return;
202  
203  	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
204  
205  	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
206  	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
207  	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
208  			       0, (u8 *)&pfe,
209  			       sizeof(struct virtchnl_pf_event), NULL);
210  }
211  /***********************misc routines*****************************/
212  
213  /**
214   * i40e_vc_reset_vf
215   * @vf: pointer to the VF info
216   * @notify_vf: notify vf about reset or not
217   * Reset VF handler.
218   **/
i40e_vc_reset_vf(struct i40e_vf * vf,bool notify_vf)219  static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
220  {
221  	struct i40e_pf *pf = vf->pf;
222  	int i;
223  
224  	if (notify_vf)
225  		i40e_vc_notify_vf_reset(vf);
226  
227  	/* We want to ensure that an actual reset occurs initiated after this
228  	 * function was called. However, we do not want to wait forever, so
229  	 * we'll give a reasonable time and print a message if we failed to
230  	 * ensure a reset.
231  	 */
232  	for (i = 0; i < 20; i++) {
233  		/* If PF is in VFs releasing state reset VF is impossible,
234  		 * so leave it.
235  		 */
236  		if (test_bit(__I40E_VFS_RELEASING, pf->state))
237  			return;
238  		if (i40e_reset_vf(vf, false))
239  			return;
240  		usleep_range(10000, 20000);
241  	}
242  
243  	if (notify_vf)
244  		dev_warn(&vf->pf->pdev->dev,
245  			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
246  			 vf->vf_id);
247  	else
248  		dev_dbg(&vf->pf->pdev->dev,
249  			"Failed to initiate reset for VF %d after 200 milliseconds\n",
250  			vf->vf_id);
251  }
252  
253  /**
254   * i40e_vc_isvalid_vsi_id
255   * @vf: pointer to the VF info
256   * @vsi_id: VF relative VSI id
257   *
258   * check for the valid VSI id
259   **/
i40e_vc_isvalid_vsi_id(struct i40e_vf * vf,u16 vsi_id)260  static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
261  {
262  	struct i40e_pf *pf = vf->pf;
263  	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
264  
265  	return (vsi && (vsi->vf_id == vf->vf_id));
266  }
267  
268  /**
269   * i40e_vc_isvalid_queue_id
270   * @vf: pointer to the VF info
271   * @vsi_id: vsi id
272   * @qid: vsi relative queue id
273   *
274   * check for the valid queue id
275   **/
i40e_vc_isvalid_queue_id(struct i40e_vf * vf,u16 vsi_id,u16 qid)276  static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
277  					    u16 qid)
278  {
279  	struct i40e_pf *pf = vf->pf;
280  	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
281  
282  	return (vsi && (qid < vsi->alloc_queue_pairs));
283  }
284  
285  /**
286   * i40e_vc_isvalid_vector_id
287   * @vf: pointer to the VF info
288   * @vector_id: VF relative vector id
289   *
290   * check for the valid vector id
291   **/
i40e_vc_isvalid_vector_id(struct i40e_vf * vf,u32 vector_id)292  static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
293  {
294  	struct i40e_pf *pf = vf->pf;
295  
296  	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
297  }
298  
299  /***********************vf resource mgmt routines*****************/
300  
301  /**
302   * i40e_vc_get_pf_queue_id
303   * @vf: pointer to the VF info
304   * @vsi_id: id of VSI as provided by the FW
305   * @vsi_queue_id: vsi relative queue id
306   *
307   * return PF relative queue id
308   **/
i40e_vc_get_pf_queue_id(struct i40e_vf * vf,u16 vsi_id,u8 vsi_queue_id)309  static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
310  				   u8 vsi_queue_id)
311  {
312  	struct i40e_pf *pf = vf->pf;
313  	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
314  	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
315  
316  	if (!vsi)
317  		return pf_queue_id;
318  
319  	if (le16_to_cpu(vsi->info.mapping_flags) &
320  	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
321  		pf_queue_id =
322  			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
323  	else
324  		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
325  			      vsi_queue_id;
326  
327  	return pf_queue_id;
328  }
329  
330  /**
331   * i40e_get_real_pf_qid
332   * @vf: pointer to the VF info
333   * @vsi_id: vsi id
334   * @queue_id: queue number
335   *
336   * wrapper function to get pf_queue_id handling ADq code as well
337   **/
i40e_get_real_pf_qid(struct i40e_vf * vf,u16 vsi_id,u16 queue_id)338  static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
339  {
340  	int i;
341  
342  	if (vf->adq_enabled) {
343  		/* Although VF considers all the queues(can be 1 to 16) as its
344  		 * own but they may actually belong to different VSIs(up to 4).
345  		 * We need to find which queues belongs to which VSI.
346  		 */
347  		for (i = 0; i < vf->num_tc; i++) {
348  			if (queue_id < vf->ch[i].num_qps) {
349  				vsi_id = vf->ch[i].vsi_id;
350  				break;
351  			}
352  			/* find right queue id which is relative to a
353  			 * given VSI.
354  			 */
355  			queue_id -= vf->ch[i].num_qps;
356  			}
357  		}
358  
359  	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
360  }
361  
362  /**
363   * i40e_config_irq_link_list
364   * @vf: pointer to the VF info
365   * @vsi_id: id of VSI as given by the FW
366   * @vecmap: irq map info
367   *
368   * configure irq link list from the map
369   **/
i40e_config_irq_link_list(struct i40e_vf * vf,u16 vsi_id,struct virtchnl_vector_map * vecmap)370  static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
371  				      struct virtchnl_vector_map *vecmap)
372  {
373  	unsigned long linklistmap = 0, tempmap;
374  	struct i40e_pf *pf = vf->pf;
375  	struct i40e_hw *hw = &pf->hw;
376  	u16 vsi_queue_id, pf_queue_id;
377  	enum i40e_queue_type qtype;
378  	u16 next_q, vector_id, size;
379  	u32 reg, reg_idx;
380  	u16 itr_idx = 0;
381  
382  	vector_id = vecmap->vector_id;
383  	/* setup the head */
384  	if (0 == vector_id)
385  		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
386  	else
387  		reg_idx = I40E_VPINT_LNKLSTN(
388  		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
389  		     (vector_id - 1));
390  
391  	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
392  		/* Special case - No queues mapped on this vector */
393  		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
394  		goto irq_list_done;
395  	}
396  	tempmap = vecmap->rxq_map;
397  	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
398  		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
399  				    vsi_queue_id));
400  	}
401  
402  	tempmap = vecmap->txq_map;
403  	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
404  		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
405  				     vsi_queue_id + 1));
406  	}
407  
408  	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
409  	next_q = find_first_bit(&linklistmap, size);
410  	if (unlikely(next_q == size))
411  		goto irq_list_done;
412  
413  	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
414  	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
415  	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
416  	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
417  
418  	wr32(hw, reg_idx, reg);
419  
420  	while (next_q < size) {
421  		switch (qtype) {
422  		case I40E_QUEUE_TYPE_RX:
423  			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
424  			itr_idx = vecmap->rxitr_idx;
425  			break;
426  		case I40E_QUEUE_TYPE_TX:
427  			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
428  			itr_idx = vecmap->txitr_idx;
429  			break;
430  		default:
431  			break;
432  		}
433  
434  		next_q = find_next_bit(&linklistmap, size, next_q + 1);
435  		if (next_q < size) {
436  			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
437  			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
438  			pf_queue_id = i40e_get_real_pf_qid(vf,
439  							   vsi_id,
440  							   vsi_queue_id);
441  		} else {
442  			pf_queue_id = I40E_QUEUE_END_OF_LIST;
443  			qtype = 0;
444  		}
445  
446  		/* format for the RQCTL & TQCTL regs is same */
447  		reg = (vector_id) |
448  		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
449  		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
450  		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
451  		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
452  		wr32(hw, reg_idx, reg);
453  	}
454  
455  	/* if the vf is running in polling mode and using interrupt zero,
456  	 * need to disable auto-mask on enabling zero interrupt for VFs.
457  	 */
458  	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
459  	    (vector_id == 0)) {
460  		reg = rd32(hw, I40E_GLINT_CTL);
461  		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
462  			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
463  			wr32(hw, I40E_GLINT_CTL, reg);
464  		}
465  	}
466  
467  irq_list_done:
468  	i40e_flush(hw);
469  }
470  
471  /**
472   * i40e_release_rdma_qvlist
473   * @vf: pointer to the VF.
474   *
475   **/
i40e_release_rdma_qvlist(struct i40e_vf * vf)476  static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
477  {
478  	struct i40e_pf *pf = vf->pf;
479  	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
480  	u32 msix_vf;
481  	u32 i;
482  
483  	if (!vf->qvlist_info)
484  		return;
485  
486  	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
487  	for (i = 0; i < qvlist_info->num_vectors; i++) {
488  		struct virtchnl_rdma_qv_info *qv_info;
489  		u32 next_q_index, next_q_type;
490  		struct i40e_hw *hw = &pf->hw;
491  		u32 v_idx, reg_idx, reg;
492  
493  		qv_info = &qvlist_info->qv_info[i];
494  		if (!qv_info)
495  			continue;
496  		v_idx = qv_info->v_idx;
497  		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
498  			/* Figure out the queue after CEQ and make that the
499  			 * first queue.
500  			 */
501  			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
502  			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
503  			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
504  					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
505  			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
506  					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
507  
508  			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
509  			reg = (next_q_index &
510  			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
511  			       (next_q_type <<
512  			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
513  
514  			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
515  		}
516  	}
517  	kfree(vf->qvlist_info);
518  	vf->qvlist_info = NULL;
519  }
520  
521  /**
522   * i40e_config_rdma_qvlist
523   * @vf: pointer to the VF info
524   * @qvlist_info: queue and vector list
525   *
526   * Return 0 on success or < 0 on error
527   **/
528  static int
i40e_config_rdma_qvlist(struct i40e_vf * vf,struct virtchnl_rdma_qvlist_info * qvlist_info)529  i40e_config_rdma_qvlist(struct i40e_vf *vf,
530  			struct virtchnl_rdma_qvlist_info *qvlist_info)
531  {
532  	struct i40e_pf *pf = vf->pf;
533  	struct i40e_hw *hw = &pf->hw;
534  	struct virtchnl_rdma_qv_info *qv_info;
535  	u32 v_idx, i, reg_idx, reg;
536  	u32 next_q_idx, next_q_type;
537  	size_t size;
538  	u32 msix_vf;
539  	int ret = 0;
540  
541  	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
542  
543  	if (qvlist_info->num_vectors > msix_vf) {
544  		dev_warn(&pf->pdev->dev,
545  			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
546  			 qvlist_info->num_vectors,
547  			 msix_vf);
548  		ret = -EINVAL;
549  		goto err_out;
550  	}
551  
552  	kfree(vf->qvlist_info);
553  	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
554  				    qvlist_info->num_vectors);
555  	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
556  	if (!vf->qvlist_info) {
557  		ret = -ENOMEM;
558  		goto err_out;
559  	}
560  	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
561  
562  	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
563  	for (i = 0; i < qvlist_info->num_vectors; i++) {
564  		qv_info = &qvlist_info->qv_info[i];
565  		if (!qv_info)
566  			continue;
567  
568  		/* Validate vector id belongs to this vf */
569  		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
570  			ret = -EINVAL;
571  			goto err_free;
572  		}
573  
574  		v_idx = qv_info->v_idx;
575  
576  		vf->qvlist_info->qv_info[i] = *qv_info;
577  
578  		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
579  		/* We might be sharing the interrupt, so get the first queue
580  		 * index and type, push it down the list by adding the new
581  		 * queue on top. Also link it with the new queue in CEQCTL.
582  		 */
583  		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
584  		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
585  				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
586  		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
587  				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
588  
589  		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
590  			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
591  			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
592  			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
593  			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
594  			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
595  			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
596  			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
597  
598  			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
599  			reg = (qv_info->ceq_idx &
600  			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
601  			       (I40E_QUEUE_TYPE_PE_CEQ <<
602  			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
603  			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
604  		}
605  
606  		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
607  			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
608  			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
609  			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
610  
611  			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
612  		}
613  	}
614  
615  	return 0;
616  err_free:
617  	kfree(vf->qvlist_info);
618  	vf->qvlist_info = NULL;
619  err_out:
620  	return ret;
621  }
622  
623  /**
624   * i40e_config_vsi_tx_queue
625   * @vf: pointer to the VF info
626   * @vsi_id: id of VSI as provided by the FW
627   * @vsi_queue_id: vsi relative queue index
628   * @info: config. info
629   *
630   * configure tx queue
631   **/
i40e_config_vsi_tx_queue(struct i40e_vf * vf,u16 vsi_id,u16 vsi_queue_id,struct virtchnl_txq_info * info)632  static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
633  				    u16 vsi_queue_id,
634  				    struct virtchnl_txq_info *info)
635  {
636  	struct i40e_pf *pf = vf->pf;
637  	struct i40e_hw *hw = &pf->hw;
638  	struct i40e_hmc_obj_txq tx_ctx;
639  	struct i40e_vsi *vsi;
640  	u16 pf_queue_id;
641  	u32 qtx_ctl;
642  	int ret = 0;
643  
644  	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
645  		ret = -ENOENT;
646  		goto error_context;
647  	}
648  	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
649  	vsi = i40e_find_vsi_from_id(pf, vsi_id);
650  	if (!vsi) {
651  		ret = -ENOENT;
652  		goto error_context;
653  	}
654  
655  	/* clear the context structure first */
656  	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
657  
658  	/* only set the required fields */
659  	tx_ctx.base = info->dma_ring_addr / 128;
660  	tx_ctx.qlen = info->ring_len;
661  	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
662  	tx_ctx.rdylist_act = 0;
663  	tx_ctx.head_wb_ena = info->headwb_enabled;
664  	tx_ctx.head_wb_addr = info->dma_headwb_addr;
665  
666  	/* clear the context in the HMC */
667  	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
668  	if (ret) {
669  		dev_err(&pf->pdev->dev,
670  			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
671  			pf_queue_id, ret);
672  		ret = -ENOENT;
673  		goto error_context;
674  	}
675  
676  	/* set the context in the HMC */
677  	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
678  	if (ret) {
679  		dev_err(&pf->pdev->dev,
680  			"Failed to set VF LAN Tx queue context %d error: %d\n",
681  			pf_queue_id, ret);
682  		ret = -ENOENT;
683  		goto error_context;
684  	}
685  
686  	/* associate this queue with the PCI VF function */
687  	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
688  	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
689  		    & I40E_QTX_CTL_PF_INDX_MASK);
690  	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
691  		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
692  		    & I40E_QTX_CTL_VFVM_INDX_MASK);
693  	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
694  	i40e_flush(hw);
695  
696  error_context:
697  	return ret;
698  }
699  
700  /**
701   * i40e_config_vsi_rx_queue
702   * @vf: pointer to the VF info
703   * @vsi_id: id of VSI  as provided by the FW
704   * @vsi_queue_id: vsi relative queue index
705   * @info: config. info
706   *
707   * configure rx queue
708   **/
i40e_config_vsi_rx_queue(struct i40e_vf * vf,u16 vsi_id,u16 vsi_queue_id,struct virtchnl_rxq_info * info)709  static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
710  				    u16 vsi_queue_id,
711  				    struct virtchnl_rxq_info *info)
712  {
713  	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
714  	struct i40e_pf *pf = vf->pf;
715  	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
716  	struct i40e_hw *hw = &pf->hw;
717  	struct i40e_hmc_obj_rxq rx_ctx;
718  	int ret = 0;
719  
720  	/* clear the context structure first */
721  	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
722  
723  	/* only set the required fields */
724  	rx_ctx.base = info->dma_ring_addr / 128;
725  	rx_ctx.qlen = info->ring_len;
726  
727  	if (info->splithdr_enabled) {
728  		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
729  				  I40E_RX_SPLIT_IP      |
730  				  I40E_RX_SPLIT_TCP_UDP |
731  				  I40E_RX_SPLIT_SCTP;
732  		/* header length validation */
733  		if (info->hdr_size > ((2 * 1024) - 64)) {
734  			ret = -EINVAL;
735  			goto error_param;
736  		}
737  		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
738  
739  		/* set split mode 10b */
740  		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
741  	}
742  
743  	/* databuffer length validation */
744  	if (info->databuffer_size > ((16 * 1024) - 128)) {
745  		ret = -EINVAL;
746  		goto error_param;
747  	}
748  	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
749  
750  	/* max pkt. length validation */
751  	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
752  		ret = -EINVAL;
753  		goto error_param;
754  	}
755  	rx_ctx.rxmax = info->max_pkt_size;
756  
757  	/* if port VLAN is configured increase the max packet size */
758  	if (vsi->info.pvid)
759  		rx_ctx.rxmax += VLAN_HLEN;
760  
761  	/* enable 32bytes desc always */
762  	rx_ctx.dsize = 1;
763  
764  	/* default values */
765  	rx_ctx.lrxqthresh = 1;
766  	rx_ctx.crcstrip = 1;
767  	rx_ctx.prefena = 1;
768  	rx_ctx.l2tsel = 1;
769  
770  	/* clear the context in the HMC */
771  	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
772  	if (ret) {
773  		dev_err(&pf->pdev->dev,
774  			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
775  			pf_queue_id, ret);
776  		ret = -ENOENT;
777  		goto error_param;
778  	}
779  
780  	/* set the context in the HMC */
781  	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
782  	if (ret) {
783  		dev_err(&pf->pdev->dev,
784  			"Failed to set VF LAN Rx queue context %d error: %d\n",
785  			pf_queue_id, ret);
786  		ret = -ENOENT;
787  		goto error_param;
788  	}
789  
790  error_param:
791  	return ret;
792  }
793  
794  /**
795   * i40e_alloc_vsi_res
796   * @vf: pointer to the VF info
797   * @idx: VSI index, applies only for ADq mode, zero otherwise
798   *
799   * alloc VF vsi context & resources
800   **/
i40e_alloc_vsi_res(struct i40e_vf * vf,u8 idx)801  static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
802  {
803  	struct i40e_mac_filter *f = NULL;
804  	struct i40e_pf *pf = vf->pf;
805  	struct i40e_vsi *vsi;
806  	u64 max_tx_rate = 0;
807  	int ret = 0;
808  
809  	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
810  			     vf->vf_id);
811  
812  	if (!vsi) {
813  		dev_err(&pf->pdev->dev,
814  			"add vsi failed for VF %d, aq_err %d\n",
815  			vf->vf_id, pf->hw.aq.asq_last_status);
816  		ret = -ENOENT;
817  		goto error_alloc_vsi_res;
818  	}
819  
820  	if (!idx) {
821  		u64 hena = i40e_pf_get_default_rss_hena(pf);
822  		u8 broadcast[ETH_ALEN];
823  
824  		vf->lan_vsi_idx = vsi->idx;
825  		vf->lan_vsi_id = vsi->id;
826  		/* If the port VLAN has been configured and then the
827  		 * VF driver was removed then the VSI port VLAN
828  		 * configuration was destroyed.  Check if there is
829  		 * a port VLAN and restore the VSI configuration if
830  		 * needed.
831  		 */
832  		if (vf->port_vlan_id)
833  			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
834  
835  		spin_lock_bh(&vsi->mac_filter_hash_lock);
836  		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
837  			f = i40e_add_mac_filter(vsi,
838  						vf->default_lan_addr.addr);
839  			if (!f)
840  				dev_info(&pf->pdev->dev,
841  					 "Could not add MAC filter %pM for VF %d\n",
842  					vf->default_lan_addr.addr, vf->vf_id);
843  		}
844  		eth_broadcast_addr(broadcast);
845  		f = i40e_add_mac_filter(vsi, broadcast);
846  		if (!f)
847  			dev_info(&pf->pdev->dev,
848  				 "Could not allocate VF broadcast filter\n");
849  		spin_unlock_bh(&vsi->mac_filter_hash_lock);
850  		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
851  		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
852  		/* program mac filter only for VF VSI */
853  		ret = i40e_sync_vsi_filters(vsi);
854  		if (ret)
855  			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
856  	}
857  
858  	/* storing VSI index and id for ADq and don't apply the mac filter */
859  	if (vf->adq_enabled) {
860  		vf->ch[idx].vsi_idx = vsi->idx;
861  		vf->ch[idx].vsi_id = vsi->id;
862  	}
863  
864  	/* Set VF bandwidth if specified */
865  	if (vf->tx_rate) {
866  		max_tx_rate = vf->tx_rate;
867  	} else if (vf->ch[idx].max_tx_rate) {
868  		max_tx_rate = vf->ch[idx].max_tx_rate;
869  	}
870  
871  	if (max_tx_rate) {
872  		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
873  		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
874  						  max_tx_rate, 0, NULL);
875  		if (ret)
876  			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
877  				vf->vf_id, ret);
878  	}
879  
880  error_alloc_vsi_res:
881  	return ret;
882  }
883  
884  /**
885   * i40e_map_pf_queues_to_vsi
886   * @vf: pointer to the VF info
887   *
888   * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
889   * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
890   **/
i40e_map_pf_queues_to_vsi(struct i40e_vf * vf)891  static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
892  {
893  	struct i40e_pf *pf = vf->pf;
894  	struct i40e_hw *hw = &pf->hw;
895  	u32 reg, num_tc = 1; /* VF has at least one traffic class */
896  	u16 vsi_id, qps;
897  	int i, j;
898  
899  	if (vf->adq_enabled)
900  		num_tc = vf->num_tc;
901  
902  	for (i = 0; i < num_tc; i++) {
903  		if (vf->adq_enabled) {
904  			qps = vf->ch[i].num_qps;
905  			vsi_id =  vf->ch[i].vsi_id;
906  		} else {
907  			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
908  			vsi_id = vf->lan_vsi_id;
909  		}
910  
911  		for (j = 0; j < 7; j++) {
912  			if (j * 2 >= qps) {
913  				/* end of list */
914  				reg = 0x07FF07FF;
915  			} else {
916  				u16 qid = i40e_vc_get_pf_queue_id(vf,
917  								  vsi_id,
918  								  j * 2);
919  				reg = qid;
920  				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
921  							      (j * 2) + 1);
922  				reg |= qid << 16;
923  			}
924  			i40e_write_rx_ctl(hw,
925  					  I40E_VSILAN_QTABLE(j, vsi_id),
926  					  reg);
927  		}
928  	}
929  }
930  
931  /**
932   * i40e_map_pf_to_vf_queues
933   * @vf: pointer to the VF info
934   *
935   * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
936   * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
937   **/
i40e_map_pf_to_vf_queues(struct i40e_vf * vf)938  static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
939  {
940  	struct i40e_pf *pf = vf->pf;
941  	struct i40e_hw *hw = &pf->hw;
942  	u32 reg, total_qps = 0;
943  	u32 qps, num_tc = 1; /* VF has at least one traffic class */
944  	u16 vsi_id, qid;
945  	int i, j;
946  
947  	if (vf->adq_enabled)
948  		num_tc = vf->num_tc;
949  
950  	for (i = 0; i < num_tc; i++) {
951  		if (vf->adq_enabled) {
952  			qps = vf->ch[i].num_qps;
953  			vsi_id =  vf->ch[i].vsi_id;
954  		} else {
955  			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
956  			vsi_id = vf->lan_vsi_id;
957  		}
958  
959  		for (j = 0; j < qps; j++) {
960  			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
961  
962  			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
963  			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
964  			     reg);
965  			total_qps++;
966  		}
967  	}
968  }
969  
970  /**
971   * i40e_enable_vf_mappings
972   * @vf: pointer to the VF info
973   *
974   * enable VF mappings
975   **/
i40e_enable_vf_mappings(struct i40e_vf * vf)976  static void i40e_enable_vf_mappings(struct i40e_vf *vf)
977  {
978  	struct i40e_pf *pf = vf->pf;
979  	struct i40e_hw *hw = &pf->hw;
980  	u32 reg;
981  
982  	/* Tell the hardware we're using noncontiguous mapping. HW requires
983  	 * that VF queues be mapped using this method, even when they are
984  	 * contiguous in real life
985  	 */
986  	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
987  			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
988  
989  	/* enable VF vplan_qtable mappings */
990  	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
991  	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
992  
993  	i40e_map_pf_to_vf_queues(vf);
994  	i40e_map_pf_queues_to_vsi(vf);
995  
996  	i40e_flush(hw);
997  }
998  
999  /**
1000   * i40e_disable_vf_mappings
1001   * @vf: pointer to the VF info
1002   *
1003   * disable VF mappings
1004   **/
i40e_disable_vf_mappings(struct i40e_vf * vf)1005  static void i40e_disable_vf_mappings(struct i40e_vf *vf)
1006  {
1007  	struct i40e_pf *pf = vf->pf;
1008  	struct i40e_hw *hw = &pf->hw;
1009  	int i;
1010  
1011  	/* disable qp mappings */
1012  	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
1013  	for (i = 0; i < I40E_MAX_VSI_QP; i++)
1014  		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
1015  		     I40E_QUEUE_END_OF_LIST);
1016  	i40e_flush(hw);
1017  }
1018  
1019  /**
1020   * i40e_free_vf_res
1021   * @vf: pointer to the VF info
1022   *
1023   * free VF resources
1024   **/
i40e_free_vf_res(struct i40e_vf * vf)1025  static void i40e_free_vf_res(struct i40e_vf *vf)
1026  {
1027  	struct i40e_pf *pf = vf->pf;
1028  	struct i40e_hw *hw = &pf->hw;
1029  	u32 reg_idx, reg;
1030  	int i, j, msix_vf;
1031  
1032  	/* Start by disabling VF's configuration API to prevent the OS from
1033  	 * accessing the VF's VSI after it's freed / invalidated.
1034  	 */
1035  	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1036  
1037  	/* It's possible the VF had requeuested more queues than the default so
1038  	 * do the accounting here when we're about to free them.
1039  	 */
1040  	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
1041  		pf->queues_left += vf->num_queue_pairs -
1042  				   I40E_DEFAULT_QUEUES_PER_VF;
1043  	}
1044  
1045  	/* free vsi & disconnect it from the parent uplink */
1046  	if (vf->lan_vsi_idx) {
1047  		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
1048  		vf->lan_vsi_idx = 0;
1049  		vf->lan_vsi_id = 0;
1050  	}
1051  
1052  	/* do the accounting and remove additional ADq VSI's */
1053  	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
1054  		for (j = 0; j < vf->num_tc; j++) {
1055  			/* At this point VSI0 is already released so don't
1056  			 * release it again and only clear their values in
1057  			 * structure variables
1058  			 */
1059  			if (j)
1060  				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
1061  			vf->ch[j].vsi_idx = 0;
1062  			vf->ch[j].vsi_id = 0;
1063  		}
1064  	}
1065  	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
1066  
1067  	/* disable interrupts so the VF starts in a known state */
1068  	for (i = 0; i < msix_vf; i++) {
1069  		/* format is same for both registers */
1070  		if (0 == i)
1071  			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
1072  		else
1073  			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
1074  						      (vf->vf_id))
1075  						     + (i - 1));
1076  		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1077  		i40e_flush(hw);
1078  	}
1079  
1080  	/* clear the irq settings */
1081  	for (i = 0; i < msix_vf; i++) {
1082  		/* format is same for both registers */
1083  		if (0 == i)
1084  			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
1085  		else
1086  			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
1087  						      (vf->vf_id))
1088  						     + (i - 1));
1089  		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1090  		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1091  		wr32(hw, reg_idx, reg);
1092  		i40e_flush(hw);
1093  	}
1094  	/* reset some of the state variables keeping track of the resources */
1095  	vf->num_queue_pairs = 0;
1096  	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1097  	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1098  }
1099  
1100  /**
1101   * i40e_alloc_vf_res
1102   * @vf: pointer to the VF info
1103   *
1104   * allocate VF resources
1105   **/
i40e_alloc_vf_res(struct i40e_vf * vf)1106  static int i40e_alloc_vf_res(struct i40e_vf *vf)
1107  {
1108  	struct i40e_pf *pf = vf->pf;
1109  	int total_queue_pairs = 0;
1110  	int ret, idx;
1111  
1112  	if (vf->num_req_queues &&
1113  	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1114  		pf->num_vf_qps = vf->num_req_queues;
1115  	else
1116  		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1117  
1118  	/* allocate hw vsi context & associated resources */
1119  	ret = i40e_alloc_vsi_res(vf, 0);
1120  	if (ret)
1121  		goto error_alloc;
1122  	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1123  
1124  	/* allocate additional VSIs based on tc information for ADq */
1125  	if (vf->adq_enabled) {
1126  		if (pf->queues_left >=
1127  		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1128  			/* TC 0 always belongs to VF VSI */
1129  			for (idx = 1; idx < vf->num_tc; idx++) {
1130  				ret = i40e_alloc_vsi_res(vf, idx);
1131  				if (ret)
1132  					goto error_alloc;
1133  			}
1134  			/* send correct number of queues */
1135  			total_queue_pairs = I40E_MAX_VF_QUEUES;
1136  		} else {
1137  			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1138  				 vf->vf_id);
1139  			vf->adq_enabled = false;
1140  		}
1141  	}
1142  
1143  	/* We account for each VF to get a default number of queue pairs.  If
1144  	 * the VF has now requested more, we need to account for that to make
1145  	 * certain we never request more queues than we actually have left in
1146  	 * HW.
1147  	 */
1148  	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1149  		pf->queues_left -=
1150  			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1151  
1152  	if (vf->trusted)
1153  		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1154  	else
1155  		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1156  
1157  	/* store the total qps number for the runtime
1158  	 * VF req validation
1159  	 */
1160  	vf->num_queue_pairs = total_queue_pairs;
1161  
1162  	/* VF is now completely initialized */
1163  	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1164  
1165  error_alloc:
1166  	if (ret)
1167  		i40e_free_vf_res(vf);
1168  
1169  	return ret;
1170  }
1171  
1172  #define VF_DEVICE_STATUS 0xAA
1173  #define VF_TRANS_PENDING_MASK 0x20
1174  /**
1175   * i40e_quiesce_vf_pci
1176   * @vf: pointer to the VF structure
1177   *
1178   * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1179   * if the transactions never clear.
1180   **/
i40e_quiesce_vf_pci(struct i40e_vf * vf)1181  static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1182  {
1183  	struct i40e_pf *pf = vf->pf;
1184  	struct i40e_hw *hw = &pf->hw;
1185  	int vf_abs_id, i;
1186  	u32 reg;
1187  
1188  	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1189  
1190  	wr32(hw, I40E_PF_PCI_CIAA,
1191  	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1192  	for (i = 0; i < 100; i++) {
1193  		reg = rd32(hw, I40E_PF_PCI_CIAD);
1194  		if ((reg & VF_TRANS_PENDING_MASK) == 0)
1195  			return 0;
1196  		udelay(1);
1197  	}
1198  	return -EIO;
1199  }
1200  
1201  /**
1202   * __i40e_getnum_vf_vsi_vlan_filters
1203   * @vsi: pointer to the vsi
1204   *
1205   * called to get the number of VLANs offloaded on this VF
1206   **/
__i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi * vsi)1207  static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1208  {
1209  	struct i40e_mac_filter *f;
1210  	u16 num_vlans = 0, bkt;
1211  
1212  	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1213  		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1214  			num_vlans++;
1215  	}
1216  
1217  	return num_vlans;
1218  }
1219  
1220  /**
1221   * i40e_getnum_vf_vsi_vlan_filters
1222   * @vsi: pointer to the vsi
1223   *
1224   * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
1225   **/
i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi * vsi)1226  static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1227  {
1228  	int num_vlans;
1229  
1230  	spin_lock_bh(&vsi->mac_filter_hash_lock);
1231  	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1232  	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1233  
1234  	return num_vlans;
1235  }
1236  
1237  /**
1238   * i40e_get_vlan_list_sync
1239   * @vsi: pointer to the VSI
1240   * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1241   * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
1242   *             This array is allocated here, but has to be freed in caller.
1243   *
1244   * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1245   **/
i40e_get_vlan_list_sync(struct i40e_vsi * vsi,u16 * num_vlans,s16 ** vlan_list)1246  static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1247  				    s16 **vlan_list)
1248  {
1249  	struct i40e_mac_filter *f;
1250  	int i = 0;
1251  	int bkt;
1252  
1253  	spin_lock_bh(&vsi->mac_filter_hash_lock);
1254  	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1255  	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1256  	if (!(*vlan_list))
1257  		goto err;
1258  
1259  	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1260  		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1261  			continue;
1262  		(*vlan_list)[i++] = f->vlan;
1263  	}
1264  err:
1265  	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1266  }
1267  
1268  /**
1269   * i40e_set_vsi_promisc
1270   * @vf: pointer to the VF struct
1271   * @seid: VSI number
1272   * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1273   *                for a given VLAN
1274   * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1275   *                  for a given VLAN
1276   * @vl: List of VLANs - apply filter for given VLANs
1277   * @num_vlans: Number of elements in @vl
1278   **/
1279  static int
i40e_set_vsi_promisc(struct i40e_vf * vf,u16 seid,bool multi_enable,bool unicast_enable,s16 * vl,u16 num_vlans)1280  i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1281  		     bool unicast_enable, s16 *vl, u16 num_vlans)
1282  {
1283  	struct i40e_pf *pf = vf->pf;
1284  	struct i40e_hw *hw = &pf->hw;
1285  	int aq_ret, aq_tmp = 0;
1286  	int i;
1287  
1288  	/* No VLAN to set promisc on, set on VSI */
1289  	if (!num_vlans || !vl) {
1290  		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1291  							       multi_enable,
1292  							       NULL);
1293  		if (aq_ret) {
1294  			int aq_err = pf->hw.aq.asq_last_status;
1295  
1296  			dev_err(&pf->pdev->dev,
1297  				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1298  				vf->vf_id,
1299  				ERR_PTR(aq_ret),
1300  				i40e_aq_str(&pf->hw, aq_err));
1301  
1302  			return aq_ret;
1303  		}
1304  
1305  		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1306  							     unicast_enable,
1307  							     NULL, true);
1308  
1309  		if (aq_ret) {
1310  			int aq_err = pf->hw.aq.asq_last_status;
1311  
1312  			dev_err(&pf->pdev->dev,
1313  				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1314  				vf->vf_id,
1315  				ERR_PTR(aq_ret),
1316  				i40e_aq_str(&pf->hw, aq_err));
1317  		}
1318  
1319  		return aq_ret;
1320  	}
1321  
1322  	for (i = 0; i < num_vlans; i++) {
1323  		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1324  							    multi_enable,
1325  							    vl[i], NULL);
1326  		if (aq_ret) {
1327  			int aq_err = pf->hw.aq.asq_last_status;
1328  
1329  			dev_err(&pf->pdev->dev,
1330  				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1331  				vf->vf_id,
1332  				ERR_PTR(aq_ret),
1333  				i40e_aq_str(&pf->hw, aq_err));
1334  
1335  			if (!aq_tmp)
1336  				aq_tmp = aq_ret;
1337  		}
1338  
1339  		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1340  							    unicast_enable,
1341  							    vl[i], NULL);
1342  		if (aq_ret) {
1343  			int aq_err = pf->hw.aq.asq_last_status;
1344  
1345  			dev_err(&pf->pdev->dev,
1346  				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1347  				vf->vf_id,
1348  				ERR_PTR(aq_ret),
1349  				i40e_aq_str(&pf->hw, aq_err));
1350  
1351  			if (!aq_tmp)
1352  				aq_tmp = aq_ret;
1353  		}
1354  	}
1355  
1356  	if (aq_tmp)
1357  		aq_ret = aq_tmp;
1358  
1359  	return aq_ret;
1360  }
1361  
1362  /**
1363   * i40e_config_vf_promiscuous_mode
1364   * @vf: pointer to the VF info
1365   * @vsi_id: VSI id
1366   * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1367   * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1368   *
1369   * Called from the VF to configure the promiscuous mode of
1370   * VF vsis and from the VF reset path to reset promiscuous mode.
1371   **/
i40e_config_vf_promiscuous_mode(struct i40e_vf * vf,u16 vsi_id,bool allmulti,bool alluni)1372  static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1373  					   u16 vsi_id,
1374  					   bool allmulti,
1375  					   bool alluni)
1376  {
1377  	struct i40e_pf *pf = vf->pf;
1378  	struct i40e_vsi *vsi;
1379  	int aq_ret = 0;
1380  	u16 num_vlans;
1381  	s16 *vl;
1382  
1383  	vsi = i40e_find_vsi_from_id(pf, vsi_id);
1384  	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1385  		return -EINVAL;
1386  
1387  	if (vf->port_vlan_id) {
1388  		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1389  					      alluni, &vf->port_vlan_id, 1);
1390  		return aq_ret;
1391  	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1392  		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1393  
1394  		if (!vl)
1395  			return -ENOMEM;
1396  
1397  		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1398  					      vl, num_vlans);
1399  		kfree(vl);
1400  		return aq_ret;
1401  	}
1402  
1403  	/* no VLANs to set on, set on VSI */
1404  	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1405  				      NULL, 0);
1406  	return aq_ret;
1407  }
1408  
1409  /**
1410   * i40e_sync_vfr_reset
1411   * @hw: pointer to hw struct
1412   * @vf_id: VF identifier
1413   *
1414   * Before trigger hardware reset, we need to know if no other process has
1415   * reserved the hardware for any reset operations. This check is done by
1416   * examining the status of the RSTAT1 register used to signal the reset.
1417   **/
i40e_sync_vfr_reset(struct i40e_hw * hw,int vf_id)1418  static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
1419  {
1420  	u32 reg;
1421  	int i;
1422  
1423  	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
1424  		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
1425  			   I40E_VFINT_ICR0_ADMINQ_MASK;
1426  		if (reg)
1427  			return 0;
1428  
1429  		usleep_range(100, 200);
1430  	}
1431  
1432  	return -EAGAIN;
1433  }
1434  
1435  /**
1436   * i40e_trigger_vf_reset
1437   * @vf: pointer to the VF structure
1438   * @flr: VFLR was issued or not
1439   *
1440   * Trigger hardware to start a reset for a particular VF. Expects the caller
1441   * to wait the proper amount of time to allow hardware to reset the VF before
1442   * it cleans up and restores VF functionality.
1443   **/
i40e_trigger_vf_reset(struct i40e_vf * vf,bool flr)1444  static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1445  {
1446  	struct i40e_pf *pf = vf->pf;
1447  	struct i40e_hw *hw = &pf->hw;
1448  	u32 reg, reg_idx, bit_idx;
1449  	bool vf_active;
1450  	u32 radq;
1451  
1452  	/* warn the VF */
1453  	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1454  
1455  	/* Disable VF's configuration API during reset. The flag is re-enabled
1456  	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1457  	 * It's normally disabled in i40e_free_vf_res(), but it's safer
1458  	 * to do it earlier to give some time to finish to any VF config
1459  	 * functions that may still be running at this point.
1460  	 */
1461  	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1462  
1463  	/* In the case of a VFLR, the HW has already reset the VF and we
1464  	 * just need to clean up, so don't hit the VFRTRIG register.
1465  	 */
1466  	if (!flr) {
1467  		/* Sync VFR reset before trigger next one */
1468  		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
1469  			    I40E_VFINT_ICR0_ADMINQ_MASK;
1470  		if (vf_active && !radq)
1471  			/* waiting for finish reset by virtual driver */
1472  			if (i40e_sync_vfr_reset(hw, vf->vf_id))
1473  				dev_info(&pf->pdev->dev,
1474  					 "Reset VF %d never finished\n",
1475  				vf->vf_id);
1476  
1477  		/* Reset VF using VPGEN_VFRTRIG reg. It is also setting
1478  		 * in progress state in rstat1 register.
1479  		 */
1480  		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1481  		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1482  		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1483  		i40e_flush(hw);
1484  	}
1485  	/* clear the VFLR bit in GLGEN_VFLRSTAT */
1486  	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1487  	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1488  	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1489  	i40e_flush(hw);
1490  
1491  	if (i40e_quiesce_vf_pci(vf))
1492  		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1493  			vf->vf_id);
1494  }
1495  
1496  /**
1497   * i40e_cleanup_reset_vf
1498   * @vf: pointer to the VF structure
1499   *
1500   * Cleanup a VF after the hardware reset is finished. Expects the caller to
1501   * have verified whether the reset is finished properly, and ensure the
1502   * minimum amount of wait time has passed.
1503   **/
i40e_cleanup_reset_vf(struct i40e_vf * vf)1504  static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1505  {
1506  	struct i40e_pf *pf = vf->pf;
1507  	struct i40e_hw *hw = &pf->hw;
1508  	u32 reg;
1509  
1510  	/* disable promisc modes in case they were enabled */
1511  	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1512  
1513  	/* free VF resources to begin resetting the VSI state */
1514  	i40e_free_vf_res(vf);
1515  
1516  	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1517  	 * By doing this we allow HW to access VF memory at any point. If we
1518  	 * did it any sooner, HW could access memory while it was being freed
1519  	 * in i40e_free_vf_res(), causing an IOMMU fault.
1520  	 *
1521  	 * On the other hand, this needs to be done ASAP, because the VF driver
1522  	 * is waiting for this to happen and may report a timeout. It's
1523  	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1524  	 * it.
1525  	 */
1526  	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1527  	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1528  	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1529  
1530  	/* reallocate VF resources to finish resetting the VSI state */
1531  	if (!i40e_alloc_vf_res(vf)) {
1532  		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1533  		i40e_enable_vf_mappings(vf);
1534  		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1535  		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1536  		/* Do not notify the client during VF init */
1537  		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1538  					&vf->vf_states))
1539  			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1540  		vf->num_vlan = 0;
1541  	}
1542  
1543  	/* Tell the VF driver the reset is done. This needs to be done only
1544  	 * after VF has been fully initialized, because the VF driver may
1545  	 * request resources immediately after setting this flag.
1546  	 */
1547  	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1548  }
1549  
1550  /**
1551   * i40e_reset_vf
1552   * @vf: pointer to the VF structure
1553   * @flr: VFLR was issued or not
1554   *
1555   * Returns true if the VF is in reset, resets successfully, or resets
1556   * are disabled and false otherwise.
1557   **/
i40e_reset_vf(struct i40e_vf * vf,bool flr)1558  bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1559  {
1560  	struct i40e_pf *pf = vf->pf;
1561  	struct i40e_hw *hw = &pf->hw;
1562  	bool rsd = false;
1563  	u32 reg;
1564  	int i;
1565  
1566  	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1567  		return true;
1568  
1569  	/* Bail out if VFs are disabled. */
1570  	if (test_bit(__I40E_VF_DISABLE, pf->state))
1571  		return true;
1572  
1573  	/* If VF is being reset already we don't need to continue. */
1574  	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1575  		return true;
1576  
1577  	i40e_trigger_vf_reset(vf, flr);
1578  
1579  	/* poll VPGEN_VFRSTAT reg to make sure
1580  	 * that reset is complete
1581  	 */
1582  	for (i = 0; i < 10; i++) {
1583  		/* VF reset requires driver to first reset the VF and then
1584  		 * poll the status register to make sure that the reset
1585  		 * completed successfully. Due to internal HW FIFO flushes,
1586  		 * we must wait 10ms before the register will be valid.
1587  		 */
1588  		usleep_range(10000, 20000);
1589  		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1590  		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1591  			rsd = true;
1592  			break;
1593  		}
1594  	}
1595  
1596  	if (flr)
1597  		usleep_range(10000, 20000);
1598  
1599  	if (!rsd)
1600  		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1601  			vf->vf_id);
1602  	usleep_range(10000, 20000);
1603  
1604  	/* On initial reset, we don't have any queues to disable */
1605  	if (vf->lan_vsi_idx != 0)
1606  		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1607  
1608  	i40e_cleanup_reset_vf(vf);
1609  
1610  	i40e_flush(hw);
1611  	usleep_range(20000, 40000);
1612  	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
1613  
1614  	return true;
1615  }
1616  
1617  /**
1618   * i40e_reset_all_vfs
1619   * @pf: pointer to the PF structure
1620   * @flr: VFLR was issued or not
1621   *
1622   * Reset all allocated VFs in one go. First, tell the hardware to reset each
1623   * VF, then do all the waiting in one chunk, and finally finish restoring each
1624   * VF after the wait. This is useful during PF routines which need to reset
1625   * all VFs, as otherwise it must perform these resets in a serialized fashion.
1626   *
1627   * Returns true if any VFs were reset, and false otherwise.
1628   **/
i40e_reset_all_vfs(struct i40e_pf * pf,bool flr)1629  bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1630  {
1631  	struct i40e_hw *hw = &pf->hw;
1632  	struct i40e_vf *vf;
1633  	u32 reg;
1634  	int i;
1635  
1636  	/* If we don't have any VFs, then there is nothing to reset */
1637  	if (!pf->num_alloc_vfs)
1638  		return false;
1639  
1640  	/* If VFs have been disabled, there is no need to reset */
1641  	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1642  		return false;
1643  
1644  	/* Begin reset on all VFs at once */
1645  	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1646  		/* If VF is being reset no need to trigger reset again */
1647  		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1648  			i40e_trigger_vf_reset(vf, flr);
1649  	}
1650  
1651  	/* HW requires some time to make sure it can flush the FIFO for a VF
1652  	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1653  	 * sequence to make sure that it has completed. We'll keep track of
1654  	 * the VFs using a simple iterator that increments once that VF has
1655  	 * finished resetting.
1656  	 */
1657  	for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
1658  		usleep_range(10000, 20000);
1659  
1660  		/* Check each VF in sequence, beginning with the VF to fail
1661  		 * the previous check.
1662  		 */
1663  		while (vf < &pf->vf[pf->num_alloc_vfs]) {
1664  			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
1665  				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1666  				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1667  					break;
1668  			}
1669  
1670  			/* If the current VF has finished resetting, move on
1671  			 * to the next VF in sequence.
1672  			 */
1673  			++vf;
1674  		}
1675  	}
1676  
1677  	if (flr)
1678  		usleep_range(10000, 20000);
1679  
1680  	/* Display a warning if at least one VF didn't manage to reset in
1681  	 * time, but continue on with the operation.
1682  	 */
1683  	if (vf < &pf->vf[pf->num_alloc_vfs])
1684  		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1685  			vf->vf_id);
1686  	usleep_range(10000, 20000);
1687  
1688  	/* Begin disabling all the rings associated with VFs, but do not wait
1689  	 * between each VF.
1690  	 */
1691  	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1692  		/* On initial reset, we don't have any queues to disable */
1693  		if (vf->lan_vsi_idx == 0)
1694  			continue;
1695  
1696  		/* If VF is reset in another thread just continue */
1697  		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1698  			continue;
1699  
1700  		i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
1701  	}
1702  
1703  	/* Now that we've notified HW to disable all of the VF rings, wait
1704  	 * until they finish.
1705  	 */
1706  	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1707  		/* On initial reset, we don't have any queues to disable */
1708  		if (vf->lan_vsi_idx == 0)
1709  			continue;
1710  
1711  		/* If VF is reset in another thread just continue */
1712  		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1713  			continue;
1714  
1715  		i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
1716  	}
1717  
1718  	/* Hw may need up to 50ms to finish disabling the RX queues. We
1719  	 * minimize the wait by delaying only once for all VFs.
1720  	 */
1721  	mdelay(50);
1722  
1723  	/* Finish the reset on each VF */
1724  	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1725  		/* If VF is reset in another thread just continue */
1726  		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1727  			continue;
1728  
1729  		i40e_cleanup_reset_vf(vf);
1730  	}
1731  
1732  	i40e_flush(hw);
1733  	usleep_range(20000, 40000);
1734  	clear_bit(__I40E_VF_DISABLE, pf->state);
1735  
1736  	return true;
1737  }
1738  
1739  /**
1740   * i40e_free_vfs
1741   * @pf: pointer to the PF structure
1742   *
1743   * free VF resources
1744   **/
i40e_free_vfs(struct i40e_pf * pf)1745  void i40e_free_vfs(struct i40e_pf *pf)
1746  {
1747  	struct i40e_hw *hw = &pf->hw;
1748  	u32 reg_idx, bit_idx;
1749  	int i, tmp, vf_id;
1750  
1751  	if (!pf->vf)
1752  		return;
1753  
1754  	set_bit(__I40E_VFS_RELEASING, pf->state);
1755  	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1756  		usleep_range(1000, 2000);
1757  
1758  	i40e_notify_client_of_vf_enable(pf, 0);
1759  
1760  	/* Disable IOV before freeing resources. This lets any VF drivers
1761  	 * running in the host get themselves cleaned up before we yank
1762  	 * the carpet out from underneath their feet.
1763  	 */
1764  	if (!pci_vfs_assigned(pf->pdev))
1765  		pci_disable_sriov(pf->pdev);
1766  	else
1767  		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1768  
1769  	/* Amortize wait time by stopping all VFs at the same time */
1770  	for (i = 0; i < pf->num_alloc_vfs; i++) {
1771  		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1772  			continue;
1773  
1774  		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1775  	}
1776  
1777  	for (i = 0; i < pf->num_alloc_vfs; i++) {
1778  		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1779  			continue;
1780  
1781  		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1782  	}
1783  
1784  	/* free up VF resources */
1785  	tmp = pf->num_alloc_vfs;
1786  	pf->num_alloc_vfs = 0;
1787  	for (i = 0; i < tmp; i++) {
1788  		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1789  			i40e_free_vf_res(&pf->vf[i]);
1790  		/* disable qp mappings */
1791  		i40e_disable_vf_mappings(&pf->vf[i]);
1792  	}
1793  
1794  	kfree(pf->vf);
1795  	pf->vf = NULL;
1796  
1797  	/* This check is for when the driver is unloaded while VFs are
1798  	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1799  	 * before this function ever gets called.
1800  	 */
1801  	if (!pci_vfs_assigned(pf->pdev)) {
1802  		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1803  		 * work correctly when SR-IOV gets re-enabled.
1804  		 */
1805  		for (vf_id = 0; vf_id < tmp; vf_id++) {
1806  			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1807  			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1808  			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1809  		}
1810  	}
1811  	clear_bit(__I40E_VF_DISABLE, pf->state);
1812  	clear_bit(__I40E_VFS_RELEASING, pf->state);
1813  }
1814  
1815  #ifdef CONFIG_PCI_IOV
1816  /**
1817   * i40e_alloc_vfs
1818   * @pf: pointer to the PF structure
1819   * @num_alloc_vfs: number of VFs to allocate
1820   *
1821   * allocate VF resources
1822   **/
i40e_alloc_vfs(struct i40e_pf * pf,u16 num_alloc_vfs)1823  int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1824  {
1825  	struct i40e_vf *vfs;
1826  	int i, ret = 0;
1827  
1828  	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1829  	i40e_irq_dynamic_disable_icr0(pf);
1830  
1831  	/* Check to see if we're just allocating resources for extant VFs */
1832  	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1833  		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1834  		if (ret) {
1835  			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1836  			pf->num_alloc_vfs = 0;
1837  			goto err_iov;
1838  		}
1839  	}
1840  	/* allocate memory */
1841  	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1842  	if (!vfs) {
1843  		ret = -ENOMEM;
1844  		goto err_alloc;
1845  	}
1846  	pf->vf = vfs;
1847  
1848  	/* apply default profile */
1849  	for (i = 0; i < num_alloc_vfs; i++) {
1850  		vfs[i].pf = pf;
1851  		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1852  		vfs[i].vf_id = i;
1853  
1854  		/* assign default capabilities */
1855  		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1856  		vfs[i].spoofchk = true;
1857  
1858  		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1859  
1860  	}
1861  	pf->num_alloc_vfs = num_alloc_vfs;
1862  
1863  	/* VF resources get allocated during reset */
1864  	i40e_reset_all_vfs(pf, false);
1865  
1866  	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1867  
1868  err_alloc:
1869  	if (ret)
1870  		i40e_free_vfs(pf);
1871  err_iov:
1872  	/* Re-enable interrupt 0. */
1873  	i40e_irq_dynamic_enable_icr0(pf);
1874  	return ret;
1875  }
1876  
1877  #endif
1878  /**
1879   * i40e_pci_sriov_enable
1880   * @pdev: pointer to a pci_dev structure
1881   * @num_vfs: number of VFs to allocate
1882   *
1883   * Enable or change the number of VFs
1884   **/
i40e_pci_sriov_enable(struct pci_dev * pdev,int num_vfs)1885  static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1886  {
1887  #ifdef CONFIG_PCI_IOV
1888  	struct i40e_pf *pf = pci_get_drvdata(pdev);
1889  	int pre_existing_vfs = pci_num_vf(pdev);
1890  	int err = 0;
1891  
1892  	if (test_bit(__I40E_TESTING, pf->state)) {
1893  		dev_warn(&pdev->dev,
1894  			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1895  		err = -EPERM;
1896  		goto err_out;
1897  	}
1898  
1899  	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1900  		i40e_free_vfs(pf);
1901  	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1902  		goto out;
1903  
1904  	if (num_vfs > pf->num_req_vfs) {
1905  		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1906  			 num_vfs, pf->num_req_vfs);
1907  		err = -EPERM;
1908  		goto err_out;
1909  	}
1910  
1911  	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1912  	err = i40e_alloc_vfs(pf, num_vfs);
1913  	if (err) {
1914  		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1915  		goto err_out;
1916  	}
1917  
1918  out:
1919  	return num_vfs;
1920  
1921  err_out:
1922  	return err;
1923  #endif
1924  	return 0;
1925  }
1926  
1927  /**
1928   * i40e_pci_sriov_configure
1929   * @pdev: pointer to a pci_dev structure
1930   * @num_vfs: number of VFs to allocate
1931   *
1932   * Enable or change the number of VFs. Called when the user updates the number
1933   * of VFs in sysfs.
1934   **/
i40e_pci_sriov_configure(struct pci_dev * pdev,int num_vfs)1935  int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1936  {
1937  	struct i40e_pf *pf = pci_get_drvdata(pdev);
1938  	int ret = 0;
1939  
1940  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1941  		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1942  		return -EAGAIN;
1943  	}
1944  
1945  	if (num_vfs) {
1946  		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1947  			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1948  			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1949  		}
1950  		ret = i40e_pci_sriov_enable(pdev, num_vfs);
1951  		goto sriov_configure_out;
1952  	}
1953  
1954  	if (!pci_vfs_assigned(pf->pdev)) {
1955  		i40e_free_vfs(pf);
1956  		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1957  		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1958  	} else {
1959  		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1960  		ret = -EINVAL;
1961  		goto sriov_configure_out;
1962  	}
1963  sriov_configure_out:
1964  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1965  	return ret;
1966  }
1967  
1968  /***********************virtual channel routines******************/
1969  
1970  /**
1971   * i40e_vc_send_msg_to_vf
1972   * @vf: pointer to the VF info
1973   * @v_opcode: virtual channel opcode
1974   * @v_retval: virtual channel return value
1975   * @msg: pointer to the msg buffer
1976   * @msglen: msg length
1977   *
1978   * send msg to VF
1979   **/
i40e_vc_send_msg_to_vf(struct i40e_vf * vf,u32 v_opcode,u32 v_retval,u8 * msg,u16 msglen)1980  static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1981  				  u32 v_retval, u8 *msg, u16 msglen)
1982  {
1983  	struct i40e_pf *pf;
1984  	struct i40e_hw *hw;
1985  	int abs_vf_id;
1986  	int aq_ret;
1987  
1988  	/* validate the request */
1989  	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1990  		return -EINVAL;
1991  
1992  	pf = vf->pf;
1993  	hw = &pf->hw;
1994  	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1995  
1996  	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,	v_opcode, v_retval,
1997  					msg, msglen, NULL);
1998  	if (aq_ret) {
1999  		dev_info(&pf->pdev->dev,
2000  			 "Unable to send the message to VF %d aq_err %d\n",
2001  			 vf->vf_id, pf->hw.aq.asq_last_status);
2002  		return -EIO;
2003  	}
2004  
2005  	return 0;
2006  }
2007  
2008  /**
2009   * i40e_vc_send_resp_to_vf
2010   * @vf: pointer to the VF info
2011   * @opcode: operation code
2012   * @retval: return value
2013   *
2014   * send resp msg to VF
2015   **/
i40e_vc_send_resp_to_vf(struct i40e_vf * vf,enum virtchnl_ops opcode,int retval)2016  static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
2017  				   enum virtchnl_ops opcode,
2018  				   int retval)
2019  {
2020  	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
2021  }
2022  
2023  /**
2024   * i40e_sync_vf_state
2025   * @vf: pointer to the VF info
2026   * @state: VF state
2027   *
2028   * Called from a VF message to synchronize the service with a potential
2029   * VF reset state
2030   **/
i40e_sync_vf_state(struct i40e_vf * vf,enum i40e_vf_states state)2031  static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
2032  {
2033  	int i;
2034  
2035  	/* When handling some messages, it needs VF state to be set.
2036  	 * It is possible that this flag is cleared during VF reset,
2037  	 * so there is a need to wait until the end of the reset to
2038  	 * handle the request message correctly.
2039  	 */
2040  	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
2041  		if (test_bit(state, &vf->vf_states))
2042  			return true;
2043  		usleep_range(10000, 20000);
2044  	}
2045  
2046  	return test_bit(state, &vf->vf_states);
2047  }
2048  
2049  /**
2050   * i40e_vc_get_version_msg
2051   * @vf: pointer to the VF info
2052   * @msg: pointer to the msg buffer
2053   *
2054   * called from the VF to request the API version used by the PF
2055   **/
i40e_vc_get_version_msg(struct i40e_vf * vf,u8 * msg)2056  static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2057  {
2058  	struct virtchnl_version_info info = {
2059  		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2060  	};
2061  
2062  	vf->vf_ver = *(struct virtchnl_version_info *)msg;
2063  	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2064  	if (VF_IS_V10(&vf->vf_ver))
2065  		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2066  	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2067  				      0, (u8 *)&info,
2068  				      sizeof(struct virtchnl_version_info));
2069  }
2070  
2071  /**
2072   * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2073   * @vf: pointer to VF structure
2074   **/
i40e_del_qch(struct i40e_vf * vf)2075  static void i40e_del_qch(struct i40e_vf *vf)
2076  {
2077  	struct i40e_pf *pf = vf->pf;
2078  	int i;
2079  
2080  	/* first element in the array belongs to primary VF VSI and we shouldn't
2081  	 * delete it. We should however delete the rest of the VSIs created
2082  	 */
2083  	for (i = 1; i < vf->num_tc; i++) {
2084  		if (vf->ch[i].vsi_idx) {
2085  			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2086  			vf->ch[i].vsi_idx = 0;
2087  			vf->ch[i].vsi_id = 0;
2088  		}
2089  	}
2090  }
2091  
2092  /**
2093   * i40e_vc_get_max_frame_size
2094   * @vf: pointer to the VF
2095   *
2096   * Max frame size is determined based on the current port's max frame size and
2097   * whether a port VLAN is configured on this VF. The VF is not aware whether
2098   * it's in a port VLAN so the PF needs to account for this in max frame size
2099   * checks and sending the max frame size to the VF.
2100   **/
i40e_vc_get_max_frame_size(struct i40e_vf * vf)2101  static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2102  {
2103  	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2104  
2105  	if (vf->port_vlan_id)
2106  		max_frame_size -= VLAN_HLEN;
2107  
2108  	return max_frame_size;
2109  }
2110  
2111  /**
2112   * i40e_vc_get_vf_resources_msg
2113   * @vf: pointer to the VF info
2114   * @msg: pointer to the msg buffer
2115   *
2116   * called from the VF to request its resources
2117   **/
i40e_vc_get_vf_resources_msg(struct i40e_vf * vf,u8 * msg)2118  static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2119  {
2120  	struct virtchnl_vf_resource *vfres = NULL;
2121  	struct i40e_pf *pf = vf->pf;
2122  	struct i40e_vsi *vsi;
2123  	int num_vsis = 1;
2124  	int aq_ret = 0;
2125  	size_t len = 0;
2126  	int ret;
2127  
2128  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2129  		aq_ret = -EINVAL;
2130  		goto err;
2131  	}
2132  
2133  	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
2134  	vfres = kzalloc(len, GFP_KERNEL);
2135  	if (!vfres) {
2136  		aq_ret = -ENOMEM;
2137  		len = 0;
2138  		goto err;
2139  	}
2140  	if (VF_IS_V11(&vf->vf_ver))
2141  		vf->driver_caps = *(u32 *)msg;
2142  	else
2143  		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2144  				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2145  				  VIRTCHNL_VF_OFFLOAD_VLAN;
2146  
2147  	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2148  	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2149  	vsi = pf->vsi[vf->lan_vsi_idx];
2150  	if (!vsi->info.pvid)
2151  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2152  
2153  	if (i40e_vf_client_capable(pf, vf->vf_id) &&
2154  	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
2155  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
2156  		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2157  	} else {
2158  		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2159  	}
2160  
2161  	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2162  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2163  	} else {
2164  		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2165  		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2166  			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2167  		else
2168  			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2169  	}
2170  
2171  	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2172  		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2173  			vfres->vf_cap_flags |=
2174  				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2175  	}
2176  
2177  	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2178  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2179  
2180  	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2181  	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2182  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2183  
2184  	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2185  		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2186  			dev_err(&pf->pdev->dev,
2187  				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2188  				 vf->vf_id);
2189  			aq_ret = -EINVAL;
2190  			goto err;
2191  		}
2192  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2193  	}
2194  
2195  	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2196  		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2197  			vfres->vf_cap_flags |=
2198  					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2199  	}
2200  
2201  	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2202  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2203  
2204  	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2205  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2206  
2207  	vfres->num_vsis = num_vsis;
2208  	vfres->num_queue_pairs = vf->num_queue_pairs;
2209  	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2210  	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2211  	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2212  	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2213  
2214  	if (vf->lan_vsi_idx) {
2215  		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2216  		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2217  		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2218  		/* VFs only use TC 0 */
2219  		vfres->vsi_res[0].qset_handle
2220  					  = le16_to_cpu(vsi->info.qs_handle[0]);
2221  		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2222  			spin_lock_bh(&vsi->mac_filter_hash_lock);
2223  			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2224  			eth_zero_addr(vf->default_lan_addr.addr);
2225  			spin_unlock_bh(&vsi->mac_filter_hash_lock);
2226  		}
2227  		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2228  				vf->default_lan_addr.addr);
2229  	}
2230  	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2231  
2232  err:
2233  	/* send the response back to the VF */
2234  	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2235  				     aq_ret, (u8 *)vfres, len);
2236  
2237  	kfree(vfres);
2238  	return ret;
2239  }
2240  
2241  /**
2242   * i40e_vc_config_promiscuous_mode_msg
2243   * @vf: pointer to the VF info
2244   * @msg: pointer to the msg buffer
2245   *
2246   * called from the VF to configure the promiscuous mode of
2247   * VF vsis
2248   **/
i40e_vc_config_promiscuous_mode_msg(struct i40e_vf * vf,u8 * msg)2249  static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2250  {
2251  	struct virtchnl_promisc_info *info =
2252  	    (struct virtchnl_promisc_info *)msg;
2253  	struct i40e_pf *pf = vf->pf;
2254  	bool allmulti = false;
2255  	bool alluni = false;
2256  	int aq_ret = 0;
2257  
2258  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2259  		aq_ret = -EINVAL;
2260  		goto err_out;
2261  	}
2262  	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2263  		dev_err(&pf->pdev->dev,
2264  			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
2265  			vf->vf_id);
2266  
2267  		/* Lie to the VF on purpose, because this is an error we can
2268  		 * ignore. Unprivileged VF is not a virtual channel error.
2269  		 */
2270  		aq_ret = 0;
2271  		goto err_out;
2272  	}
2273  
2274  	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2275  		aq_ret = -EINVAL;
2276  		goto err_out;
2277  	}
2278  
2279  	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2280  		aq_ret = -EINVAL;
2281  		goto err_out;
2282  	}
2283  
2284  	/* Multicast promiscuous handling*/
2285  	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2286  		allmulti = true;
2287  
2288  	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2289  		alluni = true;
2290  	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2291  						 alluni);
2292  	if (aq_ret)
2293  		goto err_out;
2294  
2295  	if (allmulti) {
2296  		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2297  				      &vf->vf_states))
2298  			dev_info(&pf->pdev->dev,
2299  				 "VF %d successfully set multicast promiscuous mode\n",
2300  				 vf->vf_id);
2301  	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2302  				      &vf->vf_states))
2303  		dev_info(&pf->pdev->dev,
2304  			 "VF %d successfully unset multicast promiscuous mode\n",
2305  			 vf->vf_id);
2306  
2307  	if (alluni) {
2308  		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2309  				      &vf->vf_states))
2310  			dev_info(&pf->pdev->dev,
2311  				 "VF %d successfully set unicast promiscuous mode\n",
2312  				 vf->vf_id);
2313  	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2314  				      &vf->vf_states))
2315  		dev_info(&pf->pdev->dev,
2316  			 "VF %d successfully unset unicast promiscuous mode\n",
2317  			 vf->vf_id);
2318  
2319  err_out:
2320  	/* send the response to the VF */
2321  	return i40e_vc_send_resp_to_vf(vf,
2322  				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2323  				       aq_ret);
2324  }
2325  
2326  /**
2327   * i40e_vc_config_queues_msg
2328   * @vf: pointer to the VF info
2329   * @msg: pointer to the msg buffer
2330   *
2331   * called from the VF to configure the rx/tx
2332   * queues
2333   **/
i40e_vc_config_queues_msg(struct i40e_vf * vf,u8 * msg)2334  static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2335  {
2336  	struct virtchnl_vsi_queue_config_info *qci =
2337  	    (struct virtchnl_vsi_queue_config_info *)msg;
2338  	struct virtchnl_queue_pair_info *qpi;
2339  	u16 vsi_id, vsi_queue_id = 0;
2340  	struct i40e_pf *pf = vf->pf;
2341  	int i, j = 0, idx = 0;
2342  	struct i40e_vsi *vsi;
2343  	u16 num_qps_all = 0;
2344  	int aq_ret = 0;
2345  
2346  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2347  		aq_ret = -EINVAL;
2348  		goto error_param;
2349  	}
2350  
2351  	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2352  		aq_ret = -EINVAL;
2353  		goto error_param;
2354  	}
2355  
2356  	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2357  		aq_ret = -EINVAL;
2358  		goto error_param;
2359  	}
2360  
2361  	if (vf->adq_enabled) {
2362  		for (i = 0; i < vf->num_tc; i++)
2363  			num_qps_all += vf->ch[i].num_qps;
2364  		if (num_qps_all != qci->num_queue_pairs) {
2365  			aq_ret = -EINVAL;
2366  			goto error_param;
2367  		}
2368  	}
2369  
2370  	vsi_id = qci->vsi_id;
2371  
2372  	for (i = 0; i < qci->num_queue_pairs; i++) {
2373  		qpi = &qci->qpair[i];
2374  
2375  		if (!vf->adq_enabled) {
2376  			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2377  						      qpi->txq.queue_id)) {
2378  				aq_ret = -EINVAL;
2379  				goto error_param;
2380  			}
2381  
2382  			vsi_queue_id = qpi->txq.queue_id;
2383  
2384  			if (qpi->txq.vsi_id != qci->vsi_id ||
2385  			    qpi->rxq.vsi_id != qci->vsi_id ||
2386  			    qpi->rxq.queue_id != vsi_queue_id) {
2387  				aq_ret = -EINVAL;
2388  				goto error_param;
2389  			}
2390  		}
2391  
2392  		if (vf->adq_enabled) {
2393  			if (idx >= ARRAY_SIZE(vf->ch)) {
2394  				aq_ret = -ENODEV;
2395  				goto error_param;
2396  			}
2397  			vsi_id = vf->ch[idx].vsi_id;
2398  		}
2399  
2400  		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2401  					     &qpi->rxq) ||
2402  		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2403  					     &qpi->txq)) {
2404  			aq_ret = -EINVAL;
2405  			goto error_param;
2406  		}
2407  
2408  		/* For ADq there can be up to 4 VSIs with max 4 queues each.
2409  		 * VF does not know about these additional VSIs and all
2410  		 * it cares is about its own queues. PF configures these queues
2411  		 * to its appropriate VSIs based on TC mapping
2412  		 */
2413  		if (vf->adq_enabled) {
2414  			if (idx >= ARRAY_SIZE(vf->ch)) {
2415  				aq_ret = -ENODEV;
2416  				goto error_param;
2417  			}
2418  			if (j == (vf->ch[idx].num_qps - 1)) {
2419  				idx++;
2420  				j = 0; /* resetting the queue count */
2421  				vsi_queue_id = 0;
2422  			} else {
2423  				j++;
2424  				vsi_queue_id++;
2425  			}
2426  		}
2427  	}
2428  	/* set vsi num_queue_pairs in use to num configured by VF */
2429  	if (!vf->adq_enabled) {
2430  		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2431  			qci->num_queue_pairs;
2432  	} else {
2433  		for (i = 0; i < vf->num_tc; i++) {
2434  			vsi = pf->vsi[vf->ch[i].vsi_idx];
2435  			vsi->num_queue_pairs = vf->ch[i].num_qps;
2436  
2437  			if (i40e_update_adq_vsi_queues(vsi, i)) {
2438  				aq_ret = -EIO;
2439  				goto error_param;
2440  			}
2441  		}
2442  	}
2443  
2444  error_param:
2445  	/* send the response to the VF */
2446  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2447  				       aq_ret);
2448  }
2449  
2450  /**
2451   * i40e_validate_queue_map - check queue map is valid
2452   * @vf: the VF structure pointer
2453   * @vsi_id: vsi id
2454   * @queuemap: Tx or Rx queue map
2455   *
2456   * check if Tx or Rx queue map is valid
2457   **/
i40e_validate_queue_map(struct i40e_vf * vf,u16 vsi_id,unsigned long queuemap)2458  static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2459  				   unsigned long queuemap)
2460  {
2461  	u16 vsi_queue_id, queue_id;
2462  
2463  	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2464  		if (vf->adq_enabled) {
2465  			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2466  			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2467  		} else {
2468  			queue_id = vsi_queue_id;
2469  		}
2470  
2471  		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2472  			return -EINVAL;
2473  	}
2474  
2475  	return 0;
2476  }
2477  
2478  /**
2479   * i40e_vc_config_irq_map_msg
2480   * @vf: pointer to the VF info
2481   * @msg: pointer to the msg buffer
2482   *
2483   * called from the VF to configure the irq to
2484   * queue map
2485   **/
i40e_vc_config_irq_map_msg(struct i40e_vf * vf,u8 * msg)2486  static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2487  {
2488  	struct virtchnl_irq_map_info *irqmap_info =
2489  	    (struct virtchnl_irq_map_info *)msg;
2490  	struct virtchnl_vector_map *map;
2491  	int aq_ret = 0;
2492  	u16 vsi_id;
2493  	int i;
2494  
2495  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2496  		aq_ret = -EINVAL;
2497  		goto error_param;
2498  	}
2499  
2500  	if (irqmap_info->num_vectors >
2501  	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
2502  		aq_ret = -EINVAL;
2503  		goto error_param;
2504  	}
2505  
2506  	for (i = 0; i < irqmap_info->num_vectors; i++) {
2507  		map = &irqmap_info->vecmap[i];
2508  		/* validate msg params */
2509  		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2510  		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2511  			aq_ret = -EINVAL;
2512  			goto error_param;
2513  		}
2514  		vsi_id = map->vsi_id;
2515  
2516  		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2517  			aq_ret = -EINVAL;
2518  			goto error_param;
2519  		}
2520  
2521  		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2522  			aq_ret = -EINVAL;
2523  			goto error_param;
2524  		}
2525  
2526  		i40e_config_irq_link_list(vf, vsi_id, map);
2527  	}
2528  error_param:
2529  	/* send the response to the VF */
2530  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2531  				       aq_ret);
2532  }
2533  
2534  /**
2535   * i40e_ctrl_vf_tx_rings
2536   * @vsi: the SRIOV VSI being configured
2537   * @q_map: bit map of the queues to be enabled
2538   * @enable: start or stop the queue
2539   **/
i40e_ctrl_vf_tx_rings(struct i40e_vsi * vsi,unsigned long q_map,bool enable)2540  static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2541  				 bool enable)
2542  {
2543  	struct i40e_pf *pf = vsi->back;
2544  	int ret = 0;
2545  	u16 q_id;
2546  
2547  	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2548  		ret = i40e_control_wait_tx_q(vsi->seid, pf,
2549  					     vsi->base_queue + q_id,
2550  					     false /*is xdp*/, enable);
2551  		if (ret)
2552  			break;
2553  	}
2554  	return ret;
2555  }
2556  
2557  /**
2558   * i40e_ctrl_vf_rx_rings
2559   * @vsi: the SRIOV VSI being configured
2560   * @q_map: bit map of the queues to be enabled
2561   * @enable: start or stop the queue
2562   **/
i40e_ctrl_vf_rx_rings(struct i40e_vsi * vsi,unsigned long q_map,bool enable)2563  static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2564  				 bool enable)
2565  {
2566  	struct i40e_pf *pf = vsi->back;
2567  	int ret = 0;
2568  	u16 q_id;
2569  
2570  	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2571  		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2572  					     enable);
2573  		if (ret)
2574  			break;
2575  	}
2576  	return ret;
2577  }
2578  
2579  /**
2580   * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
2581   * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2582   *
2583   * Returns true if validation was successful, else false.
2584   */
i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select * vqs)2585  static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2586  {
2587  	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2588  	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2589  	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2590  		return false;
2591  
2592  	return true;
2593  }
2594  
2595  /**
2596   * i40e_vc_enable_queues_msg
2597   * @vf: pointer to the VF info
2598   * @msg: pointer to the msg buffer
2599   *
2600   * called from the VF to enable all or specific queue(s)
2601   **/
i40e_vc_enable_queues_msg(struct i40e_vf * vf,u8 * msg)2602  static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2603  {
2604  	struct virtchnl_queue_select *vqs =
2605  	    (struct virtchnl_queue_select *)msg;
2606  	struct i40e_pf *pf = vf->pf;
2607  	int aq_ret = 0;
2608  	int i;
2609  
2610  	if (vf->is_disabled_from_host) {
2611  		aq_ret = -EPERM;
2612  		dev_info(&pf->pdev->dev,
2613  			 "Admin has disabled VF %d, will not enable queues\n",
2614  			 vf->vf_id);
2615  		goto error_param;
2616  	}
2617  
2618  	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2619  		aq_ret = -EINVAL;
2620  		goto error_param;
2621  	}
2622  
2623  	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2624  		aq_ret = -EINVAL;
2625  		goto error_param;
2626  	}
2627  
2628  	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2629  		aq_ret = -EINVAL;
2630  		goto error_param;
2631  	}
2632  
2633  	/* Use the queue bit map sent by the VF */
2634  	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2635  				  true)) {
2636  		aq_ret = -EIO;
2637  		goto error_param;
2638  	}
2639  	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2640  				  true)) {
2641  		aq_ret = -EIO;
2642  		goto error_param;
2643  	}
2644  
2645  	/* need to start the rings for additional ADq VSI's as well */
2646  	if (vf->adq_enabled) {
2647  		/* zero belongs to LAN VSI */
2648  		for (i = 1; i < vf->num_tc; i++) {
2649  			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2650  				aq_ret = -EIO;
2651  		}
2652  	}
2653  
2654  error_param:
2655  	/* send the response to the VF */
2656  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2657  				       aq_ret);
2658  }
2659  
2660  /**
2661   * i40e_vc_disable_queues_msg
2662   * @vf: pointer to the VF info
2663   * @msg: pointer to the msg buffer
2664   *
2665   * called from the VF to disable all or specific
2666   * queue(s)
2667   **/
i40e_vc_disable_queues_msg(struct i40e_vf * vf,u8 * msg)2668  static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2669  {
2670  	struct virtchnl_queue_select *vqs =
2671  	    (struct virtchnl_queue_select *)msg;
2672  	struct i40e_pf *pf = vf->pf;
2673  	int aq_ret = 0;
2674  
2675  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2676  		aq_ret = -EINVAL;
2677  		goto error_param;
2678  	}
2679  
2680  	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2681  		aq_ret = -EINVAL;
2682  		goto error_param;
2683  	}
2684  
2685  	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2686  		aq_ret = -EINVAL;
2687  		goto error_param;
2688  	}
2689  
2690  	/* Use the queue bit map sent by the VF */
2691  	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2692  				  false)) {
2693  		aq_ret = -EIO;
2694  		goto error_param;
2695  	}
2696  	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2697  				  false)) {
2698  		aq_ret = -EIO;
2699  		goto error_param;
2700  	}
2701  error_param:
2702  	/* send the response to the VF */
2703  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2704  				       aq_ret);
2705  }
2706  
2707  /**
2708   * i40e_check_enough_queue - find big enough queue number
2709   * @vf: pointer to the VF info
2710   * @needed: the number of items needed
2711   *
2712   * Returns the base item index of the queue, or negative for error
2713   **/
i40e_check_enough_queue(struct i40e_vf * vf,u16 needed)2714  static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2715  {
2716  	unsigned int  i, cur_queues, more, pool_size;
2717  	struct i40e_lump_tracking *pile;
2718  	struct i40e_pf *pf = vf->pf;
2719  	struct i40e_vsi *vsi;
2720  
2721  	vsi = pf->vsi[vf->lan_vsi_idx];
2722  	cur_queues = vsi->alloc_queue_pairs;
2723  
2724  	/* if current allocated queues are enough for need */
2725  	if (cur_queues >= needed)
2726  		return vsi->base_queue;
2727  
2728  	pile = pf->qp_pile;
2729  	if (cur_queues > 0) {
2730  		/* if the allocated queues are not zero
2731  		 * just check if there are enough queues for more
2732  		 * behind the allocated queues.
2733  		 */
2734  		more = needed - cur_queues;
2735  		for (i = vsi->base_queue + cur_queues;
2736  			i < pile->num_entries; i++) {
2737  			if (pile->list[i] & I40E_PILE_VALID_BIT)
2738  				break;
2739  
2740  			if (more-- == 1)
2741  				/* there is enough */
2742  				return vsi->base_queue;
2743  		}
2744  	}
2745  
2746  	pool_size = 0;
2747  	for (i = 0; i < pile->num_entries; i++) {
2748  		if (pile->list[i] & I40E_PILE_VALID_BIT) {
2749  			pool_size = 0;
2750  			continue;
2751  		}
2752  		if (needed <= ++pool_size)
2753  			/* there is enough */
2754  			return i;
2755  	}
2756  
2757  	return -ENOMEM;
2758  }
2759  
2760  /**
2761   * i40e_vc_request_queues_msg
2762   * @vf: pointer to the VF info
2763   * @msg: pointer to the msg buffer
2764   *
2765   * VFs get a default number of queues but can use this message to request a
2766   * different number.  If the request is successful, PF will reset the VF and
2767   * return 0.  If unsuccessful, PF will send message informing VF of number of
2768   * available queues and return result of sending VF a message.
2769   **/
i40e_vc_request_queues_msg(struct i40e_vf * vf,u8 * msg)2770  static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2771  {
2772  	struct virtchnl_vf_res_request *vfres =
2773  		(struct virtchnl_vf_res_request *)msg;
2774  	u16 req_pairs = vfres->num_queue_pairs;
2775  	u8 cur_pairs = vf->num_queue_pairs;
2776  	struct i40e_pf *pf = vf->pf;
2777  
2778  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2779  		return -EINVAL;
2780  
2781  	if (req_pairs > I40E_MAX_VF_QUEUES) {
2782  		dev_err(&pf->pdev->dev,
2783  			"VF %d tried to request more than %d queues.\n",
2784  			vf->vf_id,
2785  			I40E_MAX_VF_QUEUES);
2786  		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2787  	} else if (req_pairs - cur_pairs > pf->queues_left) {
2788  		dev_warn(&pf->pdev->dev,
2789  			 "VF %d requested %d more queues, but only %d left.\n",
2790  			 vf->vf_id,
2791  			 req_pairs - cur_pairs,
2792  			 pf->queues_left);
2793  		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2794  	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2795  		dev_warn(&pf->pdev->dev,
2796  			 "VF %d requested %d more queues, but there is not enough for it.\n",
2797  			 vf->vf_id,
2798  			 req_pairs - cur_pairs);
2799  		vfres->num_queue_pairs = cur_pairs;
2800  	} else {
2801  		/* successful request */
2802  		vf->num_req_queues = req_pairs;
2803  		i40e_vc_reset_vf(vf, true);
2804  		return 0;
2805  	}
2806  
2807  	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2808  				      (u8 *)vfres, sizeof(*vfres));
2809  }
2810  
2811  /**
2812   * i40e_vc_get_stats_msg
2813   * @vf: pointer to the VF info
2814   * @msg: pointer to the msg buffer
2815   *
2816   * called from the VF to get vsi stats
2817   **/
i40e_vc_get_stats_msg(struct i40e_vf * vf,u8 * msg)2818  static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2819  {
2820  	struct virtchnl_queue_select *vqs =
2821  	    (struct virtchnl_queue_select *)msg;
2822  	struct i40e_pf *pf = vf->pf;
2823  	struct i40e_eth_stats stats;
2824  	int aq_ret = 0;
2825  	struct i40e_vsi *vsi;
2826  
2827  	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2828  
2829  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2830  		aq_ret = -EINVAL;
2831  		goto error_param;
2832  	}
2833  
2834  	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2835  		aq_ret = -EINVAL;
2836  		goto error_param;
2837  	}
2838  
2839  	vsi = pf->vsi[vf->lan_vsi_idx];
2840  	if (!vsi) {
2841  		aq_ret = -EINVAL;
2842  		goto error_param;
2843  	}
2844  	i40e_update_eth_stats(vsi);
2845  	stats = vsi->eth_stats;
2846  
2847  error_param:
2848  	/* send the response back to the VF */
2849  	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2850  				      (u8 *)&stats, sizeof(stats));
2851  }
2852  
2853  /**
2854   * i40e_can_vf_change_mac
2855   * @vf: pointer to the VF info
2856   *
2857   * Return true if the VF is allowed to change its MAC filters, false otherwise
2858   */
i40e_can_vf_change_mac(struct i40e_vf * vf)2859  static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
2860  {
2861  	/* If the VF MAC address has been set administratively (via the
2862  	 * ndo_set_vf_mac command), then deny permission to the VF to
2863  	 * add/delete unicast MAC addresses, unless the VF is trusted
2864  	 */
2865  	if (vf->pf_set_mac && !vf->trusted)
2866  		return false;
2867  
2868  	return true;
2869  }
2870  
2871  #define I40E_MAX_MACVLAN_PER_HW 3072
2872  #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
2873  	(num_ports))
2874  /* If the VF is not trusted restrict the number of MAC/VLAN it can program
2875   * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
2876   */
2877  #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2878  #define I40E_VC_MAX_VLAN_PER_VF 16
2879  
2880  #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
2881  ({	typeof(vf_num) vf_num_ = (vf_num);				\
2882  	typeof(num_ports) num_ports_ = (num_ports);			\
2883  	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
2884  	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
2885  	I40E_VC_MAX_MAC_ADDR_PER_VF; })
2886  /**
2887   * i40e_check_vf_permission
2888   * @vf: pointer to the VF info
2889   * @al: MAC address list from virtchnl
2890   *
2891   * Check that the given list of MAC addresses is allowed. Will return -EPERM
2892   * if any address in the list is not valid. Checks the following conditions:
2893   *
2894   * 1) broadcast and zero addresses are never valid
2895   * 2) unicast addresses are not allowed if the VMM has administratively set
2896   *    the VF MAC address, unless the VF is marked as privileged.
2897   * 3) There is enough space to add all the addresses.
2898   *
2899   * Note that to guarantee consistency, it is expected this function be called
2900   * while holding the mac_filter_hash_lock, as otherwise the current number of
2901   * addresses might not be accurate.
2902   **/
i40e_check_vf_permission(struct i40e_vf * vf,struct virtchnl_ether_addr_list * al)2903  static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2904  					   struct virtchnl_ether_addr_list *al)
2905  {
2906  	struct i40e_pf *pf = vf->pf;
2907  	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2908  	struct i40e_hw *hw = &pf->hw;
2909  	int mac2add_cnt = 0;
2910  	int i;
2911  
2912  	for (i = 0; i < al->num_elements; i++) {
2913  		struct i40e_mac_filter *f;
2914  		u8 *addr = al->list[i].addr;
2915  
2916  		if (is_broadcast_ether_addr(addr) ||
2917  		    is_zero_ether_addr(addr)) {
2918  			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2919  				addr);
2920  			return -EINVAL;
2921  		}
2922  
2923  		/* If the host VMM administrator has set the VF MAC address
2924  		 * administratively via the ndo_set_vf_mac command then deny
2925  		 * permission to the VF to add or delete unicast MAC addresses.
2926  		 * Unless the VF is privileged and then it can do whatever.
2927  		 * The VF may request to set the MAC address filter already
2928  		 * assigned to it so do not return an error in that case.
2929  		 */
2930  		if (!i40e_can_vf_change_mac(vf) &&
2931  		    !is_multicast_ether_addr(addr) &&
2932  		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2933  			dev_err(&pf->pdev->dev,
2934  				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2935  			return -EPERM;
2936  		}
2937  
2938  		/*count filters that really will be added*/
2939  		f = i40e_find_mac(vsi, addr);
2940  		if (!f)
2941  			++mac2add_cnt;
2942  	}
2943  
2944  	/* If this VF is not privileged, then we can't add more than a limited
2945  	 * number of addresses. Check to make sure that the additions do not
2946  	 * push us over the limit.
2947  	 */
2948  	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2949  		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2950  		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2951  			dev_err(&pf->pdev->dev,
2952  				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2953  			return -EPERM;
2954  		}
2955  	/* If this VF is trusted, it can use more resources than untrusted.
2956  	 * However to ensure that every trusted VF has appropriate number of
2957  	 * resources, divide whole pool of resources per port and then across
2958  	 * all VFs.
2959  	 */
2960  	} else {
2961  		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2962  		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2963  						       hw->num_ports)) {
2964  			dev_err(&pf->pdev->dev,
2965  				"Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
2966  			return -EPERM;
2967  		}
2968  	}
2969  	return 0;
2970  }
2971  
2972  /**
2973   * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2974   * @vc_ether_addr: used to extract the type
2975   **/
2976  static u8
i40e_vc_ether_addr_type(struct virtchnl_ether_addr * vc_ether_addr)2977  i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2978  {
2979  	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2980  }
2981  
2982  /**
2983   * i40e_is_vc_addr_legacy
2984   * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2985   *
2986   * check if the MAC address is from an older VF
2987   **/
2988  static bool
i40e_is_vc_addr_legacy(struct virtchnl_ether_addr * vc_ether_addr)2989  i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2990  {
2991  	return i40e_vc_ether_addr_type(vc_ether_addr) ==
2992  		VIRTCHNL_ETHER_ADDR_LEGACY;
2993  }
2994  
2995  /**
2996   * i40e_is_vc_addr_primary
2997   * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2998   *
2999   * check if the MAC address is the VF's primary MAC
3000   * This function should only be called when the MAC address in
3001   * virtchnl_ether_addr is a valid unicast MAC
3002   **/
3003  static bool
i40e_is_vc_addr_primary(struct virtchnl_ether_addr * vc_ether_addr)3004  i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
3005  {
3006  	return i40e_vc_ether_addr_type(vc_ether_addr) ==
3007  		VIRTCHNL_ETHER_ADDR_PRIMARY;
3008  }
3009  
3010  /**
3011   * i40e_update_vf_mac_addr
3012   * @vf: VF to update
3013   * @vc_ether_addr: structure from VIRTCHNL with MAC to add
3014   *
3015   * update the VF's cached hardware MAC if allowed
3016   **/
3017  static void
i40e_update_vf_mac_addr(struct i40e_vf * vf,struct virtchnl_ether_addr * vc_ether_addr)3018  i40e_update_vf_mac_addr(struct i40e_vf *vf,
3019  			struct virtchnl_ether_addr *vc_ether_addr)
3020  {
3021  	u8 *mac_addr = vc_ether_addr->addr;
3022  
3023  	if (!is_valid_ether_addr(mac_addr))
3024  		return;
3025  
3026  	/* If request to add MAC filter is a primary request update its default
3027  	 * MAC address with the requested one. If it is a legacy request then
3028  	 * check if current default is empty if so update the default MAC
3029  	 */
3030  	if (i40e_is_vc_addr_primary(vc_ether_addr)) {
3031  		ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3032  	} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
3033  		if (is_zero_ether_addr(vf->default_lan_addr.addr))
3034  			ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3035  	}
3036  }
3037  
3038  /**
3039   * i40e_vc_add_mac_addr_msg
3040   * @vf: pointer to the VF info
3041   * @msg: pointer to the msg buffer
3042   *
3043   * add guest mac address filter
3044   **/
i40e_vc_add_mac_addr_msg(struct i40e_vf * vf,u8 * msg)3045  static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3046  {
3047  	struct virtchnl_ether_addr_list *al =
3048  	    (struct virtchnl_ether_addr_list *)msg;
3049  	struct i40e_pf *pf = vf->pf;
3050  	struct i40e_vsi *vsi = NULL;
3051  	int ret = 0;
3052  	int i;
3053  
3054  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3055  	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3056  		ret = -EINVAL;
3057  		goto error_param;
3058  	}
3059  
3060  	vsi = pf->vsi[vf->lan_vsi_idx];
3061  
3062  	/* Lock once, because all function inside for loop accesses VSI's
3063  	 * MAC filter list which needs to be protected using same lock.
3064  	 */
3065  	spin_lock_bh(&vsi->mac_filter_hash_lock);
3066  
3067  	ret = i40e_check_vf_permission(vf, al);
3068  	if (ret) {
3069  		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3070  		goto error_param;
3071  	}
3072  
3073  	/* add new addresses to the list */
3074  	for (i = 0; i < al->num_elements; i++) {
3075  		struct i40e_mac_filter *f;
3076  
3077  		f = i40e_find_mac(vsi, al->list[i].addr);
3078  		if (!f) {
3079  			f = i40e_add_mac_filter(vsi, al->list[i].addr);
3080  
3081  			if (!f) {
3082  				dev_err(&pf->pdev->dev,
3083  					"Unable to add MAC filter %pM for VF %d\n",
3084  					al->list[i].addr, vf->vf_id);
3085  				ret = -EINVAL;
3086  				spin_unlock_bh(&vsi->mac_filter_hash_lock);
3087  				goto error_param;
3088  			}
3089  		}
3090  		i40e_update_vf_mac_addr(vf, &al->list[i]);
3091  	}
3092  	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3093  
3094  	/* program the updated filter list */
3095  	ret = i40e_sync_vsi_filters(vsi);
3096  	if (ret)
3097  		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3098  			vf->vf_id, ret);
3099  
3100  error_param:
3101  	/* send the response to the VF */
3102  	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3103  				      ret, NULL, 0);
3104  }
3105  
3106  /**
3107   * i40e_vc_del_mac_addr_msg
3108   * @vf: pointer to the VF info
3109   * @msg: pointer to the msg buffer
3110   *
3111   * remove guest mac address filter
3112   **/
i40e_vc_del_mac_addr_msg(struct i40e_vf * vf,u8 * msg)3113  static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3114  {
3115  	struct virtchnl_ether_addr_list *al =
3116  	    (struct virtchnl_ether_addr_list *)msg;
3117  	bool was_unimac_deleted = false;
3118  	struct i40e_pf *pf = vf->pf;
3119  	struct i40e_vsi *vsi = NULL;
3120  	int ret = 0;
3121  	int i;
3122  
3123  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3124  	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3125  		ret = -EINVAL;
3126  		goto error_param;
3127  	}
3128  
3129  	for (i = 0; i < al->num_elements; i++) {
3130  		if (is_broadcast_ether_addr(al->list[i].addr) ||
3131  		    is_zero_ether_addr(al->list[i].addr)) {
3132  			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3133  				al->list[i].addr, vf->vf_id);
3134  			ret = -EINVAL;
3135  			goto error_param;
3136  		}
3137  	}
3138  	vsi = pf->vsi[vf->lan_vsi_idx];
3139  
3140  	spin_lock_bh(&vsi->mac_filter_hash_lock);
3141  	/* delete addresses from the list */
3142  	for (i = 0; i < al->num_elements; i++) {
3143  		const u8 *addr = al->list[i].addr;
3144  
3145  		/* Allow to delete VF primary MAC only if it was not set
3146  		 * administratively by PF or if VF is trusted.
3147  		 */
3148  		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
3149  			if (i40e_can_vf_change_mac(vf))
3150  				was_unimac_deleted = true;
3151  			else
3152  				continue;
3153  		}
3154  
3155  		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3156  			ret = -EINVAL;
3157  			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3158  			goto error_param;
3159  		}
3160  	}
3161  
3162  	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3163  
3164  	if (was_unimac_deleted)
3165  		eth_zero_addr(vf->default_lan_addr.addr);
3166  
3167  	/* program the updated filter list */
3168  	ret = i40e_sync_vsi_filters(vsi);
3169  	if (ret)
3170  		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3171  			vf->vf_id, ret);
3172  
3173  	if (vf->trusted && was_unimac_deleted) {
3174  		struct i40e_mac_filter *f;
3175  		struct hlist_node *h;
3176  		u8 *macaddr = NULL;
3177  		int bkt;
3178  
3179  		/* set last unicast mac address as default */
3180  		spin_lock_bh(&vsi->mac_filter_hash_lock);
3181  		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3182  			if (is_valid_ether_addr(f->macaddr))
3183  				macaddr = f->macaddr;
3184  		}
3185  		if (macaddr)
3186  			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3187  		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3188  	}
3189  error_param:
3190  	/* send the response to the VF */
3191  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3192  }
3193  
3194  /**
3195   * i40e_vc_add_vlan_msg
3196   * @vf: pointer to the VF info
3197   * @msg: pointer to the msg buffer
3198   *
3199   * program guest vlan id
3200   **/
i40e_vc_add_vlan_msg(struct i40e_vf * vf,u8 * msg)3201  static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3202  {
3203  	struct virtchnl_vlan_filter_list *vfl =
3204  	    (struct virtchnl_vlan_filter_list *)msg;
3205  	struct i40e_pf *pf = vf->pf;
3206  	struct i40e_vsi *vsi = NULL;
3207  	int aq_ret = 0;
3208  	int i;
3209  
3210  	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3211  	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3212  		dev_err(&pf->pdev->dev,
3213  			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3214  		goto error_param;
3215  	}
3216  	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3217  	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3218  		aq_ret = -EINVAL;
3219  		goto error_param;
3220  	}
3221  
3222  	for (i = 0; i < vfl->num_elements; i++) {
3223  		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3224  			aq_ret = -EINVAL;
3225  			dev_err(&pf->pdev->dev,
3226  				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3227  			goto error_param;
3228  		}
3229  	}
3230  	vsi = pf->vsi[vf->lan_vsi_idx];
3231  	if (vsi->info.pvid) {
3232  		aq_ret = -EINVAL;
3233  		goto error_param;
3234  	}
3235  
3236  	i40e_vlan_stripping_enable(vsi);
3237  	for (i = 0; i < vfl->num_elements; i++) {
3238  		/* add new VLAN filter */
3239  		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3240  		if (!ret)
3241  			vf->num_vlan++;
3242  
3243  		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3244  			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3245  							   true,
3246  							   vfl->vlan_id[i],
3247  							   NULL);
3248  		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3249  			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3250  							   true,
3251  							   vfl->vlan_id[i],
3252  							   NULL);
3253  
3254  		if (ret)
3255  			dev_err(&pf->pdev->dev,
3256  				"Unable to add VLAN filter %d for VF %d, error %d\n",
3257  				vfl->vlan_id[i], vf->vf_id, ret);
3258  	}
3259  
3260  error_param:
3261  	/* send the response to the VF */
3262  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3263  }
3264  
3265  /**
3266   * i40e_vc_remove_vlan_msg
3267   * @vf: pointer to the VF info
3268   * @msg: pointer to the msg buffer
3269   *
3270   * remove programmed guest vlan id
3271   **/
i40e_vc_remove_vlan_msg(struct i40e_vf * vf,u8 * msg)3272  static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3273  {
3274  	struct virtchnl_vlan_filter_list *vfl =
3275  	    (struct virtchnl_vlan_filter_list *)msg;
3276  	struct i40e_pf *pf = vf->pf;
3277  	struct i40e_vsi *vsi = NULL;
3278  	int aq_ret = 0;
3279  	int i;
3280  
3281  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3282  	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3283  		aq_ret = -EINVAL;
3284  		goto error_param;
3285  	}
3286  
3287  	for (i = 0; i < vfl->num_elements; i++) {
3288  		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3289  			aq_ret = -EINVAL;
3290  			goto error_param;
3291  		}
3292  	}
3293  
3294  	vsi = pf->vsi[vf->lan_vsi_idx];
3295  	if (vsi->info.pvid) {
3296  		if (vfl->num_elements > 1 || vfl->vlan_id[0])
3297  			aq_ret = -EINVAL;
3298  		goto error_param;
3299  	}
3300  
3301  	for (i = 0; i < vfl->num_elements; i++) {
3302  		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3303  		vf->num_vlan--;
3304  
3305  		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3306  			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3307  							   false,
3308  							   vfl->vlan_id[i],
3309  							   NULL);
3310  		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3311  			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3312  							   false,
3313  							   vfl->vlan_id[i],
3314  							   NULL);
3315  	}
3316  
3317  error_param:
3318  	/* send the response to the VF */
3319  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3320  }
3321  
3322  /**
3323   * i40e_vc_rdma_msg
3324   * @vf: pointer to the VF info
3325   * @msg: pointer to the msg buffer
3326   * @msglen: msg length
3327   *
3328   * called from the VF for the iwarp msgs
3329   **/
i40e_vc_rdma_msg(struct i40e_vf * vf,u8 * msg,u16 msglen)3330  static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3331  {
3332  	struct i40e_pf *pf = vf->pf;
3333  	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3334  	int aq_ret = 0;
3335  
3336  	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3337  	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3338  		aq_ret = -EINVAL;
3339  		goto error_param;
3340  	}
3341  
3342  	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3343  				     msg, msglen);
3344  
3345  error_param:
3346  	/* send the response to the VF */
3347  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
3348  				       aq_ret);
3349  }
3350  
3351  /**
3352   * i40e_vc_rdma_qvmap_msg
3353   * @vf: pointer to the VF info
3354   * @msg: pointer to the msg buffer
3355   * @config: config qvmap or release it
3356   *
3357   * called from the VF for the iwarp msgs
3358   **/
i40e_vc_rdma_qvmap_msg(struct i40e_vf * vf,u8 * msg,bool config)3359  static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3360  {
3361  	struct virtchnl_rdma_qvlist_info *qvlist_info =
3362  				(struct virtchnl_rdma_qvlist_info *)msg;
3363  	int aq_ret = 0;
3364  
3365  	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3366  	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3367  		aq_ret = -EINVAL;
3368  		goto error_param;
3369  	}
3370  
3371  	if (config) {
3372  		if (i40e_config_rdma_qvlist(vf, qvlist_info))
3373  			aq_ret = -EINVAL;
3374  	} else {
3375  		i40e_release_rdma_qvlist(vf);
3376  	}
3377  
3378  error_param:
3379  	/* send the response to the VF */
3380  	return i40e_vc_send_resp_to_vf(vf,
3381  			       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
3382  			       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
3383  			       aq_ret);
3384  }
3385  
3386  /**
3387   * i40e_vc_config_rss_key
3388   * @vf: pointer to the VF info
3389   * @msg: pointer to the msg buffer
3390   *
3391   * Configure the VF's RSS key
3392   **/
i40e_vc_config_rss_key(struct i40e_vf * vf,u8 * msg)3393  static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3394  {
3395  	struct virtchnl_rss_key *vrk =
3396  		(struct virtchnl_rss_key *)msg;
3397  	struct i40e_pf *pf = vf->pf;
3398  	struct i40e_vsi *vsi = NULL;
3399  	int aq_ret = 0;
3400  
3401  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3402  	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3403  	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3404  		aq_ret = -EINVAL;
3405  		goto err;
3406  	}
3407  
3408  	vsi = pf->vsi[vf->lan_vsi_idx];
3409  	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3410  err:
3411  	/* send the response to the VF */
3412  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3413  				       aq_ret);
3414  }
3415  
3416  /**
3417   * i40e_vc_config_rss_lut
3418   * @vf: pointer to the VF info
3419   * @msg: pointer to the msg buffer
3420   *
3421   * Configure the VF's RSS LUT
3422   **/
i40e_vc_config_rss_lut(struct i40e_vf * vf,u8 * msg)3423  static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3424  {
3425  	struct virtchnl_rss_lut *vrl =
3426  		(struct virtchnl_rss_lut *)msg;
3427  	struct i40e_pf *pf = vf->pf;
3428  	struct i40e_vsi *vsi = NULL;
3429  	int aq_ret = 0;
3430  	u16 i;
3431  
3432  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3433  	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3434  	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3435  		aq_ret = -EINVAL;
3436  		goto err;
3437  	}
3438  
3439  	for (i = 0; i < vrl->lut_entries; i++)
3440  		if (vrl->lut[i] >= vf->num_queue_pairs) {
3441  			aq_ret = -EINVAL;
3442  			goto err;
3443  		}
3444  
3445  	vsi = pf->vsi[vf->lan_vsi_idx];
3446  	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3447  	/* send the response to the VF */
3448  err:
3449  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3450  				       aq_ret);
3451  }
3452  
3453  /**
3454   * i40e_vc_get_rss_hena
3455   * @vf: pointer to the VF info
3456   * @msg: pointer to the msg buffer
3457   *
3458   * Return the RSS HENA bits allowed by the hardware
3459   **/
i40e_vc_get_rss_hena(struct i40e_vf * vf,u8 * msg)3460  static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3461  {
3462  	struct virtchnl_rss_hena *vrh = NULL;
3463  	struct i40e_pf *pf = vf->pf;
3464  	int aq_ret = 0;
3465  	int len = 0;
3466  
3467  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3468  		aq_ret = -EINVAL;
3469  		goto err;
3470  	}
3471  	len = sizeof(struct virtchnl_rss_hena);
3472  
3473  	vrh = kzalloc(len, GFP_KERNEL);
3474  	if (!vrh) {
3475  		aq_ret = -ENOMEM;
3476  		len = 0;
3477  		goto err;
3478  	}
3479  	vrh->hena = i40e_pf_get_default_rss_hena(pf);
3480  err:
3481  	/* send the response back to the VF */
3482  	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3483  					aq_ret, (u8 *)vrh, len);
3484  	kfree(vrh);
3485  	return aq_ret;
3486  }
3487  
3488  /**
3489   * i40e_vc_set_rss_hena
3490   * @vf: pointer to the VF info
3491   * @msg: pointer to the msg buffer
3492   *
3493   * Set the RSS HENA bits for the VF
3494   **/
i40e_vc_set_rss_hena(struct i40e_vf * vf,u8 * msg)3495  static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3496  {
3497  	struct virtchnl_rss_hena *vrh =
3498  		(struct virtchnl_rss_hena *)msg;
3499  	struct i40e_pf *pf = vf->pf;
3500  	struct i40e_hw *hw = &pf->hw;
3501  	int aq_ret = 0;
3502  
3503  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3504  		aq_ret = -EINVAL;
3505  		goto err;
3506  	}
3507  	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3508  	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3509  			  (u32)(vrh->hena >> 32));
3510  
3511  	/* send the response to the VF */
3512  err:
3513  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3514  }
3515  
3516  /**
3517   * i40e_vc_enable_vlan_stripping
3518   * @vf: pointer to the VF info
3519   * @msg: pointer to the msg buffer
3520   *
3521   * Enable vlan header stripping for the VF
3522   **/
i40e_vc_enable_vlan_stripping(struct i40e_vf * vf,u8 * msg)3523  static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3524  {
3525  	struct i40e_vsi *vsi;
3526  	int aq_ret = 0;
3527  
3528  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3529  		aq_ret = -EINVAL;
3530  		goto err;
3531  	}
3532  
3533  	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3534  	i40e_vlan_stripping_enable(vsi);
3535  
3536  	/* send the response to the VF */
3537  err:
3538  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3539  				       aq_ret);
3540  }
3541  
3542  /**
3543   * i40e_vc_disable_vlan_stripping
3544   * @vf: pointer to the VF info
3545   * @msg: pointer to the msg buffer
3546   *
3547   * Disable vlan header stripping for the VF
3548   **/
i40e_vc_disable_vlan_stripping(struct i40e_vf * vf,u8 * msg)3549  static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3550  {
3551  	struct i40e_vsi *vsi;
3552  	int aq_ret = 0;
3553  
3554  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3555  		aq_ret = -EINVAL;
3556  		goto err;
3557  	}
3558  
3559  	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3560  	i40e_vlan_stripping_disable(vsi);
3561  
3562  	/* send the response to the VF */
3563  err:
3564  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3565  				       aq_ret);
3566  }
3567  
3568  /**
3569   * i40e_validate_cloud_filter
3570   * @vf: pointer to VF structure
3571   * @tc_filter: pointer to filter requested
3572   *
3573   * This function validates cloud filter programmed as TC filter for ADq
3574   **/
i40e_validate_cloud_filter(struct i40e_vf * vf,struct virtchnl_filter * tc_filter)3575  static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3576  				      struct virtchnl_filter *tc_filter)
3577  {
3578  	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3579  	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3580  	struct i40e_pf *pf = vf->pf;
3581  	struct i40e_vsi *vsi = NULL;
3582  	struct i40e_mac_filter *f;
3583  	struct hlist_node *h;
3584  	bool found = false;
3585  	int bkt;
3586  
3587  	if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
3588  		dev_info(&pf->pdev->dev,
3589  			 "VF %d: ADQ doesn't support this action (%d)\n",
3590  			 vf->vf_id, tc_filter->action);
3591  		goto err;
3592  	}
3593  
3594  	/* action_meta is TC number here to which the filter is applied */
3595  	if (!tc_filter->action_meta ||
3596  	    tc_filter->action_meta > vf->num_tc) {
3597  		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3598  			 vf->vf_id, tc_filter->action_meta);
3599  		goto err;
3600  	}
3601  
3602  	/* Check filter if it's programmed for advanced mode or basic mode.
3603  	 * There are two ADq modes (for VF only),
3604  	 * 1. Basic mode: intended to allow as many filter options as possible
3605  	 *		  to be added to a VF in Non-trusted mode. Main goal is
3606  	 *		  to add filters to its own MAC and VLAN id.
3607  	 * 2. Advanced mode: is for allowing filters to be applied other than
3608  	 *		  its own MAC or VLAN. This mode requires the VF to be
3609  	 *		  Trusted.
3610  	 */
3611  	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3612  		vsi = pf->vsi[vf->lan_vsi_idx];
3613  		f = i40e_find_mac(vsi, data.dst_mac);
3614  
3615  		if (!f) {
3616  			dev_info(&pf->pdev->dev,
3617  				 "Destination MAC %pM doesn't belong to VF %d\n",
3618  				 data.dst_mac, vf->vf_id);
3619  			goto err;
3620  		}
3621  
3622  		if (mask.vlan_id) {
3623  			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3624  					   hlist) {
3625  				if (f->vlan == ntohs(data.vlan_id)) {
3626  					found = true;
3627  					break;
3628  				}
3629  			}
3630  			if (!found) {
3631  				dev_info(&pf->pdev->dev,
3632  					 "VF %d doesn't have any VLAN id %u\n",
3633  					 vf->vf_id, ntohs(data.vlan_id));
3634  				goto err;
3635  			}
3636  		}
3637  	} else {
3638  		/* Check if VF is trusted */
3639  		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3640  			dev_err(&pf->pdev->dev,
3641  				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3642  				vf->vf_id);
3643  			return -EIO;
3644  		}
3645  	}
3646  
3647  	if (mask.dst_mac[0] & data.dst_mac[0]) {
3648  		if (is_broadcast_ether_addr(data.dst_mac) ||
3649  		    is_zero_ether_addr(data.dst_mac)) {
3650  			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3651  				 vf->vf_id, data.dst_mac);
3652  			goto err;
3653  		}
3654  	}
3655  
3656  	if (mask.src_mac[0] & data.src_mac[0]) {
3657  		if (is_broadcast_ether_addr(data.src_mac) ||
3658  		    is_zero_ether_addr(data.src_mac)) {
3659  			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3660  				 vf->vf_id, data.src_mac);
3661  			goto err;
3662  		}
3663  	}
3664  
3665  	if (mask.dst_port & data.dst_port) {
3666  		if (!data.dst_port) {
3667  			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3668  				 vf->vf_id);
3669  			goto err;
3670  		}
3671  	}
3672  
3673  	if (mask.src_port & data.src_port) {
3674  		if (!data.src_port) {
3675  			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3676  				 vf->vf_id);
3677  			goto err;
3678  		}
3679  	}
3680  
3681  	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3682  	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3683  		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3684  			 vf->vf_id);
3685  		goto err;
3686  	}
3687  
3688  	if (mask.vlan_id & data.vlan_id) {
3689  		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3690  			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3691  				 vf->vf_id);
3692  			goto err;
3693  		}
3694  	}
3695  
3696  	return 0;
3697  err:
3698  	return -EIO;
3699  }
3700  
3701  /**
3702   * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3703   * @vf: pointer to the VF info
3704   * @seid: seid of the vsi it is searching for
3705   **/
i40e_find_vsi_from_seid(struct i40e_vf * vf,u16 seid)3706  static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3707  {
3708  	struct i40e_pf *pf = vf->pf;
3709  	struct i40e_vsi *vsi = NULL;
3710  	int i;
3711  
3712  	for (i = 0; i < vf->num_tc ; i++) {
3713  		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3714  		if (vsi && vsi->seid == seid)
3715  			return vsi;
3716  	}
3717  	return NULL;
3718  }
3719  
3720  /**
3721   * i40e_del_all_cloud_filters
3722   * @vf: pointer to the VF info
3723   *
3724   * This function deletes all cloud filters
3725   **/
i40e_del_all_cloud_filters(struct i40e_vf * vf)3726  static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3727  {
3728  	struct i40e_cloud_filter *cfilter = NULL;
3729  	struct i40e_pf *pf = vf->pf;
3730  	struct i40e_vsi *vsi = NULL;
3731  	struct hlist_node *node;
3732  	int ret;
3733  
3734  	hlist_for_each_entry_safe(cfilter, node,
3735  				  &vf->cloud_filter_list, cloud_node) {
3736  		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3737  
3738  		if (!vsi) {
3739  			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3740  				vf->vf_id, cfilter->seid);
3741  			continue;
3742  		}
3743  
3744  		if (cfilter->dst_port)
3745  			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3746  								false);
3747  		else
3748  			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3749  		if (ret)
3750  			dev_err(&pf->pdev->dev,
3751  				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3752  				vf->vf_id, ERR_PTR(ret),
3753  				i40e_aq_str(&pf->hw,
3754  					    pf->hw.aq.asq_last_status));
3755  
3756  		hlist_del(&cfilter->cloud_node);
3757  		kfree(cfilter);
3758  		vf->num_cloud_filters--;
3759  	}
3760  }
3761  
3762  /**
3763   * i40e_vc_del_cloud_filter
3764   * @vf: pointer to the VF info
3765   * @msg: pointer to the msg buffer
3766   *
3767   * This function deletes a cloud filter programmed as TC filter for ADq
3768   **/
i40e_vc_del_cloud_filter(struct i40e_vf * vf,u8 * msg)3769  static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3770  {
3771  	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3772  	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3773  	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3774  	struct i40e_cloud_filter cfilter, *cf = NULL;
3775  	struct i40e_pf *pf = vf->pf;
3776  	struct i40e_vsi *vsi = NULL;
3777  	struct hlist_node *node;
3778  	int aq_ret = 0;
3779  	int i, ret;
3780  
3781  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3782  		aq_ret = -EINVAL;
3783  		goto err;
3784  	}
3785  
3786  	if (!vf->adq_enabled) {
3787  		dev_info(&pf->pdev->dev,
3788  			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3789  			 vf->vf_id);
3790  		aq_ret = -EINVAL;
3791  		goto err;
3792  	}
3793  
3794  	if (i40e_validate_cloud_filter(vf, vcf)) {
3795  		dev_info(&pf->pdev->dev,
3796  			 "VF %d: Invalid input, can't apply cloud filter\n",
3797  			 vf->vf_id);
3798  		aq_ret = -EINVAL;
3799  		goto err;
3800  	}
3801  
3802  	memset(&cfilter, 0, sizeof(cfilter));
3803  	/* parse destination mac address */
3804  	for (i = 0; i < ETH_ALEN; i++)
3805  		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3806  
3807  	/* parse source mac address */
3808  	for (i = 0; i < ETH_ALEN; i++)
3809  		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3810  
3811  	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3812  	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3813  	cfilter.src_port = mask.src_port & tcf.src_port;
3814  
3815  	switch (vcf->flow_type) {
3816  	case VIRTCHNL_TCP_V4_FLOW:
3817  		cfilter.n_proto = ETH_P_IP;
3818  		if (mask.dst_ip[0] & tcf.dst_ip[0])
3819  			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3820  			       ARRAY_SIZE(tcf.dst_ip));
3821  		else if (mask.src_ip[0] & tcf.dst_ip[0])
3822  			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3823  			       ARRAY_SIZE(tcf.dst_ip));
3824  		break;
3825  	case VIRTCHNL_TCP_V6_FLOW:
3826  		cfilter.n_proto = ETH_P_IPV6;
3827  		if (mask.dst_ip[3] & tcf.dst_ip[3])
3828  			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3829  			       sizeof(cfilter.ip.v6.dst_ip6));
3830  		if (mask.src_ip[3] & tcf.src_ip[3])
3831  			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3832  			       sizeof(cfilter.ip.v6.src_ip6));
3833  		break;
3834  	default:
3835  		/* TC filter can be configured based on different combinations
3836  		 * and in this case IP is not a part of filter config
3837  		 */
3838  		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3839  			 vf->vf_id);
3840  	}
3841  
3842  	/* get the vsi to which the tc belongs to */
3843  	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3844  	cfilter.seid = vsi->seid;
3845  	cfilter.flags = vcf->field_flags;
3846  
3847  	/* Deleting TC filter */
3848  	if (tcf.dst_port)
3849  		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3850  	else
3851  		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3852  	if (ret) {
3853  		dev_err(&pf->pdev->dev,
3854  			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3855  			vf->vf_id, ERR_PTR(ret),
3856  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3857  		goto err;
3858  	}
3859  
3860  	hlist_for_each_entry_safe(cf, node,
3861  				  &vf->cloud_filter_list, cloud_node) {
3862  		if (cf->seid != cfilter.seid)
3863  			continue;
3864  		if (mask.dst_port)
3865  			if (cfilter.dst_port != cf->dst_port)
3866  				continue;
3867  		if (mask.dst_mac[0])
3868  			if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3869  				continue;
3870  		/* for ipv4 data to be valid, only first byte of mask is set */
3871  		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3872  			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3873  				   ARRAY_SIZE(tcf.dst_ip)))
3874  				continue;
3875  		/* for ipv6, mask is set for all sixteen bytes (4 words) */
3876  		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3877  			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3878  				   sizeof(cfilter.ip.v6.src_ip6)))
3879  				continue;
3880  		if (mask.vlan_id)
3881  			if (cfilter.vlan_id != cf->vlan_id)
3882  				continue;
3883  
3884  		hlist_del(&cf->cloud_node);
3885  		kfree(cf);
3886  		vf->num_cloud_filters--;
3887  	}
3888  
3889  err:
3890  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3891  				       aq_ret);
3892  }
3893  
3894  /**
3895   * i40e_vc_add_cloud_filter
3896   * @vf: pointer to the VF info
3897   * @msg: pointer to the msg buffer
3898   *
3899   * This function adds a cloud filter programmed as TC filter for ADq
3900   **/
i40e_vc_add_cloud_filter(struct i40e_vf * vf,u8 * msg)3901  static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3902  {
3903  	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3904  	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3905  	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3906  	struct i40e_cloud_filter *cfilter = NULL;
3907  	struct i40e_pf *pf = vf->pf;
3908  	struct i40e_vsi *vsi = NULL;
3909  	int aq_ret = 0;
3910  	int i;
3911  
3912  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3913  		aq_ret = -EINVAL;
3914  		goto err_out;
3915  	}
3916  
3917  	if (!vf->adq_enabled) {
3918  		dev_info(&pf->pdev->dev,
3919  			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3920  			 vf->vf_id);
3921  		aq_ret = -EINVAL;
3922  		goto err_out;
3923  	}
3924  
3925  	if (i40e_validate_cloud_filter(vf, vcf)) {
3926  		dev_info(&pf->pdev->dev,
3927  			 "VF %d: Invalid input/s, can't apply cloud filter\n",
3928  			 vf->vf_id);
3929  		aq_ret = -EINVAL;
3930  		goto err_out;
3931  	}
3932  
3933  	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3934  	if (!cfilter) {
3935  		aq_ret = -ENOMEM;
3936  		goto err_out;
3937  	}
3938  
3939  	/* parse destination mac address */
3940  	for (i = 0; i < ETH_ALEN; i++)
3941  		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3942  
3943  	/* parse source mac address */
3944  	for (i = 0; i < ETH_ALEN; i++)
3945  		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3946  
3947  	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3948  	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3949  	cfilter->src_port = mask.src_port & tcf.src_port;
3950  
3951  	switch (vcf->flow_type) {
3952  	case VIRTCHNL_TCP_V4_FLOW:
3953  		cfilter->n_proto = ETH_P_IP;
3954  		if (mask.dst_ip[0] & tcf.dst_ip[0])
3955  			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3956  			       ARRAY_SIZE(tcf.dst_ip));
3957  		else if (mask.src_ip[0] & tcf.dst_ip[0])
3958  			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3959  			       ARRAY_SIZE(tcf.dst_ip));
3960  		break;
3961  	case VIRTCHNL_TCP_V6_FLOW:
3962  		cfilter->n_proto = ETH_P_IPV6;
3963  		if (mask.dst_ip[3] & tcf.dst_ip[3])
3964  			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3965  			       sizeof(cfilter->ip.v6.dst_ip6));
3966  		if (mask.src_ip[3] & tcf.src_ip[3])
3967  			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3968  			       sizeof(cfilter->ip.v6.src_ip6));
3969  		break;
3970  	default:
3971  		/* TC filter can be configured based on different combinations
3972  		 * and in this case IP is not a part of filter config
3973  		 */
3974  		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3975  			 vf->vf_id);
3976  	}
3977  
3978  	/* get the VSI to which the TC belongs to */
3979  	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3980  	cfilter->seid = vsi->seid;
3981  	cfilter->flags = vcf->field_flags;
3982  
3983  	/* Adding cloud filter programmed as TC filter */
3984  	if (tcf.dst_port)
3985  		aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3986  	else
3987  		aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3988  	if (aq_ret) {
3989  		dev_err(&pf->pdev->dev,
3990  			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
3991  			vf->vf_id, ERR_PTR(aq_ret),
3992  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3993  		goto err_free;
3994  	}
3995  
3996  	INIT_HLIST_NODE(&cfilter->cloud_node);
3997  	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3998  	/* release the pointer passing it to the collection */
3999  	cfilter = NULL;
4000  	vf->num_cloud_filters++;
4001  err_free:
4002  	kfree(cfilter);
4003  err_out:
4004  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
4005  				       aq_ret);
4006  }
4007  
4008  /**
4009   * i40e_vc_add_qch_msg: Add queue channel and enable ADq
4010   * @vf: pointer to the VF info
4011   * @msg: pointer to the msg buffer
4012   **/
i40e_vc_add_qch_msg(struct i40e_vf * vf,u8 * msg)4013  static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
4014  {
4015  	struct virtchnl_tc_info *tci =
4016  		(struct virtchnl_tc_info *)msg;
4017  	struct i40e_pf *pf = vf->pf;
4018  	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4019  	int i, adq_request_qps = 0;
4020  	int aq_ret = 0;
4021  	u64 speed = 0;
4022  
4023  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4024  		aq_ret = -EINVAL;
4025  		goto err;
4026  	}
4027  
4028  	/* ADq cannot be applied if spoof check is ON */
4029  	if (vf->spoofchk) {
4030  		dev_err(&pf->pdev->dev,
4031  			"Spoof check is ON, turn it OFF to enable ADq\n");
4032  		aq_ret = -EINVAL;
4033  		goto err;
4034  	}
4035  
4036  	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
4037  		dev_err(&pf->pdev->dev,
4038  			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
4039  			vf->vf_id);
4040  		aq_ret = -EINVAL;
4041  		goto err;
4042  	}
4043  
4044  	/* max number of traffic classes for VF currently capped at 4 */
4045  	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
4046  		dev_err(&pf->pdev->dev,
4047  			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
4048  			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
4049  		aq_ret = -EINVAL;
4050  		goto err;
4051  	}
4052  
4053  	/* validate queues for each TC */
4054  	for (i = 0; i < tci->num_tc; i++)
4055  		if (!tci->list[i].count ||
4056  		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
4057  			dev_err(&pf->pdev->dev,
4058  				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
4059  				vf->vf_id, i, tci->list[i].count,
4060  				I40E_DEFAULT_QUEUES_PER_VF);
4061  			aq_ret = -EINVAL;
4062  			goto err;
4063  		}
4064  
4065  	/* need Max VF queues but already have default number of queues */
4066  	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
4067  
4068  	if (pf->queues_left < adq_request_qps) {
4069  		dev_err(&pf->pdev->dev,
4070  			"No queues left to allocate to VF %d\n",
4071  			vf->vf_id);
4072  		aq_ret = -EINVAL;
4073  		goto err;
4074  	} else {
4075  		/* we need to allocate max VF queues to enable ADq so as to
4076  		 * make sure ADq enabled VF always gets back queues when it
4077  		 * goes through a reset.
4078  		 */
4079  		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4080  	}
4081  
4082  	/* get link speed in MB to validate rate limit */
4083  	speed = i40e_vc_link_speed2mbps(ls->link_speed);
4084  	if (speed == SPEED_UNKNOWN) {
4085  		dev_err(&pf->pdev->dev,
4086  			"Cannot detect link speed\n");
4087  		aq_ret = -EINVAL;
4088  		goto err;
4089  	}
4090  
4091  	/* parse data from the queue channel info */
4092  	vf->num_tc = tci->num_tc;
4093  	for (i = 0; i < vf->num_tc; i++) {
4094  		if (tci->list[i].max_tx_rate) {
4095  			if (tci->list[i].max_tx_rate > speed) {
4096  				dev_err(&pf->pdev->dev,
4097  					"Invalid max tx rate %llu specified for VF %d.",
4098  					tci->list[i].max_tx_rate,
4099  					vf->vf_id);
4100  				aq_ret = -EINVAL;
4101  				goto err;
4102  			} else {
4103  				vf->ch[i].max_tx_rate =
4104  					tci->list[i].max_tx_rate;
4105  			}
4106  		}
4107  		vf->ch[i].num_qps = tci->list[i].count;
4108  	}
4109  
4110  	/* set this flag only after making sure all inputs are sane */
4111  	vf->adq_enabled = true;
4112  
4113  	/* reset the VF in order to allocate resources */
4114  	i40e_vc_reset_vf(vf, true);
4115  
4116  	return 0;
4117  
4118  	/* send the response to the VF */
4119  err:
4120  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
4121  				       aq_ret);
4122  }
4123  
4124  /**
4125   * i40e_vc_del_qch_msg
4126   * @vf: pointer to the VF info
4127   * @msg: pointer to the msg buffer
4128   **/
i40e_vc_del_qch_msg(struct i40e_vf * vf,u8 * msg)4129  static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4130  {
4131  	struct i40e_pf *pf = vf->pf;
4132  	int aq_ret = 0;
4133  
4134  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4135  		aq_ret = -EINVAL;
4136  		goto err;
4137  	}
4138  
4139  	if (vf->adq_enabled) {
4140  		i40e_del_all_cloud_filters(vf);
4141  		i40e_del_qch(vf);
4142  		vf->adq_enabled = false;
4143  		vf->num_tc = 0;
4144  		dev_info(&pf->pdev->dev,
4145  			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4146  			 vf->vf_id);
4147  	} else {
4148  		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4149  			 vf->vf_id);
4150  		aq_ret = -EINVAL;
4151  	}
4152  
4153  	/* reset the VF in order to allocate resources */
4154  	i40e_vc_reset_vf(vf, true);
4155  
4156  	return 0;
4157  
4158  err:
4159  	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4160  				       aq_ret);
4161  }
4162  
4163  /**
4164   * i40e_vc_process_vf_msg
4165   * @pf: pointer to the PF structure
4166   * @vf_id: source VF id
4167   * @v_opcode: operation code
4168   * @v_retval: unused return value code
4169   * @msg: pointer to the msg buffer
4170   * @msglen: msg length
4171   *
4172   * called from the common aeq/arq handler to
4173   * process request from VF
4174   **/
i40e_vc_process_vf_msg(struct i40e_pf * pf,s16 vf_id,u32 v_opcode,u32 __always_unused v_retval,u8 * msg,u16 msglen)4175  int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4176  			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
4177  {
4178  	struct i40e_hw *hw = &pf->hw;
4179  	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4180  	struct i40e_vf *vf;
4181  	int ret;
4182  
4183  	pf->vf_aq_requests++;
4184  	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4185  		return -EINVAL;
4186  	vf = &(pf->vf[local_vf_id]);
4187  
4188  	/* Check if VF is disabled. */
4189  	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4190  		return -EINVAL;
4191  
4192  	/* perform basic checks on the msg */
4193  	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4194  
4195  	if (ret) {
4196  		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4197  		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4198  			local_vf_id, v_opcode, msglen);
4199  		return ret;
4200  	}
4201  
4202  	switch (v_opcode) {
4203  	case VIRTCHNL_OP_VERSION:
4204  		ret = i40e_vc_get_version_msg(vf, msg);
4205  		break;
4206  	case VIRTCHNL_OP_GET_VF_RESOURCES:
4207  		ret = i40e_vc_get_vf_resources_msg(vf, msg);
4208  		i40e_vc_notify_vf_link_state(vf);
4209  		break;
4210  	case VIRTCHNL_OP_RESET_VF:
4211  		i40e_vc_reset_vf(vf, false);
4212  		ret = 0;
4213  		break;
4214  	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4215  		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4216  		break;
4217  	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4218  		ret = i40e_vc_config_queues_msg(vf, msg);
4219  		break;
4220  	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4221  		ret = i40e_vc_config_irq_map_msg(vf, msg);
4222  		break;
4223  	case VIRTCHNL_OP_ENABLE_QUEUES:
4224  		ret = i40e_vc_enable_queues_msg(vf, msg);
4225  		i40e_vc_notify_vf_link_state(vf);
4226  		break;
4227  	case VIRTCHNL_OP_DISABLE_QUEUES:
4228  		ret = i40e_vc_disable_queues_msg(vf, msg);
4229  		break;
4230  	case VIRTCHNL_OP_ADD_ETH_ADDR:
4231  		ret = i40e_vc_add_mac_addr_msg(vf, msg);
4232  		break;
4233  	case VIRTCHNL_OP_DEL_ETH_ADDR:
4234  		ret = i40e_vc_del_mac_addr_msg(vf, msg);
4235  		break;
4236  	case VIRTCHNL_OP_ADD_VLAN:
4237  		ret = i40e_vc_add_vlan_msg(vf, msg);
4238  		break;
4239  	case VIRTCHNL_OP_DEL_VLAN:
4240  		ret = i40e_vc_remove_vlan_msg(vf, msg);
4241  		break;
4242  	case VIRTCHNL_OP_GET_STATS:
4243  		ret = i40e_vc_get_stats_msg(vf, msg);
4244  		break;
4245  	case VIRTCHNL_OP_RDMA:
4246  		ret = i40e_vc_rdma_msg(vf, msg, msglen);
4247  		break;
4248  	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
4249  		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4250  		break;
4251  	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
4252  		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4253  		break;
4254  	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4255  		ret = i40e_vc_config_rss_key(vf, msg);
4256  		break;
4257  	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4258  		ret = i40e_vc_config_rss_lut(vf, msg);
4259  		break;
4260  	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4261  		ret = i40e_vc_get_rss_hena(vf, msg);
4262  		break;
4263  	case VIRTCHNL_OP_SET_RSS_HENA:
4264  		ret = i40e_vc_set_rss_hena(vf, msg);
4265  		break;
4266  	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4267  		ret = i40e_vc_enable_vlan_stripping(vf, msg);
4268  		break;
4269  	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4270  		ret = i40e_vc_disable_vlan_stripping(vf, msg);
4271  		break;
4272  	case VIRTCHNL_OP_REQUEST_QUEUES:
4273  		ret = i40e_vc_request_queues_msg(vf, msg);
4274  		break;
4275  	case VIRTCHNL_OP_ENABLE_CHANNELS:
4276  		ret = i40e_vc_add_qch_msg(vf, msg);
4277  		break;
4278  	case VIRTCHNL_OP_DISABLE_CHANNELS:
4279  		ret = i40e_vc_del_qch_msg(vf, msg);
4280  		break;
4281  	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4282  		ret = i40e_vc_add_cloud_filter(vf, msg);
4283  		break;
4284  	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4285  		ret = i40e_vc_del_cloud_filter(vf, msg);
4286  		break;
4287  	case VIRTCHNL_OP_UNKNOWN:
4288  	default:
4289  		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4290  			v_opcode, local_vf_id);
4291  		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4292  					      -EOPNOTSUPP);
4293  		break;
4294  	}
4295  
4296  	return ret;
4297  }
4298  
4299  /**
4300   * i40e_vc_process_vflr_event
4301   * @pf: pointer to the PF structure
4302   *
4303   * called from the vlfr irq handler to
4304   * free up VF resources and state variables
4305   **/
i40e_vc_process_vflr_event(struct i40e_pf * pf)4306  int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4307  {
4308  	struct i40e_hw *hw = &pf->hw;
4309  	u32 reg, reg_idx, bit_idx;
4310  	struct i40e_vf *vf;
4311  	int vf_id;
4312  
4313  	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4314  		return 0;
4315  
4316  	/* Re-enable the VFLR interrupt cause here, before looking for which
4317  	 * VF got reset. Otherwise, if another VF gets a reset while the
4318  	 * first one is being processed, that interrupt will be lost, and
4319  	 * that VF will be stuck in reset forever.
4320  	 */
4321  	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4322  	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4323  	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4324  	i40e_flush(hw);
4325  
4326  	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4327  	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4328  		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4329  		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4330  		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
4331  		vf = &pf->vf[vf_id];
4332  		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4333  		if (reg & BIT(bit_idx))
4334  			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4335  			i40e_reset_vf(vf, true);
4336  	}
4337  
4338  	return 0;
4339  }
4340  
4341  /**
4342   * i40e_validate_vf
4343   * @pf: the physical function
4344   * @vf_id: VF identifier
4345   *
4346   * Check that the VF is enabled and the VSI exists.
4347   *
4348   * Returns 0 on success, negative on failure
4349   **/
i40e_validate_vf(struct i40e_pf * pf,int vf_id)4350  static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4351  {
4352  	struct i40e_vsi *vsi;
4353  	struct i40e_vf *vf;
4354  	int ret = 0;
4355  
4356  	if (vf_id >= pf->num_alloc_vfs) {
4357  		dev_err(&pf->pdev->dev,
4358  			"Invalid VF Identifier %d\n", vf_id);
4359  		ret = -EINVAL;
4360  		goto err_out;
4361  	}
4362  	vf = &pf->vf[vf_id];
4363  	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4364  	if (!vsi)
4365  		ret = -EINVAL;
4366  err_out:
4367  	return ret;
4368  }
4369  
4370  /**
4371   * i40e_check_vf_init_timeout
4372   * @vf: the virtual function
4373   *
4374   * Check that the VF's initialization was successfully done and if not
4375   * wait up to 300ms for its finish.
4376   *
4377   * Returns true when VF is initialized, false on timeout
4378   **/
i40e_check_vf_init_timeout(struct i40e_vf * vf)4379  static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4380  {
4381  	int i;
4382  
4383  	/* When the VF is resetting wait until it is done.
4384  	 * It can take up to 200 milliseconds, but wait for
4385  	 * up to 300 milliseconds to be safe.
4386  	 */
4387  	for (i = 0; i < 15; i++) {
4388  		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4389  			return true;
4390  		msleep(20);
4391  	}
4392  
4393  	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4394  		dev_err(&vf->pf->pdev->dev,
4395  			"VF %d still in reset. Try again.\n", vf->vf_id);
4396  		return false;
4397  	}
4398  
4399  	return true;
4400  }
4401  
4402  /**
4403   * i40e_ndo_set_vf_mac
4404   * @netdev: network interface device structure
4405   * @vf_id: VF identifier
4406   * @mac: mac address
4407   *
4408   * program VF mac address
4409   **/
i40e_ndo_set_vf_mac(struct net_device * netdev,int vf_id,u8 * mac)4410  int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4411  {
4412  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4413  	struct i40e_vsi *vsi = np->vsi;
4414  	struct i40e_pf *pf = vsi->back;
4415  	struct i40e_mac_filter *f;
4416  	struct i40e_vf *vf;
4417  	int ret = 0;
4418  	struct hlist_node *h;
4419  	int bkt;
4420  
4421  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4422  		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4423  		return -EAGAIN;
4424  	}
4425  
4426  	/* validate the request */
4427  	ret = i40e_validate_vf(pf, vf_id);
4428  	if (ret)
4429  		goto error_param;
4430  
4431  	vf = &pf->vf[vf_id];
4432  	if (!i40e_check_vf_init_timeout(vf)) {
4433  		ret = -EAGAIN;
4434  		goto error_param;
4435  	}
4436  	vsi = pf->vsi[vf->lan_vsi_idx];
4437  
4438  	if (is_multicast_ether_addr(mac)) {
4439  		dev_err(&pf->pdev->dev,
4440  			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4441  		ret = -EINVAL;
4442  		goto error_param;
4443  	}
4444  
4445  	/* Lock once because below invoked function add/del_filter requires
4446  	 * mac_filter_hash_lock to be held
4447  	 */
4448  	spin_lock_bh(&vsi->mac_filter_hash_lock);
4449  
4450  	/* delete the temporary mac address */
4451  	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4452  		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4453  
4454  	/* Delete all the filters for this VSI - we're going to kill it
4455  	 * anyway.
4456  	 */
4457  	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4458  		__i40e_del_filter(vsi, f);
4459  
4460  	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4461  
4462  	/* program mac filter */
4463  	if (i40e_sync_vsi_filters(vsi)) {
4464  		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4465  		ret = -EIO;
4466  		goto error_param;
4467  	}
4468  	ether_addr_copy(vf->default_lan_addr.addr, mac);
4469  
4470  	if (is_zero_ether_addr(mac)) {
4471  		vf->pf_set_mac = false;
4472  		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4473  	} else {
4474  		vf->pf_set_mac = true;
4475  		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4476  			 mac, vf_id);
4477  	}
4478  
4479  	/* Force the VF interface down so it has to bring up with new MAC
4480  	 * address
4481  	 */
4482  	i40e_vc_reset_vf(vf, true);
4483  	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4484  
4485  error_param:
4486  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4487  	return ret;
4488  }
4489  
4490  /**
4491   * i40e_ndo_set_vf_port_vlan
4492   * @netdev: network interface device structure
4493   * @vf_id: VF identifier
4494   * @vlan_id: mac address
4495   * @qos: priority setting
4496   * @vlan_proto: vlan protocol
4497   *
4498   * program VF vlan id and/or qos
4499   **/
i40e_ndo_set_vf_port_vlan(struct net_device * netdev,int vf_id,u16 vlan_id,u8 qos,__be16 vlan_proto)4500  int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4501  			      u16 vlan_id, u8 qos, __be16 vlan_proto)
4502  {
4503  	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4504  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4505  	bool allmulti = false, alluni = false;
4506  	struct i40e_pf *pf = np->vsi->back;
4507  	struct i40e_vsi *vsi;
4508  	struct i40e_vf *vf;
4509  	int ret = 0;
4510  
4511  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4512  		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4513  		return -EAGAIN;
4514  	}
4515  
4516  	/* validate the request */
4517  	ret = i40e_validate_vf(pf, vf_id);
4518  	if (ret)
4519  		goto error_pvid;
4520  
4521  	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4522  		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4523  		ret = -EINVAL;
4524  		goto error_pvid;
4525  	}
4526  
4527  	if (vlan_proto != htons(ETH_P_8021Q)) {
4528  		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4529  		ret = -EPROTONOSUPPORT;
4530  		goto error_pvid;
4531  	}
4532  
4533  	vf = &pf->vf[vf_id];
4534  	if (!i40e_check_vf_init_timeout(vf)) {
4535  		ret = -EAGAIN;
4536  		goto error_pvid;
4537  	}
4538  	vsi = pf->vsi[vf->lan_vsi_idx];
4539  
4540  	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4541  		/* duplicate request, so just return success */
4542  		goto error_pvid;
4543  
4544  	i40e_vlan_stripping_enable(vsi);
4545  
4546  	/* Locked once because multiple functions below iterate list */
4547  	spin_lock_bh(&vsi->mac_filter_hash_lock);
4548  
4549  	/* Check for condition where there was already a port VLAN ID
4550  	 * filter set and now it is being deleted by setting it to zero.
4551  	 * Additionally check for the condition where there was a port
4552  	 * VLAN but now there is a new and different port VLAN being set.
4553  	 * Before deleting all the old VLAN filters we must add new ones
4554  	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4555  	 * MAC addresses deleted.
4556  	 */
4557  	if ((!(vlan_id || qos) ||
4558  	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4559  	    vsi->info.pvid) {
4560  		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4561  		if (ret) {
4562  			dev_info(&vsi->back->pdev->dev,
4563  				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4564  				 vsi->back->hw.aq.asq_last_status);
4565  			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4566  			goto error_pvid;
4567  		}
4568  	}
4569  
4570  	if (vsi->info.pvid) {
4571  		/* remove all filters on the old VLAN */
4572  		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4573  					   VLAN_VID_MASK));
4574  	}
4575  
4576  	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4577  
4578  	/* disable promisc modes in case they were enabled */
4579  	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4580  					      allmulti, alluni);
4581  	if (ret) {
4582  		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4583  		goto error_pvid;
4584  	}
4585  
4586  	if (vlan_id || qos)
4587  		ret = i40e_vsi_add_pvid(vsi, vlanprio);
4588  	else
4589  		i40e_vsi_remove_pvid(vsi);
4590  	spin_lock_bh(&vsi->mac_filter_hash_lock);
4591  
4592  	if (vlan_id) {
4593  		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4594  			 vlan_id, qos, vf_id);
4595  
4596  		/* add new VLAN filter for each MAC */
4597  		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4598  		if (ret) {
4599  			dev_info(&vsi->back->pdev->dev,
4600  				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4601  				 vsi->back->hw.aq.asq_last_status);
4602  			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4603  			goto error_pvid;
4604  		}
4605  
4606  		/* remove the previously added non-VLAN MAC filters */
4607  		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4608  	}
4609  
4610  	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4611  
4612  	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4613  		alluni = true;
4614  
4615  	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4616  		allmulti = true;
4617  
4618  	/* Schedule the worker thread to take care of applying changes */
4619  	i40e_service_event_schedule(vsi->back);
4620  
4621  	if (ret) {
4622  		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4623  		goto error_pvid;
4624  	}
4625  
4626  	/* The Port VLAN needs to be saved across resets the same as the
4627  	 * default LAN MAC address.
4628  	 */
4629  	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4630  
4631  	i40e_vc_reset_vf(vf, true);
4632  	/* During reset the VF got a new VSI, so refresh a pointer. */
4633  	vsi = pf->vsi[vf->lan_vsi_idx];
4634  
4635  	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4636  	if (ret) {
4637  		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4638  		goto error_pvid;
4639  	}
4640  
4641  	ret = 0;
4642  
4643  error_pvid:
4644  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4645  	return ret;
4646  }
4647  
4648  /**
4649   * i40e_ndo_set_vf_bw
4650   * @netdev: network interface device structure
4651   * @vf_id: VF identifier
4652   * @min_tx_rate: Minimum Tx rate
4653   * @max_tx_rate: Maximum Tx rate
4654   *
4655   * configure VF Tx rate
4656   **/
i40e_ndo_set_vf_bw(struct net_device * netdev,int vf_id,int min_tx_rate,int max_tx_rate)4657  int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4658  		       int max_tx_rate)
4659  {
4660  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4661  	struct i40e_pf *pf = np->vsi->back;
4662  	struct i40e_vsi *vsi;
4663  	struct i40e_vf *vf;
4664  	int ret = 0;
4665  
4666  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4667  		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4668  		return -EAGAIN;
4669  	}
4670  
4671  	/* validate the request */
4672  	ret = i40e_validate_vf(pf, vf_id);
4673  	if (ret)
4674  		goto error;
4675  
4676  	if (min_tx_rate) {
4677  		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4678  			min_tx_rate, vf_id);
4679  		ret = -EINVAL;
4680  		goto error;
4681  	}
4682  
4683  	vf = &pf->vf[vf_id];
4684  	if (!i40e_check_vf_init_timeout(vf)) {
4685  		ret = -EAGAIN;
4686  		goto error;
4687  	}
4688  	vsi = pf->vsi[vf->lan_vsi_idx];
4689  
4690  	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4691  	if (ret)
4692  		goto error;
4693  
4694  	vf->tx_rate = max_tx_rate;
4695  error:
4696  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4697  	return ret;
4698  }
4699  
4700  /**
4701   * i40e_ndo_get_vf_config
4702   * @netdev: network interface device structure
4703   * @vf_id: VF identifier
4704   * @ivi: VF configuration structure
4705   *
4706   * return VF configuration
4707   **/
i40e_ndo_get_vf_config(struct net_device * netdev,int vf_id,struct ifla_vf_info * ivi)4708  int i40e_ndo_get_vf_config(struct net_device *netdev,
4709  			   int vf_id, struct ifla_vf_info *ivi)
4710  {
4711  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4712  	struct i40e_vsi *vsi = np->vsi;
4713  	struct i40e_pf *pf = vsi->back;
4714  	struct i40e_vf *vf;
4715  	int ret = 0;
4716  
4717  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4718  		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4719  		return -EAGAIN;
4720  	}
4721  
4722  	/* validate the request */
4723  	ret = i40e_validate_vf(pf, vf_id);
4724  	if (ret)
4725  		goto error_param;
4726  
4727  	vf = &pf->vf[vf_id];
4728  	/* first vsi is always the LAN vsi */
4729  	vsi = pf->vsi[vf->lan_vsi_idx];
4730  	if (!vsi) {
4731  		ret = -ENOENT;
4732  		goto error_param;
4733  	}
4734  
4735  	ivi->vf = vf_id;
4736  
4737  	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4738  
4739  	ivi->max_tx_rate = vf->tx_rate;
4740  	ivi->min_tx_rate = 0;
4741  	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4742  	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4743  		   I40E_VLAN_PRIORITY_SHIFT;
4744  	if (vf->link_forced == false)
4745  		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4746  	else if (vf->link_up == true)
4747  		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4748  	else
4749  		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4750  	ivi->spoofchk = vf->spoofchk;
4751  	ivi->trusted = vf->trusted;
4752  	ret = 0;
4753  
4754  error_param:
4755  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4756  	return ret;
4757  }
4758  
4759  /**
4760   * i40e_ndo_set_vf_link_state
4761   * @netdev: network interface device structure
4762   * @vf_id: VF identifier
4763   * @link: required link state
4764   *
4765   * Set the link state of a specified VF, regardless of physical link state
4766   **/
i40e_ndo_set_vf_link_state(struct net_device * netdev,int vf_id,int link)4767  int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4768  {
4769  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4770  	struct i40e_pf *pf = np->vsi->back;
4771  	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4772  	struct virtchnl_pf_event pfe;
4773  	struct i40e_hw *hw = &pf->hw;
4774  	struct i40e_vsi *vsi;
4775  	unsigned long q_map;
4776  	struct i40e_vf *vf;
4777  	int abs_vf_id;
4778  	int ret = 0;
4779  	int tmp;
4780  
4781  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4782  		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4783  		return -EAGAIN;
4784  	}
4785  
4786  	/* validate the request */
4787  	if (vf_id >= pf->num_alloc_vfs) {
4788  		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4789  		ret = -EINVAL;
4790  		goto error_out;
4791  	}
4792  
4793  	vf = &pf->vf[vf_id];
4794  	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4795  
4796  	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4797  	pfe.severity = PF_EVENT_SEVERITY_INFO;
4798  
4799  	switch (link) {
4800  	case IFLA_VF_LINK_STATE_AUTO:
4801  		vf->link_forced = false;
4802  		vf->is_disabled_from_host = false;
4803  		/* reset needed to reinit VF resources */
4804  		i40e_vc_reset_vf(vf, true);
4805  		i40e_set_vf_link_state(vf, &pfe, ls);
4806  		break;
4807  	case IFLA_VF_LINK_STATE_ENABLE:
4808  		vf->link_forced = true;
4809  		vf->link_up = true;
4810  		vf->is_disabled_from_host = false;
4811  		/* reset needed to reinit VF resources */
4812  		i40e_vc_reset_vf(vf, true);
4813  		i40e_set_vf_link_state(vf, &pfe, ls);
4814  		break;
4815  	case IFLA_VF_LINK_STATE_DISABLE:
4816  		vf->link_forced = true;
4817  		vf->link_up = false;
4818  		i40e_set_vf_link_state(vf, &pfe, ls);
4819  
4820  		vsi = pf->vsi[vf->lan_vsi_idx];
4821  		q_map = BIT(vsi->num_queue_pairs) - 1;
4822  
4823  		vf->is_disabled_from_host = true;
4824  
4825  		/* Try to stop both Tx&Rx rings even if one of the calls fails
4826  		 * to ensure we stop the rings even in case of errors.
4827  		 * If any of them returns with an error then the first
4828  		 * error that occurred will be returned.
4829  		 */
4830  		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
4831  		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
4832  
4833  		ret = tmp ? tmp : ret;
4834  		break;
4835  	default:
4836  		ret = -EINVAL;
4837  		goto error_out;
4838  	}
4839  	/* Notify the VF of its new link state */
4840  	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4841  			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4842  
4843  error_out:
4844  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4845  	return ret;
4846  }
4847  
4848  /**
4849   * i40e_ndo_set_vf_spoofchk
4850   * @netdev: network interface device structure
4851   * @vf_id: VF identifier
4852   * @enable: flag to enable or disable feature
4853   *
4854   * Enable or disable VF spoof checking
4855   **/
i40e_ndo_set_vf_spoofchk(struct net_device * netdev,int vf_id,bool enable)4856  int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4857  {
4858  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4859  	struct i40e_vsi *vsi = np->vsi;
4860  	struct i40e_pf *pf = vsi->back;
4861  	struct i40e_vsi_context ctxt;
4862  	struct i40e_hw *hw = &pf->hw;
4863  	struct i40e_vf *vf;
4864  	int ret = 0;
4865  
4866  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4867  		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4868  		return -EAGAIN;
4869  	}
4870  
4871  	/* validate the request */
4872  	if (vf_id >= pf->num_alloc_vfs) {
4873  		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4874  		ret = -EINVAL;
4875  		goto out;
4876  	}
4877  
4878  	vf = &(pf->vf[vf_id]);
4879  	if (!i40e_check_vf_init_timeout(vf)) {
4880  		ret = -EAGAIN;
4881  		goto out;
4882  	}
4883  
4884  	if (enable == vf->spoofchk)
4885  		goto out;
4886  
4887  	vf->spoofchk = enable;
4888  	memset(&ctxt, 0, sizeof(ctxt));
4889  	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4890  	ctxt.pf_num = pf->hw.pf_id;
4891  	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4892  	if (enable)
4893  		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4894  					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4895  	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4896  	if (ret) {
4897  		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4898  			ret);
4899  		ret = -EIO;
4900  	}
4901  out:
4902  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4903  	return ret;
4904  }
4905  
4906  /**
4907   * i40e_ndo_set_vf_trust
4908   * @netdev: network interface device structure of the pf
4909   * @vf_id: VF identifier
4910   * @setting: trust setting
4911   *
4912   * Enable or disable VF trust setting
4913   **/
i40e_ndo_set_vf_trust(struct net_device * netdev,int vf_id,bool setting)4914  int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4915  {
4916  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4917  	struct i40e_pf *pf = np->vsi->back;
4918  	struct i40e_vf *vf;
4919  	int ret = 0;
4920  
4921  	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4922  		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4923  		return -EAGAIN;
4924  	}
4925  
4926  	/* validate the request */
4927  	if (vf_id >= pf->num_alloc_vfs) {
4928  		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4929  		ret = -EINVAL;
4930  		goto out;
4931  	}
4932  
4933  	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4934  		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4935  		ret = -EINVAL;
4936  		goto out;
4937  	}
4938  
4939  	vf = &pf->vf[vf_id];
4940  
4941  	if (setting == vf->trusted)
4942  		goto out;
4943  
4944  	vf->trusted = setting;
4945  
4946  	/* request PF to sync mac/vlan filters for the VF */
4947  	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4948  	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4949  
4950  	i40e_vc_reset_vf(vf, true);
4951  	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4952  		 vf_id, setting ? "" : "un");
4953  
4954  	if (vf->adq_enabled) {
4955  		if (!vf->trusted) {
4956  			dev_info(&pf->pdev->dev,
4957  				 "VF %u no longer Trusted, deleting all cloud filters\n",
4958  				 vf_id);
4959  			i40e_del_all_cloud_filters(vf);
4960  		}
4961  	}
4962  
4963  out:
4964  	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4965  	return ret;
4966  }
4967  
4968  /**
4969   * i40e_get_vf_stats - populate some stats for the VF
4970   * @netdev: the netdev of the PF
4971   * @vf_id: the host OS identifier (0-127)
4972   * @vf_stats: pointer to the OS memory to be initialized
4973   */
i40e_get_vf_stats(struct net_device * netdev,int vf_id,struct ifla_vf_stats * vf_stats)4974  int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4975  		      struct ifla_vf_stats *vf_stats)
4976  {
4977  	struct i40e_netdev_priv *np = netdev_priv(netdev);
4978  	struct i40e_pf *pf = np->vsi->back;
4979  	struct i40e_eth_stats *stats;
4980  	struct i40e_vsi *vsi;
4981  	struct i40e_vf *vf;
4982  
4983  	/* validate the request */
4984  	if (i40e_validate_vf(pf, vf_id))
4985  		return -EINVAL;
4986  
4987  	vf = &pf->vf[vf_id];
4988  	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4989  		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4990  		return -EBUSY;
4991  	}
4992  
4993  	vsi = pf->vsi[vf->lan_vsi_idx];
4994  	if (!vsi)
4995  		return -EINVAL;
4996  
4997  	i40e_update_eth_stats(vsi);
4998  	stats = &vsi->eth_stats;
4999  
5000  	memset(vf_stats, 0, sizeof(*vf_stats));
5001  
5002  	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
5003  		stats->rx_multicast;
5004  	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
5005  		stats->tx_multicast;
5006  	vf_stats->rx_bytes   = stats->rx_bytes;
5007  	vf_stats->tx_bytes   = stats->tx_bytes;
5008  	vf_stats->broadcast  = stats->rx_broadcast;
5009  	vf_stats->multicast  = stats->rx_multicast;
5010  	vf_stats->rx_dropped = stats->rx_discards;
5011  	vf_stats->tx_dropped = stats->tx_discards;
5012  
5013  	return 0;
5014  }
5015