1ae06c70bSJeff Kirsher // SPDX-License-Identifier: GPL-2.0
251dce24bSJeff Kirsher /* Copyright(c) 2013 - 2018 Intel Corporation. */
35c3c48acSJesse Brandeburg
45c3c48acSJesse Brandeburg #include "i40e.h"
5e77220eeSIvan Vecera #include "i40e_lan_hmc.h"
6e77220eeSIvan Vecera #include "i40e_virtchnl_pf.h"
75c3c48acSJesse Brandeburg
8532b0455SMitch Williams /*********************notification routines***********************/
9532b0455SMitch Williams
10532b0455SMitch Williams /**
11532b0455SMitch Williams * i40e_vc_vf_broadcast
12532b0455SMitch Williams * @pf: pointer to the PF structure
13f5254429SJacob Keller * @v_opcode: operation code
14f5254429SJacob Keller * @v_retval: return value
15532b0455SMitch Williams * @msg: pointer to the msg buffer
16532b0455SMitch Williams * @msglen: msg length
17532b0455SMitch Williams *
18532b0455SMitch Williams * send a message to all VFs on a given PF
19532b0455SMitch Williams **/
20532b0455SMitch Williams static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
21310a2ad9SJesse Brandeburg enum virtchnl_ops v_opcode,
225180ff13SJan Sokolowski int v_retval, u8 *msg,
23532b0455SMitch Williams u16 msglen)
24532b0455SMitch Williams {
25532b0455SMitch Williams struct i40e_hw *hw = &pf->hw;
26532b0455SMitch Williams struct i40e_vf *vf = pf->vf;
27532b0455SMitch Williams int i;
28532b0455SMitch Williams
29532b0455SMitch Williams for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
30a1b5a24fSJesse Brandeburg int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
31532b0455SMitch Williams /* Not all VFs are enabled, so skip the ones that are not */
326322e63cSJacob Keller if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
336322e63cSJacob Keller !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
34532b0455SMitch Williams continue;
35532b0455SMitch Williams
36532b0455SMitch Williams /* Ignore return value on purpose - a given VF may fail, but
37532b0455SMitch Williams * we need to keep going and send to all of them
38532b0455SMitch Williams */
39532b0455SMitch Williams i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
40532b0455SMitch Williams msg, msglen, NULL);
41532b0455SMitch Williams }
42532b0455SMitch Williams }
43532b0455SMitch Williams
44532b0455SMitch Williams /**
456d2c322cSAleksandr Loktionov * i40e_vc_link_speed2mbps
466d2c322cSAleksandr Loktionov * converts i40e_aq_link_speed to an integer value of Mbps
476d2c322cSAleksandr Loktionov * @link_speed: the speed to convert
486d2c322cSAleksandr Loktionov *
496d2c322cSAleksandr Loktionov * return the speed as a direct value in Mbps.
506d2c322cSAleksandr Loktionov **/
516d2c322cSAleksandr Loktionov static u32
526d2c322cSAleksandr Loktionov i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
536d2c322cSAleksandr Loktionov {
546d2c322cSAleksandr Loktionov switch (link_speed) {
556d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_100MB:
566d2c322cSAleksandr Loktionov return SPEED_100;
576d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_1GB:
586d2c322cSAleksandr Loktionov return SPEED_1000;
596d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_2_5GB:
606d2c322cSAleksandr Loktionov return SPEED_2500;
616d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_5GB:
626d2c322cSAleksandr Loktionov return SPEED_5000;
636d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_10GB:
646d2c322cSAleksandr Loktionov return SPEED_10000;
656d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_20GB:
666d2c322cSAleksandr Loktionov return SPEED_20000;
676d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_25GB:
686d2c322cSAleksandr Loktionov return SPEED_25000;
696d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_40GB:
706d2c322cSAleksandr Loktionov return SPEED_40000;
716d2c322cSAleksandr Loktionov case I40E_LINK_SPEED_UNKNOWN:
726d2c322cSAleksandr Loktionov return SPEED_UNKNOWN;
736d2c322cSAleksandr Loktionov }
746d2c322cSAleksandr Loktionov return SPEED_UNKNOWN;
756d2c322cSAleksandr Loktionov }
766d2c322cSAleksandr Loktionov
776d2c322cSAleksandr Loktionov /**
786d2c322cSAleksandr Loktionov * i40e_set_vf_link_state
796d2c322cSAleksandr Loktionov * @vf: pointer to the VF structure
806d2c322cSAleksandr Loktionov * @pfe: pointer to PF event structure
816d2c322cSAleksandr Loktionov * @ls: pointer to link status structure
826d2c322cSAleksandr Loktionov *
836d2c322cSAleksandr Loktionov * set a link state on a single VF
846d2c322cSAleksandr Loktionov **/
856d2c322cSAleksandr Loktionov static void i40e_set_vf_link_state(struct i40e_vf *vf,
866d2c322cSAleksandr Loktionov struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
876d2c322cSAleksandr Loktionov {
886d2c322cSAleksandr Loktionov u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
896d2c322cSAleksandr Loktionov
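/* a link state forced by the PF administrator (link_up is set together
 * with link_forced, typically via the ndo_set_vf_link_state callback)
 * overrides the physical link status
 */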
906d2c322cSAleksandr Loktionov if (vf->link_forced)
916d2c322cSAleksandr Loktionov link_status = vf->link_up;
926d2c322cSAleksandr Loktionov
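/* VFs that advertise VIRTCHNL_VF_CAP_ADV_LINK_SPEED expect the speed as
 * a plain Mbps value; legacy VFs expect the virtchnl link-speed enum
 */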
936d2c322cSAleksandr Loktionov if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
946d2c322cSAleksandr Loktionov pfe->event_data.link_event_adv.link_speed = link_status ?
956d2c322cSAleksandr Loktionov i40e_vc_link_speed2mbps(ls->link_speed) : 0;
966d2c322cSAleksandr Loktionov pfe->event_data.link_event_adv.link_status = link_status;
976d2c322cSAleksandr Loktionov } else {
986d2c322cSAleksandr Loktionov pfe->event_data.link_event.link_speed = link_status ?
996d2c322cSAleksandr Loktionov i40e_virtchnl_link_speed(ls->link_speed) : 0;
1006d2c322cSAleksandr Loktionov pfe->event_data.link_event.link_status = link_status;
1016d2c322cSAleksandr Loktionov }
1026d2c322cSAleksandr Loktionov }
1036d2c322cSAleksandr Loktionov
1046d2c322cSAleksandr Loktionov /**
10555f7d723SMitch Williams * i40e_vc_notify_vf_link_state
106532b0455SMitch Williams * @vf: pointer to the VF structure
107532b0455SMitch Williams *
108532b0455SMitch Williams * send a link status message to a single VF
109532b0455SMitch Williams **/
110532b0455SMitch Williams static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
111532b0455SMitch Williams {
112310a2ad9SJesse Brandeburg struct virtchnl_pf_event pfe;
113532b0455SMitch Williams struct i40e_pf *pf = vf->pf;
114532b0455SMitch Williams struct i40e_hw *hw = &pf->hw;
115532b0455SMitch Williams struct i40e_link_status *ls = &pf->hw.phy.link_info;
116a1b5a24fSJesse Brandeburg int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
117532b0455SMitch Williams
118310a2ad9SJesse Brandeburg pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
119ff3f4cc2SJesse Brandeburg pfe.severity = PF_EVENT_SEVERITY_INFO;
1206d2c322cSAleksandr Loktionov
1216d2c322cSAleksandr Loktionov i40e_set_vf_link_state(vf, &pfe, ls);
1226d2c322cSAleksandr Loktionov
123310a2ad9SJesse Brandeburg i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
124532b0455SMitch Williams 0, (u8 *)&pfe, sizeof(pfe), NULL);
125532b0455SMitch Williams }
126532b0455SMitch Williams
127532b0455SMitch Williams /**
128532b0455SMitch Williams * i40e_vc_notify_link_state
129532b0455SMitch Williams * @pf: pointer to the PF structure
130532b0455SMitch Williams *
131532b0455SMitch Williams * send a link status message to all VFs on a given PF
132532b0455SMitch Williams **/
133532b0455SMitch Williams void i40e_vc_notify_link_state(struct i40e_pf *pf)
134532b0455SMitch Williams {
135532b0455SMitch Williams int i;
136532b0455SMitch Williams
137532b0455SMitch Williams for (i = 0; i < pf->num_alloc_vfs; i++)
138532b0455SMitch Williams i40e_vc_notify_vf_link_state(&pf->vf[i]);
139532b0455SMitch Williams }
140532b0455SMitch Williams
141532b0455SMitch Williams /**
142532b0455SMitch Williams * i40e_vc_notify_reset
143532b0455SMitch Williams * @pf: pointer to the PF structure
144532b0455SMitch Williams *
145532b0455SMitch Williams * indicate a pending reset to all VFs on a given PF
146532b0455SMitch Williams **/
147532b0455SMitch Williams void i40e_vc_notify_reset(struct i40e_pf *pf)
148532b0455SMitch Williams {
149310a2ad9SJesse Brandeburg struct virtchnl_pf_event pfe;
150532b0455SMitch Williams
151310a2ad9SJesse Brandeburg pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
152ff3f4cc2SJesse Brandeburg pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
153310a2ad9SJesse Brandeburg i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
154310a2ad9SJesse Brandeburg (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
155532b0455SMitch Williams }
156532b0455SMitch Williams
15717f5cfebSAndrii Staikov #ifdef CONFIG_PCI_IOV
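/**
 * i40e_restore_all_vfs_msi_state
 * @pdev: pointer to the PF's pci_dev structure
 *
 * Walk the VF devices that belong to this PF and restore their saved
 * MSI/MSI-X state (e.g. after a reset has cleared it). Does nothing if
 * called for a device that is not a PF or has no VFs allocated.
 **/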
15817f5cfebSAndrii Staikov void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
15917f5cfebSAndrii Staikov {
16017f5cfebSAndrii Staikov u16 vf_id;
16117f5cfebSAndrii Staikov u16 pos;
16217f5cfebSAndrii Staikov
16317f5cfebSAndrii Staikov /* Continue only if this is a PF */
16417f5cfebSAndrii Staikov if (!pdev->is_physfn)
16517f5cfebSAndrii Staikov return;
16617f5cfebSAndrii Staikov
16717f5cfebSAndrii Staikov if (!pci_num_vf(pdev))
16817f5cfebSAndrii Staikov return;
16917f5cfebSAndrii Staikov
17017f5cfebSAndrii Staikov pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
17117f5cfebSAndrii Staikov if (pos) {
17217f5cfebSAndrii Staikov struct pci_dev *vf_dev = NULL;
17317f5cfebSAndrii Staikov
17417f5cfebSAndrii Staikov pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
17517f5cfebSAndrii Staikov while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
17617f5cfebSAndrii Staikov if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
17717f5cfebSAndrii Staikov pci_restore_msi_state(vf_dev);
17817f5cfebSAndrii Staikov }
17917f5cfebSAndrii Staikov }
18017f5cfebSAndrii Staikov }
18117f5cfebSAndrii Staikov #endif /* CONFIG_PCI_IOV */
18217f5cfebSAndrii Staikov
183532b0455SMitch Williams /**
184532b0455SMitch Williams * i40e_vc_notify_vf_reset
185532b0455SMitch Williams * @vf: pointer to the VF structure
186532b0455SMitch Williams *
187532b0455SMitch Williams * indicate a pending reset to the given VF
188532b0455SMitch Williams **/
189532b0455SMitch Williams void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
190532b0455SMitch Williams {
191310a2ad9SJesse Brandeburg struct virtchnl_pf_event pfe;
192532b0455SMitch Williams int abs_vf_id;
193532b0455SMitch Williams
194532b0455SMitch Williams /* validate the request */
195532b0455SMitch Williams if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
196532b0455SMitch Williams return;
197532b0455SMitch Williams
198532b0455SMitch Williams /* verify if the VF is in either init or active before proceeding */
1996322e63cSJacob Keller if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
2006322e63cSJacob Keller !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
201532b0455SMitch Williams return;
202532b0455SMitch Williams
203a1b5a24fSJesse Brandeburg abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
204532b0455SMitch Williams
205310a2ad9SJesse Brandeburg pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
206ff3f4cc2SJesse Brandeburg pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
207310a2ad9SJesse Brandeburg i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
208532b0455SMitch Williams 0, (u8 *)&pfe,
209310a2ad9SJesse Brandeburg sizeof(struct virtchnl_pf_event), NULL);
210532b0455SMitch Williams }
2115c3c48acSJesse Brandeburg /***********************misc routines*****************************/
2125c3c48acSJesse Brandeburg
2135c3c48acSJesse Brandeburg /**
2143a3b311eSKaren Sornek * i40e_vc_reset_vf
215b40c82e6SJeff Kirsher * @vf: pointer to the VF info
2163a3b311eSKaren Sornek * @notify_vf: notify vf about reset or not
2173a3b311eSKaren Sornek * Reset VF handler.
218f9b4b627SGreg Rose **/
2193a3b311eSKaren Sornek static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
220f9b4b627SGreg Rose {
221347b5650SEryk Rybak struct i40e_pf *pf = vf->pf;
222d43d60e5SJacob Keller int i;
223d43d60e5SJacob Keller
2243a3b311eSKaren Sornek if (notify_vf)
22554f455eeSMitch Williams i40e_vc_notify_vf_reset(vf);
226d43d60e5SJacob Keller
227d43d60e5SJacob Keller /* We want to ensure that an actual reset is initiated after this
228d43d60e5SJacob Keller * function is called. However, we do not want to wait forever, so
229d43d60e5SJacob Keller * we'll give a reasonable time and print a message if we failed to
230d43d60e5SJacob Keller * ensure a reset.
231d43d60e5SJacob Keller */
232d43d60e5SJacob Keller for (i = 0; i < 20; i++) {
233347b5650SEryk Rybak /* If the PF is in the VFs-releasing state, resetting the VF is
234347b5650SEryk Rybak * impossible, so leave it.
235347b5650SEryk Rybak */
236347b5650SEryk Rybak if (test_bit(__I40E_VFS_RELEASING, pf->state))
237347b5650SEryk Rybak return;
238d43d60e5SJacob Keller if (i40e_reset_vf(vf, false))
239d43d60e5SJacob Keller return;
240d43d60e5SJacob Keller usleep_range(10000, 20000);
241d43d60e5SJacob Keller }
242d43d60e5SJacob Keller
2433a3b311eSKaren Sornek if (notify_vf)
244d43d60e5SJacob Keller dev_warn(&vf->pf->pdev->dev,
245d43d60e5SJacob Keller "Failed to initiate reset for VF %d after 200 milliseconds\n",
246d43d60e5SJacob Keller vf->vf_id);
2473a3b311eSKaren Sornek else
2483a3b311eSKaren Sornek dev_dbg(&vf->pf->pdev->dev,
2493a3b311eSKaren Sornek "Failed to initiate reset for VF %d after 200 milliseconds\n",
2503a3b311eSKaren Sornek vf->vf_id);
251f9b4b627SGreg Rose }
252f9b4b627SGreg Rose
253f9b4b627SGreg Rose /**
2545c3c48acSJesse Brandeburg * i40e_vc_isvalid_vsi_id
255b40c82e6SJeff Kirsher * @vf: pointer to the VF info
256b40c82e6SJeff Kirsher * @vsi_id: VF relative VSI id
2575c3c48acSJesse Brandeburg *
258b40c82e6SJeff Kirsher * check for the valid VSI id
2595c3c48acSJesse Brandeburg **/
260fdf0e0bfSAnjali Singhai Jain static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
2615c3c48acSJesse Brandeburg {
2625c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
263fdf0e0bfSAnjali Singhai Jain struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
2645c3c48acSJesse Brandeburg
265fdf0e0bfSAnjali Singhai Jain return (vsi && (vsi->vf_id == vf->vf_id));
2665c3c48acSJesse Brandeburg }
2675c3c48acSJesse Brandeburg
2685c3c48acSJesse Brandeburg /**
2695c3c48acSJesse Brandeburg * i40e_vc_isvalid_queue_id
270b40c82e6SJeff Kirsher * @vf: pointer to the VF info
2715c3c48acSJesse Brandeburg * @vsi_id: vsi id
2725c3c48acSJesse Brandeburg * @qid: vsi relative queue id
2735c3c48acSJesse Brandeburg *
2745c3c48acSJesse Brandeburg * check for the valid queue id
2755c3c48acSJesse Brandeburg **/
276fdf0e0bfSAnjali Singhai Jain static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
27724474f27SMartyna Szapar u16 qid)
2785c3c48acSJesse Brandeburg {
2795c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
280fdf0e0bfSAnjali Singhai Jain struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
2815c3c48acSJesse Brandeburg
282fdf0e0bfSAnjali Singhai Jain return (vsi && (qid < vsi->alloc_queue_pairs));
2835c3c48acSJesse Brandeburg }
2845c3c48acSJesse Brandeburg
2855c3c48acSJesse Brandeburg /**
2865c3c48acSJesse Brandeburg * i40e_vc_isvalid_vector_id
287b40c82e6SJeff Kirsher * @vf: pointer to the VF info
288b40c82e6SJeff Kirsher * @vector_id: VF relative vector id
2895c3c48acSJesse Brandeburg *
2905c3c48acSJesse Brandeburg * check for the valid vector id
2915c3c48acSJesse Brandeburg **/
292c004804dSGrzegorz Siwik static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
2935c3c48acSJesse Brandeburg {
2945c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
2955c3c48acSJesse Brandeburg
2969347eb77SMitch Williams return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
2975c3c48acSJesse Brandeburg }
2985c3c48acSJesse Brandeburg
2995c3c48acSJesse Brandeburg /***********************vf resource mgmt routines*****************/
3005c3c48acSJesse Brandeburg
3015c3c48acSJesse Brandeburg /**
3025c3c48acSJesse Brandeburg * i40e_vc_get_pf_queue_id
303b40c82e6SJeff Kirsher * @vf: pointer to the VF info
304fdf0e0bfSAnjali Singhai Jain * @vsi_id: id of VSI as provided by the FW
3055c3c48acSJesse Brandeburg * @vsi_queue_id: vsi relative queue id
3065c3c48acSJesse Brandeburg *
307b40c82e6SJeff Kirsher * return PF relative queue id
3085c3c48acSJesse Brandeburg **/
309fdf0e0bfSAnjali Singhai Jain static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
3105c3c48acSJesse Brandeburg u8 vsi_queue_id)
3115c3c48acSJesse Brandeburg {
3125c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
313fdf0e0bfSAnjali Singhai Jain struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
3145c3c48acSJesse Brandeburg u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
3155c3c48acSJesse Brandeburg
316fdf0e0bfSAnjali Singhai Jain if (!vsi)
317fdf0e0bfSAnjali Singhai Jain return pf_queue_id;
318fdf0e0bfSAnjali Singhai Jain
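/* a VSI's queue map is either noncontiguous, where queue_mapping[] holds
 * the absolute PF queue for every VSI-relative index, or contiguous,
 * where queue_mapping[0] is the base and the rest follow sequentially
 */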
3195c3c48acSJesse Brandeburg if (le16_to_cpu(vsi->info.mapping_flags) &
3205c3c48acSJesse Brandeburg I40E_AQ_VSI_QUE_MAP_NONCONTIG)
3215c3c48acSJesse Brandeburg pf_queue_id =
3225c3c48acSJesse Brandeburg le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
3235c3c48acSJesse Brandeburg else
3245c3c48acSJesse Brandeburg pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
3255c3c48acSJesse Brandeburg vsi_queue_id;
3265c3c48acSJesse Brandeburg
3275c3c48acSJesse Brandeburg return pf_queue_id;
3285c3c48acSJesse Brandeburg }
3295c3c48acSJesse Brandeburg
3305c3c48acSJesse Brandeburg /**
331c27eac48SAvinash Dayanand * i40e_get_real_pf_qid
332c27eac48SAvinash Dayanand * @vf: pointer to the VF info
333c27eac48SAvinash Dayanand * @vsi_id: vsi id
334c27eac48SAvinash Dayanand * @queue_id: queue number
335c27eac48SAvinash Dayanand *
336c27eac48SAvinash Dayanand * wrapper function to get pf_queue_id handling ADq code as well
337c27eac48SAvinash Dayanand **/
338c27eac48SAvinash Dayanand static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
339c27eac48SAvinash Dayanand {
340c27eac48SAvinash Dayanand int i;
341c27eac48SAvinash Dayanand
342c27eac48SAvinash Dayanand if (vf->adq_enabled) {
343c27eac48SAvinash Dayanand /* Although the VF considers all the queues (can be 1 to 16) as
344c27eac48SAvinash Dayanand * its own, they may actually belong to different VSIs (up to 4).
345c27eac48SAvinash Dayanand * We need to find which queues belong to which VSI.
346c27eac48SAvinash Dayanand */
347c27eac48SAvinash Dayanand for (i = 0; i < vf->num_tc; i++) {
348c27eac48SAvinash Dayanand if (queue_id < vf->ch[i].num_qps) {
349c27eac48SAvinash Dayanand vsi_id = vf->ch[i].vsi_id;
350c27eac48SAvinash Dayanand break;
351c27eac48SAvinash Dayanand }
352c27eac48SAvinash Dayanand /* find the right queue id, which is relative to a
353c27eac48SAvinash Dayanand * given VSI.
354c27eac48SAvinash Dayanand */
355c27eac48SAvinash Dayanand queue_id -= vf->ch[i].num_qps;
356c27eac48SAvinash Dayanand }
357c27eac48SAvinash Dayanand }
358c27eac48SAvinash Dayanand
359c27eac48SAvinash Dayanand return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
360c27eac48SAvinash Dayanand }
361c27eac48SAvinash Dayanand
362c27eac48SAvinash Dayanand /**
3635c3c48acSJesse Brandeburg * i40e_config_irq_link_list
364b40c82e6SJeff Kirsher * @vf: pointer to the VF info
365fdf0e0bfSAnjali Singhai Jain * @vsi_id: id of VSI as given by the FW
3665c3c48acSJesse Brandeburg * @vecmap: irq map info
3675c3c48acSJesse Brandeburg *
3685c3c48acSJesse Brandeburg * configure irq link list from the map
3695c3c48acSJesse Brandeburg **/
370fdf0e0bfSAnjali Singhai Jain static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
371310a2ad9SJesse Brandeburg struct virtchnl_vector_map *vecmap)
3725c3c48acSJesse Brandeburg {
3735c3c48acSJesse Brandeburg unsigned long linklistmap = 0, tempmap;
3745c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
3755c3c48acSJesse Brandeburg struct i40e_hw *hw = &pf->hw;
3765c3c48acSJesse Brandeburg u16 vsi_queue_id, pf_queue_id;
3775c3c48acSJesse Brandeburg enum i40e_queue_type qtype;
3789bcc07f0SLihong Yang u16 next_q, vector_id, size;
3795c3c48acSJesse Brandeburg u32 reg, reg_idx;
3805c3c48acSJesse Brandeburg u16 itr_idx = 0;
3815c3c48acSJesse Brandeburg
3825c3c48acSJesse Brandeburg vector_id = vecmap->vector_id;
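/* vector 0 has its own VPINT_LNKLST0 register; vectors 1..N-1 use
 * VPINT_LNKLSTN, which is laid out as (num_msix_vectors_vf - 1)
 * consecutive entries per VF
 */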
3835c3c48acSJesse Brandeburg /* setup the head */
3845c3c48acSJesse Brandeburg if (0 == vector_id)
3855c3c48acSJesse Brandeburg reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
3865c3c48acSJesse Brandeburg else
3875c3c48acSJesse Brandeburg reg_idx = I40E_VPINT_LNKLSTN(
3889347eb77SMitch Williams ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
3899347eb77SMitch Williams (vector_id - 1));
3905c3c48acSJesse Brandeburg
3915c3c48acSJesse Brandeburg if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
3925c3c48acSJesse Brandeburg /* Special case - No queues mapped on this vector */
3935c3c48acSJesse Brandeburg wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
3945c3c48acSJesse Brandeburg goto irq_list_done;
3955c3c48acSJesse Brandeburg }
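/* build a bitmap with two bits per VSI queue pair: bit (2 * qid) marks
 * the Rx queue and bit (2 * qid + 1) marks the Tx queue, so a set bit's
 * position encodes both the queue id and the queue type
 */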
3965c3c48acSJesse Brandeburg tempmap = vecmap->rxq_map;
3974836650bSWei Yongjun for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
39841a1d04bSJesse Brandeburg linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
3995c3c48acSJesse Brandeburg vsi_queue_id));
4005c3c48acSJesse Brandeburg }
4015c3c48acSJesse Brandeburg
4025c3c48acSJesse Brandeburg tempmap = vecmap->txq_map;
4034836650bSWei Yongjun for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
40441a1d04bSJesse Brandeburg linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
40541a1d04bSJesse Brandeburg vsi_queue_id + 1));
4065c3c48acSJesse Brandeburg }
4075c3c48acSJesse Brandeburg
4089bcc07f0SLihong Yang size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
4099bcc07f0SLihong Yang next_q = find_first_bit(&linklistmap, size);
4109bcc07f0SLihong Yang if (unlikely(next_q == size))
411b861fb76SLihong Yang goto irq_list_done;
412b861fb76SLihong Yang
4135c3c48acSJesse Brandeburg vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
4145c3c48acSJesse Brandeburg qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
415c27eac48SAvinash Dayanand pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
4165c3c48acSJesse Brandeburg reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
4175c3c48acSJesse Brandeburg
4185c3c48acSJesse Brandeburg wr32(hw, reg_idx, reg);
4195c3c48acSJesse Brandeburg
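/* walk the remaining set bits and chain each queue to the next by
 * programming the NEXTQ fields of its RQCTL/TQCTL register; the last
 * queue points at I40E_QUEUE_END_OF_LIST to terminate the list
 */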
4209bcc07f0SLihong Yang while (next_q < size) {
4215c3c48acSJesse Brandeburg switch (qtype) {
4225c3c48acSJesse Brandeburg case I40E_QUEUE_TYPE_RX:
4235c3c48acSJesse Brandeburg reg_idx = I40E_QINT_RQCTL(pf_queue_id);
4245c3c48acSJesse Brandeburg itr_idx = vecmap->rxitr_idx;
4255c3c48acSJesse Brandeburg break;
4265c3c48acSJesse Brandeburg case I40E_QUEUE_TYPE_TX:
4275c3c48acSJesse Brandeburg reg_idx = I40E_QINT_TQCTL(pf_queue_id);
4285c3c48acSJesse Brandeburg itr_idx = vecmap->txitr_idx;
4295c3c48acSJesse Brandeburg break;
4305c3c48acSJesse Brandeburg default:
4315c3c48acSJesse Brandeburg break;
4325c3c48acSJesse Brandeburg }
4335c3c48acSJesse Brandeburg
4349bcc07f0SLihong Yang next_q = find_next_bit(&linklistmap, size, next_q + 1);
4359bcc07f0SLihong Yang if (next_q < size) {
4365c3c48acSJesse Brandeburg vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
4375c3c48acSJesse Brandeburg qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
438c27eac48SAvinash Dayanand pf_queue_id = i40e_get_real_pf_qid(vf,
439c27eac48SAvinash Dayanand vsi_id,
4405c3c48acSJesse Brandeburg vsi_queue_id);
4415c3c48acSJesse Brandeburg } else {
4425c3c48acSJesse Brandeburg pf_queue_id = I40E_QUEUE_END_OF_LIST;
4435c3c48acSJesse Brandeburg qtype = 0;
4445c3c48acSJesse Brandeburg }
4455c3c48acSJesse Brandeburg
4465c3c48acSJesse Brandeburg /* format for the RQCTL & TQCTL regs is same */
4475c3c48acSJesse Brandeburg reg = (vector_id) |
4485c3c48acSJesse Brandeburg (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
4495c3c48acSJesse Brandeburg (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
45041a1d04bSJesse Brandeburg BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
4515c3c48acSJesse Brandeburg (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
4525c3c48acSJesse Brandeburg wr32(hw, reg_idx, reg);
4535c3c48acSJesse Brandeburg }
4545c3c48acSJesse Brandeburg
455b8262a6dSAnjali Singhai Jain /* if the VF is running in polling mode and using interrupt zero,
456b8262a6dSAnjali Singhai Jain * we need to disable auto-masking when enabling interrupt zero for VFs.
457b8262a6dSAnjali Singhai Jain */
458310a2ad9SJesse Brandeburg if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
459b8262a6dSAnjali Singhai Jain (vector_id == 0)) {
460b8262a6dSAnjali Singhai Jain reg = rd32(hw, I40E_GLINT_CTL);
461b8262a6dSAnjali Singhai Jain if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
462b8262a6dSAnjali Singhai Jain reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
463b8262a6dSAnjali Singhai Jain wr32(hw, I40E_GLINT_CTL, reg);
464b8262a6dSAnjali Singhai Jain }
465b8262a6dSAnjali Singhai Jain }
466b8262a6dSAnjali Singhai Jain
4675c3c48acSJesse Brandeburg irq_list_done:
4685c3c48acSJesse Brandeburg i40e_flush(hw);
4695c3c48acSJesse Brandeburg }
4705c3c48acSJesse Brandeburg
4715c3c48acSJesse Brandeburg /**
4722723f3b5SJesse Brandeburg * i40e_release_rdma_qvlist
473e3219ce6SAnjali Singhai Jain * @vf: pointer to the VF.
474e3219ce6SAnjali Singhai Jain *
475e3219ce6SAnjali Singhai Jain **/
4762723f3b5SJesse Brandeburg static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
477e3219ce6SAnjali Singhai Jain {
478e3219ce6SAnjali Singhai Jain struct i40e_pf *pf = vf->pf;
4792723f3b5SJesse Brandeburg struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
480e3219ce6SAnjali Singhai Jain u32 msix_vf;
481e3219ce6SAnjali Singhai Jain u32 i;
482e3219ce6SAnjali Singhai Jain
483e3219ce6SAnjali Singhai Jain if (!vf->qvlist_info)
484e3219ce6SAnjali Singhai Jain return;
485e3219ce6SAnjali Singhai Jain
486e3219ce6SAnjali Singhai Jain msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
487e3219ce6SAnjali Singhai Jain for (i = 0; i < qvlist_info->num_vectors; i++) {
4882723f3b5SJesse Brandeburg struct virtchnl_rdma_qv_info *qv_info;
489e3219ce6SAnjali Singhai Jain u32 next_q_index, next_q_type;
490e3219ce6SAnjali Singhai Jain struct i40e_hw *hw = &pf->hw;
491e3219ce6SAnjali Singhai Jain u32 v_idx, reg_idx, reg;
492e3219ce6SAnjali Singhai Jain
493e3219ce6SAnjali Singhai Jain qv_info = &qvlist_info->qv_info[i];
494e3219ce6SAnjali Singhai Jain if (!qv_info)
495e3219ce6SAnjali Singhai Jain continue;
496e3219ce6SAnjali Singhai Jain v_idx = qv_info->v_idx;
497e3219ce6SAnjali Singhai Jain if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
498e3219ce6SAnjali Singhai Jain /* Figure out the queue after CEQ and make that the
499e3219ce6SAnjali Singhai Jain * first queue.
500e3219ce6SAnjali Singhai Jain */
501e3219ce6SAnjali Singhai Jain reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
502e3219ce6SAnjali Singhai Jain reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
503e3219ce6SAnjali Singhai Jain next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
504e3219ce6SAnjali Singhai Jain >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
505e3219ce6SAnjali Singhai Jain next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
506e3219ce6SAnjali Singhai Jain >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
507e3219ce6SAnjali Singhai Jain
508e3219ce6SAnjali Singhai Jain reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
509e3219ce6SAnjali Singhai Jain reg = (next_q_index &
510e3219ce6SAnjali Singhai Jain I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
511e3219ce6SAnjali Singhai Jain (next_q_type <<
512e3219ce6SAnjali Singhai Jain I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
513e3219ce6SAnjali Singhai Jain
514e3219ce6SAnjali Singhai Jain wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
515e3219ce6SAnjali Singhai Jain }
516e3219ce6SAnjali Singhai Jain }
517e3219ce6SAnjali Singhai Jain kfree(vf->qvlist_info);
518e3219ce6SAnjali Singhai Jain vf->qvlist_info = NULL;
519e3219ce6SAnjali Singhai Jain }
520e3219ce6SAnjali Singhai Jain
521e3219ce6SAnjali Singhai Jain /**
5222723f3b5SJesse Brandeburg * i40e_config_rdma_qvlist
523e3219ce6SAnjali Singhai Jain * @vf: pointer to the VF info
524e3219ce6SAnjali Singhai Jain * @qvlist_info: queue and vector list
525e3219ce6SAnjali Singhai Jain *
526e3219ce6SAnjali Singhai Jain * Return 0 on success or < 0 on error
527e3219ce6SAnjali Singhai Jain **/
5282723f3b5SJesse Brandeburg static int
5292723f3b5SJesse Brandeburg i40e_config_rdma_qvlist(struct i40e_vf *vf,
5302723f3b5SJesse Brandeburg struct virtchnl_rdma_qvlist_info *qvlist_info)
531e3219ce6SAnjali Singhai Jain {
532e3219ce6SAnjali Singhai Jain struct i40e_pf *pf = vf->pf;
533e3219ce6SAnjali Singhai Jain struct i40e_hw *hw = &pf->hw;
5342723f3b5SJesse Brandeburg struct virtchnl_rdma_qv_info *qv_info;
535e3219ce6SAnjali Singhai Jain u32 v_idx, i, reg_idx, reg;
536e3219ce6SAnjali Singhai Jain u32 next_q_idx, next_q_type;
537b0654e64SAlexander Lobakin size_t size;
538fae6cad1SGustavo A. R. Silva u32 msix_vf;
5390b636446SMartyna Szapar int ret = 0;
540e3219ce6SAnjali Singhai Jain
5417015ca3dSSergey Nemov msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
5427015ca3dSSergey Nemov
5437015ca3dSSergey Nemov if (qvlist_info->num_vectors > msix_vf) {
5447015ca3dSSergey Nemov dev_warn(&pf->pdev->dev,
5457015ca3dSSergey Nemov "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
5467015ca3dSSergey Nemov qvlist_info->num_vectors,
5477015ca3dSSergey Nemov msix_vf);
5480b636446SMartyna Szapar ret = -EINVAL;
5490b636446SMartyna Szapar goto err_out;
5507015ca3dSSergey Nemov }
5517015ca3dSSergey Nemov
5520b636446SMartyna Szapar kfree(vf->qvlist_info);
553b0654e64SAlexander Lobakin size = virtchnl_struct_size(vf->qvlist_info, qv_info,
554b0654e64SAlexander Lobakin qvlist_info->num_vectors);
555b0654e64SAlexander Lobakin vf->qvlist_info = kzalloc(size, GFP_KERNEL);
5560b636446SMartyna Szapar if (!vf->qvlist_info) {
5570b636446SMartyna Szapar ret = -ENOMEM;
5580b636446SMartyna Szapar goto err_out;
5590b636446SMartyna Szapar }
560e3219ce6SAnjali Singhai Jain vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
561e3219ce6SAnjali Singhai Jain
562e3219ce6SAnjali Singhai Jain msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
563e3219ce6SAnjali Singhai Jain for (i = 0; i < qvlist_info->num_vectors; i++) {
564e3219ce6SAnjali Singhai Jain qv_info = &qvlist_info->qv_info[i];
565e3219ce6SAnjali Singhai Jain if (!qv_info)
566e3219ce6SAnjali Singhai Jain continue;
567e3219ce6SAnjali Singhai Jain
568e3219ce6SAnjali Singhai Jain /* Validate vector id belongs to this vf */
569d510497bSSergey Nemov if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
5700b636446SMartyna Szapar ret = -EINVAL;
5710b636446SMartyna Szapar goto err_free;
5720b636446SMartyna Szapar }
573e3219ce6SAnjali Singhai Jain
574d510497bSSergey Nemov v_idx = qv_info->v_idx;
575d510497bSSergey Nemov
576e3219ce6SAnjali Singhai Jain vf->qvlist_info->qv_info[i] = *qv_info;
577e3219ce6SAnjali Singhai Jain
578e3219ce6SAnjali Singhai Jain reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
579e3219ce6SAnjali Singhai Jain /* We might be sharing the interrupt, so get the first queue
580e3219ce6SAnjali Singhai Jain * index and type, push it down the list by adding the new
581e3219ce6SAnjali Singhai Jain * queue on top. Also link it with the new queue in CEQCTL.
582e3219ce6SAnjali Singhai Jain */
583e3219ce6SAnjali Singhai Jain reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
584e3219ce6SAnjali Singhai Jain next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
585e3219ce6SAnjali Singhai Jain I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
586e3219ce6SAnjali Singhai Jain next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
587e3219ce6SAnjali Singhai Jain I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
588e3219ce6SAnjali Singhai Jain
589e3219ce6SAnjali Singhai Jain if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
590e3219ce6SAnjali Singhai Jain reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
591e3219ce6SAnjali Singhai Jain reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
592e3219ce6SAnjali Singhai Jain (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
593e3219ce6SAnjali Singhai Jain (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
594e3219ce6SAnjali Singhai Jain (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
595e3219ce6SAnjali Singhai Jain (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
596e3219ce6SAnjali Singhai Jain wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
597e3219ce6SAnjali Singhai Jain
598e3219ce6SAnjali Singhai Jain reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
599e3219ce6SAnjali Singhai Jain reg = (qv_info->ceq_idx &
600e3219ce6SAnjali Singhai Jain I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
601e3219ce6SAnjali Singhai Jain (I40E_QUEUE_TYPE_PE_CEQ <<
602e3219ce6SAnjali Singhai Jain I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
603e3219ce6SAnjali Singhai Jain wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
604e3219ce6SAnjali Singhai Jain }
605e3219ce6SAnjali Singhai Jain
606e3219ce6SAnjali Singhai Jain if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
607e3219ce6SAnjali Singhai Jain reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
608e3219ce6SAnjali Singhai Jain (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
609e3219ce6SAnjali Singhai Jain (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
610e3219ce6SAnjali Singhai Jain
611e3219ce6SAnjali Singhai Jain wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
612e3219ce6SAnjali Singhai Jain }
613e3219ce6SAnjali Singhai Jain }
614e3219ce6SAnjali Singhai Jain
615e3219ce6SAnjali Singhai Jain return 0;
6160b636446SMartyna Szapar err_free:
617e3219ce6SAnjali Singhai Jain kfree(vf->qvlist_info);
618e3219ce6SAnjali Singhai Jain vf->qvlist_info = NULL;
6190b636446SMartyna Szapar err_out:
6200b636446SMartyna Szapar return ret;
621e3219ce6SAnjali Singhai Jain }
622e3219ce6SAnjali Singhai Jain
623e3219ce6SAnjali Singhai Jain /**
6245c3c48acSJesse Brandeburg * i40e_config_vsi_tx_queue
625b40c82e6SJeff Kirsher * @vf: pointer to the VF info
626fdf0e0bfSAnjali Singhai Jain * @vsi_id: id of VSI as provided by the FW
6275c3c48acSJesse Brandeburg * @vsi_queue_id: vsi relative queue index
6285c3c48acSJesse Brandeburg * @info: config. info
6295c3c48acSJesse Brandeburg *
6305c3c48acSJesse Brandeburg * configure tx queue
6315c3c48acSJesse Brandeburg **/
632fdf0e0bfSAnjali Singhai Jain static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
6335c3c48acSJesse Brandeburg u16 vsi_queue_id,
634310a2ad9SJesse Brandeburg struct virtchnl_txq_info *info)
6355c3c48acSJesse Brandeburg {
6365c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
6375c3c48acSJesse Brandeburg struct i40e_hw *hw = &pf->hw;
6385c3c48acSJesse Brandeburg struct i40e_hmc_obj_txq tx_ctx;
639fdf0e0bfSAnjali Singhai Jain struct i40e_vsi *vsi;
6405c3c48acSJesse Brandeburg u16 pf_queue_id;
6415c3c48acSJesse Brandeburg u32 qtx_ctl;
6425c3c48acSJesse Brandeburg int ret = 0;
6435c3c48acSJesse Brandeburg
644d4a0658dSCarolyn Wyborny if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
645d4a0658dSCarolyn Wyborny ret = -ENOENT;
646d4a0658dSCarolyn Wyborny goto error_context;
647d4a0658dSCarolyn Wyborny }
648fdf0e0bfSAnjali Singhai Jain pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
649fdf0e0bfSAnjali Singhai Jain vsi = i40e_find_vsi_from_id(pf, vsi_id);
650d4a0658dSCarolyn Wyborny if (!vsi) {
651d4a0658dSCarolyn Wyborny ret = -ENOENT;
652d4a0658dSCarolyn Wyborny goto error_context;
653d4a0658dSCarolyn Wyborny }
6545c3c48acSJesse Brandeburg
6555c3c48acSJesse Brandeburg /* clear the context structure first */
6565c3c48acSJesse Brandeburg memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
6575c3c48acSJesse Brandeburg
6585c3c48acSJesse Brandeburg /* only set the required fields */
6595c3c48acSJesse Brandeburg tx_ctx.base = info->dma_ring_addr / 128;
6605c3c48acSJesse Brandeburg tx_ctx.qlen = info->ring_len;
661fdf0e0bfSAnjali Singhai Jain tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
6625c3c48acSJesse Brandeburg tx_ctx.rdylist_act = 0;
6635d29896aSAshish Shah tx_ctx.head_wb_ena = info->headwb_enabled;
6645d29896aSAshish Shah tx_ctx.head_wb_addr = info->dma_headwb_addr;
6655c3c48acSJesse Brandeburg
6665c3c48acSJesse Brandeburg /* clear the context in the HMC */
6675c3c48acSJesse Brandeburg ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
6685c3c48acSJesse Brandeburg if (ret) {
6695c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
6705c3c48acSJesse Brandeburg "Failed to clear VF LAN Tx queue context %d, error: %d\n",
6715c3c48acSJesse Brandeburg pf_queue_id, ret);
6725c3c48acSJesse Brandeburg ret = -ENOENT;
6735c3c48acSJesse Brandeburg goto error_context;
6745c3c48acSJesse Brandeburg }
6755c3c48acSJesse Brandeburg
6765c3c48acSJesse Brandeburg /* set the context in the HMC */
6775c3c48acSJesse Brandeburg ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
6785c3c48acSJesse Brandeburg if (ret) {
6795c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
6805c3c48acSJesse Brandeburg "Failed to set VF LAN Tx queue context %d error: %d\n",
6815c3c48acSJesse Brandeburg pf_queue_id, ret);
6825c3c48acSJesse Brandeburg ret = -ENOENT;
6835c3c48acSJesse Brandeburg goto error_context;
6845c3c48acSJesse Brandeburg }
6855c3c48acSJesse Brandeburg
6865c3c48acSJesse Brandeburg /* associate this queue with the PCI VF function */
6875c3c48acSJesse Brandeburg qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
68813fd9774SShannon Nelson qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
6895c3c48acSJesse Brandeburg & I40E_QTX_CTL_PF_INDX_MASK);
6905c3c48acSJesse Brandeburg qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
6915c3c48acSJesse Brandeburg << I40E_QTX_CTL_VFVM_INDX_SHIFT)
6925c3c48acSJesse Brandeburg & I40E_QTX_CTL_VFVM_INDX_MASK);
6935c3c48acSJesse Brandeburg wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
6945c3c48acSJesse Brandeburg i40e_flush(hw);
6955c3c48acSJesse Brandeburg
6965c3c48acSJesse Brandeburg error_context:
6975c3c48acSJesse Brandeburg return ret;
6985c3c48acSJesse Brandeburg }
6995c3c48acSJesse Brandeburg
7005c3c48acSJesse Brandeburg /**
7015c3c48acSJesse Brandeburg * i40e_config_vsi_rx_queue
702b40c82e6SJeff Kirsher * @vf: pointer to the VF info
703fdf0e0bfSAnjali Singhai Jain * @vsi_id: id of VSI as provided by the FW
7045c3c48acSJesse Brandeburg * @vsi_queue_id: vsi relative queue index
7055c3c48acSJesse Brandeburg * @info: config. info
7065c3c48acSJesse Brandeburg *
7075c3c48acSJesse Brandeburg * configure rx queue
7085c3c48acSJesse Brandeburg **/
709fdf0e0bfSAnjali Singhai Jain static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
7105c3c48acSJesse Brandeburg u16 vsi_queue_id,
711310a2ad9SJesse Brandeburg struct virtchnl_rxq_info *info)
7125c3c48acSJesse Brandeburg {
7136afbd7b3SEryk Rybak u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
7145c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
7156afbd7b3SEryk Rybak struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
7165c3c48acSJesse Brandeburg struct i40e_hw *hw = &pf->hw;
7175c3c48acSJesse Brandeburg struct i40e_hmc_obj_rxq rx_ctx;
7185c3c48acSJesse Brandeburg int ret = 0;
7195c3c48acSJesse Brandeburg
7205c3c48acSJesse Brandeburg /* clear the context structure first */
7215c3c48acSJesse Brandeburg memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
7225c3c48acSJesse Brandeburg
7235c3c48acSJesse Brandeburg /* only set the required fields */
7245c3c48acSJesse Brandeburg rx_ctx.base = info->dma_ring_addr / 128;
7255c3c48acSJesse Brandeburg rx_ctx.qlen = info->ring_len;
7265c3c48acSJesse Brandeburg
7275c3c48acSJesse Brandeburg if (info->splithdr_enabled) {
7285c3c48acSJesse Brandeburg rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
7295c3c48acSJesse Brandeburg I40E_RX_SPLIT_IP |
7305c3c48acSJesse Brandeburg I40E_RX_SPLIT_TCP_UDP |
7315c3c48acSJesse Brandeburg I40E_RX_SPLIT_SCTP;
7325c3c48acSJesse Brandeburg /* header length validation */
7335c3c48acSJesse Brandeburg if (info->hdr_size > ((2 * 1024) - 64)) {
7345c3c48acSJesse Brandeburg ret = -EINVAL;
7355c3c48acSJesse Brandeburg goto error_param;
7365c3c48acSJesse Brandeburg }
7375c3c48acSJesse Brandeburg rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
7385c3c48acSJesse Brandeburg
73919b85e67SJesse Brandeburg /* set split mode 10b */
740d6b3bca1SMitch Williams rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
7415c3c48acSJesse Brandeburg }
7425c3c48acSJesse Brandeburg
7435c3c48acSJesse Brandeburg /* databuffer length validation */
7445c3c48acSJesse Brandeburg if (info->databuffer_size > ((16 * 1024) - 128)) {
7455c3c48acSJesse Brandeburg ret = -EINVAL;
7465c3c48acSJesse Brandeburg goto error_param;
7475c3c48acSJesse Brandeburg }
7485c3c48acSJesse Brandeburg rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
7495c3c48acSJesse Brandeburg
7505c3c48acSJesse Brandeburg /* max pkt. length validation */
7515c3c48acSJesse Brandeburg if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
7525c3c48acSJesse Brandeburg ret = -EINVAL;
7535c3c48acSJesse Brandeburg goto error_param;
7545c3c48acSJesse Brandeburg }
7555c3c48acSJesse Brandeburg rx_ctx.rxmax = info->max_pkt_size;
7565c3c48acSJesse Brandeburg
7576afbd7b3SEryk Rybak /* if port VLAN is configured increase the max packet size */
7586afbd7b3SEryk Rybak if (vsi->info.pvid)
7596afbd7b3SEryk Rybak rx_ctx.rxmax += VLAN_HLEN;
7606afbd7b3SEryk Rybak
7615c3c48acSJesse Brandeburg /* enable 32bytes desc always */
7625c3c48acSJesse Brandeburg rx_ctx.dsize = 1;
7635c3c48acSJesse Brandeburg
7645c3c48acSJesse Brandeburg /* default values */
7657362be9eSJacob Keller rx_ctx.lrxqthresh = 1;
7665c3c48acSJesse Brandeburg rx_ctx.crcstrip = 1;
76750d41659SMitch Williams rx_ctx.prefena = 1;
768c1d11cefSShannon Nelson rx_ctx.l2tsel = 1;
7695c3c48acSJesse Brandeburg
7705c3c48acSJesse Brandeburg /* clear the context in the HMC */
7715c3c48acSJesse Brandeburg ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
7725c3c48acSJesse Brandeburg if (ret) {
7735c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
7745c3c48acSJesse Brandeburg "Failed to clear VF LAN Rx queue context %d, error: %d\n",
7755c3c48acSJesse Brandeburg pf_queue_id, ret);
7765c3c48acSJesse Brandeburg ret = -ENOENT;
7775c3c48acSJesse Brandeburg goto error_param;
7785c3c48acSJesse Brandeburg }
7795c3c48acSJesse Brandeburg
7805c3c48acSJesse Brandeburg /* set the context in the HMC */
7815c3c48acSJesse Brandeburg ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
7825c3c48acSJesse Brandeburg if (ret) {
7835c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
7845c3c48acSJesse Brandeburg "Failed to set VF LAN Rx queue context %d error: %d\n",
7855c3c48acSJesse Brandeburg pf_queue_id, ret);
7865c3c48acSJesse Brandeburg ret = -ENOENT;
7875c3c48acSJesse Brandeburg goto error_param;
7885c3c48acSJesse Brandeburg }
7895c3c48acSJesse Brandeburg
7905c3c48acSJesse Brandeburg error_param:
7915c3c48acSJesse Brandeburg return ret;
7925c3c48acSJesse Brandeburg }
7935c3c48acSJesse Brandeburg
7945c3c48acSJesse Brandeburg /**
7955c3c48acSJesse Brandeburg * i40e_alloc_vsi_res
796b40c82e6SJeff Kirsher * @vf: pointer to the VF info
797c27eac48SAvinash Dayanand * @idx: VSI index, applies only for ADq mode, zero otherwise
7985c3c48acSJesse Brandeburg *
799b40c82e6SJeff Kirsher * alloc VF vsi context & resources
8005c3c48acSJesse Brandeburg **/
801c27eac48SAvinash Dayanand static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
8025c3c48acSJesse Brandeburg {
8035c3c48acSJesse Brandeburg struct i40e_mac_filter *f = NULL;
8045c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
8055c3c48acSJesse Brandeburg struct i40e_vsi *vsi;
8060c483bd4SAvinash Dayanand u64 max_tx_rate = 0;
8075c3c48acSJesse Brandeburg int ret = 0;
8085c3c48acSJesse Brandeburg
809c27eac48SAvinash Dayanand vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
810c27eac48SAvinash Dayanand vf->vf_id);
8115c3c48acSJesse Brandeburg
8125c3c48acSJesse Brandeburg if (!vsi) {
8135c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
814b40c82e6SJeff Kirsher "add vsi failed for VF %d, aq_err %d\n",
8155c3c48acSJesse Brandeburg vf->vf_id, pf->hw.aq.asq_last_status);
8165c3c48acSJesse Brandeburg ret = -ENOENT;
8175c3c48acSJesse Brandeburg goto error_alloc_vsi_res;
8185c3c48acSJesse Brandeburg }
819c27eac48SAvinash Dayanand
820c27eac48SAvinash Dayanand if (!idx) {
821bb360717SMitch Williams u64 hena = i40e_pf_get_default_rss_hena(pf);
822435c084aSJacob Keller u8 broadcast[ETH_ALEN];
823bb360717SMitch Williams
824fdf0e0bfSAnjali Singhai Jain vf->lan_vsi_idx = vsi->idx;
8255c3c48acSJesse Brandeburg vf->lan_vsi_id = vsi->id;
8266c12fcbfSGreg Rose /* If the port VLAN has been configured and then the
8276c12fcbfSGreg Rose * VF driver was removed, the VSI port VLAN
8286c12fcbfSGreg Rose * configuration was destroyed. Check if there is
8296c12fcbfSGreg Rose * a port VLAN and restore the VSI configuration if
8306c12fcbfSGreg Rose * needed.
8316c12fcbfSGreg Rose */
8326c12fcbfSGreg Rose if (vf->port_vlan_id)
8336c12fcbfSGreg Rose i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
83421659035SKiran Patil
835278e7d0bSJacob Keller spin_lock_bh(&vsi->mac_filter_hash_lock);
836b7b713a8SMitch Williams if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
8379569a9a4SJacob Keller f = i40e_add_mac_filter(vsi,
8389569a9a4SJacob Keller vf->default_lan_addr.addr);
8391a10370aSGreg Rose if (!f)
8401a10370aSGreg Rose dev_info(&pf->pdev->dev,
841b7b713a8SMitch Williams "Could not add MAC filter %pM for VF %d\n",
842b7b713a8SMitch Williams vf->default_lan_addr.addr, vf->vf_id);
843b7b713a8SMitch Williams }
844435c084aSJacob Keller eth_broadcast_addr(broadcast);
8459569a9a4SJacob Keller f = i40e_add_mac_filter(vsi, broadcast);
846435c084aSJacob Keller if (!f)
847435c084aSJacob Keller dev_info(&pf->pdev->dev,
848435c084aSJacob Keller "Could not allocate VF broadcast filter\n");
849278e7d0bSJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
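/* program the default RSS hash-enable bits for this VF, split across
 * the low and high halves of the VFQF_HENA1 registers
 */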
85026f77e53SLihong Yang wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
85126f77e53SLihong Yang wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
852c27eac48SAvinash Dayanand /* program mac filter only for VF VSI */
85317652c63SJesse Brandeburg ret = i40e_sync_vsi_filters(vsi);
854fd1646eeSMitch Williams if (ret)
8555c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
856c27eac48SAvinash Dayanand }
857c27eac48SAvinash Dayanand
858c27eac48SAvinash Dayanand /* store the VSI index and id for ADq; the MAC filter is not applied here */
859c27eac48SAvinash Dayanand if (vf->adq_enabled) {
860c27eac48SAvinash Dayanand vf->ch[idx].vsi_idx = vsi->idx;
861c27eac48SAvinash Dayanand vf->ch[idx].vsi_id = vsi->id;
862c27eac48SAvinash Dayanand }
8635c3c48acSJesse Brandeburg
8646b192891SMitch Williams /* Set VF bandwidth if specified */
8656b192891SMitch Williams if (vf->tx_rate) {
8660c483bd4SAvinash Dayanand max_tx_rate = vf->tx_rate;
8670c483bd4SAvinash Dayanand } else if (vf->ch[idx].max_tx_rate) {
8680c483bd4SAvinash Dayanand max_tx_rate = vf->ch[idx].max_tx_rate;
8690c483bd4SAvinash Dayanand }
8700c483bd4SAvinash Dayanand
8710c483bd4SAvinash Dayanand if (max_tx_rate) {
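/* the limit is programmed in bandwidth credits of I40E_BW_CREDIT_DIVISOR
 * Mbps each, so convert from Mbps before sending it to the admin queue
 */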
8720c483bd4SAvinash Dayanand max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
8736b192891SMitch Williams ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
8740c483bd4SAvinash Dayanand max_tx_rate, 0, NULL);
8756b192891SMitch Williams if (ret)
8766b192891SMitch Williams dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
8776b192891SMitch Williams vf->vf_id, ret);
8786b192891SMitch Williams }
8796b192891SMitch Williams
8805c3c48acSJesse Brandeburg error_alloc_vsi_res:
8815c3c48acSJesse Brandeburg return ret;
8825c3c48acSJesse Brandeburg }
8835c3c48acSJesse Brandeburg
8845c3c48acSJesse Brandeburg /**
885c27eac48SAvinash Dayanand * i40e_map_pf_queues_to_vsi
886c27eac48SAvinash Dayanand * @vf: pointer to the VF info
887c27eac48SAvinash Dayanand *
888c27eac48SAvinash Dayanand * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
889c27eac48SAvinash Dayanand * function takes care of the first part, VSILAN_QTABLE, mapping PF queues to VSI.
890c27eac48SAvinash Dayanand **/
891c27eac48SAvinash Dayanand static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
892c27eac48SAvinash Dayanand {
893c27eac48SAvinash Dayanand struct i40e_pf *pf = vf->pf;
894c27eac48SAvinash Dayanand struct i40e_hw *hw = &pf->hw;
895c27eac48SAvinash Dayanand u32 reg, num_tc = 1; /* VF has at least one traffic class */
896c27eac48SAvinash Dayanand u16 vsi_id, qps;
897c27eac48SAvinash Dayanand int i, j;
898c27eac48SAvinash Dayanand
899c27eac48SAvinash Dayanand if (vf->adq_enabled)
900c27eac48SAvinash Dayanand num_tc = vf->num_tc;
901c27eac48SAvinash Dayanand
902c27eac48SAvinash Dayanand for (i = 0; i < num_tc; i++) {
903c27eac48SAvinash Dayanand if (vf->adq_enabled) {
904c27eac48SAvinash Dayanand qps = vf->ch[i].num_qps;
905c27eac48SAvinash Dayanand vsi_id = vf->ch[i].vsi_id;
906c27eac48SAvinash Dayanand } else {
907c27eac48SAvinash Dayanand qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
908c27eac48SAvinash Dayanand vsi_id = vf->lan_vsi_id;
909c27eac48SAvinash Dayanand }
910c27eac48SAvinash Dayanand
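/* each VSILAN_QTABLE register packs two PF queue ids (bits 0-15 and
 * 16-31); slots beyond the VSI's queue count are filled with the
 * 0x7FF end-of-list marker
 */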
911c27eac48SAvinash Dayanand for (j = 0; j < 7; j++) {
912c27eac48SAvinash Dayanand if (j * 2 >= qps) {
913c27eac48SAvinash Dayanand /* end of list */
914c27eac48SAvinash Dayanand reg = 0x07FF07FF;
915c27eac48SAvinash Dayanand } else {
916c27eac48SAvinash Dayanand u16 qid = i40e_vc_get_pf_queue_id(vf,
917c27eac48SAvinash Dayanand vsi_id,
918c27eac48SAvinash Dayanand j * 2);
919c27eac48SAvinash Dayanand reg = qid;
920c27eac48SAvinash Dayanand qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
921c27eac48SAvinash Dayanand (j * 2) + 1);
922c27eac48SAvinash Dayanand reg |= qid << 16;
923c27eac48SAvinash Dayanand }
924c27eac48SAvinash Dayanand i40e_write_rx_ctl(hw,
925c27eac48SAvinash Dayanand I40E_VSILAN_QTABLE(j, vsi_id),
926c27eac48SAvinash Dayanand reg);
927c27eac48SAvinash Dayanand }
928c27eac48SAvinash Dayanand }
929c27eac48SAvinash Dayanand }
930c27eac48SAvinash Dayanand
931c27eac48SAvinash Dayanand /**
932c27eac48SAvinash Dayanand * i40e_map_pf_to_vf_queues
933c27eac48SAvinash Dayanand * @vf: pointer to the VF info
934c27eac48SAvinash Dayanand *
935c27eac48SAvinash Dayanand * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
936c27eac48SAvinash Dayanand * function takes care of the second part, VPLAN_QTABLE, and completes VF mappings.
937c27eac48SAvinash Dayanand **/
938c27eac48SAvinash Dayanand static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
939c27eac48SAvinash Dayanand {
940c27eac48SAvinash Dayanand struct i40e_pf *pf = vf->pf;
941c27eac48SAvinash Dayanand struct i40e_hw *hw = &pf->hw;
942c27eac48SAvinash Dayanand u32 reg, total_qps = 0;
943c27eac48SAvinash Dayanand u32 qps, num_tc = 1; /* VF has at least one traffic class */
944c27eac48SAvinash Dayanand u16 vsi_id, qid;
945c27eac48SAvinash Dayanand int i, j;
946c27eac48SAvinash Dayanand
947c27eac48SAvinash Dayanand if (vf->adq_enabled)
948c27eac48SAvinash Dayanand num_tc = vf->num_tc;
949c27eac48SAvinash Dayanand
950c27eac48SAvinash Dayanand for (i = 0; i < num_tc; i++) {
951c27eac48SAvinash Dayanand if (vf->adq_enabled) {
952c27eac48SAvinash Dayanand qps = vf->ch[i].num_qps;
953c27eac48SAvinash Dayanand vsi_id = vf->ch[i].vsi_id;
954c27eac48SAvinash Dayanand } else {
955c27eac48SAvinash Dayanand qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
956c27eac48SAvinash Dayanand vsi_id = vf->lan_vsi_id;
957c27eac48SAvinash Dayanand }
958c27eac48SAvinash Dayanand
959c27eac48SAvinash Dayanand for (j = 0; j < qps; j++) {
960c27eac48SAvinash Dayanand qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
961c27eac48SAvinash Dayanand
962c27eac48SAvinash Dayanand reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
963c27eac48SAvinash Dayanand wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
964c27eac48SAvinash Dayanand reg);
965c27eac48SAvinash Dayanand total_qps++;
966c27eac48SAvinash Dayanand }
967c27eac48SAvinash Dayanand }
968c27eac48SAvinash Dayanand }
969c27eac48SAvinash Dayanand
970c27eac48SAvinash Dayanand /**
971805bd5bdSMitch Williams * i40e_enable_vf_mappings
972b40c82e6SJeff Kirsher * @vf: pointer to the VF info
973805bd5bdSMitch Williams *
974b40c82e6SJeff Kirsher * enable VF mappings
975805bd5bdSMitch Williams **/
976805bd5bdSMitch Williams static void i40e_enable_vf_mappings(struct i40e_vf *vf)
977805bd5bdSMitch Williams {
978805bd5bdSMitch Williams struct i40e_pf *pf = vf->pf;
979805bd5bdSMitch Williams struct i40e_hw *hw = &pf->hw;
980c27eac48SAvinash Dayanand u32 reg;
981805bd5bdSMitch Williams
982805bd5bdSMitch Williams /* Tell the hardware we're using noncontiguous mapping. HW requires
983805bd5bdSMitch Williams * that VF queues be mapped using this method, even when they are
984805bd5bdSMitch Williams * contiguous in real life
985805bd5bdSMitch Williams */
986272cdaf2SShannon Nelson i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
987805bd5bdSMitch Williams I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
988805bd5bdSMitch Williams
989805bd5bdSMitch Williams /* enable VF vplan_qtable mappings */
990805bd5bdSMitch Williams reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
991805bd5bdSMitch Williams wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
992805bd5bdSMitch Williams
993c27eac48SAvinash Dayanand i40e_map_pf_to_vf_queues(vf);
994c27eac48SAvinash Dayanand i40e_map_pf_queues_to_vsi(vf);
995805bd5bdSMitch Williams
996805bd5bdSMitch Williams i40e_flush(hw);
997805bd5bdSMitch Williams }
998805bd5bdSMitch Williams
999805bd5bdSMitch Williams /**
1000805bd5bdSMitch Williams * i40e_disable_vf_mappings
1001b40c82e6SJeff Kirsher * @vf: pointer to the VF info
1002805bd5bdSMitch Williams *
1003b40c82e6SJeff Kirsher * disable VF mappings
1004805bd5bdSMitch Williams **/
1005805bd5bdSMitch Williams static void i40e_disable_vf_mappings(struct i40e_vf *vf)
1006805bd5bdSMitch Williams {
1007805bd5bdSMitch Williams struct i40e_pf *pf = vf->pf;
1008805bd5bdSMitch Williams struct i40e_hw *hw = &pf->hw;
1009805bd5bdSMitch Williams int i;
1010805bd5bdSMitch Williams
1011805bd5bdSMitch Williams /* disable qp mappings */
1012805bd5bdSMitch Williams wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
1013805bd5bdSMitch Williams for (i = 0; i < I40E_MAX_VSI_QP; i++)
1014805bd5bdSMitch Williams wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
1015805bd5bdSMitch Williams I40E_QUEUE_END_OF_LIST);
1016805bd5bdSMitch Williams i40e_flush(hw);
1017805bd5bdSMitch Williams }
1018805bd5bdSMitch Williams
1019805bd5bdSMitch Williams /**
1020805bd5bdSMitch Williams * i40e_free_vf_res
1021b40c82e6SJeff Kirsher * @vf: pointer to the VF info
1022805bd5bdSMitch Williams *
1023b40c82e6SJeff Kirsher * free VF resources
1024805bd5bdSMitch Williams **/
1025805bd5bdSMitch Williams static void i40e_free_vf_res(struct i40e_vf *vf)
1026805bd5bdSMitch Williams {
1027805bd5bdSMitch Williams struct i40e_pf *pf = vf->pf;
1028fc18eaa0SMitch Williams struct i40e_hw *hw = &pf->hw;
1029fc18eaa0SMitch Williams u32 reg_idx, reg;
1030c27eac48SAvinash Dayanand int i, j, msix_vf;
1031805bd5bdSMitch Williams
1032beff3e9dSRobert Konklewski /* Start by disabling VF's configuration API to prevent the OS from
1033beff3e9dSRobert Konklewski * accessing the VF's VSI after it's freed / invalidated.
1034beff3e9dSRobert Konklewski */
10356322e63cSJacob Keller clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1036beff3e9dSRobert Konklewski
1037a3f5aa90SAlan Brady /* It's possible the VF had requested more queues than the default so
1038a3f5aa90SAlan Brady * do the accounting here when we're about to free them.
1039a3f5aa90SAlan Brady */
1040a3f5aa90SAlan Brady if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
1041a3f5aa90SAlan Brady pf->queues_left += vf->num_queue_pairs -
1042a3f5aa90SAlan Brady I40E_DEFAULT_QUEUES_PER_VF;
1043a3f5aa90SAlan Brady }
1044a3f5aa90SAlan Brady
1045805bd5bdSMitch Williams /* free vsi & disconnect it from the parent uplink */
1046fdf0e0bfSAnjali Singhai Jain if (vf->lan_vsi_idx) {
1047fdf0e0bfSAnjali Singhai Jain i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
1048fdf0e0bfSAnjali Singhai Jain vf->lan_vsi_idx = 0;
1049805bd5bdSMitch Williams vf->lan_vsi_id = 0;
1050805bd5bdSMitch Williams }
1051c27eac48SAvinash Dayanand
1052c27eac48SAvinash Dayanand /* do the accounting and remove additional ADq VSIs */
1053c27eac48SAvinash Dayanand if (vf->adq_enabled && vf->ch[0].vsi_idx) {
1054c27eac48SAvinash Dayanand for (j = 0; j < vf->num_tc; j++) {
1055c27eac48SAvinash Dayanand /* At this point VSI0 has already been released, so don't
1056c27eac48SAvinash Dayanand * release it again; only clear its bookkeeping in the
1057c27eac48SAvinash Dayanand * channel structure
1058c27eac48SAvinash Dayanand */
1059c27eac48SAvinash Dayanand if (j)
1060c27eac48SAvinash Dayanand i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
1061c27eac48SAvinash Dayanand vf->ch[j].vsi_idx = 0;
1062c27eac48SAvinash Dayanand vf->ch[j].vsi_id = 0;
1063c27eac48SAvinash Dayanand }
1064c27eac48SAvinash Dayanand }
10659347eb77SMitch Williams msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
10669347eb77SMitch Williams
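	/* Illustrative note (derived from the index math used below; values
	 * are examples only): vector 0 of VF n uses VFINT_DYN_CTL0(n) and
	 * VPINT_LNKLST0(n), while vector i > 0 uses the shared "N" arrays at
	 * index (msix_vf - 1) * n + (i - 1). With msix_vf = 5, vector 3 of
	 * VF 2 would therefore use index 4 * 2 + 2 = 10.
	 */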
1067fc18eaa0SMitch Williams /* disable interrupts so the VF starts in a known state */
1068fc18eaa0SMitch Williams for (i = 0; i < msix_vf; i++) {
1069fc18eaa0SMitch Williams /* format is the same for both registers */
1070fc18eaa0SMitch Williams if (0 == i)
1071fc18eaa0SMitch Williams reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
1072fc18eaa0SMitch Williams else
1073fc18eaa0SMitch Williams reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
1074fc18eaa0SMitch Williams (vf->vf_id))
1075fc18eaa0SMitch Williams + (i - 1));
1076fc18eaa0SMitch Williams wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1077fc18eaa0SMitch Williams i40e_flush(hw);
1078fc18eaa0SMitch Williams }
1079805bd5bdSMitch Williams
1080fc18eaa0SMitch Williams /* clear the irq settings */
1081fc18eaa0SMitch Williams for (i = 0; i < msix_vf; i++) {
1082fc18eaa0SMitch Williams /* format is the same for both registers */
1083fc18eaa0SMitch Williams if (0 == i)
1084fc18eaa0SMitch Williams reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
1085fc18eaa0SMitch Williams else
1086fc18eaa0SMitch Williams reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
1087fc18eaa0SMitch Williams (vf->vf_id))
1088fc18eaa0SMitch Williams + (i - 1));
1089fc18eaa0SMitch Williams reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1090fc18eaa0SMitch Williams I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1091fc18eaa0SMitch Williams wr32(hw, reg_idx, reg);
1092fc18eaa0SMitch Williams i40e_flush(hw);
1093fc18eaa0SMitch Williams }
1094b564d62eSMasahiro Yamada /* reset some of the state variables keeping track of the resources */
1095805bd5bdSMitch Williams vf->num_queue_pairs = 0;
109641d0a4d0SAlan Brady clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
109741d0a4d0SAlan Brady clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1098805bd5bdSMitch Williams }
1099805bd5bdSMitch Williams
1100805bd5bdSMitch Williams /**
1101805bd5bdSMitch Williams * i40e_alloc_vf_res
1102b40c82e6SJeff Kirsher * @vf: pointer to the VF info
1103805bd5bdSMitch Williams *
1104b40c82e6SJeff Kirsher * allocate VF resources
1105805bd5bdSMitch Williams **/
1106805bd5bdSMitch Williams static int i40e_alloc_vf_res(struct i40e_vf *vf)
1107805bd5bdSMitch Williams {
1108805bd5bdSMitch Williams struct i40e_pf *pf = vf->pf;
1109805bd5bdSMitch Williams int total_queue_pairs = 0;
1110c27eac48SAvinash Dayanand int ret, idx;
1111805bd5bdSMitch Williams
1112a3f5aa90SAlan Brady if (vf->num_req_queues &&
1113a3f5aa90SAlan Brady vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1114a3f5aa90SAlan Brady pf->num_vf_qps = vf->num_req_queues;
1115a3f5aa90SAlan Brady else
1116a3f5aa90SAlan Brady pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1117a3f5aa90SAlan Brady
1118805bd5bdSMitch Williams /* allocate hw vsi context & associated resources */
1119c27eac48SAvinash Dayanand ret = i40e_alloc_vsi_res(vf, 0);
1120805bd5bdSMitch Williams if (ret)
1121805bd5bdSMitch Williams goto error_alloc;
1122fdf0e0bfSAnjali Singhai Jain total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1123692fb0a7SAnjali Singhai Jain
1124c27eac48SAvinash Dayanand /* allocate additional VSIs based on tc information for ADq */
1125c27eac48SAvinash Dayanand if (vf->adq_enabled) {
1126c27eac48SAvinash Dayanand if (pf->queues_left >=
1127c27eac48SAvinash Dayanand (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1128c27eac48SAvinash Dayanand /* TC 0 always belongs to VF VSI */
1129c27eac48SAvinash Dayanand for (idx = 1; idx < vf->num_tc; idx++) {
1130c27eac48SAvinash Dayanand ret = i40e_alloc_vsi_res(vf, idx);
1131c27eac48SAvinash Dayanand if (ret)
1132c27eac48SAvinash Dayanand goto error_alloc;
1133c27eac48SAvinash Dayanand }
1134c27eac48SAvinash Dayanand /* send correct number of queues */
1135c27eac48SAvinash Dayanand total_queue_pairs = I40E_MAX_VF_QUEUES;
1136c27eac48SAvinash Dayanand } else {
1137c27eac48SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1138c27eac48SAvinash Dayanand vf->vf_id);
1139c27eac48SAvinash Dayanand vf->adq_enabled = false;
1140c27eac48SAvinash Dayanand }
1141c27eac48SAvinash Dayanand }
1142c27eac48SAvinash Dayanand
1143a3f5aa90SAlan Brady /* We account for each VF to get a default number of queue pairs. If
1144a3f5aa90SAlan Brady * the VF has now requested more, we need to account for that to make
1145a3f5aa90SAlan Brady * certain we never request more queues than we actually have left in
1146a3f5aa90SAlan Brady * HW.
1147a3f5aa90SAlan Brady */
1148a3f5aa90SAlan Brady if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1149a3f5aa90SAlan Brady pf->queues_left -=
1150a3f5aa90SAlan Brady total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
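	/* e.g. a VF granted 16 queue pairs against a default of 4 consumes
	 * 12 extra pairs from pf->queues_left here; i40e_free_vf_res()
	 * returns them when the VF is torn down (numbers illustrative).
	 */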
1151a3f5aa90SAlan Brady
1152692fb0a7SAnjali Singhai Jain if (vf->trusted)
1153805bd5bdSMitch Williams set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1154692fb0a7SAnjali Singhai Jain else
1155692fb0a7SAnjali Singhai Jain clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1156805bd5bdSMitch Williams
1157805bd5bdSMitch Williams /* store the total qps number for the runtime
1158b40c82e6SJeff Kirsher * VF req validation
1159805bd5bdSMitch Williams */
1160805bd5bdSMitch Williams vf->num_queue_pairs = total_queue_pairs;
1161805bd5bdSMitch Williams
1162b40c82e6SJeff Kirsher /* VF is now completely initialized */
11636322e63cSJacob Keller set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1164805bd5bdSMitch Williams
1165805bd5bdSMitch Williams error_alloc:
1166805bd5bdSMitch Williams if (ret)
1167805bd5bdSMitch Williams i40e_free_vf_res(vf);
1168805bd5bdSMitch Williams
1169805bd5bdSMitch Williams return ret;
1170805bd5bdSMitch Williams }
1171805bd5bdSMitch Williams
1172fc18eaa0SMitch Williams #define VF_DEVICE_STATUS 0xAA
1173fc18eaa0SMitch Williams #define VF_TRANS_PENDING_MASK 0x20
1174fc18eaa0SMitch Williams /**
1175fc18eaa0SMitch Williams * i40e_quiesce_vf_pci
1176b40c82e6SJeff Kirsher * @vf: pointer to the VF structure
1177fc18eaa0SMitch Williams *
1178fc18eaa0SMitch Williams * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1179fc18eaa0SMitch Williams * if the transactions never clear.
1180fc18eaa0SMitch Williams **/
1181fc18eaa0SMitch Williams static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1182fc18eaa0SMitch Williams {
1183fc18eaa0SMitch Williams struct i40e_pf *pf = vf->pf;
1184fc18eaa0SMitch Williams struct i40e_hw *hw = &pf->hw;
1185fc18eaa0SMitch Williams int vf_abs_id, i;
1186fc18eaa0SMitch Williams u32 reg;
1187fc18eaa0SMitch Williams
1188b141d619SMitch Williams vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1189fc18eaa0SMitch Williams
1190fc18eaa0SMitch Williams wr32(hw, I40E_PF_PCI_CIAA,
1191fc18eaa0SMitch Williams VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
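	/* CIAA selects the VF's PCI config register to shadow into CIAD;
	 * poll the transactions-pending bit for roughly 100 us at most
	 * (100 iterations, 1 us apart) before giving up with -EIO.
	 */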
1192fc18eaa0SMitch Williams for (i = 0; i < 100; i++) {
1193fc18eaa0SMitch Williams reg = rd32(hw, I40E_PF_PCI_CIAD);
1194fc18eaa0SMitch Williams if ((reg & VF_TRANS_PENDING_MASK) == 0)
1195fc18eaa0SMitch Williams return 0;
1196fc18eaa0SMitch Williams udelay(1);
1197fc18eaa0SMitch Williams }
1198fc18eaa0SMitch Williams return -EIO;
1199fc18eaa0SMitch Williams }
1200fc18eaa0SMitch Williams
120137d318d7SAleksandr Loktionov /**
12028b4b0691SStefan Assmann * __i40e_getnum_vf_vsi_vlan_filters
120337d318d7SAleksandr Loktionov * @vsi: pointer to the vsi
120437d318d7SAleksandr Loktionov *
120537d318d7SAleksandr Loktionov * called to get the number of VLANs offloaded on this VF
120637d318d7SAleksandr Loktionov **/
12078b4b0691SStefan Assmann static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
120837d318d7SAleksandr Loktionov {
120937d318d7SAleksandr Loktionov struct i40e_mac_filter *f;
1210e1e1b535SStefan Assmann u16 num_vlans = 0, bkt;
121137d318d7SAleksandr Loktionov
121237d318d7SAleksandr Loktionov hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
121337d318d7SAleksandr Loktionov if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
121437d318d7SAleksandr Loktionov num_vlans++;
121537d318d7SAleksandr Loktionov }
121637d318d7SAleksandr Loktionov
121737d318d7SAleksandr Loktionov return num_vlans;
121837d318d7SAleksandr Loktionov }
121937d318d7SAleksandr Loktionov
122037d318d7SAleksandr Loktionov /**
12218b4b0691SStefan Assmann * i40e_getnum_vf_vsi_vlan_filters
12228b4b0691SStefan Assmann * @vsi: pointer to the vsi
12238b4b0691SStefan Assmann *
12248b4b0691SStefan Assmann * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
12258b4b0691SStefan Assmann **/
12268b4b0691SStefan Assmann static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
12278b4b0691SStefan Assmann {
12288b4b0691SStefan Assmann int num_vlans;
12298b4b0691SStefan Assmann
12308b4b0691SStefan Assmann spin_lock_bh(&vsi->mac_filter_hash_lock);
12318b4b0691SStefan Assmann num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
12328b4b0691SStefan Assmann spin_unlock_bh(&vsi->mac_filter_hash_lock);
12338b4b0691SStefan Assmann
12348b4b0691SStefan Assmann return num_vlans;
12358b4b0691SStefan Assmann }
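/* Locking note: __i40e_getnum_vf_vsi_vlan_filters() expects the caller to
 * already hold mac_filter_hash_lock (as i40e_get_vlan_list_sync() does),
 * while i40e_getnum_vf_vsi_vlan_filters() takes and releases the lock
 * itself and is meant for callers that do not hold it.
 */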
12368b4b0691SStefan Assmann
12378b4b0691SStefan Assmann /**
123837d318d7SAleksandr Loktionov * i40e_get_vlan_list_sync
123937d318d7SAleksandr Loktionov * @vsi: pointer to the VSI
124037d318d7SAleksandr Loktionov * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
124137d318d7SAleksandr Loktionov * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
124237d318d7SAleksandr Loktionov * This array is allocated here, but has to be freed in caller.
124337d318d7SAleksandr Loktionov *
124437d318d7SAleksandr Loktionov * Called to get number of VLANs and VLAN list present in mac_filter_hash.
124537d318d7SAleksandr Loktionov **/
1246e1e1b535SStefan Assmann static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
124737d318d7SAleksandr Loktionov s16 **vlan_list)
124837d318d7SAleksandr Loktionov {
124937d318d7SAleksandr Loktionov struct i40e_mac_filter *f;
125037d318d7SAleksandr Loktionov int i = 0;
125137d318d7SAleksandr Loktionov int bkt;
125237d318d7SAleksandr Loktionov
125337d318d7SAleksandr Loktionov spin_lock_bh(&vsi->mac_filter_hash_lock);
12548b4b0691SStefan Assmann *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
125537d318d7SAleksandr Loktionov *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
125637d318d7SAleksandr Loktionov if (!(*vlan_list))
125737d318d7SAleksandr Loktionov goto err;
125837d318d7SAleksandr Loktionov
125937d318d7SAleksandr Loktionov hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
126037d318d7SAleksandr Loktionov if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
126137d318d7SAleksandr Loktionov continue;
126237d318d7SAleksandr Loktionov (*vlan_list)[i++] = f->vlan;
126337d318d7SAleksandr Loktionov }
126437d318d7SAleksandr Loktionov err:
126537d318d7SAleksandr Loktionov spin_unlock_bh(&vsi->mac_filter_hash_lock);
126637d318d7SAleksandr Loktionov }
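/* Rough caller sketch (mirrors i40e_config_vf_promiscuous_mode() below):
 *
 *	u16 num_vlans;
 *	s16 *vl;
 *
 *	i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
 *	if (!vl)
 *		return -ENOMEM;
 *	... use vl[0 .. num_vlans - 1] ...
 *	kfree(vl);
 */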
126737d318d7SAleksandr Loktionov
126837d318d7SAleksandr Loktionov /**
126937d318d7SAleksandr Loktionov * i40e_set_vsi_promisc
127037d318d7SAleksandr Loktionov * @vf: pointer to the VF struct
127137d318d7SAleksandr Loktionov * @seid: VSI number
127237d318d7SAleksandr Loktionov * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
127337d318d7SAleksandr Loktionov * for a given VLAN
127437d318d7SAleksandr Loktionov * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
127537d318d7SAleksandr Loktionov * for a given VLAN
127637d318d7SAleksandr Loktionov * @vl: List of VLANs - apply filter for given VLANs
127737d318d7SAleksandr Loktionov * @num_vlans: Number of elements in @vl
127837d318d7SAleksandr Loktionov **/
12795180ff13SJan Sokolowski static int
128037d318d7SAleksandr Loktionov i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1281e1e1b535SStefan Assmann bool unicast_enable, s16 *vl, u16 num_vlans)
128237d318d7SAleksandr Loktionov {
128337d318d7SAleksandr Loktionov struct i40e_pf *pf = vf->pf;
128437d318d7SAleksandr Loktionov struct i40e_hw *hw = &pf->hw;
12855180ff13SJan Sokolowski int aq_ret, aq_tmp = 0;
128637d318d7SAleksandr Loktionov int i;
128737d318d7SAleksandr Loktionov
128837d318d7SAleksandr Loktionov /* No VLAN to set promisc on, set on VSI */
128937d318d7SAleksandr Loktionov if (!num_vlans || !vl) {
129037d318d7SAleksandr Loktionov aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
129137d318d7SAleksandr Loktionov multi_enable,
129237d318d7SAleksandr Loktionov NULL);
129337d318d7SAleksandr Loktionov if (aq_ret) {
129437d318d7SAleksandr Loktionov int aq_err = pf->hw.aq.asq_last_status;
129537d318d7SAleksandr Loktionov
129637d318d7SAleksandr Loktionov dev_err(&pf->pdev->dev,
1297d5ba1842SJan Sokolowski "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
129837d318d7SAleksandr Loktionov vf->vf_id,
1299d5ba1842SJan Sokolowski ERR_PTR(aq_ret),
130037d318d7SAleksandr Loktionov i40e_aq_str(&pf->hw, aq_err));
130137d318d7SAleksandr Loktionov
130237d318d7SAleksandr Loktionov return aq_ret;
130337d318d7SAleksandr Loktionov }
130437d318d7SAleksandr Loktionov
130537d318d7SAleksandr Loktionov aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
130637d318d7SAleksandr Loktionov unicast_enable,
130737d318d7SAleksandr Loktionov NULL, true);
130837d318d7SAleksandr Loktionov
130937d318d7SAleksandr Loktionov if (aq_ret) {
131037d318d7SAleksandr Loktionov int aq_err = pf->hw.aq.asq_last_status;
131137d318d7SAleksandr Loktionov
131237d318d7SAleksandr Loktionov dev_err(&pf->pdev->dev,
1313d5ba1842SJan Sokolowski "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
131437d318d7SAleksandr Loktionov vf->vf_id,
1315d5ba1842SJan Sokolowski ERR_PTR(aq_ret),
131637d318d7SAleksandr Loktionov i40e_aq_str(&pf->hw, aq_err));
131737d318d7SAleksandr Loktionov }
131837d318d7SAleksandr Loktionov
131937d318d7SAleksandr Loktionov return aq_ret;
132037d318d7SAleksandr Loktionov }
132137d318d7SAleksandr Loktionov
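	/* Apply the settings per VLAN. The first AQ failure is latched in
	 * aq_tmp and reported to the caller even if later VLANs succeed;
	 * every VLAN in the list is still attempted.
	 */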
132237d318d7SAleksandr Loktionov for (i = 0; i < num_vlans; i++) {
132337d318d7SAleksandr Loktionov aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
132437d318d7SAleksandr Loktionov multi_enable,
132537d318d7SAleksandr Loktionov vl[i], NULL);
132637d318d7SAleksandr Loktionov if (aq_ret) {
132737d318d7SAleksandr Loktionov int aq_err = pf->hw.aq.asq_last_status;
132837d318d7SAleksandr Loktionov
132937d318d7SAleksandr Loktionov dev_err(&pf->pdev->dev,
1330d5ba1842SJan Sokolowski "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
133137d318d7SAleksandr Loktionov vf->vf_id,
1332d5ba1842SJan Sokolowski ERR_PTR(aq_ret),
133337d318d7SAleksandr Loktionov i40e_aq_str(&pf->hw, aq_err));
1334b6f23d38SStefan Assmann
1335b6f23d38SStefan Assmann if (!aq_tmp)
1336b6f23d38SStefan Assmann aq_tmp = aq_ret;
133737d318d7SAleksandr Loktionov }
133837d318d7SAleksandr Loktionov
133937d318d7SAleksandr Loktionov aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
134037d318d7SAleksandr Loktionov unicast_enable,
134137d318d7SAleksandr Loktionov vl[i], NULL);
134237d318d7SAleksandr Loktionov if (aq_ret) {
134337d318d7SAleksandr Loktionov int aq_err = pf->hw.aq.asq_last_status;
134437d318d7SAleksandr Loktionov
134537d318d7SAleksandr Loktionov dev_err(&pf->pdev->dev,
1346d5ba1842SJan Sokolowski "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
134737d318d7SAleksandr Loktionov vf->vf_id,
1348d5ba1842SJan Sokolowski ERR_PTR(aq_ret),
134937d318d7SAleksandr Loktionov i40e_aq_str(&pf->hw, aq_err));
1350b6f23d38SStefan Assmann
1351b6f23d38SStefan Assmann if (!aq_tmp)
1352b6f23d38SStefan Assmann aq_tmp = aq_ret;
135337d318d7SAleksandr Loktionov }
135437d318d7SAleksandr Loktionov }
1355b6f23d38SStefan Assmann
1356b6f23d38SStefan Assmann if (aq_tmp)
1357b6f23d38SStefan Assmann aq_ret = aq_tmp;
1358b6f23d38SStefan Assmann
135937d318d7SAleksandr Loktionov return aq_ret;
136037d318d7SAleksandr Loktionov }
13610ce5233eSMariusz Stachura
13620ce5233eSMariusz Stachura /**
13630ce5233eSMariusz Stachura * i40e_config_vf_promiscuous_mode
13640ce5233eSMariusz Stachura * @vf: pointer to the VF info
13650ce5233eSMariusz Stachura * @vsi_id: VSI id
13660ce5233eSMariusz Stachura * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
13670ce5233eSMariusz Stachura * @alluni: set MAC L2 layer unicast promiscuous enable/disable
13680ce5233eSMariusz Stachura *
13690ce5233eSMariusz Stachura * Called from the VF to configure the promiscuous mode of
13700ce5233eSMariusz Stachura * VF vsis and from the VF reset path to reset promiscuous mode.
13710ce5233eSMariusz Stachura **/
13725180ff13SJan Sokolowski static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
13730ce5233eSMariusz Stachura u16 vsi_id,
13740ce5233eSMariusz Stachura bool allmulti,
13750ce5233eSMariusz Stachura bool alluni)
13760ce5233eSMariusz Stachura {
13770ce5233eSMariusz Stachura struct i40e_pf *pf = vf->pf;
13780ce5233eSMariusz Stachura struct i40e_vsi *vsi;
1379230f3d53SJan Sokolowski int aq_ret = 0;
1380e1e1b535SStefan Assmann u16 num_vlans;
138137d318d7SAleksandr Loktionov s16 *vl;
13820ce5233eSMariusz Stachura
13830ce5233eSMariusz Stachura vsi = i40e_find_vsi_from_id(pf, vsi_id);
13840ce5233eSMariusz Stachura if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1385230f3d53SJan Sokolowski return -EINVAL;
13860ce5233eSMariusz Stachura
13870ce5233eSMariusz Stachura if (vf->port_vlan_id) {
138837d318d7SAleksandr Loktionov aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
138937d318d7SAleksandr Loktionov alluni, &vf->port_vlan_id, 1);
13900ce5233eSMariusz Stachura return aq_ret;
13910ce5233eSMariusz Stachura } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
139237d318d7SAleksandr Loktionov i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
13930ce5233eSMariusz Stachura
139437d318d7SAleksandr Loktionov if (!vl)
1395230f3d53SJan Sokolowski return -ENOMEM;
13960ce5233eSMariusz Stachura
139737d318d7SAleksandr Loktionov aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
139837d318d7SAleksandr Loktionov vl, num_vlans);
139937d318d7SAleksandr Loktionov kfree(vl);
14000ce5233eSMariusz Stachura return aq_ret;
14010ce5233eSMariusz Stachura }
14020ce5233eSMariusz Stachura
140337d318d7SAleksandr Loktionov /* no VLANs to set promisc on, set it on the VSI */
140437d318d7SAleksandr Loktionov aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
140537d318d7SAleksandr Loktionov NULL, 0);
14060ce5233eSMariusz Stachura return aq_ret;
14070ce5233eSMariusz Stachura }
14080ce5233eSMariusz Stachura
1409805bd5bdSMitch Williams /**
14100f344c81SKaren Sornek * i40e_sync_vfr_reset
14110f344c81SKaren Sornek * @hw: pointer to hw struct
14120f344c81SKaren Sornek * @vf_id: VF identifier
14130f344c81SKaren Sornek *
14140f344c81SKaren Sornek * Before triggering a hardware reset, we need to make sure no other process
14150f344c81SKaren Sornek * has reserved the hardware for any reset operations. This check is done by
14160f344c81SKaren Sornek * examining the status of the RSTAT1 register used to signal the reset.
14170f344c81SKaren Sornek **/
14180f344c81SKaren Sornek static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
14190f344c81SKaren Sornek {
14200f344c81SKaren Sornek u32 reg;
14210f344c81SKaren Sornek int i;
14220f344c81SKaren Sornek
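	/* Poll the ADMINQ bit in VFINT_ICR0_ENA, which the caller treats as
	 * the "previous VF reset has been handled" signal; give up with
	 * -EAGAIN after I40E_VFR_WAIT_COUNT polls spaced 100-200 us apart.
	 */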
14230f344c81SKaren Sornek for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
14240f344c81SKaren Sornek reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
14250f344c81SKaren Sornek I40E_VFINT_ICR0_ADMINQ_MASK;
14260f344c81SKaren Sornek if (reg)
14270f344c81SKaren Sornek return 0;
14280f344c81SKaren Sornek
14290f344c81SKaren Sornek usleep_range(100, 200);
14300f344c81SKaren Sornek }
14310f344c81SKaren Sornek
14320f344c81SKaren Sornek return -EAGAIN;
14330f344c81SKaren Sornek }
14340f344c81SKaren Sornek
14350f344c81SKaren Sornek /**
14369dc2e417SJacob Keller * i40e_trigger_vf_reset
1437b40c82e6SJeff Kirsher * @vf: pointer to the VF structure
14385c3c48acSJesse Brandeburg * @flr: VFLR was issued or not
14395c3c48acSJesse Brandeburg *
14409dc2e417SJacob Keller * Trigger hardware to start a reset for a particular VF. Expects the caller
14419dc2e417SJacob Keller * to wait the proper amount of time to allow hardware to reset the VF before
14429dc2e417SJacob Keller * it cleans up and restores VF functionality.
14435c3c48acSJesse Brandeburg **/
14449dc2e417SJacob Keller static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
14455c3c48acSJesse Brandeburg {
14465c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
14475c3c48acSJesse Brandeburg struct i40e_hw *hw = &pf->hw;
14487e5a313eSMitch Williams u32 reg, reg_idx, bit_idx;
14490f344c81SKaren Sornek bool vf_active;
14500f344c81SKaren Sornek u32 radq;
14513ba9bcb4SMitch Williams
14525c3c48acSJesse Brandeburg /* warn the VF */
14530f344c81SKaren Sornek vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
14545c3c48acSJesse Brandeburg
1455beff3e9dSRobert Konklewski /* Disable VF's configuration API during reset. The flag is re-enabled
1456beff3e9dSRobert Konklewski * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1457beff3e9dSRobert Konklewski * It's normally disabled in i40e_free_vf_res(), but it's safer
1458beff3e9dSRobert Konklewski * to do it earlier to give some time to finish to any VF config
1459beff3e9dSRobert Konklewski * functions that may still be running at this point.
1460beff3e9dSRobert Konklewski */
14616322e63cSJacob Keller clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1462beff3e9dSRobert Konklewski
1463fc18eaa0SMitch Williams /* In the case of a VFLR, the HW has already reset the VF and we
1464fc18eaa0SMitch Williams * just need to clean up, so don't hit the VFRTRIG register.
14655c3c48acSJesse Brandeburg */
14665c3c48acSJesse Brandeburg if (!flr) {
14670f344c81SKaren Sornek /* Sync the previous VFR reset before triggering the next one */
14680f344c81SKaren Sornek radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
14690f344c81SKaren Sornek I40E_VFINT_ICR0_ADMINQ_MASK;
14700f344c81SKaren Sornek if (vf_active && !radq)
14710f344c81SKaren Sornek /* wait for the VF driver to finish handling the reset */
14720f344c81SKaren Sornek if (i40e_sync_vfr_reset(hw, vf->vf_id))
14730f344c81SKaren Sornek dev_info(&pf->pdev->dev,
14740f344c81SKaren Sornek "Reset VF %d never finished\n",
14750f344c81SKaren Sornek vf->vf_id);
14760f344c81SKaren Sornek
14770f344c81SKaren Sornek /* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
14780f344c81SKaren Sornek * the reset-in-progress state in the RSTAT1 register.
14790f344c81SKaren Sornek */
1480fc18eaa0SMitch Williams reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1481fc18eaa0SMitch Williams reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
14825c3c48acSJesse Brandeburg wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
14835c3c48acSJesse Brandeburg i40e_flush(hw);
14845c3c48acSJesse Brandeburg }
14857369ca87SMitch Williams /* clear the VFLR bit in GLGEN_VFLRSTAT */
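	/* e.g. with vf_base_id 64 and vf_id 3, absolute VF 67 maps to
	 * GLGEN_VFLRSTAT register index 2, bit 3 (numbers illustrative).
	 */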
14867369ca87SMitch Williams reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
14877369ca87SMitch Williams bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
14887369ca87SMitch Williams wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
148930728c5bSAkeem G Abodunrin i40e_flush(hw);
14905c3c48acSJesse Brandeburg
1491fc18eaa0SMitch Williams if (i40e_quiesce_vf_pci(vf))
1492fc18eaa0SMitch Williams dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1493fc18eaa0SMitch Williams vf->vf_id);
14945c3c48acSJesse Brandeburg }
14955c3c48acSJesse Brandeburg
14969dc2e417SJacob Keller /**
14979dc2e417SJacob Keller * i40e_cleanup_reset_vf
14989dc2e417SJacob Keller * @vf: pointer to the VF structure
14999dc2e417SJacob Keller *
15009dc2e417SJacob Keller * Cleanup a VF after the hardware reset is finished. Expects the caller to
15019dc2e417SJacob Keller * have verified whether the reset is finished properly, and ensure the
15029dc2e417SJacob Keller * minimum amount of wait time has passed.
15039dc2e417SJacob Keller **/
15049dc2e417SJacob Keller static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
15059dc2e417SJacob Keller {
15069dc2e417SJacob Keller struct i40e_pf *pf = vf->pf;
15079dc2e417SJacob Keller struct i40e_hw *hw = &pf->hw;
15089dc2e417SJacob Keller u32 reg;
150957175ac1SAnjali Singhai Jain
15100ce5233eSMariusz Stachura /* disable promisc modes in case they were enabled */
15110ce5233eSMariusz Stachura i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
15120ce5233eSMariusz Stachura
1513beff3e9dSRobert Konklewski /* free VF resources to begin resetting the VSI state */
1514fc18eaa0SMitch Williams i40e_free_vf_res(vf);
1515beff3e9dSRobert Konklewski
1516beff3e9dSRobert Konklewski /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1517beff3e9dSRobert Konklewski * By doing this we allow HW to access VF memory at any point. If we
1518beff3e9dSRobert Konklewski * did it any sooner, HW could access memory while it was being freed
1519beff3e9dSRobert Konklewski * in i40e_free_vf_res(), causing an IOMMU fault.
1520beff3e9dSRobert Konklewski *
1521beff3e9dSRobert Konklewski * On the other hand, this needs to be done ASAP, because the VF driver
1522beff3e9dSRobert Konklewski * is waiting for this to happen and may report a timeout. It's
1523beff3e9dSRobert Konklewski * harmless, but it gets logged into Guest OS kernel log, so best avoid
1524beff3e9dSRobert Konklewski * it.
1525beff3e9dSRobert Konklewski */
1526beff3e9dSRobert Konklewski reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1527beff3e9dSRobert Konklewski reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1528beff3e9dSRobert Konklewski wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1529beff3e9dSRobert Konklewski
1530beff3e9dSRobert Konklewski /* reallocate VF resources to finish resetting the VSI state */
153121be99ecSMitch Williams if (!i40e_alloc_vf_res(vf)) {
1532e3219ce6SAnjali Singhai Jain int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1533fc18eaa0SMitch Williams i40e_enable_vf_mappings(vf);
15346322e63cSJacob Keller set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
15356322e63cSJacob Keller clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
15366a23449aSAvinash Dayanand /* Do not notify the client during VF init */
1537c53d11f6SAlan Brady if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
15381b484370SJacob Keller &vf->vf_states))
1539e3219ce6SAnjali Singhai Jain i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1540dc5b4e9fSCatherine Sullivan vf->num_vlan = 0;
154121be99ecSMitch Williams }
1542beff3e9dSRobert Konklewski
1543beff3e9dSRobert Konklewski /* Tell the VF driver the reset is done. This needs to be done only
1544beff3e9dSRobert Konklewski * after VF has been fully initialized, because the VF driver may
1545beff3e9dSRobert Konklewski * request resources immediately after setting this flag.
1546beff3e9dSRobert Konklewski */
1547310a2ad9SJesse Brandeburg wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
15489dc2e417SJacob Keller }
15499dc2e417SJacob Keller
15509dc2e417SJacob Keller /**
15519dc2e417SJacob Keller * i40e_reset_vf
15529dc2e417SJacob Keller * @vf: pointer to the VF structure
15539dc2e417SJacob Keller * @flr: VFLR was issued or not
15549dc2e417SJacob Keller *
15552980cbd4SSylwester Dziedziuch * Returns true if the VF is already in reset, resets successfully, or resets
15562980cbd4SSylwester Dziedziuch * are disabled; false otherwise.
15579dc2e417SJacob Keller **/
1558d43d60e5SJacob Keller bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
15599dc2e417SJacob Keller {
15609dc2e417SJacob Keller struct i40e_pf *pf = vf->pf;
15619dc2e417SJacob Keller struct i40e_hw *hw = &pf->hw;
15629dc2e417SJacob Keller bool rsd = false;
15639dc2e417SJacob Keller u32 reg;
15649dc2e417SJacob Keller int i;
15659dc2e417SJacob Keller
15662980cbd4SSylwester Dziedziuch if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
15672980cbd4SSylwester Dziedziuch return true;
15682980cbd4SSylwester Dziedziuch
156952424f97SSylwester Dziedziuch /* Bail out if VFs are disabled. */
157052424f97SSylwester Dziedziuch if (test_bit(__I40E_VF_DISABLE, pf->state))
157152424f97SSylwester Dziedziuch return true;
157252424f97SSylwester Dziedziuch
157352424f97SSylwester Dziedziuch /* If VF is being reset already we don't need to continue. */
157452424f97SSylwester Dziedziuch if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
15752980cbd4SSylwester Dziedziuch return true;
15769dc2e417SJacob Keller
15779dc2e417SJacob Keller i40e_trigger_vf_reset(vf, flr);
15789dc2e417SJacob Keller
15799dc2e417SJacob Keller /* poll VPGEN_VFRSTAT reg to make sure
15809dc2e417SJacob Keller * that reset is complete
15819dc2e417SJacob Keller */
15829dc2e417SJacob Keller for (i = 0; i < 10; i++) {
15839dc2e417SJacob Keller /* VF reset requires driver to first reset the VF and then
15849dc2e417SJacob Keller * poll the status register to make sure that the reset
15859dc2e417SJacob Keller * completed successfully. Due to internal HW FIFO flushes,
15869dc2e417SJacob Keller * we must wait 10ms before the register will be valid.
15879dc2e417SJacob Keller */
15889dc2e417SJacob Keller usleep_range(10000, 20000);
15899dc2e417SJacob Keller reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
15909dc2e417SJacob Keller if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
15919dc2e417SJacob Keller rsd = true;
15929dc2e417SJacob Keller break;
15939dc2e417SJacob Keller }
15949dc2e417SJacob Keller }
15959dc2e417SJacob Keller
15969dc2e417SJacob Keller if (flr)
15979dc2e417SJacob Keller usleep_range(10000, 20000);
15989dc2e417SJacob Keller
15999dc2e417SJacob Keller if (!rsd)
16009dc2e417SJacob Keller dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
16019dc2e417SJacob Keller vf->vf_id);
16029dc2e417SJacob Keller usleep_range(10000, 20000);
16039dc2e417SJacob Keller
16049dc2e417SJacob Keller /* On initial reset, we don't have any queues to disable */
16059dc2e417SJacob Keller if (vf->lan_vsi_idx != 0)
16069dc2e417SJacob Keller i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
16079dc2e417SJacob Keller
16089dc2e417SJacob Keller i40e_cleanup_reset_vf(vf);
16097e5a313eSMitch Williams
1610fc18eaa0SMitch Williams i40e_flush(hw);
161108501970SSylwester Dziedziuch usleep_range(20000, 40000);
161252424f97SSylwester Dziedziuch clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
1613d43d60e5SJacob Keller
1614d43d60e5SJacob Keller return true;
16155c3c48acSJesse Brandeburg }
1616c354229fSGreg Rose
1617c354229fSGreg Rose /**
1618e4b433f4SJacob Keller * i40e_reset_all_vfs
1619e4b433f4SJacob Keller * @pf: pointer to the PF structure
1620e4b433f4SJacob Keller * @flr: VFLR was issued or not
1621e4b433f4SJacob Keller *
1622e4b433f4SJacob Keller * Reset all allocated VFs in one go. First, tell the hardware to reset each
1623e4b433f4SJacob Keller * VF, then do all the waiting in one chunk, and finally finish restoring each
1624e4b433f4SJacob Keller * VF after the wait. This is useful during PF routines which need to reset
1625e4b433f4SJacob Keller * all VFs, as otherwise it must perform these resets in a serialized fashion.
1626d43d60e5SJacob Keller *
1627d43d60e5SJacob Keller * Returns true if any VFs were reset, and false otherwise.
1628e4b433f4SJacob Keller **/
1629d43d60e5SJacob Keller bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1630e4b433f4SJacob Keller {
1631e4b433f4SJacob Keller struct i40e_hw *hw = &pf->hw;
1632e4b433f4SJacob Keller struct i40e_vf *vf;
1633e4b433f4SJacob Keller u32 reg;
16340dcf573fSAleksandr Loktionov int i;
1635e4b433f4SJacob Keller
1636e4b433f4SJacob Keller /* If we don't have any VFs, then there is nothing to reset */
1637e4b433f4SJacob Keller if (!pf->num_alloc_vfs)
1638d43d60e5SJacob Keller return false;
1639e4b433f4SJacob Keller
1640e4b433f4SJacob Keller /* If VFs have been disabled, there is no need to reset */
16410da36b97SJacob Keller if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1642d43d60e5SJacob Keller return false;
1643e4b433f4SJacob Keller
1644e4b433f4SJacob Keller /* Begin reset on all VFs at once */
16450dcf573fSAleksandr Loktionov for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
164652424f97SSylwester Dziedziuch /* If VF is being reset no need to trigger reset again */
164752424f97SSylwester Dziedziuch if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
16480dcf573fSAleksandr Loktionov i40e_trigger_vf_reset(vf, flr);
164952424f97SSylwester Dziedziuch }
1650e4b433f4SJacob Keller
1651e4b433f4SJacob Keller /* HW requires some time to make sure it can flush the FIFO for a VF
1652e4b433f4SJacob Keller * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1653e4b433f4SJacob Keller * sequence to make sure that it has completed. We'll keep track of
1654e4b433f4SJacob Keller * the VFs using a simple iterator that increments once that VF has
1655e4b433f4SJacob Keller * finished resetting.
1656e4b433f4SJacob Keller */
16570dcf573fSAleksandr Loktionov for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
1658e4b433f4SJacob Keller usleep_range(10000, 20000);
1659e4b433f4SJacob Keller
1660e4b433f4SJacob Keller /* Check each VF in sequence, beginning with the VF to fail
1661e4b433f4SJacob Keller * the previous check.
1662e4b433f4SJacob Keller */
16630dcf573fSAleksandr Loktionov while (vf < &pf->vf[pf->num_alloc_vfs]) {
166452424f97SSylwester Dziedziuch if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
1665e4b433f4SJacob Keller reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1666e4b433f4SJacob Keller if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1667e4b433f4SJacob Keller break;
166852424f97SSylwester Dziedziuch }
1669e4b433f4SJacob Keller
1670e4b433f4SJacob Keller /* If the current VF has finished resetting, move on
1671e4b433f4SJacob Keller * to the next VF in sequence.
1672e4b433f4SJacob Keller */
16730dcf573fSAleksandr Loktionov ++vf;
1674e4b433f4SJacob Keller }
1675e4b433f4SJacob Keller }
1676e4b433f4SJacob Keller
1677e4b433f4SJacob Keller if (flr)
1678e4b433f4SJacob Keller usleep_range(10000, 20000);
1679e4b433f4SJacob Keller
1680e4b433f4SJacob Keller /* Display a warning if at least one VF didn't manage to reset in
1681e4b433f4SJacob Keller * time, but continue on with the operation.
1682e4b433f4SJacob Keller */
16830dcf573fSAleksandr Loktionov if (vf < &pf->vf[pf->num_alloc_vfs])
1684e4b433f4SJacob Keller dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
16850dcf573fSAleksandr Loktionov vf->vf_id);
1686e4b433f4SJacob Keller usleep_range(10000, 20000);
1687e4b433f4SJacob Keller
1688e4b433f4SJacob Keller /* Begin disabling all the rings associated with VFs, but do not wait
1689e4b433f4SJacob Keller * between each VF.
1690e4b433f4SJacob Keller */
16910dcf573fSAleksandr Loktionov for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1692e4b433f4SJacob Keller /* On initial reset, we don't have any queues to disable */
16930dcf573fSAleksandr Loktionov if (vf->lan_vsi_idx == 0)
1694e4b433f4SJacob Keller continue;
1695e4b433f4SJacob Keller
169652424f97SSylwester Dziedziuch /* If VF is reset in another thread just continue */
169752424f97SSylwester Dziedziuch if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
169852424f97SSylwester Dziedziuch continue;
169952424f97SSylwester Dziedziuch
17000dcf573fSAleksandr Loktionov i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
1701e4b433f4SJacob Keller }
1702e4b433f4SJacob Keller
1703e4b433f4SJacob Keller /* Now that we've notified HW to disable all of the VF rings, wait
1704e4b433f4SJacob Keller * until they finish.
1705e4b433f4SJacob Keller */
17060dcf573fSAleksandr Loktionov for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
1707e4b433f4SJacob Keller /* On initial reset, we don't have any queues to disable */
17080dcf573fSAleksandr Loktionov if (vf->lan_vsi_idx == 0)
1709e4b433f4SJacob Keller continue;
1710e4b433f4SJacob Keller
171152424f97SSylwester Dziedziuch /* If VF is reset in another thread just continue */
171252424f97SSylwester Dziedziuch if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
171352424f97SSylwester Dziedziuch continue;
171452424f97SSylwester Dziedziuch
17150dcf573fSAleksandr Loktionov i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
1716e4b433f4SJacob Keller }
1717e4b433f4SJacob Keller
1718e4b433f4SJacob Keller /* Hw may need up to 50ms to finish disabling the RX queues. We
1719e4b433f4SJacob Keller * minimize the wait by delaying only once for all VFs.
1720e4b433f4SJacob Keller */
1721e4b433f4SJacob Keller mdelay(50);
1722e4b433f4SJacob Keller
1723e4b433f4SJacob Keller /* Finish the reset on each VF */
17240dcf573fSAleksandr Loktionov for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
172552424f97SSylwester Dziedziuch /* If VF is reset in another thread just continue */
172652424f97SSylwester Dziedziuch if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
172752424f97SSylwester Dziedziuch continue;
172852424f97SSylwester Dziedziuch
17290dcf573fSAleksandr Loktionov i40e_cleanup_reset_vf(vf);
173052424f97SSylwester Dziedziuch }
1731e4b433f4SJacob Keller
1732e4b433f4SJacob Keller i40e_flush(hw);
173308501970SSylwester Dziedziuch usleep_range(20000, 40000);
17340da36b97SJacob Keller clear_bit(__I40E_VF_DISABLE, pf->state);
1735d43d60e5SJacob Keller
1736d43d60e5SJacob Keller return true;
1737e4b433f4SJacob Keller }
1738e4b433f4SJacob Keller
1739e4b433f4SJacob Keller /**
17405c3c48acSJesse Brandeburg * i40e_free_vfs
1741b40c82e6SJeff Kirsher * @pf: pointer to the PF structure
17425c3c48acSJesse Brandeburg *
1743b40c82e6SJeff Kirsher * free VF resources
17445c3c48acSJesse Brandeburg **/
17455c3c48acSJesse Brandeburg void i40e_free_vfs(struct i40e_pf *pf)
17465c3c48acSJesse Brandeburg {
1747f7414531SMitch Williams struct i40e_hw *hw = &pf->hw;
1748f7414531SMitch Williams u32 reg_idx, bit_idx;
1749f7414531SMitch Williams int i, tmp, vf_id;
17505c3c48acSJesse Brandeburg
17515c3c48acSJesse Brandeburg if (!pf->vf)
17525c3c48acSJesse Brandeburg return;
1753347b5650SEryk Rybak
1754347b5650SEryk Rybak set_bit(__I40E_VFS_RELEASING, pf->state);
17550da36b97SJacob Keller while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
17563ba9bcb4SMitch Williams usleep_range(1000, 2000);
17575c3c48acSJesse Brandeburg
1758e3219ce6SAnjali Singhai Jain i40e_notify_client_of_vf_enable(pf, 0);
1759707d088aSJacob Keller
17602980cbd4SSylwester Dziedziuch /* Disable IOV before freeing resources. This lets any VF drivers
17612980cbd4SSylwester Dziedziuch * running in the host get themselves cleaned up before we yank
17622980cbd4SSylwester Dziedziuch * the carpet out from underneath their feet.
17632980cbd4SSylwester Dziedziuch */
17642980cbd4SSylwester Dziedziuch if (!pci_vfs_assigned(pf->pdev))
17652980cbd4SSylwester Dziedziuch pci_disable_sriov(pf->pdev);
17662980cbd4SSylwester Dziedziuch else
17672980cbd4SSylwester Dziedziuch dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
17682980cbd4SSylwester Dziedziuch
1769707d088aSJacob Keller /* Amortize wait time by stopping all VFs at the same time */
1770707d088aSJacob Keller for (i = 0; i < pf->num_alloc_vfs; i++) {
17716322e63cSJacob Keller if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1772707d088aSJacob Keller continue;
1773707d088aSJacob Keller
1774707d088aSJacob Keller i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1775707d088aSJacob Keller }
1776707d088aSJacob Keller
1777707d088aSJacob Keller for (i = 0; i < pf->num_alloc_vfs; i++) {
1778707d088aSJacob Keller if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1779707d088aSJacob Keller continue;
1780707d088aSJacob Keller
1781707d088aSJacob Keller i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1782707d088aSJacob Keller }
17830325fca7SMitch Williams
1784b40c82e6SJeff Kirsher /* free up VF resources */
17856c1b5bffSMitch Williams tmp = pf->num_alloc_vfs;
17866c1b5bffSMitch Williams pf->num_alloc_vfs = 0;
17876c1b5bffSMitch Williams for (i = 0; i < tmp; i++) {
17886322e63cSJacob Keller if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
17895c3c48acSJesse Brandeburg i40e_free_vf_res(&pf->vf[i]);
17905c3c48acSJesse Brandeburg /* disable qp mappings */
17915c3c48acSJesse Brandeburg i40e_disable_vf_mappings(&pf->vf[i]);
17925c3c48acSJesse Brandeburg }
17935c3c48acSJesse Brandeburg
17945c3c48acSJesse Brandeburg kfree(pf->vf);
17955c3c48acSJesse Brandeburg pf->vf = NULL;
17965c3c48acSJesse Brandeburg
17979e5634dfSMitch Williams /* This check is for when the driver is unloaded while VFs are
17989e5634dfSMitch Williams * assigned. Setting the number of VFs to 0 through sysfs is caught
17999e5634dfSMitch Williams * before this function ever gets called.
18009e5634dfSMitch Williams */
1801c24817b6SEthan Zhao if (!pci_vfs_assigned(pf->pdev)) {
1802f7414531SMitch Williams /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
1803f7414531SMitch Williams * work correctly when SR-IOV gets re-enabled.
1804f7414531SMitch Williams */
1805f7414531SMitch Williams for (vf_id = 0; vf_id < tmp; vf_id++) {
1806f7414531SMitch Williams reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1807f7414531SMitch Williams bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
180841a1d04bSJesse Brandeburg wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1809f7414531SMitch Williams }
1810c354229fSGreg Rose }
18110da36b97SJacob Keller clear_bit(__I40E_VF_DISABLE, pf->state);
1812347b5650SEryk Rybak clear_bit(__I40E_VFS_RELEASING, pf->state);
18135c3c48acSJesse Brandeburg }
18145c3c48acSJesse Brandeburg
18155c3c48acSJesse Brandeburg #ifdef CONFIG_PCI_IOV
18165c3c48acSJesse Brandeburg /**
18175c3c48acSJesse Brandeburg * i40e_alloc_vfs
1818b40c82e6SJeff Kirsher * @pf: pointer to the PF structure
1819b40c82e6SJeff Kirsher * @num_alloc_vfs: number of VFs to allocate
18205c3c48acSJesse Brandeburg *
1821b40c82e6SJeff Kirsher * allocate VF resources
18225c3c48acSJesse Brandeburg **/
18234aeec010SMitch Williams int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
18245c3c48acSJesse Brandeburg {
18255c3c48acSJesse Brandeburg struct i40e_vf *vfs;
18265c3c48acSJesse Brandeburg int i, ret = 0;
18275c3c48acSJesse Brandeburg
18286c1b5bffSMitch Williams /* Disable interrupt 0 so we don't try to handle the VFLR. */
18292ef28cfbSMitch Williams i40e_irq_dynamic_disable_icr0(pf);
18302ef28cfbSMitch Williams
18314aeec010SMitch Williams /* Check to see if we're just allocating resources for extant VFs */
18324aeec010SMitch Williams if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
18335c3c48acSJesse Brandeburg ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
18345c3c48acSJesse Brandeburg if (ret) {
1835de445b3dSAkeem G Abodunrin pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
18365c3c48acSJesse Brandeburg pf->num_alloc_vfs = 0;
18375c3c48acSJesse Brandeburg goto err_iov;
18385c3c48acSJesse Brandeburg }
18394aeec010SMitch Williams }
18405c3c48acSJesse Brandeburg /* allocate memory */
1841cc6456afSAkeem G Abodunrin vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
18425c3c48acSJesse Brandeburg if (!vfs) {
18435c3c48acSJesse Brandeburg ret = -ENOMEM;
18445c3c48acSJesse Brandeburg goto err_alloc;
18455c3c48acSJesse Brandeburg }
1846c674d125SMitch Williams pf->vf = vfs;
18475c3c48acSJesse Brandeburg
18485c3c48acSJesse Brandeburg /* apply default profile */
18495c3c48acSJesse Brandeburg for (i = 0; i < num_alloc_vfs; i++) {
18505c3c48acSJesse Brandeburg vfs[i].pf = pf;
18515c3c48acSJesse Brandeburg vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
18525c3c48acSJesse Brandeburg vfs[i].vf_id = i;
18535c3c48acSJesse Brandeburg
18545c3c48acSJesse Brandeburg /* assign default capabilities */
18555c3c48acSJesse Brandeburg set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1856c674d125SMitch Williams vfs[i].spoofchk = true;
18571b484370SJacob Keller
18581b484370SJacob Keller set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
18595c3c48acSJesse Brandeburg
18605c3c48acSJesse Brandeburg }
18615c3c48acSJesse Brandeburg pf->num_alloc_vfs = num_alloc_vfs;
18625c3c48acSJesse Brandeburg
18631b484370SJacob Keller /* VF resources get allocated during reset */
18641b484370SJacob Keller i40e_reset_all_vfs(pf, false);
18651b484370SJacob Keller
18666a23449aSAvinash Dayanand i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
18676a23449aSAvinash Dayanand
18685c3c48acSJesse Brandeburg err_alloc:
18695c3c48acSJesse Brandeburg if (ret)
18705c3c48acSJesse Brandeburg i40e_free_vfs(pf);
18715c3c48acSJesse Brandeburg err_iov:
18726c1b5bffSMitch Williams /* Re-enable interrupt 0. */
1873dbadbbe2SJacob Keller i40e_irq_dynamic_enable_icr0(pf);
18745c3c48acSJesse Brandeburg return ret;
18755c3c48acSJesse Brandeburg }
18765c3c48acSJesse Brandeburg
18775c3c48acSJesse Brandeburg #endif
18785c3c48acSJesse Brandeburg /**
18795c3c48acSJesse Brandeburg * i40e_pci_sriov_enable
18805c3c48acSJesse Brandeburg * @pdev: pointer to a pci_dev structure
1881b40c82e6SJeff Kirsher * @num_vfs: number of VFs to allocate
18825c3c48acSJesse Brandeburg *
18835c3c48acSJesse Brandeburg * Enable or change the number of VFs
18845c3c48acSJesse Brandeburg **/
18855c3c48acSJesse Brandeburg static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
18865c3c48acSJesse Brandeburg {
18875c3c48acSJesse Brandeburg #ifdef CONFIG_PCI_IOV
18885c3c48acSJesse Brandeburg struct i40e_pf *pf = pci_get_drvdata(pdev);
18895c3c48acSJesse Brandeburg int pre_existing_vfs = pci_num_vf(pdev);
18905c3c48acSJesse Brandeburg int err = 0;
18915c3c48acSJesse Brandeburg
18920da36b97SJacob Keller if (test_bit(__I40E_TESTING, pf->state)) {
1893e17bc411SGreg Rose dev_warn(&pdev->dev,
1894e17bc411SGreg Rose "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1895e17bc411SGreg Rose err = -EPERM;
1896e17bc411SGreg Rose goto err_out;
1897e17bc411SGreg Rose }
1898e17bc411SGreg Rose
18995c3c48acSJesse Brandeburg if (pre_existing_vfs && pre_existing_vfs != num_vfs)
19005c3c48acSJesse Brandeburg i40e_free_vfs(pf);
19015c3c48acSJesse Brandeburg else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
19025c3c48acSJesse Brandeburg goto out;
19035c3c48acSJesse Brandeburg
19045c3c48acSJesse Brandeburg if (num_vfs > pf->num_req_vfs) {
190596c8d073SMitch Williams dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
190696c8d073SMitch Williams num_vfs, pf->num_req_vfs);
19075c3c48acSJesse Brandeburg err = -EPERM;
19085c3c48acSJesse Brandeburg goto err_out;
19095c3c48acSJesse Brandeburg }
19105c3c48acSJesse Brandeburg
191196c8d073SMitch Williams dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
19125c3c48acSJesse Brandeburg err = i40e_alloc_vfs(pf, num_vfs);
19135c3c48acSJesse Brandeburg if (err) {
19145c3c48acSJesse Brandeburg dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
19155c3c48acSJesse Brandeburg goto err_out;
19165c3c48acSJesse Brandeburg }
19175c3c48acSJesse Brandeburg
19185c3c48acSJesse Brandeburg out:
19195c3c48acSJesse Brandeburg return num_vfs;
19205c3c48acSJesse Brandeburg
19215c3c48acSJesse Brandeburg err_out:
19225c3c48acSJesse Brandeburg return err;
19235c3c48acSJesse Brandeburg #endif
19245c3c48acSJesse Brandeburg return 0;
19255c3c48acSJesse Brandeburg }
19265c3c48acSJesse Brandeburg
19275c3c48acSJesse Brandeburg /**
19285c3c48acSJesse Brandeburg * i40e_pci_sriov_configure
19295c3c48acSJesse Brandeburg * @pdev: pointer to a pci_dev structure
1930b40c82e6SJeff Kirsher * @num_vfs: number of VFs to allocate
19315c3c48acSJesse Brandeburg *
19325c3c48acSJesse Brandeburg * Enable or change the number of VFs. Called when the user updates the number
19335c3c48acSJesse Brandeburg * of VFs in sysfs.
19345c3c48acSJesse Brandeburg **/
19355c3c48acSJesse Brandeburg int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
19365c3c48acSJesse Brandeburg {
19375c3c48acSJesse Brandeburg struct i40e_pf *pf = pci_get_drvdata(pdev);
1938f5a7b21bSJan Sokolowski int ret = 0;
1939f5a7b21bSJan Sokolowski
1940f5a7b21bSJan Sokolowski if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1941f5a7b21bSJan Sokolowski dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1942f5a7b21bSJan Sokolowski return -EAGAIN;
1943f5a7b21bSJan Sokolowski }
19445c3c48acSJesse Brandeburg
1945fc60861eSAnjali Singhai Jain if (num_vfs) {
1946fc60861eSAnjali Singhai Jain if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1947fc60861eSAnjali Singhai Jain pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
19483ac874faSSylwester Dziedziuch i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1949fc60861eSAnjali Singhai Jain }
1950f5a7b21bSJan Sokolowski ret = i40e_pci_sriov_enable(pdev, num_vfs);
1951f5a7b21bSJan Sokolowski goto sriov_configure_out;
1952fc60861eSAnjali Singhai Jain }
19535c3c48acSJesse Brandeburg
1954c24817b6SEthan Zhao if (!pci_vfs_assigned(pf->pdev)) {
19555c3c48acSJesse Brandeburg i40e_free_vfs(pf);
1956fc60861eSAnjali Singhai Jain pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
19573ac874faSSylwester Dziedziuch i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
19589e5634dfSMitch Williams } else {
19599e5634dfSMitch Williams dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1960f5a7b21bSJan Sokolowski ret = -EINVAL;
1961f5a7b21bSJan Sokolowski goto sriov_configure_out;
19629e5634dfSMitch Williams }
1963f5a7b21bSJan Sokolowski sriov_configure_out:
1964f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1965f5a7b21bSJan Sokolowski return ret;
19665c3c48acSJesse Brandeburg }
19675c3c48acSJesse Brandeburg
19685c3c48acSJesse Brandeburg /***********************virtual channel routines******************/
19695c3c48acSJesse Brandeburg
19705c3c48acSJesse Brandeburg /**
19715710ab79SJacob Keller * i40e_vc_send_msg_to_vf
1972b40c82e6SJeff Kirsher * @vf: pointer to the VF info
19735c3c48acSJesse Brandeburg * @v_opcode: virtual channel opcode
19745c3c48acSJesse Brandeburg * @v_retval: virtual channel return value
19755c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
19765c3c48acSJesse Brandeburg * @msglen: msg length
19775c3c48acSJesse Brandeburg *
1978b40c82e6SJeff Kirsher * send msg to VF
19795c3c48acSJesse Brandeburg **/
19805710ab79SJacob Keller static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
19815710ab79SJacob Keller u32 v_retval, u8 *msg, u16 msglen)
19825c3c48acSJesse Brandeburg {
19836e7b5bd3SAnjali Singhai Jain struct i40e_pf *pf;
19846e7b5bd3SAnjali Singhai Jain struct i40e_hw *hw;
19856e7b5bd3SAnjali Singhai Jain int abs_vf_id;
19865180ff13SJan Sokolowski int aq_ret;
19875c3c48acSJesse Brandeburg
19886e7b5bd3SAnjali Singhai Jain /* validate the request */
19896e7b5bd3SAnjali Singhai Jain if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
19906e7b5bd3SAnjali Singhai Jain return -EINVAL;
19916e7b5bd3SAnjali Singhai Jain
19926e7b5bd3SAnjali Singhai Jain pf = vf->pf;
19936e7b5bd3SAnjali Singhai Jain hw = &pf->hw;
19946e7b5bd3SAnjali Singhai Jain abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
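	/* AQ messages are addressed by the absolute VF id: the PF-relative
	 * vf_id offset by vf_base_id from the HW function capabilities.
	 */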
19956e7b5bd3SAnjali Singhai Jain
1996f19efbb5SAshish Shah aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
19975c3c48acSJesse Brandeburg msg, msglen, NULL);
19985c3c48acSJesse Brandeburg if (aq_ret) {
199918b7af57SMitch Williams dev_info(&pf->pdev->dev,
20005c3c48acSJesse Brandeburg "Unable to send the message to VF %d aq_err %d\n",
20015c3c48acSJesse Brandeburg vf->vf_id, pf->hw.aq.asq_last_status);
20025c3c48acSJesse Brandeburg return -EIO;
20035c3c48acSJesse Brandeburg }
20045c3c48acSJesse Brandeburg
20055c3c48acSJesse Brandeburg return 0;
20065c3c48acSJesse Brandeburg }
20075c3c48acSJesse Brandeburg
20085c3c48acSJesse Brandeburg /**
20095c3c48acSJesse Brandeburg * i40e_vc_send_resp_to_vf
2010b40c82e6SJeff Kirsher * @vf: pointer to the VF info
20115c3c48acSJesse Brandeburg * @opcode: operation code
20125c3c48acSJesse Brandeburg * @retval: return value
20135c3c48acSJesse Brandeburg *
2014b40c82e6SJeff Kirsher * send resp msg to VF
20155c3c48acSJesse Brandeburg **/
20165c3c48acSJesse Brandeburg static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
2017310a2ad9SJesse Brandeburg enum virtchnl_ops opcode,
20185180ff13SJan Sokolowski int retval)
20195c3c48acSJesse Brandeburg {
20205c3c48acSJesse Brandeburg return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
20215c3c48acSJesse Brandeburg }
20225c3c48acSJesse Brandeburg
20235c3c48acSJesse Brandeburg /**
202461125b8bSKaren Sornek * i40e_sync_vf_state
202561125b8bSKaren Sornek * @vf: pointer to the VF info
202661125b8bSKaren Sornek * @state: VF state
202761125b8bSKaren Sornek *
202861125b8bSKaren Sornek * Called from a VF message to synchronize the service with a potential
202961125b8bSKaren Sornek * VF reset state
203061125b8bSKaren Sornek **/
203161125b8bSKaren Sornek static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
203261125b8bSKaren Sornek {
203361125b8bSKaren Sornek int i;
203461125b8bSKaren Sornek
203561125b8bSKaren Sornek /* Some messages can only be handled while the given VF state bit
203661125b8bSKaren Sornek * is set. The bit may be cleared during a VF reset, so wait for
203761125b8bSKaren Sornek * the reset to finish before deciding whether the request message
203861125b8bSKaren Sornek * can be handled.
203961125b8bSKaren Sornek */
204061125b8bSKaren Sornek for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
204161125b8bSKaren Sornek if (test_bit(state, &vf->vf_states))
204261125b8bSKaren Sornek return true;
204361125b8bSKaren Sornek usleep_range(10000, 20000);
204461125b8bSKaren Sornek }
204561125b8bSKaren Sornek
204661125b8bSKaren Sornek return test_bit(state, &vf->vf_states);
204761125b8bSKaren Sornek }
204861125b8bSKaren Sornek
204961125b8bSKaren Sornek /**
20505c3c48acSJesse Brandeburg * i40e_vc_get_version_msg
2051b40c82e6SJeff Kirsher * @vf: pointer to the VF info
2052f5254429SJacob Keller * @msg: pointer to the msg buffer
20535c3c48acSJesse Brandeburg *
2054b40c82e6SJeff Kirsher * called from the VF to request the API version used by the PF
20555c3c48acSJesse Brandeburg **/
2056f4ca1a22SMitch Williams static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
20575c3c48acSJesse Brandeburg {
2058310a2ad9SJesse Brandeburg struct virtchnl_version_info info = {
2059310a2ad9SJesse Brandeburg VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
20605c3c48acSJesse Brandeburg };
20615c3c48acSJesse Brandeburg
2062310a2ad9SJesse Brandeburg vf->vf_ver = *(struct virtchnl_version_info *)msg;
2063606a5488SMitch Williams /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2064eedcfef8SJesse Brandeburg if (VF_IS_V10(&vf->vf_ver))
2065310a2ad9SJesse Brandeburg info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2066310a2ad9SJesse Brandeburg return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2067230f3d53SJan Sokolowski 0, (u8 *)&info,
2068f0adc6e8SJesse Brandeburg sizeof(struct virtchnl_version_info));
20695c3c48acSJesse Brandeburg }
20705c3c48acSJesse Brandeburg
20715c3c48acSJesse Brandeburg /**
2072c4998aa3SAvinash Dayanand * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2073c4998aa3SAvinash Dayanand * @vf: pointer to VF structure
2074c4998aa3SAvinash Dayanand **/
2075c4998aa3SAvinash Dayanand static void i40e_del_qch(struct i40e_vf *vf)
2076c4998aa3SAvinash Dayanand {
2077c4998aa3SAvinash Dayanand struct i40e_pf *pf = vf->pf;
2078c4998aa3SAvinash Dayanand int i;
2079c4998aa3SAvinash Dayanand
2080c4998aa3SAvinash Dayanand /* The first element in the array belongs to the primary VF VSI and we
2081c4998aa3SAvinash Dayanand * shouldn't delete it. We should, however, delete the rest of the VSIs created
2082c4998aa3SAvinash Dayanand */
2083c4998aa3SAvinash Dayanand for (i = 1; i < vf->num_tc; i++) {
2084c4998aa3SAvinash Dayanand if (vf->ch[i].vsi_idx) {
2085c4998aa3SAvinash Dayanand i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2086c4998aa3SAvinash Dayanand vf->ch[i].vsi_idx = 0;
2087c4998aa3SAvinash Dayanand vf->ch[i].vsi_id = 0;
2088c4998aa3SAvinash Dayanand }
2089c4998aa3SAvinash Dayanand }
2090c4998aa3SAvinash Dayanand }
2091c4998aa3SAvinash Dayanand
2092c4998aa3SAvinash Dayanand /**
2093372539deSMichal Jaron * i40e_vc_get_max_frame_size
2094372539deSMichal Jaron * @vf: pointer to the VF
2095372539deSMichal Jaron *
2096372539deSMichal Jaron * Max frame size is determined based on the current port's max frame size and
2097372539deSMichal Jaron * whether a port VLAN is configured on this VF. The VF is not aware whether
2098372539deSMichal Jaron * it's in a port VLAN, so the PF needs to account for this in max frame size
2099372539deSMichal Jaron * checks and when sending the max frame size to the VF.
2100372539deSMichal Jaron **/
2101372539deSMichal Jaron static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2102372539deSMichal Jaron {
2103372539deSMichal Jaron u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2104372539deSMichal Jaron
2105372539deSMichal Jaron if (vf->port_vlan_id)
2106372539deSMichal Jaron max_frame_size -= VLAN_HLEN;
2107372539deSMichal Jaron
2108372539deSMichal Jaron return max_frame_size;
2109372539deSMichal Jaron }
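/* Illustrative example (frame size value assumed): if the port reports a
 * max_frame_size of 9728 bytes and a port VLAN is configured on this VF,
 * the VF is advertised 9728 - VLAN_HLEN = 9724 bytes, because the PF
 * inserts the 4-byte VLAN tag on the VF's behalf and that tag consumes
 * part of the port's frame budget.
 */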
2110372539deSMichal Jaron
2111372539deSMichal Jaron /**
21125c3c48acSJesse Brandeburg * i40e_vc_get_vf_resources_msg
2113b40c82e6SJeff Kirsher * @vf: pointer to the VF info
21145c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
21155c3c48acSJesse Brandeburg *
2116b40c82e6SJeff Kirsher * called from the VF to request its resources
21175c3c48acSJesse Brandeburg **/
2118f4ca1a22SMitch Williams static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
21195c3c48acSJesse Brandeburg {
2120310a2ad9SJesse Brandeburg struct virtchnl_vf_resource *vfres = NULL;
21215c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
21225c3c48acSJesse Brandeburg struct i40e_vsi *vsi;
21235c3c48acSJesse Brandeburg int num_vsis = 1;
21245180ff13SJan Sokolowski int aq_ret = 0;
2125fae6cad1SGustavo A. R. Silva size_t len = 0;
21265c3c48acSJesse Brandeburg int ret;
21275c3c48acSJesse Brandeburg
212861125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2129230f3d53SJan Sokolowski aq_ret = -EINVAL;
21305c3c48acSJesse Brandeburg goto err;
21315c3c48acSJesse Brandeburg }
21325c3c48acSJesse Brandeburg
21335e7f59faSAlexander Lobakin len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
21345c3c48acSJesse Brandeburg vfres = kzalloc(len, GFP_KERNEL);
21355c3c48acSJesse Brandeburg if (!vfres) {
2136230f3d53SJan Sokolowski aq_ret = -ENOMEM;
21375c3c48acSJesse Brandeburg len = 0;
21385c3c48acSJesse Brandeburg goto err;
21395c3c48acSJesse Brandeburg }
2140eedcfef8SJesse Brandeburg if (VF_IS_V11(&vf->vf_ver))
2141f4ca1a22SMitch Williams vf->driver_caps = *(u32 *)msg;
2142f4ca1a22SMitch Williams else
2143310a2ad9SJesse Brandeburg vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2144310a2ad9SJesse Brandeburg VIRTCHNL_VF_OFFLOAD_RSS_REG |
2145310a2ad9SJesse Brandeburg VIRTCHNL_VF_OFFLOAD_VLAN;
21465c3c48acSJesse Brandeburg
2147fbb113f7SStefan Assmann vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
21486d2c322cSAleksandr Loktionov vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2149fdf0e0bfSAnjali Singhai Jain vsi = pf->vsi[vf->lan_vsi_idx];
21505c3c48acSJesse Brandeburg if (!vsi->info.pvid)
2151fbb113f7SStefan Assmann vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2152e3219ce6SAnjali Singhai Jain
21530ef2d5afSMitch Williams if (i40e_vf_client_capable(pf, vf->vf_id) &&
21542723f3b5SJesse Brandeburg (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
21552723f3b5SJesse Brandeburg vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
21562723f3b5SJesse Brandeburg set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
215741d0a4d0SAlan Brady } else {
21582723f3b5SJesse Brandeburg clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2159e3219ce6SAnjali Singhai Jain }
2160e3219ce6SAnjali Singhai Jain
2161310a2ad9SJesse Brandeburg if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2162fbb113f7SStefan Assmann vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2163c4e1868cSMitch Williams } else {
2164d36e41dcSJacob Keller if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2165310a2ad9SJesse Brandeburg (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2166fbb113f7SStefan Assmann vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2167c4e1868cSMitch Williams else
2168fbb113f7SStefan Assmann vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2169e25d00b8SAnjali Singhai Jain }
21701f012279SAnjali Singhai Jain
2171d36e41dcSJacob Keller if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2172310a2ad9SJesse Brandeburg if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2173fbb113f7SStefan Assmann vfres->vf_cap_flags |=
2174310a2ad9SJesse Brandeburg VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
21753d0da5b7SAnjali Singhai Jain }
21763d0da5b7SAnjali Singhai Jain
2177310a2ad9SJesse Brandeburg if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2178fbb113f7SStefan Assmann vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2179bacd75cfSPreethi Banala
2180d36e41dcSJacob Keller if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2181310a2ad9SJesse Brandeburg (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2182fbb113f7SStefan Assmann vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2183bacd75cfSPreethi Banala
2184310a2ad9SJesse Brandeburg if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
218514c5f5d2SShannon Nelson if (pf->flags & I40E_FLAG_MFP_ENABLED) {
218614c5f5d2SShannon Nelson dev_err(&pf->pdev->dev,
218714c5f5d2SShannon Nelson "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
218814c5f5d2SShannon Nelson vf->vf_id);
2189230f3d53SJan Sokolowski aq_ret = -EINVAL;
219014c5f5d2SShannon Nelson goto err;
219114c5f5d2SShannon Nelson }
2192fbb113f7SStefan Assmann vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
219314c5f5d2SShannon Nelson }
21941f012279SAnjali Singhai Jain
2195d36e41dcSJacob Keller if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2196310a2ad9SJesse Brandeburg if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2197fbb113f7SStefan Assmann vfres->vf_cap_flags |=
2198310a2ad9SJesse Brandeburg VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2199f6d83d13SAnjali Singhai Jain }
2200f6d83d13SAnjali Singhai Jain
2201a3f5aa90SAlan Brady if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2202a3f5aa90SAlan Brady vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2203a3f5aa90SAlan Brady
2204c27eac48SAvinash Dayanand if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2205c27eac48SAvinash Dayanand vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2206c27eac48SAvinash Dayanand
22075c3c48acSJesse Brandeburg vfres->num_vsis = num_vsis;
22085c3c48acSJesse Brandeburg vfres->num_queue_pairs = vf->num_queue_pairs;
22095c3c48acSJesse Brandeburg vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2210c4e1868cSMitch Williams vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2211c4e1868cSMitch Williams vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2212372539deSMichal Jaron vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2213c4e1868cSMitch Williams
2214fdf0e0bfSAnjali Singhai Jain if (vf->lan_vsi_idx) {
2215442b25e4SMitch Williams vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2216ff3f4cc2SJesse Brandeburg vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2217442b25e4SMitch Williams vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2218f578f5f4SMitch Williams /* VFs only use TC 0 */
2219442b25e4SMitch Williams vfres->vsi_res[0].qset_handle
2220f578f5f4SMitch Williams = le16_to_cpu(vsi->info.qs_handle[0]);
2221fed0d9f1SNorbert Zulinski if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2222*703c4d82SAleksandr Loktionov spin_lock_bh(&vsi->mac_filter_hash_lock);
2223fed0d9f1SNorbert Zulinski i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2224fed0d9f1SNorbert Zulinski eth_zero_addr(vf->default_lan_addr.addr);
2225*703c4d82SAleksandr Loktionov spin_unlock_bh(&vsi->mac_filter_hash_lock);
2226fed0d9f1SNorbert Zulinski }
2227442b25e4SMitch Williams ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
22286995b36cSJesse Brandeburg vf->default_lan_addr.addr);
22295c3c48acSJesse Brandeburg }
22306322e63cSJacob Keller set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
22315c3c48acSJesse Brandeburg
22325c3c48acSJesse Brandeburg err:
2233b40c82e6SJeff Kirsher /* send the response back to the VF */
2234310a2ad9SJesse Brandeburg ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
22355c3c48acSJesse Brandeburg aq_ret, (u8 *)vfres, len);
22365c3c48acSJesse Brandeburg
22375c3c48acSJesse Brandeburg kfree(vfres);
22385c3c48acSJesse Brandeburg return ret;
22395c3c48acSJesse Brandeburg }
22405c3c48acSJesse Brandeburg
22415c3c48acSJesse Brandeburg /**
22425c3c48acSJesse Brandeburg * i40e_vc_config_promiscuous_mode_msg
2243b40c82e6SJeff Kirsher * @vf: pointer to the VF info
22445c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
22455c3c48acSJesse Brandeburg *
2246b40c82e6SJeff Kirsher * called from the VF to configure the promiscuous mode of
2247b40c82e6SJeff Kirsher * VF vsis
22485c3c48acSJesse Brandeburg **/
2249679b05c0SPatryk Małek static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
22505c3c48acSJesse Brandeburg {
2251310a2ad9SJesse Brandeburg struct virtchnl_promisc_info *info =
2252310a2ad9SJesse Brandeburg (struct virtchnl_promisc_info *)msg;
22535c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
22545c3c48acSJesse Brandeburg bool allmulti = false;
22555676a8b9SAnjali Singhai Jain bool alluni = false;
22565180ff13SJan Sokolowski int aq_ret = 0;
22575c3c48acSJesse Brandeburg
225861125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2259230f3d53SJan Sokolowski aq_ret = -EINVAL;
22601e846827SHarshitha Ramamurthy goto err_out;
22611e846827SHarshitha Ramamurthy }
22621e846827SHarshitha Ramamurthy if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
22631e846827SHarshitha Ramamurthy dev_err(&pf->pdev->dev,
22641e846827SHarshitha Ramamurthy "Unprivileged VF %d is attempting to configure promiscuous mode\n",
22651e846827SHarshitha Ramamurthy vf->vf_id);
22661e846827SHarshitha Ramamurthy
22671e846827SHarshitha Ramamurthy /* Lie to the VF on purpose, because this is an error we can
22681e846827SHarshitha Ramamurthy * ignore. Unprivileged VF is not a virtual channel error.
22691e846827SHarshitha Ramamurthy */
22701e846827SHarshitha Ramamurthy aq_ret = 0;
22711e846827SHarshitha Ramamurthy goto err_out;
22721e846827SHarshitha Ramamurthy }
22730ce5233eSMariusz Stachura
2274d29e0d23SMartyna Szapar if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2275230f3d53SJan Sokolowski aq_ret = -EINVAL;
2276d29e0d23SMartyna Szapar goto err_out;
2277d29e0d23SMartyna Szapar }
2278d29e0d23SMartyna Szapar
2279d29e0d23SMartyna Szapar if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2280230f3d53SJan Sokolowski aq_ret = -EINVAL;
2281d29e0d23SMartyna Szapar goto err_out;
2282d29e0d23SMartyna Szapar }
2283d29e0d23SMartyna Szapar
22845676a8b9SAnjali Singhai Jain /* Multicast promiscuous handling */
2285ff3f4cc2SJesse Brandeburg if (info->flags & FLAG_VF_MULTICAST_PROMISC)
22865c3c48acSJesse Brandeburg allmulti = true;
22875676a8b9SAnjali Singhai Jain
22880ce5233eSMariusz Stachura if (info->flags & FLAG_VF_UNICAST_PROMISC)
22890ce5233eSMariusz Stachura alluni = true;
22900ce5233eSMariusz Stachura aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
22910ce5233eSMariusz Stachura alluni);
2292558e93c9SCzeslaw Zagorski if (aq_ret)
2293558e93c9SCzeslaw Zagorski goto err_out;
2294558e93c9SCzeslaw Zagorski
22950ce5233eSMariusz Stachura if (allmulti) {
2296558e93c9SCzeslaw Zagorski if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2297558e93c9SCzeslaw Zagorski &vf->vf_states))
22985676a8b9SAnjali Singhai Jain dev_info(&pf->pdev->dev,
22995676a8b9SAnjali Singhai Jain "VF %d successfully set multicast promiscuous mode\n",
23005676a8b9SAnjali Singhai Jain vf->vf_id);
2301558e93c9SCzeslaw Zagorski } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2302558e93c9SCzeslaw Zagorski &vf->vf_states))
23030ce5233eSMariusz Stachura dev_info(&pf->pdev->dev,
23040ce5233eSMariusz Stachura "VF %d successfully unset multicast promiscuous mode\n",
23050ce5233eSMariusz Stachura vf->vf_id);
2306558e93c9SCzeslaw Zagorski
23070ce5233eSMariusz Stachura if (alluni) {
2308558e93c9SCzeslaw Zagorski if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2309558e93c9SCzeslaw Zagorski &vf->vf_states))
23105676a8b9SAnjali Singhai Jain dev_info(&pf->pdev->dev,
23115676a8b9SAnjali Singhai Jain "VF %d successfully set unicast promiscuous mode\n",
23125676a8b9SAnjali Singhai Jain vf->vf_id);
2313558e93c9SCzeslaw Zagorski } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2314558e93c9SCzeslaw Zagorski &vf->vf_states))
23150ce5233eSMariusz Stachura dev_info(&pf->pdev->dev,
23160ce5233eSMariusz Stachura "VF %d successfully unset unicast promiscuous mode\n",
23170ce5233eSMariusz Stachura vf->vf_id);
2318558e93c9SCzeslaw Zagorski
23191e846827SHarshitha Ramamurthy err_out:
2320b40c82e6SJeff Kirsher /* send the response to the VF */
23215c3c48acSJesse Brandeburg return i40e_vc_send_resp_to_vf(vf,
2322310a2ad9SJesse Brandeburg VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
23235c3c48acSJesse Brandeburg aq_ret);
23245c3c48acSJesse Brandeburg }
23255c3c48acSJesse Brandeburg
23265c3c48acSJesse Brandeburg /**
23275c3c48acSJesse Brandeburg * i40e_vc_config_queues_msg
2328b40c82e6SJeff Kirsher * @vf: pointer to the VF info
23295c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
23305c3c48acSJesse Brandeburg *
2331b40c82e6SJeff Kirsher * called from the VF to configure the rx/tx
23325c3c48acSJesse Brandeburg * queues
23335c3c48acSJesse Brandeburg **/
2334679b05c0SPatryk Małek static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
23355c3c48acSJesse Brandeburg {
2336310a2ad9SJesse Brandeburg struct virtchnl_vsi_queue_config_info *qci =
2337310a2ad9SJesse Brandeburg (struct virtchnl_vsi_queue_config_info *)msg;
2338310a2ad9SJesse Brandeburg struct virtchnl_queue_pair_info *qpi;
2339c27eac48SAvinash Dayanand u16 vsi_id, vsi_queue_id = 0;
23409e0a603cSEryk Rybak struct i40e_pf *pf = vf->pf;
2341c27eac48SAvinash Dayanand int i, j = 0, idx = 0;
23429e0a603cSEryk Rybak struct i40e_vsi *vsi;
23439e0a603cSEryk Rybak u16 num_qps_all = 0;
23445180ff13SJan Sokolowski int aq_ret = 0;
2345c27eac48SAvinash Dayanand
234661125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2347230f3d53SJan Sokolowski aq_ret = -EINVAL;
23485c3c48acSJesse Brandeburg goto error_param;
23495c3c48acSJesse Brandeburg }
23505c3c48acSJesse Brandeburg
2351d29e0d23SMartyna Szapar if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2352230f3d53SJan Sokolowski aq_ret = -EINVAL;
23535c3c48acSJesse Brandeburg goto error_param;
23545c3c48acSJesse Brandeburg }
2355c27eac48SAvinash Dayanand
23563f8af412SSergey Nemov if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2357230f3d53SJan Sokolowski aq_ret = -EINVAL;
23583f8af412SSergey Nemov goto error_param;
23593f8af412SSergey Nemov }
23603f8af412SSergey Nemov
2361d29e0d23SMartyna Szapar if (vf->adq_enabled) {
23620bb05067SGrzegorz Szczurek for (i = 0; i < vf->num_tc; i++)
2363d29e0d23SMartyna Szapar num_qps_all += vf->ch[i].num_qps;
2364d29e0d23SMartyna Szapar if (num_qps_all != qci->num_queue_pairs) {
2365230f3d53SJan Sokolowski aq_ret = -EINVAL;
2366d29e0d23SMartyna Szapar goto error_param;
2367d29e0d23SMartyna Szapar }
2368d29e0d23SMartyna Szapar }
2369d29e0d23SMartyna Szapar
2370d29e0d23SMartyna Szapar vsi_id = qci->vsi_id;
2371d29e0d23SMartyna Szapar
23725c3c48acSJesse Brandeburg for (i = 0; i < qci->num_queue_pairs; i++) {
23735c3c48acSJesse Brandeburg qpi = &qci->qpair[i];
2374c27eac48SAvinash Dayanand
2375c27eac48SAvinash Dayanand if (!vf->adq_enabled) {
2376d29e0d23SMartyna Szapar if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2377d29e0d23SMartyna Szapar qpi->txq.queue_id)) {
2378230f3d53SJan Sokolowski aq_ret = -EINVAL;
2379d29e0d23SMartyna Szapar goto error_param;
2380d29e0d23SMartyna Szapar }
2381d29e0d23SMartyna Szapar
23825c3c48acSJesse Brandeburg vsi_queue_id = qpi->txq.queue_id;
2383c27eac48SAvinash Dayanand
2384c27eac48SAvinash Dayanand if (qpi->txq.vsi_id != qci->vsi_id ||
2385c27eac48SAvinash Dayanand qpi->rxq.vsi_id != qci->vsi_id ||
2386c27eac48SAvinash Dayanand qpi->rxq.queue_id != vsi_queue_id) {
2387230f3d53SJan Sokolowski aq_ret = -EINVAL;
2388c27eac48SAvinash Dayanand goto error_param;
2389c27eac48SAvinash Dayanand }
2390c27eac48SAvinash Dayanand }
2391c27eac48SAvinash Dayanand
2392f5a2b3ffSPiotr Kwapulinski if (vf->adq_enabled) {
2393f5a2b3ffSPiotr Kwapulinski if (idx >= ARRAY_SIZE(vf->ch)) {
2394230f3d53SJan Sokolowski aq_ret = -ENODEV;
2395f5a2b3ffSPiotr Kwapulinski goto error_param;
2396f5a2b3ffSPiotr Kwapulinski }
2397d29e0d23SMartyna Szapar vsi_id = vf->ch[idx].vsi_id;
2398f5a2b3ffSPiotr Kwapulinski }
23995c3c48acSJesse Brandeburg
24005c3c48acSJesse Brandeburg if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
24015c3c48acSJesse Brandeburg &qpi->rxq) ||
24025c3c48acSJesse Brandeburg i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
24035c3c48acSJesse Brandeburg &qpi->txq)) {
2404230f3d53SJan Sokolowski aq_ret = -EINVAL;
24055c3c48acSJesse Brandeburg goto error_param;
24065c3c48acSJesse Brandeburg }
2407c27eac48SAvinash Dayanand
2408c27eac48SAvinash Dayanand /* For ADq there can be up to 4 VSIs with a max of 4 queues each.
2409c27eac48SAvinash Dayanand * The VF does not know about these additional VSIs and all
2410c27eac48SAvinash Dayanand * it cares about is its own queues. The PF configures these queues
2411c27eac48SAvinash Dayanand * on the appropriate VSIs based on the TC mapping
24126db60322SJeff Kirsher */
2413c27eac48SAvinash Dayanand if (vf->adq_enabled) {
2414f5a2b3ffSPiotr Kwapulinski if (idx >= ARRAY_SIZE(vf->ch)) {
2415230f3d53SJan Sokolowski aq_ret = -ENODEV;
2416f5a2b3ffSPiotr Kwapulinski goto error_param;
2417f5a2b3ffSPiotr Kwapulinski }
2418c27eac48SAvinash Dayanand if (j == (vf->ch[idx].num_qps - 1)) {
2419c27eac48SAvinash Dayanand idx++;
2420c27eac48SAvinash Dayanand j = 0; /* resetting the queue count */
2421c27eac48SAvinash Dayanand vsi_queue_id = 0;
2422c27eac48SAvinash Dayanand } else {
2423c27eac48SAvinash Dayanand j++;
2424c27eac48SAvinash Dayanand vsi_queue_id++;
2425c27eac48SAvinash Dayanand }
2426c27eac48SAvinash Dayanand }
24275c3c48acSJesse Brandeburg }
2428b40c82e6SJeff Kirsher /* set vsi num_queue_pairs in use to num configured by VF */
2429c27eac48SAvinash Dayanand if (!vf->adq_enabled) {
2430c27eac48SAvinash Dayanand pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2431c27eac48SAvinash Dayanand qci->num_queue_pairs;
2432c27eac48SAvinash Dayanand } else {
24339e0a603cSEryk Rybak for (i = 0; i < vf->num_tc; i++) {
24349e0a603cSEryk Rybak vsi = pf->vsi[vf->ch[i].vsi_idx];
24359e0a603cSEryk Rybak vsi->num_queue_pairs = vf->ch[i].num_qps;
24369e0a603cSEryk Rybak
24379e0a603cSEryk Rybak if (i40e_update_adq_vsi_queues(vsi, i)) {
2438230f3d53SJan Sokolowski aq_ret = -EIO;
24399e0a603cSEryk Rybak goto error_param;
24409e0a603cSEryk Rybak }
24419e0a603cSEryk Rybak }
2442c27eac48SAvinash Dayanand }
24435c3c48acSJesse Brandeburg
24445c3c48acSJesse Brandeburg error_param:
2445b40c82e6SJeff Kirsher /* send the response to the VF */
2446310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
24475c3c48acSJesse Brandeburg aq_ret);
24485c3c48acSJesse Brandeburg }
24495c3c48acSJesse Brandeburg
24505c3c48acSJesse Brandeburg /**
2451b50f7bcaSJesse Brandeburg * i40e_validate_queue_map - check queue map is valid
2452b50f7bcaSJesse Brandeburg * @vf: the VF structure pointer
2453c27eac48SAvinash Dayanand * @vsi_id: vsi id
2454c27eac48SAvinash Dayanand * @queuemap: Tx or Rx queue map
2455c27eac48SAvinash Dayanand *
2456c27eac48SAvinash Dayanand * check if Tx or Rx queue map is valid
2457c27eac48SAvinash Dayanand **/
2458c27eac48SAvinash Dayanand static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2459c27eac48SAvinash Dayanand unsigned long queuemap)
2460c27eac48SAvinash Dayanand {
2461c27eac48SAvinash Dayanand u16 vsi_queue_id, queue_id;
2462c27eac48SAvinash Dayanand
2463c27eac48SAvinash Dayanand for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2464c27eac48SAvinash Dayanand if (vf->adq_enabled) {
2465c27eac48SAvinash Dayanand vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2466c27eac48SAvinash Dayanand queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2467c27eac48SAvinash Dayanand } else {
2468c27eac48SAvinash Dayanand queue_id = vsi_queue_id;
2469c27eac48SAvinash Dayanand }
2470c27eac48SAvinash Dayanand
2471c27eac48SAvinash Dayanand if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2472c27eac48SAvinash Dayanand return -EINVAL;
2473c27eac48SAvinash Dayanand }
2474c27eac48SAvinash Dayanand
2475c27eac48SAvinash Dayanand return 0;
2476c27eac48SAvinash Dayanand }
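/* Illustrative example (assumes I40E_MAX_VF_VSI and
 * I40E_DEFAULT_QUEUES_PER_VF are both 4, as in this driver family): with
 * ADq enabled, VF-relative queue 5 in the bitmap is validated as queue
 * 5 % 4 = 1 on the VSI of channel 5 / 4 = 1; without ADq the bit index is
 * validated directly against the VF's LAN VSI.
 */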
2477c27eac48SAvinash Dayanand
2478c27eac48SAvinash Dayanand /**
24795c3c48acSJesse Brandeburg * i40e_vc_config_irq_map_msg
2480b40c82e6SJeff Kirsher * @vf: pointer to the VF info
24815c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
24825c3c48acSJesse Brandeburg *
2483b40c82e6SJeff Kirsher * called from the VF to configure the irq to
24845c3c48acSJesse Brandeburg * queue map
24855c3c48acSJesse Brandeburg **/
2486679b05c0SPatryk Małek static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
24875c3c48acSJesse Brandeburg {
2488310a2ad9SJesse Brandeburg struct virtchnl_irq_map_info *irqmap_info =
2489310a2ad9SJesse Brandeburg (struct virtchnl_irq_map_info *)msg;
2490310a2ad9SJesse Brandeburg struct virtchnl_vector_map *map;
24915180ff13SJan Sokolowski int aq_ret = 0;
2492d29e0d23SMartyna Szapar u16 vsi_id;
24935c3c48acSJesse Brandeburg int i;
24945c3c48acSJesse Brandeburg
249561125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2496230f3d53SJan Sokolowski aq_ret = -EINVAL;
24975c3c48acSJesse Brandeburg goto error_param;
24985c3c48acSJesse Brandeburg }
24995c3c48acSJesse Brandeburg
2500d29e0d23SMartyna Szapar if (irqmap_info->num_vectors >
2501d29e0d23SMartyna Szapar vf->pf->hw.func_caps.num_msix_vectors_vf) {
2502230f3d53SJan Sokolowski aq_ret = -EINVAL;
25035c3c48acSJesse Brandeburg goto error_param;
25045c3c48acSJesse Brandeburg }
25055c3c48acSJesse Brandeburg
2506d29e0d23SMartyna Szapar for (i = 0; i < irqmap_info->num_vectors; i++) {
2507d29e0d23SMartyna Szapar map = &irqmap_info->vecmap[i];
2508d29e0d23SMartyna Szapar /* validate msg params */
2509d29e0d23SMartyna Szapar if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2510d29e0d23SMartyna Szapar !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2511230f3d53SJan Sokolowski aq_ret = -EINVAL;
2512d29e0d23SMartyna Szapar goto error_param;
2513d29e0d23SMartyna Szapar }
2514d29e0d23SMartyna Szapar vsi_id = map->vsi_id;
2515d29e0d23SMartyna Szapar
2516c27eac48SAvinash Dayanand if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2517230f3d53SJan Sokolowski aq_ret = -EINVAL;
25185c3c48acSJesse Brandeburg goto error_param;
25195c3c48acSJesse Brandeburg }
25205c3c48acSJesse Brandeburg
2521c27eac48SAvinash Dayanand if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2522230f3d53SJan Sokolowski aq_ret = -EINVAL;
25235c3c48acSJesse Brandeburg goto error_param;
25245c3c48acSJesse Brandeburg }
25255c3c48acSJesse Brandeburg
25265c3c48acSJesse Brandeburg i40e_config_irq_link_list(vf, vsi_id, map);
25275c3c48acSJesse Brandeburg }
25285c3c48acSJesse Brandeburg error_param:
2529b40c82e6SJeff Kirsher /* send the response to the VF */
2530310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
25315c3c48acSJesse Brandeburg aq_ret);
25325c3c48acSJesse Brandeburg }
25335c3c48acSJesse Brandeburg
25345c3c48acSJesse Brandeburg /**
2535d0fda04dSHarshitha Ramamurthy * i40e_ctrl_vf_tx_rings
2536d0fda04dSHarshitha Ramamurthy * @vsi: the SRIOV VSI being configured
2537d0fda04dSHarshitha Ramamurthy * @q_map: bit map of the queues to be enabled
2538d0fda04dSHarshitha Ramamurthy * @enable: start or stop the queue
2539d0fda04dSHarshitha Ramamurthy **/
2540d0fda04dSHarshitha Ramamurthy static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2541d0fda04dSHarshitha Ramamurthy bool enable)
2542d0fda04dSHarshitha Ramamurthy {
2543d0fda04dSHarshitha Ramamurthy struct i40e_pf *pf = vsi->back;
2544d0fda04dSHarshitha Ramamurthy int ret = 0;
2545d0fda04dSHarshitha Ramamurthy u16 q_id;
2546d0fda04dSHarshitha Ramamurthy
2547d0fda04dSHarshitha Ramamurthy for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2548d0fda04dSHarshitha Ramamurthy ret = i40e_control_wait_tx_q(vsi->seid, pf,
2549d0fda04dSHarshitha Ramamurthy vsi->base_queue + q_id,
2550d0fda04dSHarshitha Ramamurthy false /*is xdp*/, enable);
2551d0fda04dSHarshitha Ramamurthy if (ret)
2552d0fda04dSHarshitha Ramamurthy break;
2553d0fda04dSHarshitha Ramamurthy }
2554d0fda04dSHarshitha Ramamurthy return ret;
2555d0fda04dSHarshitha Ramamurthy }
2556d0fda04dSHarshitha Ramamurthy
2557d0fda04dSHarshitha Ramamurthy /**
2558d0fda04dSHarshitha Ramamurthy * i40e_ctrl_vf_rx_rings
2559d0fda04dSHarshitha Ramamurthy * @vsi: the SRIOV VSI being configured
2560d0fda04dSHarshitha Ramamurthy * @q_map: bit map of the queues to be enabled
2561d0fda04dSHarshitha Ramamurthy * @enable: start or stop the queue
2562d0fda04dSHarshitha Ramamurthy **/
2563d0fda04dSHarshitha Ramamurthy static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2564d0fda04dSHarshitha Ramamurthy bool enable)
2565d0fda04dSHarshitha Ramamurthy {
2566d0fda04dSHarshitha Ramamurthy struct i40e_pf *pf = vsi->back;
2567d0fda04dSHarshitha Ramamurthy int ret = 0;
2568d0fda04dSHarshitha Ramamurthy u16 q_id;
2569d0fda04dSHarshitha Ramamurthy
2570d0fda04dSHarshitha Ramamurthy for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2571d0fda04dSHarshitha Ramamurthy ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2572d0fda04dSHarshitha Ramamurthy enable);
2573d0fda04dSHarshitha Ramamurthy if (ret)
2574d0fda04dSHarshitha Ramamurthy break;
2575d0fda04dSHarshitha Ramamurthy }
2576d0fda04dSHarshitha Ramamurthy return ret;
2577d0fda04dSHarshitha Ramamurthy }
2578d0fda04dSHarshitha Ramamurthy
2579d0fda04dSHarshitha Ramamurthy /**
2580d9d6a9aeSBrett Creeley * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
2581d9d6a9aeSBrett Creeley * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2582d9d6a9aeSBrett Creeley *
2583d9d6a9aeSBrett Creeley * Returns true if validation was successful, else false.
2584d9d6a9aeSBrett Creeley */
2585d9d6a9aeSBrett Creeley static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2586d9d6a9aeSBrett Creeley {
2587d9d6a9aeSBrett Creeley if ((!vqs->rx_queues && !vqs->tx_queues) ||
2588d9d6a9aeSBrett Creeley vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2589d9d6a9aeSBrett Creeley vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2590d9d6a9aeSBrett Creeley return false;
2591d9d6a9aeSBrett Creeley
2592d9d6a9aeSBrett Creeley return true;
2593d9d6a9aeSBrett Creeley }
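/* Illustrative example (assumes I40E_MAX_VF_QUEUES is 16): a request with
 * rx_queues = tx_queues = 0x0003 (queues 0 and 1) passes this check, while
 * a request with both bitmaps empty, or with any bit at position 16 or
 * above set, is rejected.
 */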
2594d9d6a9aeSBrett Creeley
2595d9d6a9aeSBrett Creeley /**
25965c3c48acSJesse Brandeburg * i40e_vc_enable_queues_msg
2597b40c82e6SJeff Kirsher * @vf: pointer to the VF info
25985c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
25995c3c48acSJesse Brandeburg *
2600b40c82e6SJeff Kirsher * called from the VF to enable all or specific queue(s)
26015c3c48acSJesse Brandeburg **/
2602679b05c0SPatryk Małek static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
26035c3c48acSJesse Brandeburg {
2604310a2ad9SJesse Brandeburg struct virtchnl_queue_select *vqs =
2605310a2ad9SJesse Brandeburg (struct virtchnl_queue_select *)msg;
26065c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
26075180ff13SJan Sokolowski int aq_ret = 0;
2608c27eac48SAvinash Dayanand int i;
26095c3c48acSJesse Brandeburg
2610d973bf8eSAndrii Staikov if (vf->is_disabled_from_host) {
2611d973bf8eSAndrii Staikov aq_ret = -EPERM;
2612d973bf8eSAndrii Staikov dev_info(&pf->pdev->dev,
2613d973bf8eSAndrii Staikov "Admin has disabled VF %d, will not enable queues\n",
2614d973bf8eSAndrii Staikov vf->vf_id);
2615d973bf8eSAndrii Staikov goto error_param;
2616d973bf8eSAndrii Staikov }
2617d973bf8eSAndrii Staikov
26186322e63cSJacob Keller if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2619230f3d53SJan Sokolowski aq_ret = -EINVAL;
26205c3c48acSJesse Brandeburg goto error_param;
26215c3c48acSJesse Brandeburg }
26225c3c48acSJesse Brandeburg
2623d510497bSSergey Nemov if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2624230f3d53SJan Sokolowski aq_ret = -EINVAL;
26255c3c48acSJesse Brandeburg goto error_param;
26265c3c48acSJesse Brandeburg }
26275c3c48acSJesse Brandeburg
2628f27f37a0SBrett Creeley if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2629230f3d53SJan Sokolowski aq_ret = -EINVAL;
26305c3c48acSJesse Brandeburg goto error_param;
26315c3c48acSJesse Brandeburg }
2632fdf0e0bfSAnjali Singhai Jain
2633d0fda04dSHarshitha Ramamurthy /* Use the queue bit map sent by the VF */
2634d0fda04dSHarshitha Ramamurthy if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2635d0fda04dSHarshitha Ramamurthy true)) {
2636230f3d53SJan Sokolowski aq_ret = -EIO;
2637d0fda04dSHarshitha Ramamurthy goto error_param;
2638d0fda04dSHarshitha Ramamurthy }
2639d0fda04dSHarshitha Ramamurthy if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2640d0fda04dSHarshitha Ramamurthy true)) {
2641230f3d53SJan Sokolowski aq_ret = -EIO;
2642d0fda04dSHarshitha Ramamurthy goto error_param;
2643d0fda04dSHarshitha Ramamurthy }
2644c27eac48SAvinash Dayanand
2645c27eac48SAvinash Dayanand /* need to start the rings for additional ADq VSI's as well */
2646c27eac48SAvinash Dayanand if (vf->adq_enabled) {
2647c27eac48SAvinash Dayanand /* zero belongs to LAN VSI */
2648c27eac48SAvinash Dayanand for (i = 1; i < vf->num_tc; i++) {
2649c27eac48SAvinash Dayanand if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2650230f3d53SJan Sokolowski aq_ret = -EIO;
2651c27eac48SAvinash Dayanand }
2652c27eac48SAvinash Dayanand }
2653c27eac48SAvinash Dayanand
26545c3c48acSJesse Brandeburg error_param:
2655b40c82e6SJeff Kirsher /* send the response to the VF */
2656310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
26575c3c48acSJesse Brandeburg aq_ret);
26585c3c48acSJesse Brandeburg }
26595c3c48acSJesse Brandeburg
26605c3c48acSJesse Brandeburg /**
26615c3c48acSJesse Brandeburg * i40e_vc_disable_queues_msg
2662b40c82e6SJeff Kirsher * @vf: pointer to the VF info
26635c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
26645c3c48acSJesse Brandeburg *
2665b40c82e6SJeff Kirsher * called from the VF to disable all or specific
26665c3c48acSJesse Brandeburg * queue(s)
26675c3c48acSJesse Brandeburg **/
2668679b05c0SPatryk Małek static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
26695c3c48acSJesse Brandeburg {
2670310a2ad9SJesse Brandeburg struct virtchnl_queue_select *vqs =
2671310a2ad9SJesse Brandeburg (struct virtchnl_queue_select *)msg;
26725c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
26735180ff13SJan Sokolowski int aq_ret = 0;
26745c3c48acSJesse Brandeburg
267561125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2676230f3d53SJan Sokolowski aq_ret = -EINVAL;
26775c3c48acSJesse Brandeburg goto error_param;
26785c3c48acSJesse Brandeburg }
26795c3c48acSJesse Brandeburg
26805c3c48acSJesse Brandeburg if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2681230f3d53SJan Sokolowski aq_ret = -EINVAL;
26825c3c48acSJesse Brandeburg goto error_param;
26835c3c48acSJesse Brandeburg }
26845c3c48acSJesse Brandeburg
2685f27f37a0SBrett Creeley if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2686230f3d53SJan Sokolowski aq_ret = -EINVAL;
26875c3c48acSJesse Brandeburg goto error_param;
26885c3c48acSJesse Brandeburg }
2689fdf0e0bfSAnjali Singhai Jain
2690d0fda04dSHarshitha Ramamurthy /* Use the queue bit map sent by the VF */
2691d0fda04dSHarshitha Ramamurthy if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2692d0fda04dSHarshitha Ramamurthy false)) {
2693230f3d53SJan Sokolowski aq_ret = -EIO;
2694d0fda04dSHarshitha Ramamurthy goto error_param;
2695d0fda04dSHarshitha Ramamurthy }
2696d0fda04dSHarshitha Ramamurthy if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2697d0fda04dSHarshitha Ramamurthy false)) {
2698230f3d53SJan Sokolowski aq_ret = -EIO;
2699d0fda04dSHarshitha Ramamurthy goto error_param;
2700d0fda04dSHarshitha Ramamurthy }
27015c3c48acSJesse Brandeburg error_param:
2702b40c82e6SJeff Kirsher /* send the response to the VF */
2703310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
27045c3c48acSJesse Brandeburg aq_ret);
27055c3c48acSJesse Brandeburg }
27065c3c48acSJesse Brandeburg
27075c3c48acSJesse Brandeburg /**
2708d701658aSJedrzej Jagielski * i40e_check_enough_queue - find big enough queue number
2709d701658aSJedrzej Jagielski * @vf: pointer to the VF info
2710d701658aSJedrzej Jagielski * @needed: the number of items needed
2711d701658aSJedrzej Jagielski *
2712d701658aSJedrzej Jagielski * Returns the base item index of the queue, or negative for error
2713d701658aSJedrzej Jagielski **/
2714d701658aSJedrzej Jagielski static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2715d701658aSJedrzej Jagielski {
2716d701658aSJedrzej Jagielski unsigned int i, cur_queues, more, pool_size;
2717d701658aSJedrzej Jagielski struct i40e_lump_tracking *pile;
2718d701658aSJedrzej Jagielski struct i40e_pf *pf = vf->pf;
2719d701658aSJedrzej Jagielski struct i40e_vsi *vsi;
2720d701658aSJedrzej Jagielski
2721d701658aSJedrzej Jagielski vsi = pf->vsi[vf->lan_vsi_idx];
2722d701658aSJedrzej Jagielski cur_queues = vsi->alloc_queue_pairs;
2723d701658aSJedrzej Jagielski
2724d701658aSJedrzej Jagielski /* if the currently allocated queues are enough for the request */
2725d701658aSJedrzej Jagielski if (cur_queues >= needed)
2726d701658aSJedrzej Jagielski return vsi->base_queue;
2727d701658aSJedrzej Jagielski
2728d701658aSJedrzej Jagielski pile = pf->qp_pile;
2729d701658aSJedrzej Jagielski if (cur_queues > 0) {
2730d701658aSJedrzej Jagielski /* if some queues are already allocated,
2731d701658aSJedrzej Jagielski * just check whether there are enough free queues
2732d701658aSJedrzej Jagielski * directly behind the allocated block.
2733d701658aSJedrzej Jagielski */
2734d701658aSJedrzej Jagielski more = needed - cur_queues;
2735d701658aSJedrzej Jagielski for (i = vsi->base_queue + cur_queues;
2736d701658aSJedrzej Jagielski i < pile->num_entries; i++) {
2737d701658aSJedrzej Jagielski if (pile->list[i] & I40E_PILE_VALID_BIT)
2738d701658aSJedrzej Jagielski break;
2739d701658aSJedrzej Jagielski
2740d701658aSJedrzej Jagielski if (more-- == 1)
2741d701658aSJedrzej Jagielski /* there is enough */
2742d701658aSJedrzej Jagielski return vsi->base_queue;
2743d701658aSJedrzej Jagielski }
2744d701658aSJedrzej Jagielski }
2745d701658aSJedrzej Jagielski
2746d701658aSJedrzej Jagielski pool_size = 0;
2747d701658aSJedrzej Jagielski for (i = 0; i < pile->num_entries; i++) {
2748d701658aSJedrzej Jagielski if (pile->list[i] & I40E_PILE_VALID_BIT) {
2749d701658aSJedrzej Jagielski pool_size = 0;
2750d701658aSJedrzej Jagielski continue;
2751d701658aSJedrzej Jagielski }
2752d701658aSJedrzej Jagielski if (needed <= ++pool_size)
2753d701658aSJedrzej Jagielski /* there is enough */
2754d701658aSJedrzej Jagielski return i;
2755d701658aSJedrzej Jagielski }
2756d701658aSJedrzej Jagielski
2757d701658aSJedrzej Jagielski return -ENOMEM;
2758d701658aSJedrzej Jagielski }
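/* The lookup above is two-pass: it first tries to grow the VF's current
 * allocation in place by checking that enough entries directly behind the
 * existing block are free, and only if that fails does it scan the whole
 * qp_pile for any contiguous run of 'needed' free entries.
 */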
2759d701658aSJedrzej Jagielski
2760d701658aSJedrzej Jagielski /**
2761a3f5aa90SAlan Brady * i40e_vc_request_queues_msg
2762a3f5aa90SAlan Brady * @vf: pointer to the VF info
2763a3f5aa90SAlan Brady * @msg: pointer to the msg buffer
2764a3f5aa90SAlan Brady *
2765a3f5aa90SAlan Brady * VFs get a default number of queues but can use this message to request a
276617a9422dSAlan Brady * different number. If the request is successful, PF will reset the VF and
276717a9422dSAlan Brady * return 0. If unsuccessful, PF will send a message informing the VF of the
276817a9422dSAlan Brady * number of available queues and return the result of sending that message.
2769a3f5aa90SAlan Brady **/
2770679b05c0SPatryk Małek static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2771a3f5aa90SAlan Brady {
2772a3f5aa90SAlan Brady struct virtchnl_vf_res_request *vfres =
2773a3f5aa90SAlan Brady (struct virtchnl_vf_res_request *)msg;
2774d510497bSSergey Nemov u16 req_pairs = vfres->num_queue_pairs;
2775d510497bSSergey Nemov u8 cur_pairs = vf->num_queue_pairs;
2776a3f5aa90SAlan Brady struct i40e_pf *pf = vf->pf;
2777a3f5aa90SAlan Brady
277861125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2779a3f5aa90SAlan Brady return -EINVAL;
2780a3f5aa90SAlan Brady
2781d510497bSSergey Nemov if (req_pairs > I40E_MAX_VF_QUEUES) {
2782a3f5aa90SAlan Brady dev_err(&pf->pdev->dev,
2783a3f5aa90SAlan Brady "VF %d tried to request more than %d queues.\n",
2784a3f5aa90SAlan Brady vf->vf_id,
2785a3f5aa90SAlan Brady I40E_MAX_VF_QUEUES);
2786a3f5aa90SAlan Brady vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2787a3f5aa90SAlan Brady } else if (req_pairs - cur_pairs > pf->queues_left) {
2788a3f5aa90SAlan Brady dev_warn(&pf->pdev->dev,
2789a3f5aa90SAlan Brady "VF %d requested %d more queues, but only %d left.\n",
2790a3f5aa90SAlan Brady vf->vf_id,
2791a3f5aa90SAlan Brady req_pairs - cur_pairs,
2792a3f5aa90SAlan Brady pf->queues_left);
2793a3f5aa90SAlan Brady vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2794d701658aSJedrzej Jagielski } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2795d701658aSJedrzej Jagielski dev_warn(&pf->pdev->dev,
2796d701658aSJedrzej Jagielski "VF %d requested %d more queues, but there is not enough for it.\n",
2797d701658aSJedrzej Jagielski vf->vf_id,
2798d701658aSJedrzej Jagielski req_pairs - cur_pairs);
2799d701658aSJedrzej Jagielski vfres->num_queue_pairs = cur_pairs;
2800a3f5aa90SAlan Brady } else {
280117a9422dSAlan Brady /* successful request */
2802a3f5aa90SAlan Brady vf->num_req_queues = req_pairs;
28033a3b311eSKaren Sornek i40e_vc_reset_vf(vf, true);
280417a9422dSAlan Brady return 0;
2805a3f5aa90SAlan Brady }
2806a3f5aa90SAlan Brady
2807a3f5aa90SAlan Brady return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2808c30bf8ceSGustavo A R Silva (u8 *)vfres, sizeof(*vfres));
2809a3f5aa90SAlan Brady }
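/* Only the final branch above changes the VF's queue count (by scheduling
 * a VF reset); the other branches simply echo back a feasible
 * num_queue_pairs (the I40E_MAX_VF_QUEUES cap, the PF's remaining budget,
 * or the current allocation) so the VF can retry with a smaller request.
 */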
2810a3f5aa90SAlan Brady
2811a3f5aa90SAlan Brady /**
28125c3c48acSJesse Brandeburg * i40e_vc_get_stats_msg
2813b40c82e6SJeff Kirsher * @vf: pointer to the VF info
28145c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
28155c3c48acSJesse Brandeburg *
2816b40c82e6SJeff Kirsher * called from the VF to get vsi stats
28175c3c48acSJesse Brandeburg **/
2818679b05c0SPatryk Małek static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
28195c3c48acSJesse Brandeburg {
2820310a2ad9SJesse Brandeburg struct virtchnl_queue_select *vqs =
2821310a2ad9SJesse Brandeburg (struct virtchnl_queue_select *)msg;
28225c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
28235c3c48acSJesse Brandeburg struct i40e_eth_stats stats;
28245180ff13SJan Sokolowski int aq_ret = 0;
28255c3c48acSJesse Brandeburg struct i40e_vsi *vsi;
28265c3c48acSJesse Brandeburg
28275c3c48acSJesse Brandeburg memset(&stats, 0, sizeof(struct i40e_eth_stats));
28285c3c48acSJesse Brandeburg
282961125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2830230f3d53SJan Sokolowski aq_ret = -EINVAL;
28315c3c48acSJesse Brandeburg goto error_param;
28325c3c48acSJesse Brandeburg }
28335c3c48acSJesse Brandeburg
28345c3c48acSJesse Brandeburg if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2835230f3d53SJan Sokolowski aq_ret = -EINVAL;
28365c3c48acSJesse Brandeburg goto error_param;
28375c3c48acSJesse Brandeburg }
28385c3c48acSJesse Brandeburg
2839fdf0e0bfSAnjali Singhai Jain vsi = pf->vsi[vf->lan_vsi_idx];
28405c3c48acSJesse Brandeburg if (!vsi) {
2841230f3d53SJan Sokolowski aq_ret = -EINVAL;
28425c3c48acSJesse Brandeburg goto error_param;
28435c3c48acSJesse Brandeburg }
28445c3c48acSJesse Brandeburg i40e_update_eth_stats(vsi);
28455a9769c8SMitch Williams stats = vsi->eth_stats;
28465c3c48acSJesse Brandeburg
28475c3c48acSJesse Brandeburg error_param:
2848b40c82e6SJeff Kirsher /* send the response back to the VF */
2849310a2ad9SJesse Brandeburg return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
28505c3c48acSJesse Brandeburg (u8 *)&stats, sizeof(stats));
28515c3c48acSJesse Brandeburg }
28525c3c48acSJesse Brandeburg
2853be147926SIvan Vecera /**
2854be147926SIvan Vecera * i40e_can_vf_change_mac
2855be147926SIvan Vecera * @vf: pointer to the VF info
2856be147926SIvan Vecera *
2857be147926SIvan Vecera * Return true if the VF is allowed to change its MAC filters, false otherwise
2858be147926SIvan Vecera */
2859be147926SIvan Vecera static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
2860be147926SIvan Vecera {
2861be147926SIvan Vecera /* If the VF MAC address has been set administratively (via the
2862be147926SIvan Vecera * ndo_set_vf_mac command), then deny permission to the VF to
2863be147926SIvan Vecera * add/delete unicast MAC addresses, unless the VF is trusted
2864be147926SIvan Vecera */
2865be147926SIvan Vecera if (vf->pf_set_mac && !vf->trusted)
2866be147926SIvan Vecera return false;
2867be147926SIvan Vecera
2868be147926SIvan Vecera return true;
2869be147926SIvan Vecera }
2870be147926SIvan Vecera
2871cfb1d572SKaren Sornek #define I40E_MAX_MACVLAN_PER_HW 3072
2872cfb1d572SKaren Sornek #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
2873cfb1d572SKaren Sornek (num_ports))
287406b6e2a2SAdam Ludkiewicz /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
287506b6e2a2SAdam Ludkiewicz * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
287606b6e2a2SAdam Ludkiewicz */
287706b6e2a2SAdam Ludkiewicz #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
287851110f16SPiotr Kwapulinski #define I40E_VC_MAX_VLAN_PER_VF 16
28795f527ba9SAnjali Singhai Jain
2880cfb1d572SKaren Sornek #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
2881cfb1d572SKaren Sornek ({ typeof(vf_num) vf_num_ = (vf_num); \
2882cfb1d572SKaren Sornek typeof(num_ports) num_ports_ = (num_ports); \
2883cfb1d572SKaren Sornek ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
2884cfb1d572SKaren Sornek I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
2885cfb1d572SKaren Sornek I40E_VC_MAX_MAC_ADDR_PER_VF; })
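/* Worked example (port and VF counts assumed): on a device with 4 ports
 * and 32 allocated VFs, I40E_MAX_MACVLAN_PER_PF(4) = 3072 / 4 = 768
 * filters per PF. Each VF is first accounted I40E_VC_MAX_MAC_ADDR_PER_VF
 * = 18 filters, and the remaining 768 - 32 * 18 = 192 are split evenly,
 * so a trusted VF may program up to 192 / 32 + 18 = 24 MAC/VLAN filters.
 */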
28865c3c48acSJesse Brandeburg /**
2887f657a6e1SGreg Rose * i40e_check_vf_permission
2888b40c82e6SJeff Kirsher * @vf: pointer to the VF info
288903ce7b1dSFilip Sadowski * @al: MAC address list from virtchnl
2890f657a6e1SGreg Rose *
289103ce7b1dSFilip Sadowski * Check that the given list of MAC addresses is allowed. Will return -EPERM
289203ce7b1dSFilip Sadowski * if any address in the list is not valid. Checks the following conditions:
289303ce7b1dSFilip Sadowski *
289403ce7b1dSFilip Sadowski * 1) broadcast and zero addresses are never valid
289503ce7b1dSFilip Sadowski * 2) unicast addresses are not allowed if the VMM has administratively set
289603ce7b1dSFilip Sadowski * the VF MAC address, unless the VF is marked as privileged.
289703ce7b1dSFilip Sadowski * 3) There is enough space to add all the addresses.
289803ce7b1dSFilip Sadowski *
289903ce7b1dSFilip Sadowski * Note that to guarantee consistency, it is expected this function be called
290003ce7b1dSFilip Sadowski * while holding the mac_filter_hash_lock, as otherwise the current number of
290103ce7b1dSFilip Sadowski * addresses might not be accurate.
2902f657a6e1SGreg Rose **/
290303ce7b1dSFilip Sadowski static inline int i40e_check_vf_permission(struct i40e_vf *vf,
29045710ab79SJacob Keller struct virtchnl_ether_addr_list *al)
2905f657a6e1SGreg Rose {
2906f657a6e1SGreg Rose struct i40e_pf *pf = vf->pf;
2907621650caSAleksandr Loktionov struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2908cfb1d572SKaren Sornek struct i40e_hw *hw = &pf->hw;
2909621650caSAleksandr Loktionov int mac2add_cnt = 0;
291003ce7b1dSFilip Sadowski int i;
2911f657a6e1SGreg Rose
291203ce7b1dSFilip Sadowski for (i = 0; i < al->num_elements; i++) {
2913621650caSAleksandr Loktionov struct i40e_mac_filter *f;
291403ce7b1dSFilip Sadowski u8 *addr = al->list[i].addr;
291503ce7b1dSFilip Sadowski
291603ce7b1dSFilip Sadowski if (is_broadcast_ether_addr(addr) ||
291703ce7b1dSFilip Sadowski is_zero_ether_addr(addr)) {
291803ce7b1dSFilip Sadowski dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
291903ce7b1dSFilip Sadowski addr);
2920230f3d53SJan Sokolowski return -EINVAL;
292103ce7b1dSFilip Sadowski }
292203ce7b1dSFilip Sadowski
2923f657a6e1SGreg Rose /* If the host VMM administrator has set the VF MAC address
2924f657a6e1SGreg Rose * administratively via the ndo_set_vf_mac command then deny
2925f657a6e1SGreg Rose * permission to the VF to add or delete unicast MAC addresses.
2926692fb0a7SAnjali Singhai Jain * Unless the VF is privileged, in which case it can do whatever it wants.
29275017c2a8SGreg Rose * The VF may request to set the MAC address filter already
29285017c2a8SGreg Rose * assigned to it so do not return an error in that case.
2929f657a6e1SGreg Rose */
2930be147926SIvan Vecera if (!i40e_can_vf_change_mac(vf) &&
2931be147926SIvan Vecera !is_multicast_ether_addr(addr) &&
293203ce7b1dSFilip Sadowski !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2933f657a6e1SGreg Rose dev_err(&pf->pdev->dev,
2934ae1e29f6SPaweł Jabłoński "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
293503ce7b1dSFilip Sadowski return -EPERM;
2936f657a6e1SGreg Rose }
2937621650caSAleksandr Loktionov
2938621650caSAleksandr Loktionov /* count filters that will actually be added */
2939621650caSAleksandr Loktionov f = i40e_find_mac(vsi, addr);
2940621650caSAleksandr Loktionov if (!f)
2941621650caSAleksandr Loktionov ++mac2add_cnt;
294203ce7b1dSFilip Sadowski }
294303ce7b1dSFilip Sadowski
2944621650caSAleksandr Loktionov /* If this VF is not privileged, then we can't add more than a limited
2945621650caSAleksandr Loktionov * number of addresses. Check to make sure that the additions do not
2946621650caSAleksandr Loktionov * push us over the limit.
2947621650caSAleksandr Loktionov */
2948cfb1d572SKaren Sornek if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2949cfb1d572SKaren Sornek if ((i40e_count_filters(vsi) + mac2add_cnt) >
2950621650caSAleksandr Loktionov I40E_VC_MAX_MAC_ADDR_PER_VF) {
2951621650caSAleksandr Loktionov dev_err(&pf->pdev->dev,
2952621650caSAleksandr Loktionov "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2953621650caSAleksandr Loktionov return -EPERM;
2954621650caSAleksandr Loktionov }
2955cfb1d572SKaren Sornek /* If this VF is trusted, it can use more resources than an untrusted one.
2956cfb1d572SKaren Sornek * However, to ensure that every trusted VF gets an appropriate number of
2957cfb1d572SKaren Sornek * resources, divide the whole pool of resources per port and then across
2958cfb1d572SKaren Sornek * all VFs.
2959cfb1d572SKaren Sornek */
2960cfb1d572SKaren Sornek } else {
2961cfb1d572SKaren Sornek if ((i40e_count_filters(vsi) + mac2add_cnt) >
2962cfb1d572SKaren Sornek I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2963cfb1d572SKaren Sornek hw->num_ports)) {
2964cfb1d572SKaren Sornek dev_err(&pf->pdev->dev,
2965cfb1d572SKaren Sornek "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
2966cfb1d572SKaren Sornek return -EPERM;
2967cfb1d572SKaren Sornek }
2968cfb1d572SKaren Sornek }
296903ce7b1dSFilip Sadowski return 0;
2970f657a6e1SGreg Rose }
2971f657a6e1SGreg Rose
2972f657a6e1SGreg Rose /**
2973ceb29474SSylwester Dziedziuch * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2974ceb29474SSylwester Dziedziuch * @vc_ether_addr: used to extract the type
2975ceb29474SSylwester Dziedziuch **/
2976ceb29474SSylwester Dziedziuch static u8
2977ceb29474SSylwester Dziedziuch i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2978ceb29474SSylwester Dziedziuch {
2979ceb29474SSylwester Dziedziuch return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2980ceb29474SSylwester Dziedziuch }
2981ceb29474SSylwester Dziedziuch
2982ceb29474SSylwester Dziedziuch /**
2983ceb29474SSylwester Dziedziuch * i40e_is_vc_addr_legacy
2984ceb29474SSylwester Dziedziuch * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2985ceb29474SSylwester Dziedziuch *
2986ceb29474SSylwester Dziedziuch * check if the MAC address is from an older VF
2987ceb29474SSylwester Dziedziuch **/
2988ceb29474SSylwester Dziedziuch static bool
2989ceb29474SSylwester Dziedziuch i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2990ceb29474SSylwester Dziedziuch {
2991ceb29474SSylwester Dziedziuch return i40e_vc_ether_addr_type(vc_ether_addr) ==
2992ceb29474SSylwester Dziedziuch VIRTCHNL_ETHER_ADDR_LEGACY;
2993ceb29474SSylwester Dziedziuch }
2994ceb29474SSylwester Dziedziuch
2995ceb29474SSylwester Dziedziuch /**
2996ceb29474SSylwester Dziedziuch * i40e_is_vc_addr_primary
2997ceb29474SSylwester Dziedziuch * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2998ceb29474SSylwester Dziedziuch *
2999ceb29474SSylwester Dziedziuch * check if the MAC address is the VF's primary MAC
3000ceb29474SSylwester Dziedziuch * This function should only be called when the MAC address in
3001ceb29474SSylwester Dziedziuch * virtchnl_ether_addr is a valid unicast MAC
3002ceb29474SSylwester Dziedziuch **/
3003ceb29474SSylwester Dziedziuch static bool
3004ceb29474SSylwester Dziedziuch i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
3005ceb29474SSylwester Dziedziuch {
3006ceb29474SSylwester Dziedziuch return i40e_vc_ether_addr_type(vc_ether_addr) ==
3007ceb29474SSylwester Dziedziuch VIRTCHNL_ETHER_ADDR_PRIMARY;
3008ceb29474SSylwester Dziedziuch }
3009ceb29474SSylwester Dziedziuch
3010ceb29474SSylwester Dziedziuch /**
3011ceb29474SSylwester Dziedziuch * i40e_update_vf_mac_addr
3012ceb29474SSylwester Dziedziuch * @vf: VF to update
3013ceb29474SSylwester Dziedziuch * @vc_ether_addr: structure from VIRTCHNL with MAC to add
3014ceb29474SSylwester Dziedziuch *
3015ceb29474SSylwester Dziedziuch * update the VF's cached hardware MAC if allowed
3016ceb29474SSylwester Dziedziuch **/
3017ceb29474SSylwester Dziedziuch static void
3018ceb29474SSylwester Dziedziuch i40e_update_vf_mac_addr(struct i40e_vf *vf,
3019ceb29474SSylwester Dziedziuch struct virtchnl_ether_addr *vc_ether_addr)
3020ceb29474SSylwester Dziedziuch {
3021ceb29474SSylwester Dziedziuch u8 *mac_addr = vc_ether_addr->addr;
3022ceb29474SSylwester Dziedziuch
3023ceb29474SSylwester Dziedziuch if (!is_valid_ether_addr(mac_addr))
3024ceb29474SSylwester Dziedziuch return;
3025ceb29474SSylwester Dziedziuch
3026ceb29474SSylwester Dziedziuch /* If the request to add a MAC filter is a primary request, update the
3027ceb29474SSylwester Dziedziuch * default MAC address with the requested one. If it is a legacy request,
3028ceb29474SSylwester Dziedziuch * check whether the current default is empty and, if so, update the default MAC
3029ceb29474SSylwester Dziedziuch */
3030ceb29474SSylwester Dziedziuch if (i40e_is_vc_addr_primary(vc_ether_addr)) {
3031ceb29474SSylwester Dziedziuch ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3032ceb29474SSylwester Dziedziuch } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
3033ceb29474SSylwester Dziedziuch if (is_zero_ether_addr(vf->default_lan_addr.addr))
3034ceb29474SSylwester Dziedziuch ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3035ceb29474SSylwester Dziedziuch }
3036ceb29474SSylwester Dziedziuch }
3037ceb29474SSylwester Dziedziuch
3038ceb29474SSylwester Dziedziuch /**
30395c3c48acSJesse Brandeburg * i40e_vc_add_mac_addr_msg
3040b40c82e6SJeff Kirsher * @vf: pointer to the VF info
30415c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
30425c3c48acSJesse Brandeburg *
30435c3c48acSJesse Brandeburg * add guest mac address filter
30445c3c48acSJesse Brandeburg **/
3045679b05c0SPatryk Małek static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
30465c3c48acSJesse Brandeburg {
3047310a2ad9SJesse Brandeburg struct virtchnl_ether_addr_list *al =
3048310a2ad9SJesse Brandeburg (struct virtchnl_ether_addr_list *)msg;
30495c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
30505c3c48acSJesse Brandeburg struct i40e_vsi *vsi = NULL;
30515180ff13SJan Sokolowski int ret = 0;
30525c3c48acSJesse Brandeburg int i;
30535c3c48acSJesse Brandeburg
305461125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3055d510497bSSergey Nemov !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3056230f3d53SJan Sokolowski ret = -EINVAL;
30575c3c48acSJesse Brandeburg goto error_param;
30585c3c48acSJesse Brandeburg }
30595c3c48acSJesse Brandeburg
3060fdf0e0bfSAnjali Singhai Jain vsi = pf->vsi[vf->lan_vsi_idx];
30615c3c48acSJesse Brandeburg
306221659035SKiran Patil /* Lock once, because every function inside the for loop accesses the VSI's
306321659035SKiran Patil * MAC filter list, which needs to be protected by the same lock.
306421659035SKiran Patil */
3065278e7d0bSJacob Keller spin_lock_bh(&vsi->mac_filter_hash_lock);
306621659035SKiran Patil
30675710ab79SJacob Keller ret = i40e_check_vf_permission(vf, al);
306803ce7b1dSFilip Sadowski if (ret) {
306903ce7b1dSFilip Sadowski spin_unlock_bh(&vsi->mac_filter_hash_lock);
307003ce7b1dSFilip Sadowski goto error_param;
307103ce7b1dSFilip Sadowski }
307203ce7b1dSFilip Sadowski
30735c3c48acSJesse Brandeburg /* add new addresses to the list */
30745c3c48acSJesse Brandeburg for (i = 0; i < al->num_elements; i++) {
30755c3c48acSJesse Brandeburg struct i40e_mac_filter *f;
30765c3c48acSJesse Brandeburg
30771bc87e80SJacob Keller f = i40e_find_mac(vsi, al->list[i].addr);
307834c164deSZijie Pan if (!f) {
3079feffdbe4SJacob Keller f = i40e_add_mac_filter(vsi, al->list[i].addr);
30805c3c48acSJesse Brandeburg
30815c3c48acSJesse Brandeburg if (!f) {
30825c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
30838d8f2295SMitch Williams "Unable to add MAC filter %pM for VF %d\n",
30848d8f2295SMitch Williams al->list[i].addr, vf->vf_id);
3085230f3d53SJan Sokolowski ret = -EINVAL;
3086278e7d0bSJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
30875c3c48acSJesse Brandeburg goto error_param;
30885c3c48acSJesse Brandeburg }
30895c3c48acSJesse Brandeburg }
3090ceb29474SSylwester Dziedziuch i40e_update_vf_mac_addr(vf, &al->list[i]);
309134c164deSZijie Pan }
3092278e7d0bSJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
30935c3c48acSJesse Brandeburg
30945c3c48acSJesse Brandeburg /* program the updated filter list */
3095ea02e90bSMitch Williams ret = i40e_sync_vsi_filters(vsi);
3096ea02e90bSMitch Williams if (ret)
3097ea02e90bSMitch Williams dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3098ea02e90bSMitch Williams vf->vf_id, ret);
30995c3c48acSJesse Brandeburg
31005c3c48acSJesse Brandeburg error_param:
3101b40c82e6SJeff Kirsher /* send the response to the VF */
31025710ab79SJacob Keller return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
31035710ab79SJacob Keller ret, NULL, 0);
31045c3c48acSJesse Brandeburg }
31055c3c48acSJesse Brandeburg
31065c3c48acSJesse Brandeburg /**
31075c3c48acSJesse Brandeburg * i40e_vc_del_mac_addr_msg
3108b40c82e6SJeff Kirsher * @vf: pointer to the VF info
31095c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
31105c3c48acSJesse Brandeburg *
31115c3c48acSJesse Brandeburg * remove guest mac address filter
31125c3c48acSJesse Brandeburg **/
3113679b05c0SPatryk Małek static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
31145c3c48acSJesse Brandeburg {
3115310a2ad9SJesse Brandeburg struct virtchnl_ether_addr_list *al =
3116310a2ad9SJesse Brandeburg (struct virtchnl_ether_addr_list *)msg;
31173a700178SSlawomir Laba bool was_unimac_deleted = false;
31185c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
31195c3c48acSJesse Brandeburg struct i40e_vsi *vsi = NULL;
31205180ff13SJan Sokolowski int ret = 0;
31215c3c48acSJesse Brandeburg int i;
31225c3c48acSJesse Brandeburg
312361125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3124d510497bSSergey Nemov !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3125230f3d53SJan Sokolowski ret = -EINVAL;
3126f657a6e1SGreg Rose goto error_param;
3127f657a6e1SGreg Rose }
3128f657a6e1SGreg Rose
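/* reject attempts to delete the broadcast or all-zero MAC address */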
3129f657a6e1SGreg Rose for (i = 0; i < al->num_elements; i++) {
3130700bbf6cSMitch Williams if (is_broadcast_ether_addr(al->list[i].addr) ||
3131700bbf6cSMitch Williams is_zero_ether_addr(al->list[i].addr)) {
31328d8f2295SMitch Williams dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
31338d8f2295SMitch Williams al->list[i].addr, vf->vf_id);
3134230f3d53SJan Sokolowski ret = -EINVAL;
31355c3c48acSJesse Brandeburg goto error_param;
31365c3c48acSJesse Brandeburg }
3137700bbf6cSMitch Williams }
3138fdf0e0bfSAnjali Singhai Jain vsi = pf->vsi[vf->lan_vsi_idx];
31395c3c48acSJesse Brandeburg
3140278e7d0bSJacob Keller spin_lock_bh(&vsi->mac_filter_hash_lock);
31415c3c48acSJesse Brandeburg /* delete addresses from the list */
3142be147926SIvan Vecera for (i = 0; i < al->num_elements; i++) {
3143be147926SIvan Vecera const u8 *addr = al->list[i].addr;
3144be147926SIvan Vecera
3145be147926SIvan Vecera /* Allow deleting the VF primary MAC only if it was not set
3146be147926SIvan Vecera * administratively by the PF or if the VF is trusted.
3147be147926SIvan Vecera */
3148a03e138dSIvan Vecera if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
3149a03e138dSIvan Vecera if (i40e_can_vf_change_mac(vf))
3150be147926SIvan Vecera was_unimac_deleted = true;
3151be147926SIvan Vecera else
3152be147926SIvan Vecera continue;
3153a03e138dSIvan Vecera }
3154be147926SIvan Vecera
3155feffdbe4SJacob Keller if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3156230f3d53SJan Sokolowski ret = -EINVAL;
3157278e7d0bSJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
3158b36e9ab5SMitch Williams goto error_param;
3159b36e9ab5SMitch Williams }
3160be147926SIvan Vecera }
3161b36e9ab5SMitch Williams
3162278e7d0bSJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
31635c3c48acSJesse Brandeburg
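/* the VF's primary MAC was deleted above; clear the cached default so a
 * subsequent request can install a new one
 */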
3164ceb29474SSylwester Dziedziuch if (was_unimac_deleted)
3165ceb29474SSylwester Dziedziuch eth_zero_addr(vf->default_lan_addr.addr);
3166ceb29474SSylwester Dziedziuch
31675c3c48acSJesse Brandeburg /* program the updated filter list */
3168ea02e90bSMitch Williams ret = i40e_sync_vsi_filters(vsi);
3169ea02e90bSMitch Williams if (ret)
3170ea02e90bSMitch Williams dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3171ea02e90bSMitch Williams vf->vf_id, ret);
31725c3c48acSJesse Brandeburg
31733a700178SSlawomir Laba if (vf->trusted && was_unimac_deleted) {
31743a700178SSlawomir Laba struct i40e_mac_filter *f;
31753a700178SSlawomir Laba struct hlist_node *h;
31763a700178SSlawomir Laba u8 *macaddr = NULL;
31773a700178SSlawomir Laba int bkt;
31783a700178SSlawomir Laba
31793a700178SSlawomir Laba /* set last unicast mac address as default */
31803a700178SSlawomir Laba spin_lock_bh(&vsi->mac_filter_hash_lock);
31813a700178SSlawomir Laba hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
31823a700178SSlawomir Laba if (is_valid_ether_addr(f->macaddr))
31833a700178SSlawomir Laba macaddr = f->macaddr;
31843a700178SSlawomir Laba }
31853a700178SSlawomir Laba if (macaddr)
31863a700178SSlawomir Laba ether_addr_copy(vf->default_lan_addr.addr, macaddr);
31873a700178SSlawomir Laba spin_unlock_bh(&vsi->mac_filter_hash_lock);
31883a700178SSlawomir Laba }
31895c3c48acSJesse Brandeburg error_param:
3190b40c82e6SJeff Kirsher /* send the response to the VF */
31913a700178SSlawomir Laba return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
31925c3c48acSJesse Brandeburg }
31935c3c48acSJesse Brandeburg
31945c3c48acSJesse Brandeburg /**
31955c3c48acSJesse Brandeburg * i40e_vc_add_vlan_msg
3196b40c82e6SJeff Kirsher * @vf: pointer to the VF info
31975c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
31985c3c48acSJesse Brandeburg *
31995c3c48acSJesse Brandeburg * program guest vlan id
32005c3c48acSJesse Brandeburg **/
3201679b05c0SPatryk Małek static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
32025c3c48acSJesse Brandeburg {
3203310a2ad9SJesse Brandeburg struct virtchnl_vlan_filter_list *vfl =
3204310a2ad9SJesse Brandeburg (struct virtchnl_vlan_filter_list *)msg;
32055c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
32065c3c48acSJesse Brandeburg struct i40e_vsi *vsi = NULL;
32075180ff13SJan Sokolowski int aq_ret = 0;
32085c3c48acSJesse Brandeburg int i;
32095c3c48acSJesse Brandeburg
32105f527ba9SAnjali Singhai Jain if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
32115f527ba9SAnjali Singhai Jain !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
32125f527ba9SAnjali Singhai Jain dev_err(&pf->pdev->dev,
32135f527ba9SAnjali Singhai Jain "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
32145f527ba9SAnjali Singhai Jain goto error_param;
32155f527ba9SAnjali Singhai Jain }
32166322e63cSJacob Keller if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3217d510497bSSergey Nemov !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3218230f3d53SJan Sokolowski aq_ret = -EINVAL;
32195c3c48acSJesse Brandeburg goto error_param;
32205c3c48acSJesse Brandeburg }
32215c3c48acSJesse Brandeburg
32225c3c48acSJesse Brandeburg for (i = 0; i < vfl->num_elements; i++) {
32235c3c48acSJesse Brandeburg if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3224230f3d53SJan Sokolowski aq_ret = -EINVAL;
32255c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
32265c3c48acSJesse Brandeburg "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
32275c3c48acSJesse Brandeburg goto error_param;
32285c3c48acSJesse Brandeburg }
32295c3c48acSJesse Brandeburg }
3230fdf0e0bfSAnjali Singhai Jain vsi = pf->vsi[vf->lan_vsi_idx];
32315c3c48acSJesse Brandeburg if (vsi->info.pvid) {
3232230f3d53SJan Sokolowski aq_ret = -EINVAL;
32335c3c48acSJesse Brandeburg goto error_param;
32345c3c48acSJesse Brandeburg }
32355c3c48acSJesse Brandeburg
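/* enable VLAN stripping on the VSI, then program each requested VLAN filter */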
32365c3c48acSJesse Brandeburg i40e_vlan_stripping_enable(vsi);
32375c3c48acSJesse Brandeburg for (i = 0; i < vfl->num_elements; i++) {
32385c3c48acSJesse Brandeburg /* add new VLAN filter */
32395c3c48acSJesse Brandeburg int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
32405f527ba9SAnjali Singhai Jain if (!ret)
32415f527ba9SAnjali Singhai Jain vf->num_vlan++;
32426995b36cSJesse Brandeburg
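/* mirror any active unicast/multicast promiscuous setting onto the newly added VLAN */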
32436322e63cSJacob Keller if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
32445676a8b9SAnjali Singhai Jain i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
32455676a8b9SAnjali Singhai Jain true,
32465676a8b9SAnjali Singhai Jain vfl->vlan_id[i],
32475676a8b9SAnjali Singhai Jain NULL);
32486322e63cSJacob Keller if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
32495676a8b9SAnjali Singhai Jain i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
32505676a8b9SAnjali Singhai Jain true,
32515676a8b9SAnjali Singhai Jain vfl->vlan_id[i],
32525676a8b9SAnjali Singhai Jain NULL);
32535676a8b9SAnjali Singhai Jain
32545c3c48acSJesse Brandeburg if (ret)
32555c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
32568d8f2295SMitch Williams "Unable to add VLAN filter %d for VF %d, error %d\n",
32578d8f2295SMitch Williams vfl->vlan_id[i], vf->vf_id, ret);
32585c3c48acSJesse Brandeburg }
32595c3c48acSJesse Brandeburg
32605c3c48acSJesse Brandeburg error_param:
3261b40c82e6SJeff Kirsher /* send the response to the VF */
3262310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
32635c3c48acSJesse Brandeburg }
32645c3c48acSJesse Brandeburg
32655c3c48acSJesse Brandeburg /**
32665c3c48acSJesse Brandeburg * i40e_vc_remove_vlan_msg
3267b40c82e6SJeff Kirsher * @vf: pointer to the VF info
32685c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
32695c3c48acSJesse Brandeburg *
32705c3c48acSJesse Brandeburg * remove programmed guest vlan id
32715c3c48acSJesse Brandeburg **/
3272679b05c0SPatryk Małek static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
32735c3c48acSJesse Brandeburg {
3274310a2ad9SJesse Brandeburg struct virtchnl_vlan_filter_list *vfl =
3275310a2ad9SJesse Brandeburg (struct virtchnl_vlan_filter_list *)msg;
32765c3c48acSJesse Brandeburg struct i40e_pf *pf = vf->pf;
32775c3c48acSJesse Brandeburg struct i40e_vsi *vsi = NULL;
32785180ff13SJan Sokolowski int aq_ret = 0;
32795c3c48acSJesse Brandeburg int i;
32805c3c48acSJesse Brandeburg
328161125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3282d510497bSSergey Nemov !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3283230f3d53SJan Sokolowski aq_ret = -EINVAL;
32845c3c48acSJesse Brandeburg goto error_param;
32855c3c48acSJesse Brandeburg }
32865c3c48acSJesse Brandeburg
32875c3c48acSJesse Brandeburg for (i = 0; i < vfl->num_elements; i++) {
32885c3c48acSJesse Brandeburg if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3289230f3d53SJan Sokolowski aq_ret = -EINVAL;
32905c3c48acSJesse Brandeburg goto error_param;
32915c3c48acSJesse Brandeburg }
32925c3c48acSJesse Brandeburg }
32935c3c48acSJesse Brandeburg
3294fdf0e0bfSAnjali Singhai Jain vsi = pf->vsi[vf->lan_vsi_idx];
32955c3c48acSJesse Brandeburg if (vsi->info.pvid) {
32965a189f15SAleksandr Loktionov if (vfl->num_elements > 1 || vfl->vlan_id[0])
3297230f3d53SJan Sokolowski aq_ret = -EINVAL;
32985c3c48acSJesse Brandeburg goto error_param;
32995c3c48acSJesse Brandeburg }
33005c3c48acSJesse Brandeburg
33015c3c48acSJesse Brandeburg for (i = 0; i < vfl->num_elements; i++) {
33023aa7b74dSFilip Sadowski i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
33035f527ba9SAnjali Singhai Jain vf->num_vlan--;
33046995b36cSJesse Brandeburg
33056322e63cSJacob Keller if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
33065676a8b9SAnjali Singhai Jain i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
33075676a8b9SAnjali Singhai Jain false,
33085676a8b9SAnjali Singhai Jain vfl->vlan_id[i],
33095676a8b9SAnjali Singhai Jain NULL);
33106322e63cSJacob Keller if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
33115676a8b9SAnjali Singhai Jain i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
33125676a8b9SAnjali Singhai Jain false,
33135676a8b9SAnjali Singhai Jain vfl->vlan_id[i],
33145676a8b9SAnjali Singhai Jain NULL);
33155c3c48acSJesse Brandeburg }
33165c3c48acSJesse Brandeburg
33175c3c48acSJesse Brandeburg error_param:
3318b40c82e6SJeff Kirsher /* send the response to the VF */
3319310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
33205c3c48acSJesse Brandeburg }
33215c3c48acSJesse Brandeburg
33225c3c48acSJesse Brandeburg /**
33232723f3b5SJesse Brandeburg * i40e_vc_rdma_msg
3324e3219ce6SAnjali Singhai Jain * @vf: pointer to the VF info
3325e3219ce6SAnjali Singhai Jain * @msg: pointer to the msg buffer
3326e3219ce6SAnjali Singhai Jain * @msglen: msg length
3327e3219ce6SAnjali Singhai Jain *
3328e3219ce6SAnjali Singhai Jain * called from the VF for the RDMA msgs
3329e3219ce6SAnjali Singhai Jain **/
33302723f3b5SJesse Brandeburg static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3331e3219ce6SAnjali Singhai Jain {
3332e3219ce6SAnjali Singhai Jain struct i40e_pf *pf = vf->pf;
3333e3219ce6SAnjali Singhai Jain int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
33345180ff13SJan Sokolowski int aq_ret = 0;
3335e3219ce6SAnjali Singhai Jain
33366322e63cSJacob Keller if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
33372723f3b5SJesse Brandeburg !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3338230f3d53SJan Sokolowski aq_ret = -EINVAL;
3339e3219ce6SAnjali Singhai Jain goto error_param;
3340e3219ce6SAnjali Singhai Jain }
3341e3219ce6SAnjali Singhai Jain
3342e3219ce6SAnjali Singhai Jain i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3343e3219ce6SAnjali Singhai Jain msg, msglen);
3344e3219ce6SAnjali Singhai Jain
3345e3219ce6SAnjali Singhai Jain error_param:
3346e3219ce6SAnjali Singhai Jain /* send the response to the VF */
33472723f3b5SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
3348e3219ce6SAnjali Singhai Jain aq_ret);
3349e3219ce6SAnjali Singhai Jain }
3350e3219ce6SAnjali Singhai Jain
3351e3219ce6SAnjali Singhai Jain /**
33522723f3b5SJesse Brandeburg * i40e_vc_rdma_qvmap_msg
3353e3219ce6SAnjali Singhai Jain * @vf: pointer to the VF info
3354e3219ce6SAnjali Singhai Jain * @msg: pointer to the msg buffer
3355e3219ce6SAnjali Singhai Jain * @config: config qvmap or release it
3356e3219ce6SAnjali Singhai Jain *
3357e3219ce6SAnjali Singhai Jain * called from the VF for the RDMA msgs
3358e3219ce6SAnjali Singhai Jain **/
33592723f3b5SJesse Brandeburg static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3360e3219ce6SAnjali Singhai Jain {
33612723f3b5SJesse Brandeburg struct virtchnl_rdma_qvlist_info *qvlist_info =
33622723f3b5SJesse Brandeburg (struct virtchnl_rdma_qvlist_info *)msg;
33635180ff13SJan Sokolowski int aq_ret = 0;
3364e3219ce6SAnjali Singhai Jain
33656322e63cSJacob Keller if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
33662723f3b5SJesse Brandeburg !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3367230f3d53SJan Sokolowski aq_ret = -EINVAL;
3368e3219ce6SAnjali Singhai Jain goto error_param;
3369e3219ce6SAnjali Singhai Jain }
3370e3219ce6SAnjali Singhai Jain
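/* a config request maps the RDMA queue-vector list; otherwise the existing mapping is released */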
3371e3219ce6SAnjali Singhai Jain if (config) {
33722723f3b5SJesse Brandeburg if (i40e_config_rdma_qvlist(vf, qvlist_info))
3373230f3d53SJan Sokolowski aq_ret = -EINVAL;
3374e3219ce6SAnjali Singhai Jain } else {
33752723f3b5SJesse Brandeburg i40e_release_rdma_qvlist(vf);
3376e3219ce6SAnjali Singhai Jain }
3377e3219ce6SAnjali Singhai Jain
3378e3219ce6SAnjali Singhai Jain error_param:
3379e3219ce6SAnjali Singhai Jain /* send the response to the VF */
3380e3219ce6SAnjali Singhai Jain return i40e_vc_send_resp_to_vf(vf,
33812723f3b5SJesse Brandeburg config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
33822723f3b5SJesse Brandeburg VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
3383e3219ce6SAnjali Singhai Jain aq_ret);
3384e3219ce6SAnjali Singhai Jain }
3385e3219ce6SAnjali Singhai Jain
3386e3219ce6SAnjali Singhai Jain /**
3387c4e1868cSMitch Williams * i40e_vc_config_rss_key
3388c4e1868cSMitch Williams * @vf: pointer to the VF info
3389c4e1868cSMitch Williams * @msg: pointer to the msg buffer
3390c4e1868cSMitch Williams *
3391c4e1868cSMitch Williams * Configure the VF's RSS key
3392c4e1868cSMitch Williams **/
3393679b05c0SPatryk Małek static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3394c4e1868cSMitch Williams {
3395310a2ad9SJesse Brandeburg struct virtchnl_rss_key *vrk =
3396310a2ad9SJesse Brandeburg (struct virtchnl_rss_key *)msg;
3397c4e1868cSMitch Williams struct i40e_pf *pf = vf->pf;
3398c4e1868cSMitch Williams struct i40e_vsi *vsi = NULL;
33995180ff13SJan Sokolowski int aq_ret = 0;
3400c4e1868cSMitch Williams
340161125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3402d510497bSSergey Nemov !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
340361125b8bSKaren Sornek vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3404230f3d53SJan Sokolowski aq_ret = -EINVAL;
3405c4e1868cSMitch Williams goto err;
3406c4e1868cSMitch Williams }
3407c4e1868cSMitch Williams
3408c4e1868cSMitch Williams vsi = pf->vsi[vf->lan_vsi_idx];
3409c4e1868cSMitch Williams aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3410c4e1868cSMitch Williams err:
3411c4e1868cSMitch Williams /* send the response to the VF */
3412310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3413c4e1868cSMitch Williams aq_ret);
3414c4e1868cSMitch Williams }
3415c4e1868cSMitch Williams
3416c4e1868cSMitch Williams /**
3417c4e1868cSMitch Williams * i40e_vc_config_rss_lut
3418c4e1868cSMitch Williams * @vf: pointer to the VF info
3419c4e1868cSMitch Williams * @msg: pointer to the msg buffer
3420c4e1868cSMitch Williams *
3421c4e1868cSMitch Williams * Configure the VF's RSS LUT
3422c4e1868cSMitch Williams **/
3423679b05c0SPatryk Małek static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3424c4e1868cSMitch Williams {
3425310a2ad9SJesse Brandeburg struct virtchnl_rss_lut *vrl =
3426310a2ad9SJesse Brandeburg (struct virtchnl_rss_lut *)msg;
3427c4e1868cSMitch Williams struct i40e_pf *pf = vf->pf;
3428c4e1868cSMitch Williams struct i40e_vsi *vsi = NULL;
34295180ff13SJan Sokolowski int aq_ret = 0;
3430d510497bSSergey Nemov u16 i;
3431c4e1868cSMitch Williams
343261125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3433d510497bSSergey Nemov !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
343461125b8bSKaren Sornek vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3435230f3d53SJan Sokolowski aq_ret = -EINVAL;
3436c4e1868cSMitch Williams goto err;
3437c4e1868cSMitch Williams }
3438c4e1868cSMitch Williams
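/* every LUT entry must reference a queue pair that the VF actually owns */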
3439d510497bSSergey Nemov for (i = 0; i < vrl->lut_entries; i++)
3440d510497bSSergey Nemov if (vrl->lut[i] >= vf->num_queue_pairs) {
3441230f3d53SJan Sokolowski aq_ret = -EINVAL;
3442d510497bSSergey Nemov goto err;
3443d510497bSSergey Nemov }
3444d510497bSSergey Nemov
3445c4e1868cSMitch Williams vsi = pf->vsi[vf->lan_vsi_idx];
3446c4e1868cSMitch Williams aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3447c4e1868cSMitch Williams /* send the response to the VF */
3448c4e1868cSMitch Williams err:
3449310a2ad9SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3450c4e1868cSMitch Williams aq_ret);
3451c4e1868cSMitch Williams }
3452c4e1868cSMitch Williams
3453c4e1868cSMitch Williams /**
3454c4e1868cSMitch Williams * i40e_vc_get_rss_hena
3455c4e1868cSMitch Williams * @vf: pointer to the VF info
3456c4e1868cSMitch Williams * @msg: pointer to the msg buffer
3457c4e1868cSMitch Williams *
3458c4e1868cSMitch Williams * Return the RSS HENA bits allowed by the hardware
3459c4e1868cSMitch Williams **/
3460679b05c0SPatryk Małek static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3461c4e1868cSMitch Williams {
3462310a2ad9SJesse Brandeburg struct virtchnl_rss_hena *vrh = NULL;
3463c4e1868cSMitch Williams struct i40e_pf *pf = vf->pf;
34645180ff13SJan Sokolowski int aq_ret = 0;
3465c4e1868cSMitch Williams int len = 0;
3466c4e1868cSMitch Williams
346761125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3468230f3d53SJan Sokolowski aq_ret = -EINVAL;
3469c4e1868cSMitch Williams goto err;
3470c4e1868cSMitch Williams }
3471310a2ad9SJesse Brandeburg len = sizeof(struct virtchnl_rss_hena);
3472c4e1868cSMitch Williams
3473c4e1868cSMitch Williams vrh = kzalloc(len, GFP_KERNEL);
3474c4e1868cSMitch Williams if (!vrh) {
3475230f3d53SJan Sokolowski aq_ret = -ENOMEM;
3476c4e1868cSMitch Williams len = 0;
3477c4e1868cSMitch Williams goto err;
3478c4e1868cSMitch Williams }
3479c4e1868cSMitch Williams vrh->hena = i40e_pf_get_default_rss_hena(pf);
3480c4e1868cSMitch Williams err:
3481c4e1868cSMitch Williams /* send the response back to the VF */
3482310a2ad9SJesse Brandeburg aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3483c4e1868cSMitch Williams aq_ret, (u8 *)vrh, len);
3484b7d2cd95SMitch Williams kfree(vrh);
3485c4e1868cSMitch Williams return aq_ret;
3486c4e1868cSMitch Williams }
3487c4e1868cSMitch Williams
3488c4e1868cSMitch Williams /**
3489c4e1868cSMitch Williams * i40e_vc_set_rss_hena
3490c4e1868cSMitch Williams * @vf: pointer to the VF info
3491c4e1868cSMitch Williams * @msg: pointer to the msg buffer
3492c4e1868cSMitch Williams *
3493c4e1868cSMitch Williams * Set the RSS HENA bits for the VF
3494c4e1868cSMitch Williams **/
3495679b05c0SPatryk Małek static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3496c4e1868cSMitch Williams {
3497310a2ad9SJesse Brandeburg struct virtchnl_rss_hena *vrh =
3498310a2ad9SJesse Brandeburg (struct virtchnl_rss_hena *)msg;
3499c4e1868cSMitch Williams struct i40e_pf *pf = vf->pf;
3500c4e1868cSMitch Williams struct i40e_hw *hw = &pf->hw;
35015180ff13SJan Sokolowski int aq_ret = 0;
3502c4e1868cSMitch Williams
350361125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3504230f3d53SJan Sokolowski aq_ret = -EINVAL;
3505c4e1868cSMitch Williams goto err;
3506c4e1868cSMitch Williams }
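/* the 64-bit HENA value is programmed as two 32-bit register writes */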
3507c4e1868cSMitch Williams i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3508c4e1868cSMitch Williams i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3509c4e1868cSMitch Williams (u32)(vrh->hena >> 32));
3510c4e1868cSMitch Williams
3511c4e1868cSMitch Williams /* send the response to the VF */
3512c4e1868cSMitch Williams err:
3513f0adc6e8SJesse Brandeburg return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3514c4e1868cSMitch Williams }
3515c4e1868cSMitch Williams
3516c4e1868cSMitch Williams /**
35178774370dSMariusz Stachura * i40e_vc_enable_vlan_stripping
35188774370dSMariusz Stachura * @vf: pointer to the VF info
35198774370dSMariusz Stachura * @msg: pointer to the msg buffer
35208774370dSMariusz Stachura *
35218774370dSMariusz Stachura * Enable vlan header stripping for the VF
35228774370dSMariusz Stachura **/
3523679b05c0SPatryk Małek static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
35248774370dSMariusz Stachura {
3525d510497bSSergey Nemov struct i40e_vsi *vsi;
35265180ff13SJan Sokolowski int aq_ret = 0;
35278774370dSMariusz Stachura
352861125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3529230f3d53SJan Sokolowski aq_ret = -EINVAL;
35308774370dSMariusz Stachura goto err;
35318774370dSMariusz Stachura }
35328774370dSMariusz Stachura
3533d510497bSSergey Nemov vsi = vf->pf->vsi[vf->lan_vsi_idx];
35348774370dSMariusz Stachura i40e_vlan_stripping_enable(vsi);
35358774370dSMariusz Stachura
35368774370dSMariusz Stachura /* send the response to the VF */
35378774370dSMariusz Stachura err:
35388774370dSMariusz Stachura return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
35398774370dSMariusz Stachura aq_ret);
35408774370dSMariusz Stachura }
35418774370dSMariusz Stachura
35428774370dSMariusz Stachura /**
35438774370dSMariusz Stachura * i40e_vc_disable_vlan_stripping
35448774370dSMariusz Stachura * @vf: pointer to the VF info
35458774370dSMariusz Stachura * @msg: pointer to the msg buffer
35468774370dSMariusz Stachura *
35478774370dSMariusz Stachura * Disable vlan header stripping for the VF
35488774370dSMariusz Stachura **/
3549679b05c0SPatryk Małek static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
35508774370dSMariusz Stachura {
3551d510497bSSergey Nemov struct i40e_vsi *vsi;
35525180ff13SJan Sokolowski int aq_ret = 0;
35538774370dSMariusz Stachura
355461125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3555230f3d53SJan Sokolowski aq_ret = -EINVAL;
35568774370dSMariusz Stachura goto err;
35578774370dSMariusz Stachura }
35588774370dSMariusz Stachura
3559d510497bSSergey Nemov vsi = vf->pf->vsi[vf->lan_vsi_idx];
35608774370dSMariusz Stachura i40e_vlan_stripping_disable(vsi);
35618774370dSMariusz Stachura
35628774370dSMariusz Stachura /* send the response to the VF */
35638774370dSMariusz Stachura err:
35648774370dSMariusz Stachura return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
35658774370dSMariusz Stachura aq_ret);
35668774370dSMariusz Stachura }
35678774370dSMariusz Stachura
35688774370dSMariusz Stachura /**
3569e284fc28SAvinash Dayanand * i40e_validate_cloud_filter
3570b50f7bcaSJesse Brandeburg * @vf: pointer to VF structure
3571b50f7bcaSJesse Brandeburg * @tc_filter: pointer to filter requested
3572e284fc28SAvinash Dayanand *
3573e284fc28SAvinash Dayanand * This function validates cloud filter programmed as TC filter for ADq
3574e284fc28SAvinash Dayanand **/
3575e284fc28SAvinash Dayanand static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3576e284fc28SAvinash Dayanand struct virtchnl_filter *tc_filter)
3577e284fc28SAvinash Dayanand {
3578e284fc28SAvinash Dayanand struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3579e284fc28SAvinash Dayanand struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3580e284fc28SAvinash Dayanand struct i40e_pf *pf = vf->pf;
3581e284fc28SAvinash Dayanand struct i40e_vsi *vsi = NULL;
3582e284fc28SAvinash Dayanand struct i40e_mac_filter *f;
3583e284fc28SAvinash Dayanand struct hlist_node *h;
3584e284fc28SAvinash Dayanand bool found = false;
3585e284fc28SAvinash Dayanand int bkt;
3586e284fc28SAvinash Dayanand
35870ec87fc8SSudheer Mogilappagari if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
3588e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
35890ec87fc8SSudheer Mogilappagari "VF %d: ADQ doesn't support this action (%d)\n",
35900ec87fc8SSudheer Mogilappagari vf->vf_id, tc_filter->action);
3591e284fc28SAvinash Dayanand goto err;
3592e284fc28SAvinash Dayanand }
3593e284fc28SAvinash Dayanand
3594e284fc28SAvinash Dayanand /* action_meta is the TC number to which the filter is applied */
3595e284fc28SAvinash Dayanand if (!tc_filter->action_meta ||
35960ec87fc8SSudheer Mogilappagari tc_filter->action_meta > vf->num_tc) {
3597e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3598e284fc28SAvinash Dayanand vf->vf_id, tc_filter->action_meta);
3599e284fc28SAvinash Dayanand goto err;
3600e284fc28SAvinash Dayanand }
3601e284fc28SAvinash Dayanand
3602e284fc28SAvinash Dayanand /* Check filter if it's programmed for advanced mode or basic mode.
3603e284fc28SAvinash Dayanand * There are two ADq modes (for VF only),
3604e284fc28SAvinash Dayanand * 1. Basic mode: intended to allow as many filter options as possible
3605e284fc28SAvinash Dayanand * to be added to a VF in Non-trusted mode. Main goal is
3606e284fc28SAvinash Dayanand * to add filters to its own MAC and VLAN id.
3607e284fc28SAvinash Dayanand * 2. Advanced mode: is for allowing filters to be applied other than
3608e284fc28SAvinash Dayanand * its own MAC or VLAN. This mode requires the VF to be
3609e284fc28SAvinash Dayanand * Trusted.
3610e284fc28SAvinash Dayanand */
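/* basic mode: destination MAC is matched without a destination IP, so the
 * MAC (and VLAN, if masked) must belong to this VF
 */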
3611e284fc28SAvinash Dayanand if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3612e284fc28SAvinash Dayanand vsi = pf->vsi[vf->lan_vsi_idx];
3613e284fc28SAvinash Dayanand f = i40e_find_mac(vsi, data.dst_mac);
3614e284fc28SAvinash Dayanand
3615e284fc28SAvinash Dayanand if (!f) {
3616e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
3617e284fc28SAvinash Dayanand "Destination MAC %pM doesn't belong to VF %d\n",
3618e284fc28SAvinash Dayanand data.dst_mac, vf->vf_id);
3619e284fc28SAvinash Dayanand goto err;
3620e284fc28SAvinash Dayanand }
3621e284fc28SAvinash Dayanand
3622e284fc28SAvinash Dayanand if (mask.vlan_id) {
3623e284fc28SAvinash Dayanand hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3624e284fc28SAvinash Dayanand hlist) {
3625e284fc28SAvinash Dayanand if (f->vlan == ntohs(data.vlan_id)) {
3626e284fc28SAvinash Dayanand found = true;
3627e284fc28SAvinash Dayanand break;
3628e284fc28SAvinash Dayanand }
3629e284fc28SAvinash Dayanand }
3630e284fc28SAvinash Dayanand if (!found) {
3631e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
3632e284fc28SAvinash Dayanand "VF %d doesn't have any VLAN id %u\n",
3633e284fc28SAvinash Dayanand vf->vf_id, ntohs(data.vlan_id));
3634e284fc28SAvinash Dayanand goto err;
3635e284fc28SAvinash Dayanand }
3636e284fc28SAvinash Dayanand }
3637e284fc28SAvinash Dayanand } else {
3638e284fc28SAvinash Dayanand /* Check if VF is trusted */
3639e284fc28SAvinash Dayanand if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3640e284fc28SAvinash Dayanand dev_err(&pf->pdev->dev,
3641e284fc28SAvinash Dayanand "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3642e284fc28SAvinash Dayanand vf->vf_id);
3643230f3d53SJan Sokolowski return -EIO;
3644e284fc28SAvinash Dayanand }
3645e284fc28SAvinash Dayanand }
3646e284fc28SAvinash Dayanand
3647e284fc28SAvinash Dayanand if (mask.dst_mac[0] & data.dst_mac[0]) {
3648e284fc28SAvinash Dayanand if (is_broadcast_ether_addr(data.dst_mac) ||
3649e284fc28SAvinash Dayanand is_zero_ether_addr(data.dst_mac)) {
3650e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3651e284fc28SAvinash Dayanand vf->vf_id, data.dst_mac);
3652e284fc28SAvinash Dayanand goto err;
3653e284fc28SAvinash Dayanand }
3654e284fc28SAvinash Dayanand }
3655e284fc28SAvinash Dayanand
3656e284fc28SAvinash Dayanand if (mask.src_mac[0] & data.src_mac[0]) {
3657e284fc28SAvinash Dayanand if (is_broadcast_ether_addr(data.src_mac) ||
3658e284fc28SAvinash Dayanand is_zero_ether_addr(data.src_mac)) {
3659e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3660e284fc28SAvinash Dayanand vf->vf_id, data.src_mac);
3661e284fc28SAvinash Dayanand goto err;
3662e284fc28SAvinash Dayanand }
3663e284fc28SAvinash Dayanand }
3664e284fc28SAvinash Dayanand
3665e284fc28SAvinash Dayanand if (mask.dst_port & data.dst_port) {
3666a01e5f22SJacob Keller if (!data.dst_port) {
3667e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3668e284fc28SAvinash Dayanand vf->vf_id);
3669e284fc28SAvinash Dayanand goto err;
3670e284fc28SAvinash Dayanand }
3671e284fc28SAvinash Dayanand }
3672e284fc28SAvinash Dayanand
3673e284fc28SAvinash Dayanand if (mask.src_port & data.src_port) {
3674a01e5f22SJacob Keller if (!data.src_port) {
3675e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3676e284fc28SAvinash Dayanand vf->vf_id);
3677e284fc28SAvinash Dayanand goto err;
3678e284fc28SAvinash Dayanand }
3679e284fc28SAvinash Dayanand }
3680e284fc28SAvinash Dayanand
3681e284fc28SAvinash Dayanand if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3682e284fc28SAvinash Dayanand tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3683e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3684e284fc28SAvinash Dayanand vf->vf_id);
3685e284fc28SAvinash Dayanand goto err;
3686e284fc28SAvinash Dayanand }
3687e284fc28SAvinash Dayanand
3688e284fc28SAvinash Dayanand if (mask.vlan_id & data.vlan_id) {
3689e284fc28SAvinash Dayanand if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3690e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3691e284fc28SAvinash Dayanand vf->vf_id);
3692e284fc28SAvinash Dayanand goto err;
3693e284fc28SAvinash Dayanand }
3694e284fc28SAvinash Dayanand }
3695e284fc28SAvinash Dayanand
3696230f3d53SJan Sokolowski return 0;
3697e284fc28SAvinash Dayanand err:
3698230f3d53SJan Sokolowski return -EIO;
3699e284fc28SAvinash Dayanand }
3700e284fc28SAvinash Dayanand
3701e284fc28SAvinash Dayanand /**
3702e284fc28SAvinash Dayanand * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3703e284fc28SAvinash Dayanand * @vf: pointer to the VF info
3704b50f7bcaSJesse Brandeburg * @seid: seid of the vsi it is searching for
3705e284fc28SAvinash Dayanand **/
3706e284fc28SAvinash Dayanand static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3707e284fc28SAvinash Dayanand {
3708e284fc28SAvinash Dayanand struct i40e_pf *pf = vf->pf;
3709e284fc28SAvinash Dayanand struct i40e_vsi *vsi = NULL;
3710e284fc28SAvinash Dayanand int i;
3711e284fc28SAvinash Dayanand
3712e284fc28SAvinash Dayanand for (i = 0; i < vf->num_tc; i++) {
3713e284fc28SAvinash Dayanand vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
371446345b38SColin Ian King if (vsi && vsi->seid == seid)
3715e284fc28SAvinash Dayanand return vsi;
3716e284fc28SAvinash Dayanand }
3717e284fc28SAvinash Dayanand return NULL;
3718e284fc28SAvinash Dayanand }
3719e284fc28SAvinash Dayanand
3720e284fc28SAvinash Dayanand /**
3721e284fc28SAvinash Dayanand * i40e_del_all_cloud_filters
3722e284fc28SAvinash Dayanand * @vf: pointer to the VF info
3723e284fc28SAvinash Dayanand *
3724e284fc28SAvinash Dayanand * This function deletes all cloud filters
3725e284fc28SAvinash Dayanand **/
3726e284fc28SAvinash Dayanand static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3727e284fc28SAvinash Dayanand {
3728e284fc28SAvinash Dayanand struct i40e_cloud_filter *cfilter = NULL;
3729e284fc28SAvinash Dayanand struct i40e_pf *pf = vf->pf;
3730e284fc28SAvinash Dayanand struct i40e_vsi *vsi = NULL;
3731e284fc28SAvinash Dayanand struct hlist_node *node;
3732e284fc28SAvinash Dayanand int ret;
3733e284fc28SAvinash Dayanand
3734e284fc28SAvinash Dayanand hlist_for_each_entry_safe(cfilter, node,
3735e284fc28SAvinash Dayanand &vf->cloud_filter_list, cloud_node) {
3736e284fc28SAvinash Dayanand vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3737e284fc28SAvinash Dayanand
3738e284fc28SAvinash Dayanand if (!vsi) {
3739e284fc28SAvinash Dayanand dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3740e284fc28SAvinash Dayanand vf->vf_id, cfilter->seid);
3741e284fc28SAvinash Dayanand continue;
3742e284fc28SAvinash Dayanand }
3743e284fc28SAvinash Dayanand
3744e284fc28SAvinash Dayanand if (cfilter->dst_port)
3745e284fc28SAvinash Dayanand ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3746e284fc28SAvinash Dayanand false);
3747e284fc28SAvinash Dayanand else
3748e284fc28SAvinash Dayanand ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3749e284fc28SAvinash Dayanand if (ret)
3750e284fc28SAvinash Dayanand dev_err(&pf->pdev->dev,
3751d5ba1842SJan Sokolowski "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3752d5ba1842SJan Sokolowski vf->vf_id, ERR_PTR(ret),
3753e284fc28SAvinash Dayanand i40e_aq_str(&pf->hw,
3754e284fc28SAvinash Dayanand pf->hw.aq.asq_last_status));
3755e284fc28SAvinash Dayanand
3756e284fc28SAvinash Dayanand hlist_del(&cfilter->cloud_node);
3757e284fc28SAvinash Dayanand kfree(cfilter);
3758e284fc28SAvinash Dayanand vf->num_cloud_filters--;
3759e284fc28SAvinash Dayanand }
3760e284fc28SAvinash Dayanand }
3761e284fc28SAvinash Dayanand
3762e284fc28SAvinash Dayanand /**
3763e284fc28SAvinash Dayanand * i40e_vc_del_cloud_filter
3764e284fc28SAvinash Dayanand * @vf: pointer to the VF info
3765e284fc28SAvinash Dayanand * @msg: pointer to the msg buffer
3766e284fc28SAvinash Dayanand *
3767e284fc28SAvinash Dayanand * This function deletes a cloud filter programmed as TC filter for ADq
3768e284fc28SAvinash Dayanand **/
3769e284fc28SAvinash Dayanand static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3770e284fc28SAvinash Dayanand {
3771e284fc28SAvinash Dayanand struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3772e284fc28SAvinash Dayanand struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3773e284fc28SAvinash Dayanand struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3774e284fc28SAvinash Dayanand struct i40e_cloud_filter cfilter, *cf = NULL;
3775e284fc28SAvinash Dayanand struct i40e_pf *pf = vf->pf;
3776e284fc28SAvinash Dayanand struct i40e_vsi *vsi = NULL;
3777e284fc28SAvinash Dayanand struct hlist_node *node;
37785180ff13SJan Sokolowski int aq_ret = 0;
3779e284fc28SAvinash Dayanand int i, ret;
3780e284fc28SAvinash Dayanand
378161125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3782230f3d53SJan Sokolowski aq_ret = -EINVAL;
3783e284fc28SAvinash Dayanand goto err;
3784e284fc28SAvinash Dayanand }
3785e284fc28SAvinash Dayanand
3786e284fc28SAvinash Dayanand if (!vf->adq_enabled) {
3787e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
3788e284fc28SAvinash Dayanand "VF %d: ADq not enabled, can't apply cloud filter\n",
3789e284fc28SAvinash Dayanand vf->vf_id);
3790230f3d53SJan Sokolowski aq_ret = -EINVAL;
3791e284fc28SAvinash Dayanand goto err;
3792e284fc28SAvinash Dayanand }
3793e284fc28SAvinash Dayanand
3794e284fc28SAvinash Dayanand if (i40e_validate_cloud_filter(vf, vcf)) {
3795e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
3796e284fc28SAvinash Dayanand "VF %d: Invalid input, can't apply cloud filter\n",
3797e284fc28SAvinash Dayanand vf->vf_id);
3798230f3d53SJan Sokolowski aq_ret = -EINVAL;
3799e284fc28SAvinash Dayanand goto err;
3800e284fc28SAvinash Dayanand }
3801e284fc28SAvinash Dayanand
3802e284fc28SAvinash Dayanand memset(&cfilter, 0, sizeof(cfilter));
3803e284fc28SAvinash Dayanand /* parse destination mac address */
3804e284fc28SAvinash Dayanand for (i = 0; i < ETH_ALEN; i++)
3805e284fc28SAvinash Dayanand cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3806e284fc28SAvinash Dayanand
3807e284fc28SAvinash Dayanand /* parse source mac address */
3808e284fc28SAvinash Dayanand for (i = 0; i < ETH_ALEN; i++)
3809e284fc28SAvinash Dayanand cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3810e284fc28SAvinash Dayanand
3811e284fc28SAvinash Dayanand cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3812e284fc28SAvinash Dayanand cfilter.dst_port = mask.dst_port & tcf.dst_port;
3813e284fc28SAvinash Dayanand cfilter.src_port = mask.src_port & tcf.src_port;
3814e284fc28SAvinash Dayanand
3815e284fc28SAvinash Dayanand switch (vcf->flow_type) {
3816e284fc28SAvinash Dayanand case VIRTCHNL_TCP_V4_FLOW:
3817e284fc28SAvinash Dayanand cfilter.n_proto = ETH_P_IP;
3818e284fc28SAvinash Dayanand if (mask.dst_ip[0] & tcf.dst_ip[0])
3819e284fc28SAvinash Dayanand memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3820e284fc28SAvinash Dayanand ARRAY_SIZE(tcf.dst_ip));
3821e284fc28SAvinash Dayanand else if (mask.src_ip[0] & tcf.dst_ip[0])
3822e284fc28SAvinash Dayanand memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3823e284fc28SAvinash Dayanand ARRAY_SIZE(tcf.dst_ip));
3824e284fc28SAvinash Dayanand break;
3825e284fc28SAvinash Dayanand case VIRTCHNL_TCP_V6_FLOW:
3826e284fc28SAvinash Dayanand cfilter.n_proto = ETH_P_IPV6;
3827e284fc28SAvinash Dayanand if (mask.dst_ip[3] & tcf.dst_ip[3])
3828e284fc28SAvinash Dayanand memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3829e284fc28SAvinash Dayanand sizeof(cfilter.ip.v6.dst_ip6));
3830e284fc28SAvinash Dayanand if (mask.src_ip[3] & tcf.src_ip[3])
3831e284fc28SAvinash Dayanand memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3832e284fc28SAvinash Dayanand sizeof(cfilter.ip.v6.src_ip6));
3833e284fc28SAvinash Dayanand break;
3834e284fc28SAvinash Dayanand default:
3835e284fc28SAvinash Dayanand /* TC filter can be configured based on different combinations
3836e284fc28SAvinash Dayanand * and in this case IP is not a part of filter config
3837e284fc28SAvinash Dayanand */
3838e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3839e284fc28SAvinash Dayanand vf->vf_id);
3840e284fc28SAvinash Dayanand }
3841e284fc28SAvinash Dayanand
3842e284fc28SAvinash Dayanand /* get the VSI to which the TC belongs */
3843e284fc28SAvinash Dayanand vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3844e284fc28SAvinash Dayanand cfilter.seid = vsi->seid;
3845e284fc28SAvinash Dayanand cfilter.flags = vcf->field_flags;
3846e284fc28SAvinash Dayanand
3847e284fc28SAvinash Dayanand /* Deleting TC filter */
3848e284fc28SAvinash Dayanand if (tcf.dst_port)
3849e284fc28SAvinash Dayanand ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3850e284fc28SAvinash Dayanand else
3851e284fc28SAvinash Dayanand ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3852e284fc28SAvinash Dayanand if (ret) {
3853e284fc28SAvinash Dayanand dev_err(&pf->pdev->dev,
3854d5ba1842SJan Sokolowski "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3855d5ba1842SJan Sokolowski vf->vf_id, ERR_PTR(ret),
3856e284fc28SAvinash Dayanand i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3857e284fc28SAvinash Dayanand goto err;
3858e284fc28SAvinash Dayanand }
3859e284fc28SAvinash Dayanand
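/* walk the VF's cloud filter list and drop the entry matching the filter just removed from hardware */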
3860e284fc28SAvinash Dayanand hlist_for_each_entry_safe(cf, node,
3861e284fc28SAvinash Dayanand &vf->cloud_filter_list, cloud_node) {
3862e284fc28SAvinash Dayanand if (cf->seid != cfilter.seid)
3863e284fc28SAvinash Dayanand continue;
3864e284fc28SAvinash Dayanand if (mask.dst_port)
3865e284fc28SAvinash Dayanand if (cfilter.dst_port != cf->dst_port)
3866e284fc28SAvinash Dayanand continue;
3867e284fc28SAvinash Dayanand if (mask.dst_mac[0])
3868e284fc28SAvinash Dayanand if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3869e284fc28SAvinash Dayanand continue;
3870e284fc28SAvinash Dayanand /* for ipv4 data to be valid, only first byte of mask is set */
3871e284fc28SAvinash Dayanand if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3872e284fc28SAvinash Dayanand if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3873e284fc28SAvinash Dayanand ARRAY_SIZE(tcf.dst_ip)))
3874e284fc28SAvinash Dayanand continue;
3875e284fc28SAvinash Dayanand /* for ipv6, mask is set for all sixteen bytes (4 words) */
3876e284fc28SAvinash Dayanand if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3877e284fc28SAvinash Dayanand if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3878e284fc28SAvinash Dayanand sizeof(cfilter.ip.v6.src_ip6)))
3879e284fc28SAvinash Dayanand continue;
3880e284fc28SAvinash Dayanand if (mask.vlan_id)
3881e284fc28SAvinash Dayanand if (cfilter.vlan_id != cf->vlan_id)
3882e284fc28SAvinash Dayanand continue;
3883e284fc28SAvinash Dayanand
3884e284fc28SAvinash Dayanand hlist_del(&cf->cloud_node);
3885e284fc28SAvinash Dayanand kfree(cf);
3886e284fc28SAvinash Dayanand vf->num_cloud_filters--;
3887e284fc28SAvinash Dayanand }
3888e284fc28SAvinash Dayanand
3889e284fc28SAvinash Dayanand err:
3890e284fc28SAvinash Dayanand return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3891e284fc28SAvinash Dayanand aq_ret);
3892e284fc28SAvinash Dayanand }
3893e284fc28SAvinash Dayanand
3894e284fc28SAvinash Dayanand /**
3895e284fc28SAvinash Dayanand * i40e_vc_add_cloud_filter
3896e284fc28SAvinash Dayanand * @vf: pointer to the VF info
3897e284fc28SAvinash Dayanand * @msg: pointer to the msg buffer
3898e284fc28SAvinash Dayanand *
3899e284fc28SAvinash Dayanand * This function adds a cloud filter programmed as TC filter for ADq
3900e284fc28SAvinash Dayanand **/
3901e284fc28SAvinash Dayanand static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3902e284fc28SAvinash Dayanand {
3903e284fc28SAvinash Dayanand struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3904e284fc28SAvinash Dayanand struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3905e284fc28SAvinash Dayanand struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3906e284fc28SAvinash Dayanand struct i40e_cloud_filter *cfilter = NULL;
3907e284fc28SAvinash Dayanand struct i40e_pf *pf = vf->pf;
3908e284fc28SAvinash Dayanand struct i40e_vsi *vsi = NULL;
39095180ff13SJan Sokolowski int aq_ret = 0;
39100bdd88afSIvan Vecera int i;
3911e284fc28SAvinash Dayanand
391261125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3913230f3d53SJan Sokolowski aq_ret = -EINVAL;
391424474f27SMartyna Szapar goto err_out;
3915e284fc28SAvinash Dayanand }
3916e284fc28SAvinash Dayanand
3917e284fc28SAvinash Dayanand if (!vf->adq_enabled) {
3918e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
3919e284fc28SAvinash Dayanand "VF %d: ADq is not enabled, can't apply cloud filter\n",
3920e284fc28SAvinash Dayanand vf->vf_id);
3921230f3d53SJan Sokolowski aq_ret = -EINVAL;
392224474f27SMartyna Szapar goto err_out;
3923e284fc28SAvinash Dayanand }
3924e284fc28SAvinash Dayanand
3925e284fc28SAvinash Dayanand if (i40e_validate_cloud_filter(vf, vcf)) {
3926e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
3927e284fc28SAvinash Dayanand "VF %d: Invalid input/s, can't apply cloud filter\n",
3928e284fc28SAvinash Dayanand vf->vf_id);
3929230f3d53SJan Sokolowski aq_ret = -EINVAL;
393024474f27SMartyna Szapar goto err_out;
3931e284fc28SAvinash Dayanand }
3932e284fc28SAvinash Dayanand
3933e284fc28SAvinash Dayanand cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
39340bdd88afSIvan Vecera if (!cfilter) {
39350bdd88afSIvan Vecera aq_ret = -ENOMEM;
39360bdd88afSIvan Vecera goto err_out;
39370bdd88afSIvan Vecera }
3938e284fc28SAvinash Dayanand
3939e284fc28SAvinash Dayanand /* parse destination mac address */
3940e284fc28SAvinash Dayanand for (i = 0; i < ETH_ALEN; i++)
3941e284fc28SAvinash Dayanand cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3942e284fc28SAvinash Dayanand
3943e284fc28SAvinash Dayanand /* parse source mac address */
3944e284fc28SAvinash Dayanand for (i = 0; i < ETH_ALEN; i++)
3945e284fc28SAvinash Dayanand cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3946e284fc28SAvinash Dayanand
3947e284fc28SAvinash Dayanand cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3948e284fc28SAvinash Dayanand cfilter->dst_port = mask.dst_port & tcf.dst_port;
3949e284fc28SAvinash Dayanand cfilter->src_port = mask.src_port & tcf.src_port;
3950e284fc28SAvinash Dayanand
3951e284fc28SAvinash Dayanand switch (vcf->flow_type) {
3952e284fc28SAvinash Dayanand case VIRTCHNL_TCP_V4_FLOW:
3953e284fc28SAvinash Dayanand cfilter->n_proto = ETH_P_IP;
3954e284fc28SAvinash Dayanand if (mask.dst_ip[0] & tcf.dst_ip[0])
3955e284fc28SAvinash Dayanand memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3956e284fc28SAvinash Dayanand ARRAY_SIZE(tcf.dst_ip));
3957e284fc28SAvinash Dayanand else if (mask.src_ip[0] & tcf.dst_ip[0])
3958e284fc28SAvinash Dayanand memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3959e284fc28SAvinash Dayanand ARRAY_SIZE(tcf.dst_ip));
3960e284fc28SAvinash Dayanand break;
3961e284fc28SAvinash Dayanand case VIRTCHNL_TCP_V6_FLOW:
3962e284fc28SAvinash Dayanand cfilter->n_proto = ETH_P_IPV6;
3963e284fc28SAvinash Dayanand if (mask.dst_ip[3] & tcf.dst_ip[3])
3964e284fc28SAvinash Dayanand memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3965e284fc28SAvinash Dayanand sizeof(cfilter->ip.v6.dst_ip6));
3966e284fc28SAvinash Dayanand if (mask.src_ip[3] & tcf.src_ip[3])
3967e284fc28SAvinash Dayanand memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3968e284fc28SAvinash Dayanand sizeof(cfilter->ip.v6.src_ip6));
3969e284fc28SAvinash Dayanand break;
3970e284fc28SAvinash Dayanand default:
3971e284fc28SAvinash Dayanand /* TC filter can be configured based on different combinations
3972e284fc28SAvinash Dayanand * and in this case IP is not a part of filter config
3973e284fc28SAvinash Dayanand */
3974e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3975e284fc28SAvinash Dayanand vf->vf_id);
3976e284fc28SAvinash Dayanand }
3977e284fc28SAvinash Dayanand
3978e284fc28SAvinash Dayanand /* get the VSI to which the TC belongs */
3979e284fc28SAvinash Dayanand vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3980e284fc28SAvinash Dayanand cfilter->seid = vsi->seid;
3981e284fc28SAvinash Dayanand cfilter->flags = vcf->field_flags;
3982e284fc28SAvinash Dayanand
3983e284fc28SAvinash Dayanand /* Adding cloud filter programmed as TC filter */
3984e284fc28SAvinash Dayanand if (tcf.dst_port)
39850bdd88afSIvan Vecera aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3986e284fc28SAvinash Dayanand else
39870bdd88afSIvan Vecera aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
39880bdd88afSIvan Vecera if (aq_ret) {
3989e284fc28SAvinash Dayanand dev_err(&pf->pdev->dev,
3990d5ba1842SJan Sokolowski "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
39910bdd88afSIvan Vecera vf->vf_id, ERR_PTR(aq_ret),
3992e284fc28SAvinash Dayanand i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
399324474f27SMartyna Szapar goto err_free;
3994e284fc28SAvinash Dayanand }
3995e284fc28SAvinash Dayanand
3996e284fc28SAvinash Dayanand INIT_HLIST_NODE(&cfilter->cloud_node);
3997e284fc28SAvinash Dayanand hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
399824474f27SMartyna Szapar /* the filter is now owned by the list; clear the local pointer so it is not freed below */
399924474f27SMartyna Szapar cfilter = NULL;
4000e284fc28SAvinash Dayanand vf->num_cloud_filters++;
400124474f27SMartyna Szapar err_free:
400224474f27SMartyna Szapar kfree(cfilter);
400324474f27SMartyna Szapar err_out:
4004e284fc28SAvinash Dayanand return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
4005e284fc28SAvinash Dayanand aq_ret);
4006e284fc28SAvinash Dayanand }
4007e284fc28SAvinash Dayanand
4008e284fc28SAvinash Dayanand /**
4009c27eac48SAvinash Dayanand * i40e_vc_add_qch_msg: Add queue channel and enable ADq
4010c27eac48SAvinash Dayanand * @vf: pointer to the VF info
4011c27eac48SAvinash Dayanand * @msg: pointer to the msg buffer
4012c27eac48SAvinash Dayanand **/
4013c27eac48SAvinash Dayanand static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
4014c27eac48SAvinash Dayanand {
4015c27eac48SAvinash Dayanand struct virtchnl_tc_info *tci =
4016c27eac48SAvinash Dayanand (struct virtchnl_tc_info *)msg;
4017c27eac48SAvinash Dayanand struct i40e_pf *pf = vf->pf;
40180c483bd4SAvinash Dayanand struct i40e_link_status *ls = &pf->hw.phy.link_info;
4019d510497bSSergey Nemov int i, adq_request_qps = 0;
40205180ff13SJan Sokolowski int aq_ret = 0;
4021d510497bSSergey Nemov u64 speed = 0;
4022c27eac48SAvinash Dayanand
402361125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4024230f3d53SJan Sokolowski aq_ret = -EINVAL;
4025c27eac48SAvinash Dayanand goto err;
4026c27eac48SAvinash Dayanand }
4027c27eac48SAvinash Dayanand
4028c27eac48SAvinash Dayanand /* ADq cannot be applied if spoof check is ON */
4029c27eac48SAvinash Dayanand if (vf->spoofchk) {
4030c27eac48SAvinash Dayanand dev_err(&pf->pdev->dev,
4031c27eac48SAvinash Dayanand "Spoof check is ON, turn it OFF to enable ADq\n");
4032230f3d53SJan Sokolowski aq_ret = -EINVAL;
4033c27eac48SAvinash Dayanand goto err;
4034c27eac48SAvinash Dayanand }
4035c27eac48SAvinash Dayanand
4036c27eac48SAvinash Dayanand if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
4037c27eac48SAvinash Dayanand dev_err(&pf->pdev->dev,
4038c27eac48SAvinash Dayanand "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
4039c27eac48SAvinash Dayanand vf->vf_id);
4040230f3d53SJan Sokolowski aq_ret = -EINVAL;
4041c27eac48SAvinash Dayanand goto err;
4042c27eac48SAvinash Dayanand }
4043c27eac48SAvinash Dayanand
4044c27eac48SAvinash Dayanand /* max number of traffic classes for VF currently capped at 4 */
4045c27eac48SAvinash Dayanand if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
4046c27eac48SAvinash Dayanand dev_err(&pf->pdev->dev,
4047d510497bSSergey Nemov "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
4048d510497bSSergey Nemov vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
4049230f3d53SJan Sokolowski aq_ret = -EINVAL;
4050c27eac48SAvinash Dayanand goto err;
4051c27eac48SAvinash Dayanand }
4052c27eac48SAvinash Dayanand
4053c27eac48SAvinash Dayanand /* validate queues for each TC */
4054c27eac48SAvinash Dayanand for (i = 0; i < tci->num_tc; i++)
4055c27eac48SAvinash Dayanand if (!tci->list[i].count ||
4056c27eac48SAvinash Dayanand tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
4057c27eac48SAvinash Dayanand dev_err(&pf->pdev->dev,
4058d510497bSSergey Nemov "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
4059d510497bSSergey Nemov vf->vf_id, i, tci->list[i].count,
4060d510497bSSergey Nemov I40E_DEFAULT_QUEUES_PER_VF);
4061230f3d53SJan Sokolowski aq_ret = -EINVAL;
4062c27eac48SAvinash Dayanand goto err;
4063c27eac48SAvinash Dayanand }
4064c27eac48SAvinash Dayanand
4065c27eac48SAvinash Dayanand /* need Max VF queues but already have default number of queues */
4066c27eac48SAvinash Dayanand adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
4067c27eac48SAvinash Dayanand
4068c27eac48SAvinash Dayanand if (pf->queues_left < adq_request_qps) {
4069c27eac48SAvinash Dayanand dev_err(&pf->pdev->dev,
4070c27eac48SAvinash Dayanand "No queues left to allocate to VF %d\n",
4071c27eac48SAvinash Dayanand vf->vf_id);
4072230f3d53SJan Sokolowski aq_ret = -EINVAL;
4073c27eac48SAvinash Dayanand goto err;
4074c27eac48SAvinash Dayanand } else {
4075c27eac48SAvinash Dayanand /* we need to allocate max VF queues to enable ADq so as to
4076c27eac48SAvinash Dayanand * make sure ADq enabled VF always gets back queues when it
4077c27eac48SAvinash Dayanand * goes through a reset.
4078c27eac48SAvinash Dayanand */
4079c27eac48SAvinash Dayanand vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4080c27eac48SAvinash Dayanand }
4081c27eac48SAvinash Dayanand
40820c483bd4SAvinash Dayanand /* get link speed in MB to validate rate limit */
40836d2c322cSAleksandr Loktionov speed = i40e_vc_link_speed2mbps(ls->link_speed);
40846d2c322cSAleksandr Loktionov if (speed == SPEED_UNKNOWN) {
40850c483bd4SAvinash Dayanand dev_err(&pf->pdev->dev,
40860c483bd4SAvinash Dayanand "Cannot detect link speed\n");
4087230f3d53SJan Sokolowski aq_ret = -EINVAL;
40880c483bd4SAvinash Dayanand goto err;
40890c483bd4SAvinash Dayanand }
40900c483bd4SAvinash Dayanand
4091c27eac48SAvinash Dayanand /* parse data from the queue channel info */
4092c27eac48SAvinash Dayanand vf->num_tc = tci->num_tc;
40930c483bd4SAvinash Dayanand for (i = 0; i < vf->num_tc; i++) {
40940c483bd4SAvinash Dayanand if (tci->list[i].max_tx_rate) {
40950c483bd4SAvinash Dayanand if (tci->list[i].max_tx_rate > speed) {
40960c483bd4SAvinash Dayanand dev_err(&pf->pdev->dev,
40970c483bd4SAvinash Dayanand "Invalid max tx rate %llu specified for VF %d.",
40980c483bd4SAvinash Dayanand tci->list[i].max_tx_rate,
40990c483bd4SAvinash Dayanand vf->vf_id);
4100230f3d53SJan Sokolowski aq_ret = -EINVAL;
41010c483bd4SAvinash Dayanand goto err;
41020c483bd4SAvinash Dayanand } else {
41030c483bd4SAvinash Dayanand vf->ch[i].max_tx_rate =
41040c483bd4SAvinash Dayanand tci->list[i].max_tx_rate;
41050c483bd4SAvinash Dayanand }
41060c483bd4SAvinash Dayanand }
4107c27eac48SAvinash Dayanand vf->ch[i].num_qps = tci->list[i].count;
41080c483bd4SAvinash Dayanand }
4109c27eac48SAvinash Dayanand
4110c27eac48SAvinash Dayanand /* set this flag only after making sure all inputs are sane */
4111c27eac48SAvinash Dayanand vf->adq_enabled = true;
4112c27eac48SAvinash Dayanand
4113c27eac48SAvinash Dayanand /* reset the VF in order to allocate resources */
41143a3b311eSKaren Sornek i40e_vc_reset_vf(vf, true);
4115c27eac48SAvinash Dayanand
4116230f3d53SJan Sokolowski return 0;
4117c27eac48SAvinash Dayanand
4118c27eac48SAvinash Dayanand /* send the response to the VF */
4119c27eac48SAvinash Dayanand err:
4120c27eac48SAvinash Dayanand return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
4121c27eac48SAvinash Dayanand aq_ret);
4122c27eac48SAvinash Dayanand }
4123c27eac48SAvinash Dayanand
4124c27eac48SAvinash Dayanand /**
4125c4998aa3SAvinash Dayanand * i40e_vc_del_qch_msg
4126c4998aa3SAvinash Dayanand * @vf: pointer to the VF info
4127c4998aa3SAvinash Dayanand * @msg: pointer to the msg buffer
4128c4998aa3SAvinash Dayanand **/
4129c4998aa3SAvinash Dayanand static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4130c4998aa3SAvinash Dayanand {
4131c4998aa3SAvinash Dayanand struct i40e_pf *pf = vf->pf;
41325180ff13SJan Sokolowski int aq_ret = 0;
4133c4998aa3SAvinash Dayanand
413461125b8bSKaren Sornek if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4135230f3d53SJan Sokolowski aq_ret = -EINVAL;
4136c4998aa3SAvinash Dayanand goto err;
4137c4998aa3SAvinash Dayanand }
4138c4998aa3SAvinash Dayanand
4139c4998aa3SAvinash Dayanand if (vf->adq_enabled) {
4140e284fc28SAvinash Dayanand i40e_del_all_cloud_filters(vf);
4141c4998aa3SAvinash Dayanand i40e_del_qch(vf);
4142c4998aa3SAvinash Dayanand vf->adq_enabled = false;
4143c4998aa3SAvinash Dayanand vf->num_tc = 0;
4144c4998aa3SAvinash Dayanand dev_info(&pf->pdev->dev,
4145e284fc28SAvinash Dayanand "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4146c4998aa3SAvinash Dayanand vf->vf_id);
4147c4998aa3SAvinash Dayanand } else {
4148c4998aa3SAvinash Dayanand dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4149c4998aa3SAvinash Dayanand vf->vf_id);
4150230f3d53SJan Sokolowski aq_ret = -EINVAL;
4151c4998aa3SAvinash Dayanand }
4152c4998aa3SAvinash Dayanand
4153c4998aa3SAvinash Dayanand /* reset the VF in order to allocate resources */
41543a3b311eSKaren Sornek i40e_vc_reset_vf(vf, true);
4155c4998aa3SAvinash Dayanand
4156230f3d53SJan Sokolowski return 0;
4157c4998aa3SAvinash Dayanand
4158c4998aa3SAvinash Dayanand err:
4159c4998aa3SAvinash Dayanand return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4160c4998aa3SAvinash Dayanand aq_ret);
4161c4998aa3SAvinash Dayanand }
4162c4998aa3SAvinash Dayanand
4163c4998aa3SAvinash Dayanand /**
41645c3c48acSJesse Brandeburg * i40e_vc_process_vf_msg
4165b40c82e6SJeff Kirsher * @pf: pointer to the PF structure
4166b40c82e6SJeff Kirsher * @vf_id: source VF id
4167f5254429SJacob Keller * @v_opcode: operation code
4168f5254429SJacob Keller * @v_retval: unused return value code
41695c3c48acSJesse Brandeburg * @msg: pointer to the msg buffer
41705c3c48acSJesse Brandeburg * @msglen: msg length
41715c3c48acSJesse Brandeburg *
41725c3c48acSJesse Brandeburg * called from the common aeq/arq handler to
4173b40c82e6SJeff Kirsher * process request from VF
41745c3c48acSJesse Brandeburg **/
4175a1b5a24fSJesse Brandeburg int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4176f5254429SJacob Keller u32 __always_unused v_retval, u8 *msg, u16 msglen)
41775c3c48acSJesse Brandeburg {
41785c3c48acSJesse Brandeburg struct i40e_hw *hw = &pf->hw;
4179a1b5a24fSJesse Brandeburg int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
41806c1b5bffSMitch Williams struct i40e_vf *vf;
41815c3c48acSJesse Brandeburg int ret;
41825c3c48acSJesse Brandeburg
41835c3c48acSJesse Brandeburg pf->vf_aq_requests++;
41843f8af412SSergey Nemov if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
41856c1b5bffSMitch Williams return -EINVAL;
41867efa84b7SMitch Williams vf = &(pf->vf[local_vf_id]);
4187260e9382SJesse Brandeburg
4188260e9382SJesse Brandeburg /* Check if VF is disabled. */
4189260e9382SJesse Brandeburg if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4190230f3d53SJan Sokolowski return -EINVAL;
4191260e9382SJesse Brandeburg
41925c3c48acSJesse Brandeburg /* perform basic checks on the msg */
4193735e35c5SJesse Brandeburg ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
41945c3c48acSJesse Brandeburg
41955c3c48acSJesse Brandeburg if (ret) {
4196230f3d53SJan Sokolowski i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4197b40c82e6SJeff Kirsher dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
41987efa84b7SMitch Williams local_vf_id, v_opcode, msglen);
4199230f3d53SJan Sokolowski return ret;
42005c3c48acSJesse Brandeburg }
4201bae3cae4SMitch Williams
42025c3c48acSJesse Brandeburg switch (v_opcode) {
4203310a2ad9SJesse Brandeburg case VIRTCHNL_OP_VERSION:
4204f4ca1a22SMitch Williams ret = i40e_vc_get_version_msg(vf, msg);
42055c3c48acSJesse Brandeburg break;
4206310a2ad9SJesse Brandeburg case VIRTCHNL_OP_GET_VF_RESOURCES:
4207f4ca1a22SMitch Williams ret = i40e_vc_get_vf_resources_msg(vf, msg);
4208d3d657a9SJacob Keller i40e_vc_notify_vf_link_state(vf);
42095c3c48acSJesse Brandeburg break;
4210310a2ad9SJesse Brandeburg case VIRTCHNL_OP_RESET_VF:
42113a3b311eSKaren Sornek i40e_vc_reset_vf(vf, false);
4212fc18eaa0SMitch Williams ret = 0;
42135c3c48acSJesse Brandeburg break;
4214310a2ad9SJesse Brandeburg case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4215679b05c0SPatryk Małek ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
42165c3c48acSJesse Brandeburg break;
4217310a2ad9SJesse Brandeburg case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4218679b05c0SPatryk Małek ret = i40e_vc_config_queues_msg(vf, msg);
42195c3c48acSJesse Brandeburg break;
4220310a2ad9SJesse Brandeburg case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4221679b05c0SPatryk Małek ret = i40e_vc_config_irq_map_msg(vf, msg);
42225c3c48acSJesse Brandeburg break;
4223310a2ad9SJesse Brandeburg case VIRTCHNL_OP_ENABLE_QUEUES:
4224679b05c0SPatryk Małek ret = i40e_vc_enable_queues_msg(vf, msg);
4225055b295dSMitch Williams i40e_vc_notify_vf_link_state(vf);
42265c3c48acSJesse Brandeburg break;
4227310a2ad9SJesse Brandeburg case VIRTCHNL_OP_DISABLE_QUEUES:
4228679b05c0SPatryk Małek ret = i40e_vc_disable_queues_msg(vf, msg);
42295c3c48acSJesse Brandeburg break;
4230310a2ad9SJesse Brandeburg case VIRTCHNL_OP_ADD_ETH_ADDR:
4231679b05c0SPatryk Małek ret = i40e_vc_add_mac_addr_msg(vf, msg);
42325c3c48acSJesse Brandeburg break;
4233310a2ad9SJesse Brandeburg case VIRTCHNL_OP_DEL_ETH_ADDR:
4234679b05c0SPatryk Małek ret = i40e_vc_del_mac_addr_msg(vf, msg);
42355c3c48acSJesse Brandeburg break;
4236310a2ad9SJesse Brandeburg case VIRTCHNL_OP_ADD_VLAN:
4237679b05c0SPatryk Małek ret = i40e_vc_add_vlan_msg(vf, msg);
42385c3c48acSJesse Brandeburg break;
4239310a2ad9SJesse Brandeburg case VIRTCHNL_OP_DEL_VLAN:
4240679b05c0SPatryk Małek ret = i40e_vc_remove_vlan_msg(vf, msg);
42415c3c48acSJesse Brandeburg break;
4242310a2ad9SJesse Brandeburg case VIRTCHNL_OP_GET_STATS:
4243679b05c0SPatryk Małek ret = i40e_vc_get_stats_msg(vf, msg);
42445c3c48acSJesse Brandeburg break;
42452723f3b5SJesse Brandeburg case VIRTCHNL_OP_RDMA:
42462723f3b5SJesse Brandeburg ret = i40e_vc_rdma_msg(vf, msg, msglen);
4247e3219ce6SAnjali Singhai Jain break;
42482723f3b5SJesse Brandeburg case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
42492723f3b5SJesse Brandeburg ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4250e3219ce6SAnjali Singhai Jain break;
42512723f3b5SJesse Brandeburg case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
42522723f3b5SJesse Brandeburg ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4253e3219ce6SAnjali Singhai Jain break;
4254310a2ad9SJesse Brandeburg case VIRTCHNL_OP_CONFIG_RSS_KEY:
4255679b05c0SPatryk Małek ret = i40e_vc_config_rss_key(vf, msg);
4256c4e1868cSMitch Williams break;
4257310a2ad9SJesse Brandeburg case VIRTCHNL_OP_CONFIG_RSS_LUT:
4258679b05c0SPatryk Małek ret = i40e_vc_config_rss_lut(vf, msg);
4259c4e1868cSMitch Williams break;
4260310a2ad9SJesse Brandeburg case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4261679b05c0SPatryk Małek ret = i40e_vc_get_rss_hena(vf, msg);
4262c4e1868cSMitch Williams break;
4263310a2ad9SJesse Brandeburg case VIRTCHNL_OP_SET_RSS_HENA:
4264679b05c0SPatryk Małek ret = i40e_vc_set_rss_hena(vf, msg);
4265c4e1868cSMitch Williams break;
42668774370dSMariusz Stachura case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4267679b05c0SPatryk Małek ret = i40e_vc_enable_vlan_stripping(vf, msg);
42688774370dSMariusz Stachura break;
42698774370dSMariusz Stachura case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4270679b05c0SPatryk Małek ret = i40e_vc_disable_vlan_stripping(vf, msg);
42718774370dSMariusz Stachura break;
4272a3f5aa90SAlan Brady case VIRTCHNL_OP_REQUEST_QUEUES:
4273679b05c0SPatryk Małek ret = i40e_vc_request_queues_msg(vf, msg);
4274a3f5aa90SAlan Brady break;
4275c27eac48SAvinash Dayanand case VIRTCHNL_OP_ENABLE_CHANNELS:
4276c27eac48SAvinash Dayanand ret = i40e_vc_add_qch_msg(vf, msg);
4277c27eac48SAvinash Dayanand break;
4278c4998aa3SAvinash Dayanand case VIRTCHNL_OP_DISABLE_CHANNELS:
4279c4998aa3SAvinash Dayanand ret = i40e_vc_del_qch_msg(vf, msg);
4280c4998aa3SAvinash Dayanand break;
4281e284fc28SAvinash Dayanand case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4282e284fc28SAvinash Dayanand ret = i40e_vc_add_cloud_filter(vf, msg);
4283e284fc28SAvinash Dayanand break;
4284e284fc28SAvinash Dayanand case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4285e284fc28SAvinash Dayanand ret = i40e_vc_del_cloud_filter(vf, msg);
4286e284fc28SAvinash Dayanand break;
4287310a2ad9SJesse Brandeburg case VIRTCHNL_OP_UNKNOWN:
42885c3c48acSJesse Brandeburg default:
4289b40c82e6SJeff Kirsher dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
42907efa84b7SMitch Williams v_opcode, local_vf_id);
42915c3c48acSJesse Brandeburg ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4292230f3d53SJan Sokolowski -EOPNOTSUPP);
42935c3c48acSJesse Brandeburg break;
42945c3c48acSJesse Brandeburg }
42955c3c48acSJesse Brandeburg
42965c3c48acSJesse Brandeburg return ret;
42975c3c48acSJesse Brandeburg }
42985c3c48acSJesse Brandeburg
42995c3c48acSJesse Brandeburg /**
43005c3c48acSJesse Brandeburg * i40e_vc_process_vflr_event
4301b40c82e6SJeff Kirsher * @pf: pointer to the PF structure
43025c3c48acSJesse Brandeburg *
43035c3c48acSJesse Brandeburg * called from the vflr irq handler to
4304b40c82e6SJeff Kirsher * free up VF resources and state variables
43055c3c48acSJesse Brandeburg **/
43065c3c48acSJesse Brandeburg int i40e_vc_process_vflr_event(struct i40e_pf *pf)
43075c3c48acSJesse Brandeburg {
43085c3c48acSJesse Brandeburg struct i40e_hw *hw = &pf->hw;
4309a1b5a24fSJesse Brandeburg u32 reg, reg_idx, bit_idx;
43105c3c48acSJesse Brandeburg struct i40e_vf *vf;
4311a1b5a24fSJesse Brandeburg int vf_id;
43125c3c48acSJesse Brandeburg
43130da36b97SJacob Keller if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
43145c3c48acSJesse Brandeburg return 0;
43155c3c48acSJesse Brandeburg
43160d790327SMitch Williams /* Re-enable the VFLR interrupt cause here, before looking for which
43170d790327SMitch Williams * VF got reset. Otherwise, if another VF gets a reset while the
43180d790327SMitch Williams * first one is being processed, that interrupt will be lost, and
43190d790327SMitch Williams * that VF will be stuck in reset forever.
43200d790327SMitch Williams */
4321c5c2f7c3SMitch Williams reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4322c5c2f7c3SMitch Williams reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4323c5c2f7c3SMitch Williams wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4324c5c2f7c3SMitch Williams i40e_flush(hw);
4325c5c2f7c3SMitch Williams
43260da36b97SJacob Keller clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
43275c3c48acSJesse Brandeburg for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
43285c3c48acSJesse Brandeburg reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
43295c3c48acSJesse Brandeburg bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
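		/* e.g. with vf_base_id 64 and vf_id 5 (illustrative values),
		 * the absolute VF id is 69, giving reg_idx = 2 and
		 * bit_idx = 5 into the GLGEN_VFLRSTAT register array.
		 */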
4330b40c82e6SJeff Kirsher /* read GLGEN_VFLRSTAT register to find out the flr VFs */
43315c3c48acSJesse Brandeburg vf = &pf->vf[vf_id];
43325c3c48acSJesse Brandeburg reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
43337369ca87SMitch Williams if (reg & BIT(bit_idx))
43347e5a313eSMitch Williams /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4335fc18eaa0SMitch Williams i40e_reset_vf(vf, true);
43365c3c48acSJesse Brandeburg }
43375c3c48acSJesse Brandeburg
43385c3c48acSJesse Brandeburg return 0;
43395c3c48acSJesse Brandeburg }
43405c3c48acSJesse Brandeburg
43415c3c48acSJesse Brandeburg /**
4342ed277c50SHarshitha Ramamurthy * i40e_validate_vf
4343ed277c50SHarshitha Ramamurthy * @pf: the physical function
4344ed277c50SHarshitha Ramamurthy * @vf_id: VF identifier
4345ed277c50SHarshitha Ramamurthy *
4346ed277c50SHarshitha Ramamurthy * Check that the VF is enabled and the VSI exists.
4347ed277c50SHarshitha Ramamurthy *
4348ed277c50SHarshitha Ramamurthy * Returns 0 on success, negative on failure
4349ed277c50SHarshitha Ramamurthy **/
4350ed277c50SHarshitha Ramamurthy static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4351ed277c50SHarshitha Ramamurthy {
4352ed277c50SHarshitha Ramamurthy struct i40e_vsi *vsi;
4353ed277c50SHarshitha Ramamurthy struct i40e_vf *vf;
4354ed277c50SHarshitha Ramamurthy int ret = 0;
4355ed277c50SHarshitha Ramamurthy
4356ed277c50SHarshitha Ramamurthy if (vf_id >= pf->num_alloc_vfs) {
4357ed277c50SHarshitha Ramamurthy dev_err(&pf->pdev->dev,
4358ed277c50SHarshitha Ramamurthy "Invalid VF Identifier %d\n", vf_id);
4359ed277c50SHarshitha Ramamurthy ret = -EINVAL;
4360ed277c50SHarshitha Ramamurthy goto err_out;
4361ed277c50SHarshitha Ramamurthy }
4362ed277c50SHarshitha Ramamurthy vf = &pf->vf[vf_id];
4363ed277c50SHarshitha Ramamurthy vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4364ed277c50SHarshitha Ramamurthy if (!vsi)
4365ed277c50SHarshitha Ramamurthy ret = -EINVAL;
4366ed277c50SHarshitha Ramamurthy err_out:
4367ed277c50SHarshitha Ramamurthy return ret;
4368ed277c50SHarshitha Ramamurthy }
4369ed277c50SHarshitha Ramamurthy
4370ed277c50SHarshitha Ramamurthy /**
4371df84f0ceSIvan Vecera * i40e_check_vf_init_timeout
4372df84f0ceSIvan Vecera * @vf: the virtual function
4373df84f0ceSIvan Vecera *
4374df84f0ceSIvan Vecera * Check that the VF's initialization has completed and, if not,
4375df84f0ceSIvan Vecera * wait up to 300ms for it to finish.
4376df84f0ceSIvan Vecera *
4377df84f0ceSIvan Vecera * Returns true when VF is initialized, false on timeout
4378df84f0ceSIvan Vecera **/
4379df84f0ceSIvan Vecera static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4380df84f0ceSIvan Vecera {
4381df84f0ceSIvan Vecera int i;
4382df84f0ceSIvan Vecera
4383df84f0ceSIvan Vecera /* When the VF is resetting, wait until it is done.
4384df84f0ceSIvan Vecera * It can take up to 200 milliseconds, but wait for
4385df84f0ceSIvan Vecera * up to 300 milliseconds to be safe.
4386df84f0ceSIvan Vecera */
4387df84f0ceSIvan Vecera for (i = 0; i < 15; i++) {
4388df84f0ceSIvan Vecera if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4389df84f0ceSIvan Vecera return true;
4390df84f0ceSIvan Vecera msleep(20);
4391df84f0ceSIvan Vecera }
4392df84f0ceSIvan Vecera
4393df84f0ceSIvan Vecera if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4394df84f0ceSIvan Vecera dev_err(&vf->pf->pdev->dev,
4395df84f0ceSIvan Vecera "VF %d still in reset. Try again.\n", vf->vf_id);
4396df84f0ceSIvan Vecera return false;
4397df84f0ceSIvan Vecera }
4398df84f0ceSIvan Vecera
4399df84f0ceSIvan Vecera return true;
4400df84f0ceSIvan Vecera }
4401df84f0ceSIvan Vecera
4402df84f0ceSIvan Vecera /**
44035c3c48acSJesse Brandeburg * i40e_ndo_set_vf_mac
44045c3c48acSJesse Brandeburg * @netdev: network interface device structure
4405b40c82e6SJeff Kirsher * @vf_id: VF identifier
44065c3c48acSJesse Brandeburg * @mac: mac address
44075c3c48acSJesse Brandeburg *
4408b40c82e6SJeff Kirsher * program VF mac address
44095c3c48acSJesse Brandeburg **/
44105c3c48acSJesse Brandeburg int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
44115c3c48acSJesse Brandeburg {
44125c3c48acSJesse Brandeburg struct i40e_netdev_priv *np = netdev_priv(netdev);
44135c3c48acSJesse Brandeburg struct i40e_vsi *vsi = np->vsi;
44145c3c48acSJesse Brandeburg struct i40e_pf *pf = vsi->back;
44155c3c48acSJesse Brandeburg struct i40e_mac_filter *f;
44165c3c48acSJesse Brandeburg struct i40e_vf *vf;
44175c3c48acSJesse Brandeburg int ret = 0;
4418784548c4SLihong Yang struct hlist_node *h;
4419278e7d0bSJacob Keller int bkt;
44205c3c48acSJesse Brandeburg
442180598e62SLihong Yang if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
442280598e62SLihong Yang dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
442380598e62SLihong Yang return -EAGAIN;
442480598e62SLihong Yang }
442580598e62SLihong Yang
44265c3c48acSJesse Brandeburg /* validate the request */
4427ed277c50SHarshitha Ramamurthy ret = i40e_validate_vf(pf, vf_id);
4428ed277c50SHarshitha Ramamurthy if (ret)
44295c3c48acSJesse Brandeburg goto error_param;
44305c3c48acSJesse Brandeburg
4431ed277c50SHarshitha Ramamurthy vf = &pf->vf[vf_id];
4432df84f0ceSIvan Vecera if (!i40e_check_vf_init_timeout(vf)) {
44332d166c30SMitch Williams ret = -EAGAIN;
44345c3c48acSJesse Brandeburg goto error_param;
44355c3c48acSJesse Brandeburg }
443667a3c6b3SStefan Assmann vsi = pf->vsi[vf->lan_vsi_idx];
44375c3c48acSJesse Brandeburg
4438efd8e39aSMitch Williams if (is_multicast_ether_addr(mac)) {
44395c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev,
4440efd8e39aSMitch Williams "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
44415c3c48acSJesse Brandeburg ret = -EINVAL;
44425c3c48acSJesse Brandeburg goto error_param;
44435c3c48acSJesse Brandeburg }
44445c3c48acSJesse Brandeburg
444521659035SKiran Patil /* Lock once because below invoked function add/del_filter requires
4446278e7d0bSJacob Keller * mac_filter_hash_lock to be held
444721659035SKiran Patil */
4448278e7d0bSJacob Keller spin_lock_bh(&vsi->mac_filter_hash_lock);
444921659035SKiran Patil
44505c3c48acSJesse Brandeburg /* delete the temporary mac address */
4451efd8e39aSMitch Williams if (!is_zero_ether_addr(vf->default_lan_addr.addr))
44529569a9a4SJacob Keller i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
44535c3c48acSJesse Brandeburg
445429f71bb0SGreg Rose /* Delete all the filters for this VSI - we're going to kill it
445529f71bb0SGreg Rose * anyway.
445629f71bb0SGreg Rose */
4457784548c4SLihong Yang hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4458148141bbSJacob Keller __i40e_del_filter(vsi, f);
44595c3c48acSJesse Brandeburg
4460278e7d0bSJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
446121659035SKiran Patil
44625c3c48acSJesse Brandeburg /* program mac filter */
446317652c63SJesse Brandeburg if (i40e_sync_vsi_filters(vsi)) {
44645c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
44655c3c48acSJesse Brandeburg ret = -EIO;
44665c3c48acSJesse Brandeburg goto error_param;
44675c3c48acSJesse Brandeburg }
44689a173901SGreg Rose ether_addr_copy(vf->default_lan_addr.addr, mac);
44692f1d86e4SStefan Assmann
44702f1d86e4SStefan Assmann if (is_zero_ether_addr(mac)) {
44712f1d86e4SStefan Assmann vf->pf_set_mac = false;
44722f1d86e4SStefan Assmann dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
44732f1d86e4SStefan Assmann } else {
4474f657a6e1SGreg Rose vf->pf_set_mac = true;
44752f1d86e4SStefan Assmann dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
44762f1d86e4SStefan Assmann mac, vf_id);
44772f1d86e4SStefan Assmann }
44782f1d86e4SStefan Assmann
4479ae1e29f6SPaweł Jabłoński /* Force the VF interface down so it has to bring up with new MAC
4480ae1e29f6SPaweł Jabłoński * address
4481ae1e29f6SPaweł Jabłoński */
44823a3b311eSKaren Sornek i40e_vc_reset_vf(vf, true);
4483ae1e29f6SPaweł Jabłoński dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
44845c3c48acSJesse Brandeburg
44855c3c48acSJesse Brandeburg error_param:
4486f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
44875c3c48acSJesse Brandeburg return ret;
44885c3c48acSJesse Brandeburg }
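/* This ndo hook is normally reached from userspace through rtnetlink on
 * the PF netdev, e.g. with iproute2 (device name and address are
 * illustrative only):
 *   ip link set dev enp1s0f0 vf 0 mac 02:11:22:33:44:55
 */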
44895c3c48acSJesse Brandeburg
44905c3c48acSJesse Brandeburg /**
44915c3c48acSJesse Brandeburg * i40e_ndo_set_vf_port_vlan
44925c3c48acSJesse Brandeburg * @netdev: network interface device structure
4493b40c82e6SJeff Kirsher * @vf_id: VF identifier
44945c3c48acSJesse Brandeburg * @vlan_id: mac address
44955c3c48acSJesse Brandeburg * @qos: priority setting
449679aab093SMoshe Shemesh * @vlan_proto: vlan protocol
44975c3c48acSJesse Brandeburg *
4498b40c82e6SJeff Kirsher * program VF vlan id and/or qos
44995c3c48acSJesse Brandeburg **/
450079aab093SMoshe Shemesh int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
450179aab093SMoshe Shemesh u16 vlan_id, u8 qos, __be16 vlan_proto)
45025c3c48acSJesse Brandeburg {
4503f7fc2f2eSMitch Williams u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
45045c3c48acSJesse Brandeburg struct i40e_netdev_priv *np = netdev_priv(netdev);
4505937f599aSGrzegorz Siwik bool allmulti = false, alluni = false;
45065c3c48acSJesse Brandeburg struct i40e_pf *pf = np->vsi->back;
45075c3c48acSJesse Brandeburg struct i40e_vsi *vsi;
45085c3c48acSJesse Brandeburg struct i40e_vf *vf;
45095c3c48acSJesse Brandeburg int ret = 0;
45105c3c48acSJesse Brandeburg
4511f5a7b21bSJan Sokolowski if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4512f5a7b21bSJan Sokolowski dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4513f5a7b21bSJan Sokolowski return -EAGAIN;
4514f5a7b21bSJan Sokolowski }
4515f5a7b21bSJan Sokolowski
45165c3c48acSJesse Brandeburg /* validate the request */
4517ed277c50SHarshitha Ramamurthy ret = i40e_validate_vf(pf, vf_id);
4518ed277c50SHarshitha Ramamurthy if (ret)
45195c3c48acSJesse Brandeburg goto error_pvid;
45205c3c48acSJesse Brandeburg
45215c3c48acSJesse Brandeburg if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
45225c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
45235c3c48acSJesse Brandeburg ret = -EINVAL;
45245c3c48acSJesse Brandeburg goto error_pvid;
45255c3c48acSJesse Brandeburg }
45265c3c48acSJesse Brandeburg
452779aab093SMoshe Shemesh if (vlan_proto != htons(ETH_P_8021Q)) {
452879aab093SMoshe Shemesh dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
452979aab093SMoshe Shemesh ret = -EPROTONOSUPPORT;
453079aab093SMoshe Shemesh goto error_pvid;
453179aab093SMoshe Shemesh }
453279aab093SMoshe Shemesh
4533ed277c50SHarshitha Ramamurthy vf = &pf->vf[vf_id];
4534efb6f4a3SIvan Vecera if (!i40e_check_vf_init_timeout(vf)) {
45352d166c30SMitch Williams ret = -EAGAIN;
45365c3c48acSJesse Brandeburg goto error_pvid;
45375c3c48acSJesse Brandeburg }
4538efb6f4a3SIvan Vecera vsi = pf->vsi[vf->lan_vsi_idx];
45395c3c48acSJesse Brandeburg
4540f7fc2f2eSMitch Williams if (le16_to_cpu(vsi->info.pvid) == vlanprio)
454185927ec1SMitch Williams /* duplicate request, so just return success */
454285927ec1SMitch Williams goto error_pvid;
454385927ec1SMitch Williams
4544c87c938fSMateusz Palczewski i40e_vlan_stripping_enable(vsi);
4545d0d362ffSIvan Vecera
4546ba4e003dSJacob Keller /* Locked once because multiple functions below iterate list */
4547ba4e003dSJacob Keller spin_lock_bh(&vsi->mac_filter_hash_lock);
4548ba4e003dSJacob Keller
45498d82a7c5SGreg Rose /* Check for condition where there was already a port VLAN ID
45508d82a7c5SGreg Rose * filter set and now it is being deleted by setting it to zero.
45511315f7c3SGreg Rose * Additionally check for the condition where there was a port
45521315f7c3SGreg Rose * VLAN but now there is a new and different port VLAN being set.
45538d82a7c5SGreg Rose * Before deleting all the old VLAN filters we must add new ones
45548d82a7c5SGreg Rose * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
45558d82a7c5SGreg Rose * MAC addresses deleted.
45568d82a7c5SGreg Rose */
45571315f7c3SGreg Rose if ((!(vlan_id || qos) ||
4558f7fc2f2eSMitch Williams vlanprio != le16_to_cpu(vsi->info.pvid)) &&
45599af52f60SJacob Keller vsi->info.pvid) {
45609af52f60SJacob Keller ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
45619af52f60SJacob Keller if (ret) {
45629af52f60SJacob Keller dev_info(&vsi->back->pdev->dev,
45639af52f60SJacob Keller "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
45649af52f60SJacob Keller vsi->back->hw.aq.asq_last_status);
45659af52f60SJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
45669af52f60SJacob Keller goto error_pvid;
45679af52f60SJacob Keller }
45689af52f60SJacob Keller }
45698d82a7c5SGreg Rose
45705c3c48acSJesse Brandeburg if (vsi->info.pvid) {
45719af52f60SJacob Keller /* remove all filters on the old VLAN */
45729af52f60SJacob Keller i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
45735c3c48acSJesse Brandeburg VLAN_VID_MASK));
45745c3c48acSJesse Brandeburg }
45759af52f60SJacob Keller
4576640f93ccSJia-Ju Bai spin_unlock_bh(&vsi->mac_filter_hash_lock);
4577937f599aSGrzegorz Siwik
4578937f599aSGrzegorz Siwik /* disable promisc modes in case they were enabled */
4579937f599aSGrzegorz Siwik ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4580937f599aSGrzegorz Siwik allmulti, alluni);
4581937f599aSGrzegorz Siwik if (ret) {
4582937f599aSGrzegorz Siwik dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4583937f599aSGrzegorz Siwik goto error_pvid;
4584937f599aSGrzegorz Siwik }
4585937f599aSGrzegorz Siwik
45865c3c48acSJesse Brandeburg if (vlan_id || qos)
4587f7fc2f2eSMitch Williams ret = i40e_vsi_add_pvid(vsi, vlanprio);
45885c3c48acSJesse Brandeburg else
45896c12fcbfSGreg Rose i40e_vsi_remove_pvid(vsi);
4590640f93ccSJia-Ju Bai spin_lock_bh(&vsi->mac_filter_hash_lock);
45915c3c48acSJesse Brandeburg
45925c3c48acSJesse Brandeburg if (vlan_id) {
45935c3c48acSJesse Brandeburg dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
45945c3c48acSJesse Brandeburg vlan_id, qos, vf_id);
45955c3c48acSJesse Brandeburg
45969af52f60SJacob Keller /* add new VLAN filter for each MAC */
45979af52f60SJacob Keller ret = i40e_add_vlan_all_mac(vsi, vlan_id);
45985c3c48acSJesse Brandeburg if (ret) {
45995c3c48acSJesse Brandeburg dev_info(&vsi->back->pdev->dev,
46005c3c48acSJesse Brandeburg "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
46015c3c48acSJesse Brandeburg vsi->back->hw.aq.asq_last_status);
46029af52f60SJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
46035c3c48acSJesse Brandeburg goto error_pvid;
46045c3c48acSJesse Brandeburg }
46059af52f60SJacob Keller
46069af52f60SJacob Keller /* remove the previously added non-VLAN MAC filters */
46079af52f60SJacob Keller i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
46085c3c48acSJesse Brandeburg }
46095c3c48acSJesse Brandeburg
46109af52f60SJacob Keller spin_unlock_bh(&vsi->mac_filter_hash_lock);
46119af52f60SJacob Keller
4612937f599aSGrzegorz Siwik if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4613937f599aSGrzegorz Siwik alluni = true;
4614937f599aSGrzegorz Siwik
4615937f599aSGrzegorz Siwik if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4616937f599aSGrzegorz Siwik allmulti = true;
4617937f599aSGrzegorz Siwik
46189af52f60SJacob Keller /* Schedule the worker thread to take care of applying changes */
46199af52f60SJacob Keller i40e_service_event_schedule(vsi->back);
46209af52f60SJacob Keller
46215c3c48acSJesse Brandeburg if (ret) {
46225c3c48acSJesse Brandeburg dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
46235c3c48acSJesse Brandeburg goto error_pvid;
46245c3c48acSJesse Brandeburg }
46259af52f60SJacob Keller
46266c12fcbfSGreg Rose /* The Port VLAN needs to be saved across resets the same as the
46276c12fcbfSGreg Rose * default LAN MAC address.
46286c12fcbfSGreg Rose */
46296c12fcbfSGreg Rose vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4630937f599aSGrzegorz Siwik
4631d0d362ffSIvan Vecera i40e_vc_reset_vf(vf, true);
4632d0d362ffSIvan Vecera /* During reset the VF got a new VSI, so refresh a pointer. */
4633d0d362ffSIvan Vecera vsi = pf->vsi[vf->lan_vsi_idx];
4634d0d362ffSIvan Vecera
4635937f599aSGrzegorz Siwik ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4636937f599aSGrzegorz Siwik if (ret) {
4637937f599aSGrzegorz Siwik dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4638937f599aSGrzegorz Siwik goto error_pvid;
4639937f599aSGrzegorz Siwik }
4640937f599aSGrzegorz Siwik
46415c3c48acSJesse Brandeburg ret = 0;
46425c3c48acSJesse Brandeburg
46435c3c48acSJesse Brandeburg error_pvid:
4644f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46455c3c48acSJesse Brandeburg return ret;
46465c3c48acSJesse Brandeburg }
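/* Typical userspace trigger for this hook (illustrative values):
 *   ip link set dev enp1s0f0 vf 0 vlan 100 qos 3
 * which programs port VLAN 100 with priority 3 on VF 0.
 */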
46475c3c48acSJesse Brandeburg
46485c3c48acSJesse Brandeburg /**
46495c3c48acSJesse Brandeburg * i40e_ndo_set_vf_bw
46505c3c48acSJesse Brandeburg * @netdev: network interface device structure
4651b40c82e6SJeff Kirsher * @vf_id: VF identifier
4652f5254429SJacob Keller * @min_tx_rate: Minimum Tx rate
4653f5254429SJacob Keller * @max_tx_rate: Maximum Tx rate
46545c3c48acSJesse Brandeburg *
4655b40c82e6SJeff Kirsher * configure VF Tx rate
46565c3c48acSJesse Brandeburg **/
4657ed616689SSucheta Chakraborty int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4658ed616689SSucheta Chakraborty int max_tx_rate)
46595c3c48acSJesse Brandeburg {
46606b192891SMitch Williams struct i40e_netdev_priv *np = netdev_priv(netdev);
46616b192891SMitch Williams struct i40e_pf *pf = np->vsi->back;
46626b192891SMitch Williams struct i40e_vsi *vsi;
46636b192891SMitch Williams struct i40e_vf *vf;
46646b192891SMitch Williams int ret = 0;
46656b192891SMitch Williams
4666f5a7b21bSJan Sokolowski if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4667f5a7b21bSJan Sokolowski dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4668f5a7b21bSJan Sokolowski return -EAGAIN;
4669f5a7b21bSJan Sokolowski }
4670f5a7b21bSJan Sokolowski
46716b192891SMitch Williams /* validate the request */
4672ed277c50SHarshitha Ramamurthy ret = i40e_validate_vf(pf, vf_id);
4673ed277c50SHarshitha Ramamurthy if (ret)
46746b192891SMitch Williams goto error;
46756b192891SMitch Williams
4676ed616689SSucheta Chakraborty if (min_tx_rate) {
4677b40c82e6SJeff Kirsher dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4678ed616689SSucheta Chakraborty min_tx_rate, vf_id);
46798ad2e298SStefan Assmann ret = -EINVAL;
46808ad2e298SStefan Assmann goto error;
4681ed616689SSucheta Chakraborty }
4682ed616689SSucheta Chakraborty
4683ed277c50SHarshitha Ramamurthy vf = &pf->vf[vf_id];
4684efb6f4a3SIvan Vecera if (!i40e_check_vf_init_timeout(vf)) {
46852d166c30SMitch Williams ret = -EAGAIN;
46866b192891SMitch Williams goto error;
46876b192891SMitch Williams }
4688efb6f4a3SIvan Vecera vsi = pf->vsi[vf->lan_vsi_idx];
46896b192891SMitch Williams
46905ecae412SAmritha Nambiar ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
46915ecae412SAmritha Nambiar if (ret)
46926b192891SMitch Williams goto error;
46936b192891SMitch Williams
4694ed616689SSucheta Chakraborty vf->tx_rate = max_tx_rate;
46956b192891SMitch Williams error:
4696f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
46976b192891SMitch Williams return ret;
46985c3c48acSJesse Brandeburg }
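/* Typical userspace trigger (illustrative values); note that only
 * max_tx_rate is honoured here, a non-zero min_tx_rate is rejected above:
 *   ip link set dev enp1s0f0 vf 0 max_tx_rate 1000
 */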
46995c3c48acSJesse Brandeburg
47005c3c48acSJesse Brandeburg /**
47015c3c48acSJesse Brandeburg * i40e_ndo_get_vf_config
47025c3c48acSJesse Brandeburg * @netdev: network interface device structure
4703b40c82e6SJeff Kirsher * @vf_id: VF identifier
4704b40c82e6SJeff Kirsher * @ivi: VF configuration structure
47055c3c48acSJesse Brandeburg *
4706b40c82e6SJeff Kirsher * return VF configuration
47075c3c48acSJesse Brandeburg **/
47085c3c48acSJesse Brandeburg int i40e_ndo_get_vf_config(struct net_device *netdev,
47095c3c48acSJesse Brandeburg int vf_id, struct ifla_vf_info *ivi)
47105c3c48acSJesse Brandeburg {
47115c3c48acSJesse Brandeburg struct i40e_netdev_priv *np = netdev_priv(netdev);
47125c3c48acSJesse Brandeburg struct i40e_vsi *vsi = np->vsi;
47135c3c48acSJesse Brandeburg struct i40e_pf *pf = vsi->back;
47145c3c48acSJesse Brandeburg struct i40e_vf *vf;
47155c3c48acSJesse Brandeburg int ret = 0;
47165c3c48acSJesse Brandeburg
4717f5a7b21bSJan Sokolowski if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4718f5a7b21bSJan Sokolowski dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4719f5a7b21bSJan Sokolowski return -EAGAIN;
4720f5a7b21bSJan Sokolowski }
4721f5a7b21bSJan Sokolowski
47225c3c48acSJesse Brandeburg /* validate the request */
4723ed277c50SHarshitha Ramamurthy ret = i40e_validate_vf(pf, vf_id);
4724ed277c50SHarshitha Ramamurthy if (ret)
47255c3c48acSJesse Brandeburg goto error_param;
47265c3c48acSJesse Brandeburg
4727ed277c50SHarshitha Ramamurthy vf = &pf->vf[vf_id];
47285c3c48acSJesse Brandeburg /* first vsi is always the LAN vsi */
4729fdf0e0bfSAnjali Singhai Jain vsi = pf->vsi[vf->lan_vsi_idx];
4730745b32c1SLihong Yang if (!vsi) {
4731745b32c1SLihong Yang ret = -ENOENT;
47325c3c48acSJesse Brandeburg goto error_param;
47335c3c48acSJesse Brandeburg }
47345c3c48acSJesse Brandeburg
47355c3c48acSJesse Brandeburg ivi->vf = vf_id;
47365c3c48acSJesse Brandeburg
47376995b36cSJesse Brandeburg ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
47385c3c48acSJesse Brandeburg
4739ed616689SSucheta Chakraborty ivi->max_tx_rate = vf->tx_rate;
4740ed616689SSucheta Chakraborty ivi->min_tx_rate = 0;
47415c3c48acSJesse Brandeburg ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
47425c3c48acSJesse Brandeburg ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
47435c3c48acSJesse Brandeburg I40E_VLAN_PRIORITY_SHIFT;
474484ca55a0SMitch Williams 	if (!vf->link_forced)
474584ca55a0SMitch Williams 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
474684ca55a0SMitch Williams 	else if (vf->link_up)
474784ca55a0SMitch Williams ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
474884ca55a0SMitch Williams else
474984ca55a0SMitch Williams ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4750c674d125SMitch Williams ivi->spoofchk = vf->spoofchk;
4751d40062f3SSridhar Samudrala ivi->trusted = vf->trusted;
47525c3c48acSJesse Brandeburg ret = 0;
47535c3c48acSJesse Brandeburg
47545c3c48acSJesse Brandeburg error_param:
4755f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
47565c3c48acSJesse Brandeburg return ret;
47575c3c48acSJesse Brandeburg }
4758588aefa0SMitch Williams
4759588aefa0SMitch Williams /**
4760588aefa0SMitch Williams * i40e_ndo_set_vf_link_state
4761588aefa0SMitch Williams * @netdev: network interface device structure
4762b40c82e6SJeff Kirsher * @vf_id: VF identifier
4763588aefa0SMitch Williams * @link: required link state
4764588aefa0SMitch Williams *
4765588aefa0SMitch Williams * Set the link state of a specified VF, regardless of physical link state
4766588aefa0SMitch Williams **/
4767588aefa0SMitch Williams int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4768588aefa0SMitch Williams {
4769588aefa0SMitch Williams struct i40e_netdev_priv *np = netdev_priv(netdev);
4770588aefa0SMitch Williams struct i40e_pf *pf = np->vsi->back;
47716ec12e1eSStefan Assmann struct i40e_link_status *ls = &pf->hw.phy.link_info;
4772310a2ad9SJesse Brandeburg struct virtchnl_pf_event pfe;
4773588aefa0SMitch Williams struct i40e_hw *hw = &pf->hw;
4774d973bf8eSAndrii Staikov struct i40e_vsi *vsi;
4775d973bf8eSAndrii Staikov unsigned long q_map;
4776588aefa0SMitch Williams struct i40e_vf *vf;
4777f19efbb5SAshish Shah int abs_vf_id;
4778588aefa0SMitch Williams int ret = 0;
4779d973bf8eSAndrii Staikov int tmp;
4780588aefa0SMitch Williams
4781f5a7b21bSJan Sokolowski if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4782f5a7b21bSJan Sokolowski dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4783f5a7b21bSJan Sokolowski return -EAGAIN;
4784f5a7b21bSJan Sokolowski }
4785f5a7b21bSJan Sokolowski
4786588aefa0SMitch Williams /* validate the request */
4787588aefa0SMitch Williams if (vf_id >= pf->num_alloc_vfs) {
4788588aefa0SMitch Williams dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4789588aefa0SMitch Williams ret = -EINVAL;
4790588aefa0SMitch Williams goto error_out;
4791588aefa0SMitch Williams }
4792588aefa0SMitch Williams
4793588aefa0SMitch Williams vf = &pf->vf[vf_id];
4794f19efbb5SAshish Shah abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4795588aefa0SMitch Williams
4796310a2ad9SJesse Brandeburg pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4797ff3f4cc2SJesse Brandeburg pfe.severity = PF_EVENT_SEVERITY_INFO;
4798588aefa0SMitch Williams
4799588aefa0SMitch Williams switch (link) {
4800588aefa0SMitch Williams case IFLA_VF_LINK_STATE_AUTO:
4801588aefa0SMitch Williams vf->link_forced = false;
4802d973bf8eSAndrii Staikov vf->is_disabled_from_host = false;
4803d973bf8eSAndrii Staikov /* reset needed to reinit VF resources */
4804d973bf8eSAndrii Staikov i40e_vc_reset_vf(vf, true);
48056d2c322cSAleksandr Loktionov i40e_set_vf_link_state(vf, &pfe, ls);
4806588aefa0SMitch Williams break;
4807588aefa0SMitch Williams case IFLA_VF_LINK_STATE_ENABLE:
4808588aefa0SMitch Williams vf->link_forced = true;
4809588aefa0SMitch Williams vf->link_up = true;
4810d973bf8eSAndrii Staikov vf->is_disabled_from_host = false;
4811d973bf8eSAndrii Staikov /* reset needed to reinit VF resources */
4812d973bf8eSAndrii Staikov i40e_vc_reset_vf(vf, true);
48136d2c322cSAleksandr Loktionov i40e_set_vf_link_state(vf, &pfe, ls);
4814588aefa0SMitch Williams break;
4815588aefa0SMitch Williams case IFLA_VF_LINK_STATE_DISABLE:
4816588aefa0SMitch Williams vf->link_forced = true;
4817588aefa0SMitch Williams vf->link_up = false;
48186d2c322cSAleksandr Loktionov i40e_set_vf_link_state(vf, &pfe, ls);
4819d973bf8eSAndrii Staikov
4820d973bf8eSAndrii Staikov vsi = pf->vsi[vf->lan_vsi_idx];
4821d973bf8eSAndrii Staikov q_map = BIT(vsi->num_queue_pairs) - 1;
4822d973bf8eSAndrii Staikov
4823d973bf8eSAndrii Staikov vf->is_disabled_from_host = true;
4824d973bf8eSAndrii Staikov
4825d973bf8eSAndrii Staikov /* Try to stop both Tx&Rx rings even if one of the calls fails
4826d973bf8eSAndrii Staikov * to ensure we stop the rings even in case of errors.
4827d973bf8eSAndrii Staikov * If any of them returns with an error then the first
4828d973bf8eSAndrii Staikov * error that occurred will be returned.
4829d973bf8eSAndrii Staikov */
4830d973bf8eSAndrii Staikov tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
4831d973bf8eSAndrii Staikov ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
4832d973bf8eSAndrii Staikov
4833d973bf8eSAndrii Staikov ret = tmp ? tmp : ret;
4834588aefa0SMitch Williams break;
4835588aefa0SMitch Williams default:
4836588aefa0SMitch Williams ret = -EINVAL;
4837588aefa0SMitch Williams goto error_out;
4838588aefa0SMitch Williams }
4839588aefa0SMitch Williams /* Notify the VF of its new link state */
4840310a2ad9SJesse Brandeburg i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4841588aefa0SMitch Williams 0, (u8 *)&pfe, sizeof(pfe), NULL);
4842588aefa0SMitch Williams
4843588aefa0SMitch Williams error_out:
4844f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4845588aefa0SMitch Williams return ret;
4846588aefa0SMitch Williams }
4847c674d125SMitch Williams
4848c674d125SMitch Williams /**
4849c674d125SMitch Williams * i40e_ndo_set_vf_spoofchk
4850c674d125SMitch Williams * @netdev: network interface device structure
4851b40c82e6SJeff Kirsher * @vf_id: VF identifier
4852c674d125SMitch Williams * @enable: flag to enable or disable feature
4853c674d125SMitch Williams *
4854c674d125SMitch Williams * Enable or disable VF spoof checking
4855c674d125SMitch Williams **/
4856e6d9004dSSerey Kong int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4857c674d125SMitch Williams {
4858c674d125SMitch Williams struct i40e_netdev_priv *np = netdev_priv(netdev);
4859c674d125SMitch Williams struct i40e_vsi *vsi = np->vsi;
4860c674d125SMitch Williams struct i40e_pf *pf = vsi->back;
4861c674d125SMitch Williams struct i40e_vsi_context ctxt;
4862c674d125SMitch Williams struct i40e_hw *hw = &pf->hw;
4863c674d125SMitch Williams struct i40e_vf *vf;
4864c674d125SMitch Williams int ret = 0;
4865c674d125SMitch Williams
4866f5a7b21bSJan Sokolowski if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4867f5a7b21bSJan Sokolowski dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4868f5a7b21bSJan Sokolowski return -EAGAIN;
4869f5a7b21bSJan Sokolowski }
4870f5a7b21bSJan Sokolowski
4871c674d125SMitch Williams /* validate the request */
4872c674d125SMitch Williams if (vf_id >= pf->num_alloc_vfs) {
4873c674d125SMitch Williams dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4874c674d125SMitch Williams ret = -EINVAL;
4875c674d125SMitch Williams goto out;
4876c674d125SMitch Williams }
4877c674d125SMitch Williams
4878c674d125SMitch Williams vf = &(pf->vf[vf_id]);
4879efb6f4a3SIvan Vecera if (!i40e_check_vf_init_timeout(vf)) {
48802d166c30SMitch Williams ret = -EAGAIN;
48812d166c30SMitch Williams goto out;
48822d166c30SMitch Williams }
4883c674d125SMitch Williams
4884c674d125SMitch Williams if (enable == vf->spoofchk)
4885c674d125SMitch Williams goto out;
4886c674d125SMitch Williams
4887c674d125SMitch Williams vf->spoofchk = enable;
4888c674d125SMitch Williams memset(&ctxt, 0, sizeof(ctxt));
4889fdf0e0bfSAnjali Singhai Jain ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4890c674d125SMitch Williams ctxt.pf_num = pf->hw.pf_id;
4891c674d125SMitch Williams ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4892c674d125SMitch Williams if (enable)
489330d71af5SGreg Rose ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
489430d71af5SGreg Rose I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4895c674d125SMitch Williams ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4896c674d125SMitch Williams if (ret) {
4897c674d125SMitch Williams dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4898c674d125SMitch Williams ret);
4899c674d125SMitch Williams ret = -EIO;
4900c674d125SMitch Williams }
4901c674d125SMitch Williams out:
4902f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4903c674d125SMitch Williams return ret;
4904c674d125SMitch Williams }
4905c3bbbd20SAnjali Singhai Jain
4906c3bbbd20SAnjali Singhai Jain /**
4907c3bbbd20SAnjali Singhai Jain * i40e_ndo_set_vf_trust
4908c3bbbd20SAnjali Singhai Jain * @netdev: network interface device structure of the pf
4909c3bbbd20SAnjali Singhai Jain * @vf_id: VF identifier
4910c3bbbd20SAnjali Singhai Jain * @setting: trust setting
4911c3bbbd20SAnjali Singhai Jain *
4912c3bbbd20SAnjali Singhai Jain * Enable or disable VF trust setting
4913c3bbbd20SAnjali Singhai Jain **/
4914c3bbbd20SAnjali Singhai Jain int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4915c3bbbd20SAnjali Singhai Jain {
4916c3bbbd20SAnjali Singhai Jain struct i40e_netdev_priv *np = netdev_priv(netdev);
4917c3bbbd20SAnjali Singhai Jain struct i40e_pf *pf = np->vsi->back;
4918c3bbbd20SAnjali Singhai Jain struct i40e_vf *vf;
4919c3bbbd20SAnjali Singhai Jain int ret = 0;
4920c3bbbd20SAnjali Singhai Jain
4921f5a7b21bSJan Sokolowski if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4922f5a7b21bSJan Sokolowski dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4923f5a7b21bSJan Sokolowski return -EAGAIN;
4924f5a7b21bSJan Sokolowski }
4925f5a7b21bSJan Sokolowski
4926c3bbbd20SAnjali Singhai Jain /* validate the request */
4927c3bbbd20SAnjali Singhai Jain if (vf_id >= pf->num_alloc_vfs) {
4928c3bbbd20SAnjali Singhai Jain dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4929f5a7b21bSJan Sokolowski ret = -EINVAL;
4930f5a7b21bSJan Sokolowski goto out;
4931c3bbbd20SAnjali Singhai Jain }
4932c3bbbd20SAnjali Singhai Jain
4933c3bbbd20SAnjali Singhai Jain if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4934c3bbbd20SAnjali Singhai Jain dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4935f5a7b21bSJan Sokolowski ret = -EINVAL;
4936f5a7b21bSJan Sokolowski goto out;
4937c3bbbd20SAnjali Singhai Jain }
4938c3bbbd20SAnjali Singhai Jain
4939c3bbbd20SAnjali Singhai Jain vf = &pf->vf[vf_id];
4940c3bbbd20SAnjali Singhai Jain
4941c3bbbd20SAnjali Singhai Jain if (setting == vf->trusted)
4942c3bbbd20SAnjali Singhai Jain goto out;
4943c3bbbd20SAnjali Singhai Jain
4944c3bbbd20SAnjali Singhai Jain vf->trusted = setting;
4945c87c938fSMateusz Palczewski
4946c87c938fSMateusz Palczewski /* request PF to sync mac/vlan filters for the VF */
4947c87c938fSMateusz Palczewski set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4948c87c938fSMateusz Palczewski pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4949c87c938fSMateusz Palczewski
49503a3b311eSKaren Sornek i40e_vc_reset_vf(vf, true);
4951c3bbbd20SAnjali Singhai Jain dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4952c3bbbd20SAnjali Singhai Jain vf_id, setting ? "" : "un");
4953e284fc28SAvinash Dayanand
4954e284fc28SAvinash Dayanand if (vf->adq_enabled) {
4955e284fc28SAvinash Dayanand if (!vf->trusted) {
4956e284fc28SAvinash Dayanand dev_info(&pf->pdev->dev,
4957e284fc28SAvinash Dayanand "VF %u no longer Trusted, deleting all cloud filters\n",
4958e284fc28SAvinash Dayanand vf_id);
4959e284fc28SAvinash Dayanand i40e_del_all_cloud_filters(vf);
4960e284fc28SAvinash Dayanand }
4961e284fc28SAvinash Dayanand }
4962e284fc28SAvinash Dayanand
4963c3bbbd20SAnjali Singhai Jain out:
4964f5a7b21bSJan Sokolowski clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4965c3bbbd20SAnjali Singhai Jain return ret;
4966c3bbbd20SAnjali Singhai Jain }
4967dc645daeSJesse Brandeburg
4968dc645daeSJesse Brandeburg /**
4969dc645daeSJesse Brandeburg * i40e_get_vf_stats - populate some stats for the VF
4970dc645daeSJesse Brandeburg * @netdev: the netdev of the PF
4971dc645daeSJesse Brandeburg * @vf_id: the host OS identifier (0-127)
4972dc645daeSJesse Brandeburg * @vf_stats: pointer to the OS memory to be initialized
4973dc645daeSJesse Brandeburg */
4974dc645daeSJesse Brandeburg int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4975dc645daeSJesse Brandeburg struct ifla_vf_stats *vf_stats)
4976dc645daeSJesse Brandeburg {
4977dc645daeSJesse Brandeburg struct i40e_netdev_priv *np = netdev_priv(netdev);
4978dc645daeSJesse Brandeburg struct i40e_pf *pf = np->vsi->back;
4979dc645daeSJesse Brandeburg struct i40e_eth_stats *stats;
4980dc645daeSJesse Brandeburg struct i40e_vsi *vsi;
4981dc645daeSJesse Brandeburg struct i40e_vf *vf;
4982dc645daeSJesse Brandeburg
4983dc645daeSJesse Brandeburg /* validate the request */
4984dc645daeSJesse Brandeburg if (i40e_validate_vf(pf, vf_id))
4985dc645daeSJesse Brandeburg return -EINVAL;
4986dc645daeSJesse Brandeburg
4987dc645daeSJesse Brandeburg vf = &pf->vf[vf_id];
4988dc645daeSJesse Brandeburg if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4989dc645daeSJesse Brandeburg dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4990dc645daeSJesse Brandeburg return -EBUSY;
4991dc645daeSJesse Brandeburg }
4992dc645daeSJesse Brandeburg
4993dc645daeSJesse Brandeburg vsi = pf->vsi[vf->lan_vsi_idx];
4994dc645daeSJesse Brandeburg if (!vsi)
4995dc645daeSJesse Brandeburg return -EINVAL;
4996dc645daeSJesse Brandeburg
4997dc645daeSJesse Brandeburg i40e_update_eth_stats(vsi);
4998dc645daeSJesse Brandeburg stats = &vsi->eth_stats;
4999dc645daeSJesse Brandeburg
5000dc645daeSJesse Brandeburg memset(vf_stats, 0, sizeof(*vf_stats));
5001dc645daeSJesse Brandeburg
5002dc645daeSJesse Brandeburg vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
5003dc645daeSJesse Brandeburg stats->rx_multicast;
5004dc645daeSJesse Brandeburg vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
5005dc645daeSJesse Brandeburg stats->tx_multicast;
5006dc645daeSJesse Brandeburg vf_stats->rx_bytes = stats->rx_bytes;
5007dc645daeSJesse Brandeburg vf_stats->tx_bytes = stats->tx_bytes;
5008dc645daeSJesse Brandeburg vf_stats->broadcast = stats->rx_broadcast;
5009dc645daeSJesse Brandeburg vf_stats->multicast = stats->rx_multicast;
5010dc645daeSJesse Brandeburg vf_stats->rx_dropped = stats->rx_discards;
5011dc645daeSJesse Brandeburg vf_stats->tx_dropped = stats->tx_discards;
5012dc645daeSJesse Brandeburg
5013dc645daeSJesse Brandeburg return 0;
5014dc645daeSJesse Brandeburg }
5015