// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

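/* Catch-all handler for VF mailbox messages with an unrecognized TLV ID:
 * log the offending ID and VF index, then defer to the generic TLV error
 * handler.
 */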
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

/**
 * fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a custom handler for MAC/VLAN requests from the VF. The
 * assumption is that it is acceptable to directly hand off the message from
 * the VF to the PF's switch manager. However, we use a MAC/VLAN message
 * queue to avoid overloading the mailbox when a large number of requests
 * come in.
 **/
static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
					struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* if the length field has been set, this is a multi-bit
		 * update request. For multi-bit requests, simply disallow
		 * them when the pf_vid has been set. In this case, the PF
		 * should have already cleared the VLAN_TABLE, and if we
		 * allowed them, it could allow a rogue VF to receive traffic
		 * on a VLAN it was not assigned. In the single-bit case, we
		 * need to modify requests for VLAN 0 to use the default PF or
		 * SW vid when assigned.
		 */

		if (vid >> 16) {
			/* prevent multi-bit requests when PF has
			 * administratively set the VLAN for this VF
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

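			/* a non-negative return from fm10k_iov_select_vid()
			 * is the VLAN ID to use; requests for VLAN 0 are
			 * redirected to the default PF or SW VID
			 */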
			vid = err;
		}

		/* update VSI info for VF with regard to the VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	return err;
}

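/* PF-side handler table registered with each VF mailbox; message IDs without
 * a matching entry fall through to fm10k_iov_msg_error().
 */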
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
	vflre <<= 32;
	vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

	i = iov_data->num_vfs;

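	/* Walk VFs from the highest index down: the shift below places the
	 * VFLRE bit for VF (i - 1) in the sign position, and doubling vflre
	 * each pass shifts the next lower VF's bit into place. A negative
	 * value therefore means the current VF's reset event bit is set.
	 */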
	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		if (vflre >= 0)
			continue;

		hw->iov.ops.reset_resources(hw, vf_info);
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}

s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
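	/* iterate downward, starting from next_vf_mbx when it is non-zero
	 * and from num_vfs otherwise (via the GNU "?:" shorthand)
	 */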
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
			hw->iov.ops.reset_lport(hw, vf_info);
			fm10k_clear_macvlan_queue(interface, glort, false);
		}

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (hw->mbx.state == FM10K_STATE_OPEN &&
		    !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
		fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
	}
}

static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_mask;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Mask the completion abort bit in the ERR_UNCOR_MASK register,
	 * preventing the device from reporting these errors to the upstream
	 * PCIe root device. This avoids bringing down platforms which upgrade
	 * non-fatal completer aborts into machine check exceptions. Completer
	 * aborts can occur whenever a VF reads a queue it doesn't own.
	 */
	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
	err_mask |= PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
}

int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* Lower severity of completer abort error reporting as
	 * the VFs can trigger this any time they read a queue
	 * that they don't own.
	 */
	fm10k_mask_aer_comp_abort(pdev);

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
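	/* rss_l and vsi_l are bit widths (log2 of the queues per VF pool and
	 * of the total VF count); queue_b is the first queue owned by VF 0,
	 * and vsi_b is 1 since VSI 0 is reserved for the PF
	 */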
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
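	/* the VF index is this glort's offset from the PF's base GLORT
	 * recorded in dglort_map
	 */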
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
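	/* vf_info[] is a flexible array member, so size the allocation for
	 * the structure header plus num_vfs entries
	 */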
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		int err;

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

/**
 * fm10k_iov_update_stats - Update stats for all VFs
 * @interface: device private structure
 *
 * Updates the VF statistics for all enabled VFs. Expects to be called by
 * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
 * bit is already handled.
 */
void fm10k_iov_update_stats(struct fm10k_intfc *interface)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	if (!iov_data)
		return;

	for (i = 0; i < iov_data->num_vfs; i++)
		hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
}

static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	fm10k_clear_macvlan_queue(interface, vf_info->glort, false);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}

int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos, __be16 vlan_proto)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QOS is unsupported; accepted VLAN IDs are in the range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	/* only the default 802.1Q VLAN protocol is supported for VFs */
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
			int __always_unused min_rate, int max_rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mbps or greater than the link speed */
	if (max_rate &&
	    (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = max_rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, max_rate);

	return 0;
}

int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}

int fm10k_ndo_get_vf_stats(struct net_device *netdev,
			   int vf_idx, struct ifla_vf_stats *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_hw_stats_q *hw_stats;
	u32 idx, qpp;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	qpp = fm10k_queues_per_pool(hw);
	hw_stats = iov_data->vf_info[vf_idx].stats;

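	/* each VF owns qpp hardware queues; sum the per-queue counters to
	 * produce the aggregate VF statistics
	 */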
	for (idx = 0; idx < qpp; idx++) {
		stats->rx_packets += hw_stats[idx].rx_packets.count;
		stats->tx_packets += hw_stats[idx].tx_packets.count;
		stats->rx_bytes += hw_stats[idx].rx_bytes.count;
		stats->tx_bytes += hw_stats[idx].tx_bytes.count;
		stats->rx_dropped += hw_stats[idx].rx_drops.count;
	}

	return 0;
}