xref: /openbmc/linux/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c (revision 278002edb19bce2c628fafb0af936e77000f3a5b)
151dce24bSJeff Kirsher // SPDX-License-Identifier: GPL-2.0
251dce24bSJeff Kirsher /* Copyright(c) 1999 - 2018 Intel Corporation. */
3dee1ad47SJeff Kirsher 
4dee1ad47SJeff Kirsher #include <linux/types.h>
5dee1ad47SJeff Kirsher #include <linux/module.h>
6dee1ad47SJeff Kirsher #include <linux/pci.h>
7dee1ad47SJeff Kirsher #include <linux/netdevice.h>
8dee1ad47SJeff Kirsher #include <linux/vmalloc.h>
9dee1ad47SJeff Kirsher #include <linux/string.h>
10dee1ad47SJeff Kirsher #include <linux/in.h>
11dee1ad47SJeff Kirsher #include <linux/ip.h>
12dee1ad47SJeff Kirsher #include <linux/tcp.h>
13dee1ad47SJeff Kirsher #include <linux/ipv6.h>
14aa2bacb6SDon Skidmore #include <linux/if_bridge.h>
15f646968fSPatrick McHardy #ifdef NETIF_F_HW_VLAN_CTAG_TX
16dee1ad47SJeff Kirsher #include <linux/if_vlan.h>
17dee1ad47SJeff Kirsher #endif
18dee1ad47SJeff Kirsher 
19dee1ad47SJeff Kirsher #include "ixgbe.h"
20c6bda30aSGreg Rose #include "ixgbe_type.h"
21dee1ad47SJeff Kirsher #include "ixgbe_sriov.h"
22dee1ad47SJeff Kirsher 
23c6bda30aSGreg Rose #ifdef CONFIG_PCI_IOV
ixgbe_alloc_vf_macvlans(struct ixgbe_adapter * adapter,unsigned int num_vfs)245c11f00dSEmil Tantilov static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
255c11f00dSEmil Tantilov 					   unsigned int num_vfs)
262bc09729SEmil Tantilov {
272bc09729SEmil Tantilov 	struct ixgbe_hw *hw = &adapter->hw;
282bc09729SEmil Tantilov 	struct vf_macvlans *mv_list;
292bc09729SEmil Tantilov 	int num_vf_macvlans, i;
302bc09729SEmil Tantilov 
317b5add9aSDan Carpenter 	/* Initialize list of VF macvlans */
327b5add9aSDan Carpenter 	INIT_LIST_HEAD(&adapter->vf_mvs.l);
337b5add9aSDan Carpenter 
342bc09729SEmil Tantilov 	num_vf_macvlans = hw->mac.num_rar_entries -
355c11f00dSEmil Tantilov 			  (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
362bc09729SEmil Tantilov 	if (!num_vf_macvlans)
372bc09729SEmil Tantilov 		return;
382bc09729SEmil Tantilov 
392bc09729SEmil Tantilov 	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
402bc09729SEmil Tantilov 			  GFP_KERNEL);
412bc09729SEmil Tantilov 	if (mv_list) {
422bc09729SEmil Tantilov 		for (i = 0; i < num_vf_macvlans; i++) {
432bc09729SEmil Tantilov 			mv_list[i].vf = -1;
442bc09729SEmil Tantilov 			mv_list[i].free = true;
452bc09729SEmil Tantilov 			list_add(&mv_list[i].l, &adapter->vf_mvs.l);
462bc09729SEmil Tantilov 		}
472bc09729SEmil Tantilov 		adapter->mv_list = mv_list;
482bc09729SEmil Tantilov 	}
492bc09729SEmil Tantilov }
502bc09729SEmil Tantilov 
/* __ixgbe_enable_sriov - allocate per-VF state and switch to SR-IOV mode
 * @adapter: board private structure
 * @num_vfs: number of VFs to provision
 *
 * Returns 0 on success, -EINVAL when XDP is loaded, or -ENOMEM when the
 * vfinfo array cannot be allocated.  adapter->num_vfs is only published
 * after the vfinfo allocation succeeds.
 */
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
				unsigned int num_vfs)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* SR-IOV and XDP are mutually exclusive in this driver */
	if (adapter->xdp_prog) {
		e_warn(probe, "SRIOV is not supported with XDP\n");
		return -EINVAL;
	}

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
			  IXGBE_FLAG_VMDQ_ENABLED;

	/* Allocate memory for per VF control structures */
	adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
				  GFP_KERNEL);
	if (!adapter->vfinfo)
		return -ENOMEM;

	adapter->num_vfs = num_vfs;

	ixgbe_alloc_vf_macvlans(adapter, num_vfs);
	/* VMDq pools above the VF range are left for PF use */
	adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;

	/* Initialize default switching mode VEB */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
	adapter->bridge_mode = BRIDGE_MODE_VEB;

	/* limit traffic classes based on VFs enabled */
	if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
	} else if (num_vfs < 32) {
		adapter->dcb_cfg.num_tcs.pg_tcs = 4;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
	} else {
		adapter->dcb_cfg.num_tcs.pg_tcs = 1;
		adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
	}

	/* Disable RSC when in SR-IOV mode */
	adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
			     IXGBE_FLAG2_RSC_ENABLED);

	/* apply conservative per-VF defaults */
	for (i = 0; i < num_vfs; i++) {
		/* enable spoof checking for all VFs */
		adapter->vfinfo[i].spoofchk_enabled = true;
		adapter->vfinfo[i].link_enable = true;

		/* We support VF RSS querying only for 82599 and x540
		 * devices at the moment. These devices share RSS
		 * indirection table and RSS hash key with PF therefore
		 * we want to disable the querying by default.
		 */
		adapter->vfinfo[i].rss_query_enabled = false;

		/* Untrust all VFs */
		adapter->vfinfo[i].trusted = false;

		/* set the default xcast mode */
		adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
	}

	e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
	return 0;
}
119c6bda30aSGreg Rose 
120988d1307SMark Rustad /**
121988d1307SMark Rustad  * ixgbe_get_vfs - Find and take references to all vf devices
122988d1307SMark Rustad  * @adapter: Pointer to adapter struct
123988d1307SMark Rustad  */
ixgbe_get_vfs(struct ixgbe_adapter * adapter)124988d1307SMark Rustad static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
125988d1307SMark Rustad {
126988d1307SMark Rustad 	struct pci_dev *pdev = adapter->pdev;
127988d1307SMark Rustad 	u16 vendor = pdev->vendor;
128988d1307SMark Rustad 	struct pci_dev *vfdev;
129988d1307SMark Rustad 	int vf = 0;
130988d1307SMark Rustad 	u16 vf_id;
131988d1307SMark Rustad 	int pos;
132988d1307SMark Rustad 
133988d1307SMark Rustad 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
134988d1307SMark Rustad 	if (!pos)
135988d1307SMark Rustad 		return;
136988d1307SMark Rustad 	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
137988d1307SMark Rustad 
138988d1307SMark Rustad 	vfdev = pci_get_device(vendor, vf_id, NULL);
139988d1307SMark Rustad 	for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
140988d1307SMark Rustad 		if (!vfdev->is_virtfn)
141988d1307SMark Rustad 			continue;
142988d1307SMark Rustad 		if (vfdev->physfn != pdev)
143988d1307SMark Rustad 			continue;
144988d1307SMark Rustad 		if (vf >= adapter->num_vfs)
145988d1307SMark Rustad 			continue;
146988d1307SMark Rustad 		pci_dev_get(vfdev);
147988d1307SMark Rustad 		adapter->vfinfo[vf].vfdev = vfdev;
148988d1307SMark Rustad 		++vf;
149988d1307SMark Rustad 	}
150988d1307SMark Rustad }
151988d1307SMark Rustad 
15266dcfd75SGreg Rose /* Note this function is called when the user wants to enable SR-IOV
15366dcfd75SGreg Rose  * VFs using the now deprecated module parameter
15466dcfd75SGreg Rose  */
ixgbe_enable_sriov(struct ixgbe_adapter * adapter,unsigned int max_vfs)1555c11f00dSEmil Tantilov void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
15666dcfd75SGreg Rose {
15766dcfd75SGreg Rose 	int pre_existing_vfs = 0;
1585c11f00dSEmil Tantilov 	unsigned int num_vfs;
15966dcfd75SGreg Rose 
16066dcfd75SGreg Rose 	pre_existing_vfs = pci_num_vf(adapter->pdev);
1615c11f00dSEmil Tantilov 	if (!pre_existing_vfs && !max_vfs)
16266dcfd75SGreg Rose 		return;
16366dcfd75SGreg Rose 
16466dcfd75SGreg Rose 	/* If there are pre-existing VFs then we have to force
16566dcfd75SGreg Rose 	 * use of that many - over ride any module parameter value.
16666dcfd75SGreg Rose 	 * This may result from the user unloading the PF driver
16766dcfd75SGreg Rose 	 * while VFs were assigned to guest VMs or because the VFs
16866dcfd75SGreg Rose 	 * have been created via the new PCI SR-IOV sysfs interface.
16966dcfd75SGreg Rose 	 */
17066dcfd75SGreg Rose 	if (pre_existing_vfs) {
1715c11f00dSEmil Tantilov 		num_vfs = pre_existing_vfs;
17266dcfd75SGreg Rose 		dev_warn(&adapter->pdev->dev,
17366dcfd75SGreg Rose 			 "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
17466dcfd75SGreg Rose 	} else {
17566dcfd75SGreg Rose 		int err;
17666dcfd75SGreg Rose 		/*
17766dcfd75SGreg Rose 		 * The 82599 supports up to 64 VFs per physical function
17866dcfd75SGreg Rose 		 * but this implementation limits allocation to 63 so that
17966dcfd75SGreg Rose 		 * basic networking resources are still available to the
180dbedd44eSJoe Perches 		 * physical function.  If the user requests greater than
18166dcfd75SGreg Rose 		 * 63 VFs then it is an error - reset to default of zero.
18266dcfd75SGreg Rose 		 */
1835c11f00dSEmil Tantilov 		num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
18466dcfd75SGreg Rose 
1855c11f00dSEmil Tantilov 		err = pci_enable_sriov(adapter->pdev, num_vfs);
18666dcfd75SGreg Rose 		if (err) {
18766dcfd75SGreg Rose 			e_err(probe, "Failed to enable PCI sriov: %d\n", err);
18866dcfd75SGreg Rose 			return;
18966dcfd75SGreg Rose 		}
19066dcfd75SGreg Rose 	}
19166dcfd75SGreg Rose 
1925c11f00dSEmil Tantilov 	if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
193988d1307SMark Rustad 		ixgbe_get_vfs(adapter);
19466dcfd75SGreg Rose 		return;
195988d1307SMark Rustad 	}
19666dcfd75SGreg Rose 
19766dcfd75SGreg Rose 	/* If we have gotten to this point then there is no memory available
19866dcfd75SGreg Rose 	 * to manage the VF devices - print message and bail.
19966dcfd75SGreg Rose 	 */
200c6bda30aSGreg Rose 	e_err(probe, "Unable to allocate memory for VF Data Storage - "
201c6bda30aSGreg Rose 	      "SRIOV disabled\n");
20299d74487SAlexander Duyck 	ixgbe_disable_sriov(adapter);
203c6bda30aSGreg Rose }
204c6bda30aSGreg Rose 
2059297127bSAlexander Duyck #endif /* #ifdef CONFIG_PCI_IOV */
/* ixgbe_disable_sriov - tear down SR-IOV state for the adapter
 * @adapter: board private structure
 *
 * Releases VF device references, frees per-VF data and the macvlan
 * list, and (when no VFs are assigned to guests) disables SR-IOV in
 * the PCI layer.  Returns 0 on success or -EPERM when VFs are still
 * assigned and the hardware must be left enabled-but-idle.
 */
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	unsigned int num_vfs = adapter->num_vfs, vf;
	unsigned long flags;
	int rss;

	/* publish num_vfs = 0 under vfs_lock so concurrent readers stop
	 * dereferencing vfinfo before we free it below
	 */
	spin_lock_irqsave(&adapter->vfs_lock, flags);
	/* set num VFs to 0 to prevent access to vfinfo */
	adapter->num_vfs = 0;
	spin_unlock_irqrestore(&adapter->vfs_lock, flags);

	/* put the reference to all of the vf devices */
	for (vf = 0; vf < num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;

		if (!vfdev)
			continue;
		adapter->vfinfo[vf].vfdev = NULL;
		pci_dev_put(vfdev);
	}

	/* free VF control structures */
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	/* free macvlan list */
	kfree(adapter->mv_list);
	adapter->mv_list = NULL;

	/* if SR-IOV is already disabled then there is nothing to do */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

#ifdef CONFIG_PCI_IOV
	/*
	 * If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return -EPERM;
	}
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* Disable VMDq flag so device will be set in VM mode */
	if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
		/* only the default pool remains - leave VMDq/SR-IOV mode */
		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
		rss = min_t(int, ixgbe_max_rss_indices(adapter),
			    num_online_cpus());
	} else {
		/* extra rx pools (macvlan offload) still in use */
		rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
	}

	adapter->ring_feature[RING_F_VMDQ].offset = 0;
	adapter->ring_feature[RING_F_RSS].limit = rss;

	/* take a breather then clean up driver data */
	msleep(100);
	return 0;
}
270da36b647SGreg Rose 
/* ixgbe_pci_sriov_enable - sysfs-driven SR-IOV enable path
 * @dev: PCI device for the PF
 * @num_vfs: number of VFs requested by the user
 *
 * Returns the number of VFs enabled on success or a negative errno.
 * When CONFIG_PCI_IOV is not set this is a stub returning 0.
 */
static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int pre_existing_vfs = pci_num_vf(dev);
	int err = 0, num_rx_pools, i, limit;
	u8 num_tc;

	/* a different VF count requires a full teardown first; the same
	 * count is a no-op
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		err = ixgbe_disable_sriov(adapter);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (err)
		return err;

	/* While the SR-IOV capability structure reports total VFs to be 64,
	 * we limit the actual number allocated as below based on two factors.
	 *    Num_TCs	MAX_VFs
	 *	1	  63
	 *	<=4	  31
	 *	>4	  15
	 * First, we reserve some transmit/receive resources for the PF.
	 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
	 * account for this, so that we don't accidentally allocate more VFs
	 * than we have available pools. The PCI bus driver already checks for
	 * other values out of range.
	 */
	num_tc = adapter->hw_tcs;
	num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
				     adapter->num_rx_pools);
	limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
		(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;

	if (num_vfs > (limit - num_rx_pools)) {
		e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
			  num_tc, num_rx_pools - 1, limit - num_rx_pools);
		return -EPERM;
	}

	err = __ixgbe_enable_sriov(adapter, num_vfs);
	if (err)
		return  err;

	/* notify each VF slot; the high bit appears to flag an "enable"
	 * event to ixgbe_vf_configuration() - confirm against callee
	 */
	for (i = 0; i < num_vfs; i++)
		ixgbe_vf_configuration(dev, (i | 0x10000000));

	/* reset before enabling SRIOV to avoid mailbox issues */
	ixgbe_sriov_reinit(adapter);

	err = pci_enable_sriov(dev, num_vfs);
	if (err) {
		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
		return err;
	}
	ixgbe_get_vfs(adapter);

	return num_vfs;
#else
	return 0;
#endif
}
333da36b647SGreg Rose 
/* ixgbe_pci_sriov_disable - sysfs-driven SR-IOV disable path
 * @dev: PCI device for the PF
 *
 * Tears down SR-IOV and reinitializes the driver only when the disable
 * succeeded and the adapter state actually changed.
 */
static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
	int err;
#ifdef CONFIG_PCI_IOV
	/* snapshot state so we can tell whether anything changed */
	u32 flags_before = adapter->flags;
	int vfs_before = pci_num_vf(dev);
#endif

	err = ixgbe_disable_sriov(adapter);

#ifdef CONFIG_PCI_IOV
	/* Only reinit if no error and state changed */
	if (!err) {
		bool changed = flags_before != adapter->flags ||
			       vfs_before != pci_num_vf(dev);

		if (changed)
			ixgbe_sriov_reinit(adapter);
	}
#endif

	return err;
}
354da36b647SGreg Rose 
/* ixgbe_pci_sriov_configure - entry point for the sriov_numvfs sysfs knob
 * @dev: PCI device for the PF
 * @num_vfs: requested VF count; 0 means tear SR-IOV down
 */
int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	return num_vfs ? ixgbe_pci_sriov_enable(dev, num_vfs)
		       : ixgbe_pci_sriov_disable(dev);
}
362c6bda30aSGreg Rose 
/* ixgbe_set_vf_multicasts - program a VF's multicast hash list
 * @adapter: board private structure
 * @msgbuf: mailbox message; word 0 carries the entry count, the u16
 *	    hash values start at word 1
 * @vf: VF index
 *
 * Saves the hashes for later restore and sets the matching MTA bits.
 */
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *mc_hashes = (u16 *)&msgbuf[1];
	int count = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	int idx;

	/* only so many hash values supported */
	count = min(count, IXGBE_MAX_VF_MC_ENTRIES);

	/* remember how many multicast addresses this VF registered so the
	 * list can be restored when the PF multicast list changes
	 */
	vfinfo->num_vf_mc_hashes = count;

	/* VFs may only use the MTA hash table for their multicast
	 * addresses: stash each hash and set its bit in the table
	 */
	for (idx = 0; idx < count; idx++) {
		u32 reg, bit, mta;

		vfinfo->vf_mc_hashes[idx] = mc_hashes[idx];

		reg = (vfinfo->vf_mc_hashes[idx] >> 5) & 0x7F;
		bit = vfinfo->vf_mc_hashes[idx] & 0x1F;
		mta = IXGBE_READ_REG(hw, IXGBE_MTA(reg));
		mta |= BIT(bit);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(reg), mta);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
406dee1ad47SJeff Kirsher 
#ifdef CONFIG_PCI_IOV
/* ixgbe_restore_vf_multicasts - re-program every VF's multicast state
 * @adapter: board private structure
 *
 * Replays the saved per-VF multicast hashes into the MTA and refreshes
 * each pool's VMOLR.ROMPE bit, then re-syncs the MAC filter table.
 */
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int pool;
	int j;

	for (pool = 0; pool < adapter->num_vfs; pool++) {
		struct vf_data_storage *vfinfo = &adapter->vfinfo[pool];
		u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));

		/* replay each stored hash into the MTA */
		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
			u32 reg, bit, mta;

			hw->addr_ctrl.mta_in_use++;
			reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
			bit = vfinfo->vf_mc_hashes[j] & 0x1F;
			mta = IXGBE_READ_REG(hw, IXGBE_MTA(reg));
			mta |= BIT(bit);
			IXGBE_WRITE_REG(hw, IXGBE_MTA(reg), mta);
		}

		/* ROMPE only when the VF actually registered hashes */
		if (vfinfo->num_vf_mc_hashes)
			vmolr |= IXGBE_VMOLR_ROMPE;
		else
			vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
	}

	/* Restore any VF macvlans */
	ixgbe_full_sync_mac_table(adapter);
}
#endif
440dee1ad47SJeff Kirsher 
/* ixgbe_set_vf_vlan - add or remove a VLAN filter for a VF pool
 * @adapter: board private structure
 * @add: non-zero to add the filter, zero to remove it
 * @vid: VLAN id
 * @vf: pool index to update
 *
 * Returns 0 or the error from the set_vfta MAC op.
 */
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
			     u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int ret;

	/* If VLAN overlaps with one the PF is currently monitoring make
	 * sure that we are able to allocate a VLVF entry.  This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		ret = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
		if (ret)
			return ret;
	}

	ret = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);

	/* a clean add needs no further cleanup */
	if (add && !ret)
		return 0;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		ixgbe_update_pf_promisc_vlvf(adapter, vid);

	return ret;
}
473dee1ad47SJeff Kirsher 
/* ixgbe_set_vf_lpe - handle a VF's request to change its max frame size
 * @adapter: board private structure
 * @max_frame: frame size requested by the VF, in bytes
 * @vf: requesting VF index
 *
 * Validates the request, applies the 82599-specific legacy-VF
 * restrictions (possibly disabling the VF's Rx path), and grows the
 * device MAXFRS if needed.  Returns 0 or a negative errno.
 */
static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 max_frs;

	/* reject sizes outside what the hardware can carry */
	if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
		e_err(drv, "VF max_frame %d out of range\n", max_frame);
		return -EINVAL;
	}

	/*
	 * For 82599EB we have to keep all PFs and VFs operating with
	 * the same max_frame value in order to avoid sending an oversize
	 * frame to a VF.  In order to guarantee this is handled correctly
	 * for all cases we have several special exceptions to take into
	 * account before we can enable the VF for receive
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		struct net_device *dev = adapter->netdev;
		int pf_max_frame = dev->mtu + ETH_HLEN;
		u32 reg_offset, vf_shift, vfre;
		s32 err = 0;

#ifdef CONFIG_FCOE
		/* FCoE may require a larger PF frame size */
		if (dev->features & NETIF_F_FCOE_MTU)
			pf_max_frame = max_t(int, pf_max_frame,
					     IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
		switch (adapter->vfinfo[vf].vf_api) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
		case ixgbe_mbox_api_14:
			/* Version 1.1 supports jumbo frames on VFs if PF has
			 * jumbo frames enabled which means legacy VFs are
			 * disabled
			 */
			if (pf_max_frame > ETH_FRAME_LEN)
				break;
			fallthrough;
		default:
			/* If the PF or VF are running w/ jumbo frames enabled
			 * we need to shut down the VF Rx path as we cannot
			 * support jumbo frames on legacy VFs
			 */
			if ((pf_max_frame > ETH_FRAME_LEN) ||
			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
				err = -EINVAL;
			break;
		}

		/* determine VF receive enable location */
		vf_shift = vf % 32;
		reg_offset = vf / 32;

		/* enable or disable receive depending on error */
		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
		if (err)
			vfre &= ~BIT(vf_shift);
		else
			vfre |= BIT(vf_shift);
		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

		if (err) {
			e_err(drv, "VF max_frame %d out of range\n", max_frame);
			return err;
		}
	}

	/* pull current max frame size from hardware */
	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	max_frs &= IXGBE_MHADD_MFS_MASK;
	max_frs >>= IXGBE_MHADD_MFS_SHIFT;

	/* only grow MAXFRS, never shrink it for other users */
	if (max_frs < max_frame) {
		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	e_info(hw, "VF requests change max MTU to %d\n", max_frame);

	return 0;
}
558dee1ad47SJeff Kirsher 
/* ixgbe_set_vmolr - update a pool's VM offload register
 * @hw: hardware struct
 * @vf: pool index
 * @aupe: whether to accept untagged packets on this pool
 *
 * Always sets BAM (accept broadcast) and sets or clears AUPE as asked.
 */
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
	u32 reg = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	reg &= ~IXGBE_VMOLR_AUPE;
	if (aupe)
		reg |= IXGBE_VMOLR_AUPE;
	reg |= IXGBE_VMOLR_BAM;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), reg);
}
569dee1ad47SJeff Kirsher 
/* ixgbe_clear_vmvir - zero a VF's VLAN insert register
 * @adapter: board private structure
 * @vf: VF index whose VMVIR is cleared
 */
static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMVIR(vf), 0);
}
5764c7f35f6SAlexander Duyck 
/* ixgbe_clear_vf_vlans - remove a VF from all shared VLAN filter entries
 * @adapter: board private structure
 * @vf: VF index whose pool bits should be cleared
 *
 * Walks the VLVF/VLVFB VLAN pool filter table backwards and clears @vf's
 * pool bit from every entry it participates in.  When @vf was the last
 * user of an entry, the VLVF entry itself (and the matching VFTA bit)
 * is released as well, unless the PF pool still references it.
 */
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlvfb_mask, pool_mask, i;

	/* create mask for VF and other pools */
	pool_mask = ~BIT(VMDQ_P(0) % 32);
	vlvfb_mask = BIT(vf % 32);

	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
	for (i = IXGBE_VLVF_ENTRIES; i--;) {
		u32 bits[2], vlvfb, vid, vfta, vlvf;
		/* VLVFB word holding this VF's bit for entry i (2 words/entry) */
		u32 word = i * 2 + vf / 32;
		u32 mask;

		vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

		/* if our bit isn't set we can skip it */
		if (!(vlvfb & vlvfb_mask))
			continue;

		/* clear our bit from vlvfb */
		vlvfb ^= vlvfb_mask;

		/* create 64b mask to check to see if we should clear VLVF */
		bits[word % 2] = vlvfb;
		bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));

		/* if other pools are present, just remove ourselves */
		if (bits[(VMDQ_P(0) / 32) ^ 1] ||
		    (bits[VMDQ_P(0) / 32] & pool_mask))
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (bits[0] || bits[1])
			goto update_vlvf;

		/* if we cannot determine VLAN just remove ourselves */
		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
		if (!vlvf)
			goto update_vlvfb;

		vid = vlvf & VLAN_VID_MASK;
		mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
		if (vfta & mask)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
update_vlvf:
		/* clear POOL selection enable */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);

		/* outside of VLAN promisc mode drop the remaining pool bits */
		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
			vlvfb = 0;
update_vlvfb:
		/* clear pool bits */
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
	}
}
6374c7f35f6SAlexander Duyck 
ixgbe_set_vf_macvlan(struct ixgbe_adapter * adapter,int vf,int index,unsigned char * mac_addr)638dee1ad47SJeff Kirsher static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
639dee1ad47SJeff Kirsher 				int vf, int index, unsigned char *mac_addr)
640dee1ad47SJeff Kirsher {
641dee1ad47SJeff Kirsher 	struct vf_macvlans *entry;
6420e1ff306STony Nguyen 	struct list_head *pos;
6430e1ff306STony Nguyen 	int retval = 0;
644dee1ad47SJeff Kirsher 
645dee1ad47SJeff Kirsher 	if (index <= 1) {
646dee1ad47SJeff Kirsher 		list_for_each(pos, &adapter->vf_mvs.l) {
647dee1ad47SJeff Kirsher 			entry = list_entry(pos, struct vf_macvlans, l);
648dee1ad47SJeff Kirsher 			if (entry->vf == vf) {
649dee1ad47SJeff Kirsher 				entry->vf = -1;
650dee1ad47SJeff Kirsher 				entry->free = true;
651dee1ad47SJeff Kirsher 				entry->is_macvlan = false;
6525d7daa35SJacob Keller 				ixgbe_del_mac_filter(adapter,
6535d7daa35SJacob Keller 						     entry->vf_macvlan, vf);
654dee1ad47SJeff Kirsher 			}
655dee1ad47SJeff Kirsher 		}
656dee1ad47SJeff Kirsher 	}
657dee1ad47SJeff Kirsher 
658dee1ad47SJeff Kirsher 	/*
659dee1ad47SJeff Kirsher 	 * If index was zero then we were asked to clear the uc list
660dee1ad47SJeff Kirsher 	 * for the VF.  We're done.
661dee1ad47SJeff Kirsher 	 */
662dee1ad47SJeff Kirsher 	if (!index)
663dee1ad47SJeff Kirsher 		return 0;
664dee1ad47SJeff Kirsher 
665dee1ad47SJeff Kirsher 	entry = NULL;
666dee1ad47SJeff Kirsher 
667dee1ad47SJeff Kirsher 	list_for_each(pos, &adapter->vf_mvs.l) {
668dee1ad47SJeff Kirsher 		entry = list_entry(pos, struct vf_macvlans, l);
669dee1ad47SJeff Kirsher 		if (entry->free)
670dee1ad47SJeff Kirsher 			break;
671dee1ad47SJeff Kirsher 	}
672dee1ad47SJeff Kirsher 
673dee1ad47SJeff Kirsher 	/*
674dee1ad47SJeff Kirsher 	 * If we traversed the entire list and didn't find a free entry
675dee1ad47SJeff Kirsher 	 * then we're out of space on the RAR table.  Also entry may
676dee1ad47SJeff Kirsher 	 * be NULL because the original memory allocation for the list
677dee1ad47SJeff Kirsher 	 * failed, which is not fatal but does mean we can't support
678dee1ad47SJeff Kirsher 	 * VF requests for MACVLAN because we couldn't allocate
679dee1ad47SJeff Kirsher 	 * memory for the list management required.
680dee1ad47SJeff Kirsher 	 */
681dee1ad47SJeff Kirsher 	if (!entry || !entry->free)
682dee1ad47SJeff Kirsher 		return -ENOSPC;
683dee1ad47SJeff Kirsher 
6840e1ff306STony Nguyen 	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
6850e1ff306STony Nguyen 	if (retval < 0)
6860e1ff306STony Nguyen 		return retval;
6870e1ff306STony Nguyen 
688dee1ad47SJeff Kirsher 	entry->free = false;
689dee1ad47SJeff Kirsher 	entry->is_macvlan = true;
690dee1ad47SJeff Kirsher 	entry->vf = vf;
691dee1ad47SJeff Kirsher 	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
692dee1ad47SJeff Kirsher 
693dee1ad47SJeff Kirsher 	return 0;
694dee1ad47SJeff Kirsher }
695dee1ad47SJeff Kirsher 
/* ixgbe_vf_reset_event - restore a VF to its post-reset default state
 * @adapter: board private structure
 * @vf: VF being reset
 *
 * Clears the VF's VLAN, MAC, multicast and ipsec filter state, restores
 * the PF-assigned VLAN/QoS and anti-spoof configuration, resets the
 * negotiated mailbox API back to 1.0, and bounces each configured Tx
 * queue (disable after enable) so it restarts cleanly.
 */
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u8 num_tcs = adapter->hw_tcs;
	u32 reg_val;
	u32 queue;

	/* remove VLAN filters belonging to this VF */
	ixgbe_clear_vf_vlans(adapter, vf);

	/* add back PF assigned VLAN or VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

	/* reset offloads to defaults */
	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

	/* set outgoing tags for VFs */
	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
		ixgbe_clear_vmvir(adapter, vf);
	} else {
		if (vfinfo->pf_qos || !num_tcs)
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					vfinfo->pf_qos, vf);
		else
			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
					adapter->default_up, vf);

		/* re-arm anti-spoofing if the admin enabled it */
		if (vfinfo->spoofchk_enabled) {
			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
			hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
		}
	}

	/* reset multicast table array for vf */
	adapter->vfinfo[vf].num_vf_mc_hashes = 0;

	/* clear any ipsec table info */
	ixgbe_ipsec_vf_clear(adapter, vf);

	/* Flush and reset the mta with the new values */
	ixgbe_set_rx_mode(adapter->netdev);

	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
	ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);

	/* reset VF api back to unknown */
	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;

	/* Restart each queue for given VF */
	for (queue = 0; queue < q_per_pool; queue++) {
		unsigned int reg_idx = (vf * q_per_pool) + queue;

		reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));

		/* Re-enabling only configured queues */
		if (reg_val) {
			reg_val |= IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);
}
76496d1a731SRoss Lagerwall 
/* Zero the PF-side mailbox memory for @vf so no stale message data
 * survives a VF reset.
 */
static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i = 0;

	/* Clear VF's mailbox memory */
	while (i < IXGBE_VFMAILBOX_SIZE)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), i++, 0);

	IXGBE_WRITE_FLUSH(hw);
}
776e251ecf7SEmil Tantilov 
ixgbe_set_vf_mac(struct ixgbe_adapter * adapter,int vf,unsigned char * mac_addr)777e251ecf7SEmil Tantilov static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
778e251ecf7SEmil Tantilov 			    int vf, unsigned char *mac_addr)
779e251ecf7SEmil Tantilov {
7806af3d0faSTony Nguyen 	s32 retval;
781e251ecf7SEmil Tantilov 
7826af3d0faSTony Nguyen 	ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
7836af3d0faSTony Nguyen 	retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
7846af3d0faSTony Nguyen 	if (retval >= 0)
7856af3d0faSTony Nguyen 		memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
7866af3d0faSTony Nguyen 		       ETH_ALEN);
7876af3d0faSTony Nguyen 	else
788935f73bdSMiaohe Lin 		eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses);
7896af3d0faSTony Nguyen 
7906af3d0faSTony Nguyen 	return retval;
791e251ecf7SEmil Tantilov }
792e251ecf7SEmil Tantilov 
ixgbe_vf_configuration(struct pci_dev * pdev,unsigned int event_mask)793dee1ad47SJeff Kirsher int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
794dee1ad47SJeff Kirsher {
795dee1ad47SJeff Kirsher 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
796dee1ad47SJeff Kirsher 	unsigned int vfn = (event_mask & 0x3f);
797dee1ad47SJeff Kirsher 
798dee1ad47SJeff Kirsher 	bool enable = ((event_mask & 0x10000000U) != 0);
799dee1ad47SJeff Kirsher 
800d458cdf7SJoe Perches 	if (enable)
801d458cdf7SJoe Perches 		eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
802dee1ad47SJeff Kirsher 
803dee1ad47SJeff Kirsher 	return 0;
804dee1ad47SJeff Kirsher }
805dee1ad47SJeff Kirsher 
/* Program the per-queue drop enable (QDE) register for every queue in
 * @vf's pool.  @qde carries the IXGBE_QDE_ENABLE/IXGBE_QDE_HIDE_VLAN
 * bits to apply.
 */
static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
				   u32 qde)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	u32 first = vf * q_per_pool;
	u32 queue;

	for (queue = first; queue < first + q_per_pool; queue++) {
		u32 reg;

		/* flush previous write */
		IXGBE_WRITE_FLUSH(hw);

		/* indicate to hardware that we want to set drop enable */
		reg = IXGBE_QDE_WRITE | qde | (queue << IXGBE_QDE_IDX_SHIFT);
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}
}
8268d697e7eSDon Skidmore 
827366fd100SSlawomir Mrozowicz /**
828366fd100SSlawomir Mrozowicz  * ixgbe_set_vf_rx_tx - Set VF rx tx
829366fd100SSlawomir Mrozowicz  * @adapter: Pointer to adapter struct
830366fd100SSlawomir Mrozowicz  * @vf: VF identifier
831366fd100SSlawomir Mrozowicz  *
832366fd100SSlawomir Mrozowicz  * Set or reset correct transmit and receive for vf
833366fd100SSlawomir Mrozowicz  **/
ixgbe_set_vf_rx_tx(struct ixgbe_adapter * adapter,int vf)834366fd100SSlawomir Mrozowicz static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
835366fd100SSlawomir Mrozowicz {
836366fd100SSlawomir Mrozowicz 	u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
837366fd100SSlawomir Mrozowicz 	struct ixgbe_hw *hw = &adapter->hw;
838366fd100SSlawomir Mrozowicz 	u32 reg_offset, vf_shift;
839366fd100SSlawomir Mrozowicz 
840366fd100SSlawomir Mrozowicz 	vf_shift = vf % 32;
841366fd100SSlawomir Mrozowicz 	reg_offset = vf / 32;
842366fd100SSlawomir Mrozowicz 
843366fd100SSlawomir Mrozowicz 	reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
844366fd100SSlawomir Mrozowicz 	reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
845366fd100SSlawomir Mrozowicz 
846366fd100SSlawomir Mrozowicz 	if (adapter->vfinfo[vf].link_enable) {
847366fd100SSlawomir Mrozowicz 		reg_req_tx = reg_cur_tx | 1 << vf_shift;
848366fd100SSlawomir Mrozowicz 		reg_req_rx = reg_cur_rx | 1 << vf_shift;
849366fd100SSlawomir Mrozowicz 	} else {
850366fd100SSlawomir Mrozowicz 		reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
851366fd100SSlawomir Mrozowicz 		reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
852366fd100SSlawomir Mrozowicz 	}
853366fd100SSlawomir Mrozowicz 
854366fd100SSlawomir Mrozowicz 	/* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
855366fd100SSlawomir Mrozowicz 	 * For more info take a look at ixgbe_set_vf_lpe
856366fd100SSlawomir Mrozowicz 	 */
857366fd100SSlawomir Mrozowicz 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
858366fd100SSlawomir Mrozowicz 		struct net_device *dev = adapter->netdev;
859366fd100SSlawomir Mrozowicz 		int pf_max_frame = dev->mtu + ETH_HLEN;
860366fd100SSlawomir Mrozowicz 
861366fd100SSlawomir Mrozowicz #if IS_ENABLED(CONFIG_FCOE)
862366fd100SSlawomir Mrozowicz 		if (dev->features & NETIF_F_FCOE_MTU)
863366fd100SSlawomir Mrozowicz 			pf_max_frame = max_t(int, pf_max_frame,
864366fd100SSlawomir Mrozowicz 					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
865366fd100SSlawomir Mrozowicz #endif /* CONFIG_FCOE */
866366fd100SSlawomir Mrozowicz 
867366fd100SSlawomir Mrozowicz 		if (pf_max_frame > ETH_FRAME_LEN)
868366fd100SSlawomir Mrozowicz 			reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
869366fd100SSlawomir Mrozowicz 	}
870366fd100SSlawomir Mrozowicz 
871366fd100SSlawomir Mrozowicz 	/* Enable/Disable particular VF */
872366fd100SSlawomir Mrozowicz 	if (reg_cur_tx != reg_req_tx)
873366fd100SSlawomir Mrozowicz 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
874366fd100SSlawomir Mrozowicz 	if (reg_cur_rx != reg_req_rx)
875366fd100SSlawomir Mrozowicz 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
876366fd100SSlawomir Mrozowicz }
877366fd100SSlawomir Mrozowicz 
/* ixgbe_vf_reset_msg - handle the IXGBE_VF_RESET mailbox message
 * @adapter: board private structure
 * @vf: VF that requested the reset
 *
 * Resets the VF's filter and queue state, clears its mailbox, re-adds
 * its MAC filter, re-enables Rx/Tx, and replies with the VF's permanent
 * MAC address (ACK) or a NACK when no PF-assigned address exists.
 * Always returns 0.
 */
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
	u32 reg, reg_offset, vf_shift;
	u32 msgbuf[4] = {0, 0, 0, 0};
	u8 *addr = (u8 *)(&msgbuf[1]);
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i;

	e_info(probe, "VF Reset msg received from vf %d\n", vf);

	/* reset the filters for the device */
	ixgbe_vf_reset_event(adapter, vf);

	ixgbe_vf_clear_mbx(adapter, vf);

	/* set vf mac address */
	if (!is_zero_ether_addr(vf_mac))
		ixgbe_set_vf_mac(adapter, vf, vf_mac);

	vf_shift = vf % 32;
	reg_offset = vf / 32;

	/* force drop enable for all VF Rx queues */
	reg = IXGBE_QDE_ENABLE;
	if (adapter->vfinfo[vf].pf_vlan)
		reg |= IXGBE_QDE_HIDE_VLAN;

	ixgbe_write_qde(adapter, vf, reg);

	ixgbe_set_vf_rx_tx(adapter, vf);

	/* enable VF mailbox for further messages */
	adapter->vfinfo[vf].clear_to_send = true;

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= BIT(vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	/*
	 * Reset the VFs TDWBAL and TDWBAH registers
	 * which are not cleared by an FLR
	 */
	for (i = 0; i < q_per_pool; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
	}

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET;
	if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	}

	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}
94758a02beeSAlexander Duyck 
/* Handle the IXGBE_VF_SET_MAC_ADDR mailbox request from @vf.
 *
 * Return: 0 on success, non-zero when the address is invalid, conflicts
 * with an administratively assigned MAC, or the filter update fails.
 */
static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	u8 *new_mac = (u8 *)&msgbuf[1];

	if (!is_valid_ether_addr(new_mac)) {
		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
		return -1;
	}

	/* untrusted VFs may not replace an admin-assigned MAC */
	if (vfinfo->pf_set_mac && !vfinfo->trusted &&
	    !ether_addr_equal(vfinfo->vf_mac_addresses, new_mac)) {
		e_warn(drv,
		       "VF %d attempted to override administratively set MAC address\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}
96958a02beeSAlexander Duyck 
/* Handle the IXGBE_VF_SET_VLAN mailbox request from @vf: add or remove
 * a VLAN filter, unless the PF owns the VLAN configuration.
 */
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	u32 vid = msgbuf[1] & IXGBE_VLVF_VLANID_MASK;
	u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	u8 tcs = adapter->hw_tcs;

	/* a PF-assigned VLAN or an active DCB config owns VLAN filtering */
	if (adapter->vfinfo[vf].pf_vlan || tcs) {
		e_warn(drv,
		       "VF %d attempted to override administratively set VLAN configuration\n"
		       "Reload the VF driver to resume operations\n",
		       vf);
		return -1;
	}

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!add && !vid)
		return 0;

	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}
99158a02beeSAlexander Duyck 
/* Handle the IXGBE_VF_SET_MACVLAN mailbox request from @vf.
 *
 * An index of 0 clears the VF's unicast filter list; a non-zero index
 * adds the supplied MAC as an additional filter.
 */
static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
				    u32 *msgbuf, u32 vf)
{
	int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
	u8 *new_mac = (u8 *)&msgbuf[1];
	int err;

	/* untrusted VFs with an admin-set MAC may only clear their list */
	if (index > 0 && adapter->vfinfo[vf].pf_set_mac &&
	    !adapter->vfinfo[vf].trusted) {
		e_warn(drv,
		       "VF %d requested MACVLAN filter but is administratively denied\n",
		       vf);
		return -1;
	}

	/* An non-zero index indicates the VF is setting a filter */
	if (index) {
		if (!is_valid_ether_addr(new_mac)) {
			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
			return -1;
		}

		/* If the VF is allowed to set MAC filters then turn off
		 * anti-spoofing to avoid false positives.
		 */
		if (adapter->vfinfo[vf].spoofchk_enabled) {
			struct ixgbe_hw *hw = &adapter->hw;

			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
			hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
		}
	}

	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
	if (err == -ENOSPC)
		e_warn(drv,
		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
		       vf);

	return err < 0;
}
1034dee1ad47SJeff Kirsher 
/* Handle the IXGBE_VF_API_NEGOTIATE mailbox request: record the mailbox
 * API version @vf wants to use if the PF supports it.
 *
 * Return: 0 on success, -1 for an unsupported version.
 */
static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
				  u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];

	switch (api) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		adapter->vfinfo[vf].vf_api = api;
		return 0;
	default:
		e_dbg(drv, "VF %d requested unsupported api version %u\n",
		      vf, api);
		return -1;
	}
}
1056374c65d6SAlexander Duyck 
/* Handle the IXGBE_VF_GET_QUEUE mailbox request: report queue counts,
 * the VLAN-stripping requirement, and the default traffic class to @vf.
 *
 * Return: 0 on success, -1 when the negotiated API does not support it.
 */
static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
			       u32 *msgbuf, u32 vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct net_device *dev = adapter->netdev;
	u8 num_tcs = adapter->hw_tcs;
	unsigned int default_tc = 0;
	u32 q_per_pool;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return -1;
	}

	/* only allow 1 Tx queue for bandwidth limiting */
	q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	msgbuf[IXGBE_VF_TX_QUEUES] = q_per_pool;
	msgbuf[IXGBE_VF_RX_QUEUES] = q_per_pool;

	/* if TCs > 1 determine which TC belongs to default user priority */
	if (num_tcs > 1)
		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);

	/* notify VF of need for VLAN tag stripping, and correct queue */
	if (num_tcs)
		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
	else
		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;

	/* notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;

	return 0;
}
1098f591cd9dSAlexander Duyck 
/* Handle the IXGBE_VF_GET_RETA mailbox request: copy a compressed RSS
 * redirection table into the reply buffer.
 *
 * Only 2 bits are kept per entry since 82599/x540 VFs support at most
 * 4 RSS queues, letting the whole RETA fit in one mailbox message.
 *
 * Return: 0 on success, -EPERM when RSS queries are disabled for @vf,
 * -EOPNOTSUPP when the negotiated API is too old.
 */
static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	const u8 *reta = adapter->rss_indir_tbl;
	u32 *out_buf = &msgbuf[1];
	u32 word, bit;

	/* Check if operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* pack 16 two-bit entries into each 32-bit output word */
	for (word = 0; word < reta_size / 16; word++) {
		u32 packed = 0;

		for (bit = 0; bit < 16; bit++)
			packed |= (u32)(reta[16 * word + bit] & 0x3) << (2 * bit);
		out_buf[word] = packed;
	}

	return 0;
}
11334ce37a4cSVlad Zolotarov 
/* Handle the IXGBE_VF_GET_RSS_KEY mailbox request: copy the PF's RSS
 * hash key into the reply buffer.
 *
 * Return: 0 on success, -EPERM when RSS queries are disabled for @vf,
 * -EOPNOTSUPP when the negotiated API is too old.
 */
static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
				u32 *msgbuf, u32 vf)
{
	/* Check if the operation is permitted */
	if (!adapter->vfinfo[vf].rss_query_enabled)
		return -EPERM;

	/* verify the PF is supporting the correct API */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	memcpy(&msgbuf[1], adapter->rss_key, IXGBE_RSS_KEY_SIZE);

	return 0;
}
11573c0841a9SVlad Zolotarov 
/* ixgbe_update_vf_xcast_mode - handle a VF request to change its
 * unicast/multicast/promiscuous receive mode
 *
 * msgbuf[1] carries the requested IXGBEVF_XCAST_MODE_* value; the mode
 * actually granted is written back into msgbuf[1] for the reply.
 * Untrusted VFs are silently capped at MULTI.
 *
 * Returns 0 on success, -EOPNOTSUPP when the negotiated mailbox API (or
 * the MAC, for promisc) does not support the request, or -EPERM when VF
 * promisc is requested while the PF itself is not in promiscuous mode.
 */
static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
				      u32 *msgbuf, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int xcast_mode = msgbuf[1];
	u32 vmolr, fctrl, disable, enable;

	/* verify the PF is supporting the correct APIs */
	switch (adapter->vfinfo[vf].vf_api) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* untrusted VFs may not go beyond multicast mode */
	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
	    !adapter->vfinfo[vf].trusted) {
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	}

	/* nothing to reprogram if the (possibly capped) mode is current */
	if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	/* translate the mode into VMOLR bits to clear and to set */
	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		/* promisc is only implemented on X550 and newer MACs */
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -EOPNOTSUPP;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			e_warn(drv,
			       "Enabling VF promisc requires PF in promisc\n");
			return -EPERM;
		}

		disable = IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* read-modify-write this VF's offload register with the new bits */
	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	adapter->vfinfo[vf].xcast_mode = xcast_mode;

out:
	/* report the granted mode back to the VF */
	msgbuf[1] = xcast_mode;

	return 0;
}
12338443c1a4SHiroshi Shimamoto 
/* ixgbe_get_vf_link_state - report the PF-administered link state to a VF
 *
 * Places the PF-tracked link_enable flag for @vf into msgbuf[1] for the
 * mailbox reply.  Only supported for VFs that negotiated mailbox API 1.2
 * or newer; otherwise returns -EOPNOTSUPP.
 */
static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
				   u32 *msgbuf, u32 vf)
{
	int api = adapter->vfinfo[vf].vf_api;

	/* only mailbox API 1.2 and newer define this request */
	if (api != ixgbe_mbox_api_12 && api != ixgbe_mbox_api_13 &&
	    api != ixgbe_mbox_api_14)
		return -EOPNOTSUPP;

	msgbuf[1] = adapter->vfinfo[vf].link_enable;

	return 0;
}
1253366fd100SSlawomir Mrozowicz 
/* ixgbe_rcv_msg_from_vf - read one mailbox message from a VF and dispatch it
 *
 * Reads the pending message for @vf, routes it to the matching handler
 * based on the low 16 bits of the first word, then writes the (possibly
 * modified) buffer back with an ACK or NACK flag so the VF learns the
 * outcome.  Returns the handler's status, or the mailbox read error.
 */
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval) {
		pr_err("Error receiving message from VF\n");
		return retval;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return 0;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/* reset is handled before the clear_to_send gate below */
	if (msgbuf[0] == IXGBE_VF_RESET)
		return ixgbe_vf_reset_msg(adapter, vf);

	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
	 */
	if (!adapter->vfinfo[vf].clear_to_send) {
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
		ixgbe_write_mbx(hw, msgbuf, 1, vf);
		return 0;
	}

	/* dispatch on the message opcode in the low 16 bits */
	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_VLAN:
		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_SET_LPE:
		retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
		break;
	case IXGBE_VF_SET_MACVLAN:
		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RETA:
		retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_RSS_KEY:
		retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_GET_LINK_STATE:
		retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_IPSEC_ADD:
		retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
		break;
	case IXGBE_VF_IPSEC_DEL:
		retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
		break;
	default:
		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
		retval = -EIO;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	/* CTS tells the VF it may continue sending configuration messages */
	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);

	return retval;
}
1346dee1ad47SJeff Kirsher 
/* ixgbe_rcv_ack_from_vf - respond to an ack from a VF
 *
 * A VF that has not finished its reset handshake should not be reading
 * the mailbox at all; answer its ack with a NACK so it restarts.
 */
static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
	u32 nack = IXGBE_VT_MSGTYPE_NACK;

	/* nothing to do for a VF that is clear to send */
	if (adapter->vfinfo[vf].clear_to_send)
		return;

	ixgbe_write_mbx(&adapter->hw, &nack, 1, vf);
}
1356dee1ad47SJeff Kirsher 
/* ixgbe_msg_task - service pending mailbox events for every VF
 *
 * Polls each VF's mailbox for reset requests, incoming messages, and
 * acks, handling each in turn.  The whole sweep runs under vfs_lock
 * (IRQ-safe) so VF teardown cannot race with message processing.
 */
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned long flags;
	u32 vf;

	spin_lock_irqsave(&adapter->vfs_lock, flags);
	for (vf = 0; vf < adapter->num_vfs; vf++) {
		/* process any reset requests */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(adapter, vf);
	}
	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
1379dee1ad47SJeff Kirsher 
ixgbe_ping_vf(struct ixgbe_adapter * adapter,int vf)138054011e4dSHiroshi Shimamoto static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
138154011e4dSHiroshi Shimamoto {
138254011e4dSHiroshi Shimamoto 	struct ixgbe_hw *hw = &adapter->hw;
138354011e4dSHiroshi Shimamoto 	u32 ping;
138454011e4dSHiroshi Shimamoto 
138554011e4dSHiroshi Shimamoto 	ping = IXGBE_PF_CONTROL_MSG;
138654011e4dSHiroshi Shimamoto 	if (adapter->vfinfo[vf].clear_to_send)
138754011e4dSHiroshi Shimamoto 		ping |= IXGBE_VT_MSGTYPE_CTS;
138854011e4dSHiroshi Shimamoto 	ixgbe_write_mbx(hw, &ping, 1, vf);
138954011e4dSHiroshi Shimamoto }
139054011e4dSHiroshi Shimamoto 
ixgbe_ping_all_vfs(struct ixgbe_adapter * adapter)1391dee1ad47SJeff Kirsher void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
1392dee1ad47SJeff Kirsher {
1393dee1ad47SJeff Kirsher 	struct ixgbe_hw *hw = &adapter->hw;
1394dee1ad47SJeff Kirsher 	u32 ping;
1395dee1ad47SJeff Kirsher 	int i;
1396dee1ad47SJeff Kirsher 
1397dee1ad47SJeff Kirsher 	for (i = 0 ; i < adapter->num_vfs; i++) {
1398dee1ad47SJeff Kirsher 		ping = IXGBE_PF_CONTROL_MSG;
1399dee1ad47SJeff Kirsher 		if (adapter->vfinfo[i].clear_to_send)
1400dee1ad47SJeff Kirsher 			ping |= IXGBE_VT_MSGTYPE_CTS;
1401dee1ad47SJeff Kirsher 		ixgbe_write_mbx(hw, &ping, 1, i);
1402dee1ad47SJeff Kirsher 	}
1403dee1ad47SJeff Kirsher }
1404dee1ad47SJeff Kirsher 
1405366fd100SSlawomir Mrozowicz /**
1406366fd100SSlawomir Mrozowicz  * ixgbe_set_all_vfs - update vfs queues
1407366fd100SSlawomir Mrozowicz  * @adapter: Pointer to adapter struct
1408366fd100SSlawomir Mrozowicz  *
1409366fd100SSlawomir Mrozowicz  * Update setting transmit and receive queues for all vfs
1410366fd100SSlawomir Mrozowicz  **/
ixgbe_set_all_vfs(struct ixgbe_adapter * adapter)1411366fd100SSlawomir Mrozowicz void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
1412366fd100SSlawomir Mrozowicz {
1413366fd100SSlawomir Mrozowicz 	int i;
1414366fd100SSlawomir Mrozowicz 
1415366fd100SSlawomir Mrozowicz 	for (i = 0 ; i < adapter->num_vfs; i++)
1416366fd100SSlawomir Mrozowicz 		ixgbe_set_vf_link_state(adapter, i,
1417366fd100SSlawomir Mrozowicz 					adapter->vfinfo[i].link_state);
1418366fd100SSlawomir Mrozowicz }
1419366fd100SSlawomir Mrozowicz 
/* ixgbe_ndo_set_vf_mac - ndo handler to set or clear a VF's MAC address
 *
 * A valid unicast @mac installs an administratively-set address (the VF
 * must reload to pick it up); an all-zero @mac removes the current
 * administrative address; anything else is rejected with -EINVAL.
 * Returns a negative errno on failure.
 */
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	s32 retval;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (is_valid_ether_addr(mac)) {
		/* administrator is assigning a new MAC to this VF */
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
			 mac, vf);
		dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");

		retval = ixgbe_set_vf_mac(adapter, vf, mac);
		if (retval >= 0) {
			/* pf_set_mac blocks the VF from overriding this MAC */
			adapter->vfinfo[vf].pf_set_mac = true;

			if (test_bit(__IXGBE_DOWN, &adapter->state)) {
				dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
				dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
			}
		} else {
			dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
		}
	} else if (is_zero_ether_addr(mac)) {
		/* administrator is clearing the VF's MAC */
		unsigned char *vf_mac_addr =
					   adapter->vfinfo[vf].vf_mac_addresses;

		/* nothing to do */
		if (is_zero_ether_addr(vf_mac_addr))
			return 0;

		dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);

		retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
		if (retval >= 0) {
			adapter->vfinfo[vf].pf_set_mac = false;
			/* store the zero address as the VF's current MAC */
			memcpy(vf_mac_addr, mac, ETH_ALEN);
		} else {
			dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
		}
	} else {
		/* multicast or otherwise malformed address */
		retval = -EINVAL;
	}

	return retval;
}
1467dee1ad47SJeff Kirsher 
/* ixgbe_enable_port_vlan - program an administrative port VLAN on a VF
 *
 * Adds the VLAN filter, revokes untagged (VLAN 0) access, programs the
 * VM VLAN-insert register with @vlan/@qos, and records the settings in
 * vfinfo.  On X550+ the queue-drop-enable "hide VLAN" mode is also set
 * so the tag is stripped from the VF's view.  Returns 0 or the VLAN
 * filter programming error.
 */
static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
				  u16 vlan, u8 qos)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
	if (err)
		goto out;

	/* Revoke tagless access via VLAN 0 */
	ixgbe_set_vf_vlan(adapter, false, 0, vf);

	ixgbe_set_vmvir(adapter, vlan, qos, vf);
	ixgbe_set_vmolr(hw, vf, false);

	/* enable hide vlan on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
				IXGBE_QDE_HIDE_VLAN);

	/* remember the admin settings so they survive VF resets */
	adapter->vfinfo[vf].pf_vlan = vlan;
	adapter->vfinfo[vf].pf_qos = qos;
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IXGBE_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

out:
	return err;
}
15032b509c0cSDon Skidmore 
/* ixgbe_disable_port_vlan - remove the administrative port VLAN from a VF
 *
 * Reverses ixgbe_enable_port_vlan: drops the VLAN filter, restores
 * untagged (VLAN 0) access, clears VLAN insertion, and zeroes the saved
 * pf_vlan/pf_qos state.  Returns the VLAN filter removal status.
 */
static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	err = ixgbe_set_vf_vlan(adapter, false,
				adapter->vfinfo[vf].pf_vlan, vf);
	/* Restore tagless access via VLAN 0 */
	ixgbe_set_vf_vlan(adapter, true, 0, vf);
	ixgbe_clear_vmvir(adapter, vf);
	ixgbe_set_vmolr(hw, vf, true);

	/* disable hide VLAN on X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

	adapter->vfinfo[vf].pf_vlan = 0;
	adapter->vfinfo[vf].pf_qos = 0;

	return err;
}
15252b509c0cSDon Skidmore 
/* ixgbe_ndo_set_vf_vlan - ndo handler to set or clear a VF port VLAN
 *
 * A zero @vlan and @qos removes any existing port VLAN; otherwise the
 * old port VLAN (if any) is removed first and the new one installed.
 * Only 802.1Q is supported.  Returns 0 or a negative errno.
 */
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			  u8 qos, __be16 vlan_proto)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* vlan 0 / qos 0 means "remove the port VLAN" */
	if (!vlan && !qos)
		return ixgbe_disable_port_vlan(adapter, vf);

	/* Check if there is already a port VLAN set, if so
	 * we have to delete the old one first before we
	 * can set the new one.  The usage model had
	 * previously assumed the user would delete the
	 * old port VLAN before setting a new one but this
	 * is not necessarily the case.
	 */
	if (adapter->vfinfo[vf].pf_vlan) {
		err = ixgbe_disable_port_vlan(adapter, vf);
		if (err)
			return err;
	}

	return ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
}
1556dee1ad47SJeff Kirsher 
ixgbe_link_mbps(struct ixgbe_adapter * adapter)1557c04f90e5SRostislav Pehlivanov int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
1558dee1ad47SJeff Kirsher {
15599f66d3eeSAlexander Duyck 	switch (adapter->link_speed) {
1560dee1ad47SJeff Kirsher 	case IXGBE_LINK_SPEED_100_FULL:
1561dee1ad47SJeff Kirsher 		return 100;
1562dee1ad47SJeff Kirsher 	case IXGBE_LINK_SPEED_1GB_FULL:
1563dee1ad47SJeff Kirsher 		return 1000;
1564dee1ad47SJeff Kirsher 	case IXGBE_LINK_SPEED_10GB_FULL:
1565dee1ad47SJeff Kirsher 		return 10000;
1566dee1ad47SJeff Kirsher 	default:
1567dee1ad47SJeff Kirsher 		return 0;
1568dee1ad47SJeff Kirsher 	}
1569dee1ad47SJeff Kirsher }
1570dee1ad47SJeff Kirsher 
/* ixgbe_set_vf_rate_limit - program the Tx rate limiter for one VF
 *
 * Computes the RTTBCNRC rate-factor from the stored link speed and the
 * VF's tx_rate (a zero tx_rate writes 0, disabling the limiter), then
 * applies it to every Tx queue in the VF's pool.  Also sets the MMW
 * compensation value appropriate for the MAC type.
 */
static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = 0;
	u16 queue, queues_per_pool;
	u16 tx_rate = adapter->vfinfo[vf].tx_rate;

	if (tx_rate) {
		/* start with base link speed value */
		bcnrc_val = adapter->vf_rate_link_speed;

		/* Calculate the rate factor values to set */
		bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
		bcnrc_val /= tx_rate;

		/* clear everything but the rate factor */
		bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
			     IXGBE_RTTBCNRC_RF_DEC_MASK;

		/* enable the rate scheduler */
		bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
	 * and 0x004 otherwise.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
		break;
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
		break;
	default:
		break;
	}

	/* determine how many queues per pool based on VMDq mask */
	queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);

	/* write value for all Tx queues belonging to VF */
	for (queue = 0; queue < queues_per_pool; queue++) {
		unsigned int reg_idx = (vf * queues_per_pool) + queue;

		/* RTTDQSEL selects the queue the RTTBCNRC write applies to */
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
	}
}
1622dee1ad47SJeff Kirsher 
ixgbe_check_vf_rate_limit(struct ixgbe_adapter * adapter)1623dee1ad47SJeff Kirsher void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1624dee1ad47SJeff Kirsher {
16259f66d3eeSAlexander Duyck 	int i;
1626dee1ad47SJeff Kirsher 
1627dee1ad47SJeff Kirsher 	/* VF Tx rate limit was not set */
16289f66d3eeSAlexander Duyck 	if (!adapter->vf_rate_link_speed)
1629dee1ad47SJeff Kirsher 		return;
1630dee1ad47SJeff Kirsher 
16319f66d3eeSAlexander Duyck 	if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
1632dee1ad47SJeff Kirsher 		adapter->vf_rate_link_speed = 0;
1633dee1ad47SJeff Kirsher 		dev_info(&adapter->pdev->dev,
16349f66d3eeSAlexander Duyck 			 "Link speed has been changed. VF Transmit rate is disabled\n");
1635dee1ad47SJeff Kirsher 	}
1636dee1ad47SJeff Kirsher 
1637dee1ad47SJeff Kirsher 	for (i = 0; i < adapter->num_vfs; i++) {
16389f66d3eeSAlexander Duyck 		if (!adapter->vf_rate_link_speed)
1639dee1ad47SJeff Kirsher 			adapter->vfinfo[i].tx_rate = 0;
1640dee1ad47SJeff Kirsher 
16419f66d3eeSAlexander Duyck 		ixgbe_set_vf_rate_limit(adapter, i);
1642dee1ad47SJeff Kirsher 	}
1643dee1ad47SJeff Kirsher }
1644dee1ad47SJeff Kirsher 
ixgbe_ndo_set_vf_bw(struct net_device * netdev,int vf,int min_tx_rate,int max_tx_rate)1645ed616689SSucheta Chakraborty int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1646ed616689SSucheta Chakraborty 			int max_tx_rate)
1647dee1ad47SJeff Kirsher {
1648dee1ad47SJeff Kirsher 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
16499f66d3eeSAlexander Duyck 	int link_speed;
1650dee1ad47SJeff Kirsher 
16519f66d3eeSAlexander Duyck 	/* verify VF is active */
16529f66d3eeSAlexander Duyck 	if (vf >= adapter->num_vfs)
1653dee1ad47SJeff Kirsher 		return -EINVAL;
1654dee1ad47SJeff Kirsher 
16559f66d3eeSAlexander Duyck 	/* verify link is up */
16569f66d3eeSAlexander Duyck 	if (!adapter->link_up)
16579f66d3eeSAlexander Duyck 		return -EINVAL;
16589f66d3eeSAlexander Duyck 
16599f66d3eeSAlexander Duyck 	/* verify we are linked at 10Gbps */
16609f66d3eeSAlexander Duyck 	link_speed = ixgbe_link_mbps(adapter);
16619f66d3eeSAlexander Duyck 	if (link_speed != 10000)
16629f66d3eeSAlexander Duyck 		return -EINVAL;
16639f66d3eeSAlexander Duyck 
1664ed616689SSucheta Chakraborty 	if (min_tx_rate)
1665ed616689SSucheta Chakraborty 		return -EINVAL;
1666ed616689SSucheta Chakraborty 
16679f66d3eeSAlexander Duyck 	/* rate limit cannot be less than 10Mbs or greater than link speed */
1668ed616689SSucheta Chakraborty 	if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
16699f66d3eeSAlexander Duyck 		return -EINVAL;
16709f66d3eeSAlexander Duyck 
16719f66d3eeSAlexander Duyck 	/* store values */
16729f66d3eeSAlexander Duyck 	adapter->vf_rate_link_speed = link_speed;
1673ed616689SSucheta Chakraborty 	adapter->vfinfo[vf].tx_rate = max_tx_rate;
16749f66d3eeSAlexander Duyck 
16759f66d3eeSAlexander Duyck 	/* update hardware configuration */
16769f66d3eeSAlexander Duyck 	ixgbe_set_vf_rate_limit(adapter, vf);
1677dee1ad47SJeff Kirsher 
1678dee1ad47SJeff Kirsher 	return 0;
1679dee1ad47SJeff Kirsher }
1680dee1ad47SJeff Kirsher 
/* ixgbe_ndo_set_vf_spoofchk - ndo handler to toggle anti-spoof for a VF
 *
 * Enables or disables MAC and VLAN anti-spoofing for @vf and, on MACs
 * that support it, ethertype anti-spoofing for LLDP and pause frames.
 * Returns 0, or -EINVAL for an out-of-range VF index.
 */
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].spoofchk_enabled = setting;

	/* configure MAC spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);

	/* configure VLAN spoofing */
	hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);

	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
	 * calling set_ethertype_anti_spoofing for each VF in loop below
	 */
	if (hw->mac.ops.set_ethertype_anti_spoofing) {
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
				(IXGBE_ETQF_FILTER_EN    |
				 IXGBE_ETQF_TX_ANTISPOOF |
				 ETH_P_LLDP));

		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
				(IXGBE_ETQF_FILTER_EN |
				 IXGBE_ETQF_TX_ANTISPOOF |
				 ETH_P_PAUSE));

		hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
	}

	return 0;
}
1716de4c7f65SGreg Rose 
1717366fd100SSlawomir Mrozowicz /**
1718366fd100SSlawomir Mrozowicz  * ixgbe_set_vf_link_state - Set link state
1719366fd100SSlawomir Mrozowicz  * @adapter: Pointer to adapter struct
1720366fd100SSlawomir Mrozowicz  * @vf: VF identifier
1721366fd100SSlawomir Mrozowicz  * @state: required link state
1722366fd100SSlawomir Mrozowicz  *
1723366fd100SSlawomir Mrozowicz  * Set a link force state on/off a single vf
1724366fd100SSlawomir Mrozowicz  **/
ixgbe_set_vf_link_state(struct ixgbe_adapter * adapter,int vf,int state)1725366fd100SSlawomir Mrozowicz void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
1726366fd100SSlawomir Mrozowicz {
1727366fd100SSlawomir Mrozowicz 	adapter->vfinfo[vf].link_state = state;
1728366fd100SSlawomir Mrozowicz 
1729366fd100SSlawomir Mrozowicz 	switch (state) {
1730366fd100SSlawomir Mrozowicz 	case IFLA_VF_LINK_STATE_AUTO:
1731366fd100SSlawomir Mrozowicz 		if (test_bit(__IXGBE_DOWN, &adapter->state))
1732366fd100SSlawomir Mrozowicz 			adapter->vfinfo[vf].link_enable = false;
1733366fd100SSlawomir Mrozowicz 		else
1734366fd100SSlawomir Mrozowicz 			adapter->vfinfo[vf].link_enable = true;
1735366fd100SSlawomir Mrozowicz 		break;
1736366fd100SSlawomir Mrozowicz 	case IFLA_VF_LINK_STATE_ENABLE:
1737366fd100SSlawomir Mrozowicz 		adapter->vfinfo[vf].link_enable = true;
1738366fd100SSlawomir Mrozowicz 		break;
1739366fd100SSlawomir Mrozowicz 	case IFLA_VF_LINK_STATE_DISABLE:
1740366fd100SSlawomir Mrozowicz 		adapter->vfinfo[vf].link_enable = false;
1741366fd100SSlawomir Mrozowicz 		break;
1742366fd100SSlawomir Mrozowicz 	}
1743366fd100SSlawomir Mrozowicz 
1744366fd100SSlawomir Mrozowicz 	ixgbe_set_vf_rx_tx(adapter, vf);
1745366fd100SSlawomir Mrozowicz 
1746366fd100SSlawomir Mrozowicz 	/* restart the VF */
1747366fd100SSlawomir Mrozowicz 	adapter->vfinfo[vf].clear_to_send = false;
1748366fd100SSlawomir Mrozowicz 	ixgbe_ping_vf(adapter, vf);
1749366fd100SSlawomir Mrozowicz }
1750366fd100SSlawomir Mrozowicz 
1751366fd100SSlawomir Mrozowicz /**
1752366fd100SSlawomir Mrozowicz  * ixgbe_ndo_set_vf_link_state - Set link state
1753366fd100SSlawomir Mrozowicz  * @netdev: network interface device structure
1754366fd100SSlawomir Mrozowicz  * @vf: VF identifier
1755366fd100SSlawomir Mrozowicz  * @state: required link state
1756366fd100SSlawomir Mrozowicz  *
1757366fd100SSlawomir Mrozowicz  * Set the link state of a specified VF, regardless of physical link state
1758366fd100SSlawomir Mrozowicz  **/
ixgbe_ndo_set_vf_link_state(struct net_device * netdev,int vf,int state)1759366fd100SSlawomir Mrozowicz int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
1760366fd100SSlawomir Mrozowicz {
1761366fd100SSlawomir Mrozowicz 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1762366fd100SSlawomir Mrozowicz 	int ret = 0;
1763366fd100SSlawomir Mrozowicz 
1764366fd100SSlawomir Mrozowicz 	if (vf < 0 || vf >= adapter->num_vfs) {
1765366fd100SSlawomir Mrozowicz 		dev_err(&adapter->pdev->dev,
1766366fd100SSlawomir Mrozowicz 			"NDO set VF link - invalid VF identifier %d\n", vf);
1767366fd100SSlawomir Mrozowicz 		return -EINVAL;
1768366fd100SSlawomir Mrozowicz 	}
1769366fd100SSlawomir Mrozowicz 
1770366fd100SSlawomir Mrozowicz 	switch (state) {
1771366fd100SSlawomir Mrozowicz 	case IFLA_VF_LINK_STATE_ENABLE:
1772366fd100SSlawomir Mrozowicz 		dev_info(&adapter->pdev->dev,
1773366fd100SSlawomir Mrozowicz 			 "NDO set VF %d link state %d - not supported\n",
1774366fd100SSlawomir Mrozowicz 			vf, state);
1775366fd100SSlawomir Mrozowicz 		break;
1776366fd100SSlawomir Mrozowicz 	case IFLA_VF_LINK_STATE_DISABLE:
1777366fd100SSlawomir Mrozowicz 		dev_info(&adapter->pdev->dev,
1778366fd100SSlawomir Mrozowicz 			 "NDO set VF %d link state disable\n", vf);
1779366fd100SSlawomir Mrozowicz 		ixgbe_set_vf_link_state(adapter, vf, state);
1780366fd100SSlawomir Mrozowicz 		break;
1781366fd100SSlawomir Mrozowicz 	case IFLA_VF_LINK_STATE_AUTO:
1782366fd100SSlawomir Mrozowicz 		dev_info(&adapter->pdev->dev,
1783366fd100SSlawomir Mrozowicz 			 "NDO set VF %d link state auto\n", vf);
1784366fd100SSlawomir Mrozowicz 		ixgbe_set_vf_link_state(adapter, vf, state);
1785366fd100SSlawomir Mrozowicz 		break;
1786366fd100SSlawomir Mrozowicz 	default:
1787366fd100SSlawomir Mrozowicz 		dev_err(&adapter->pdev->dev,
1788366fd100SSlawomir Mrozowicz 			"NDO set VF %d - invalid link state %d\n", vf, state);
1789366fd100SSlawomir Mrozowicz 		ret = -EINVAL;
1790366fd100SSlawomir Mrozowicz 	}
1791366fd100SSlawomir Mrozowicz 
1792366fd100SSlawomir Mrozowicz 	return ret;
1793366fd100SSlawomir Mrozowicz }
1794366fd100SSlawomir Mrozowicz 
/**
 * ixgbe_ndo_set_vf_rss_query_en - Enable/disable RSS querying by a VF
 * @netdev: network interface device structure
 * @vf: VF identifier
 * @setting: true to allow the VF to query its RSS configuration
 *
 * Return: 0 on success, -EOPNOTSUPP if the MAC does not support the
 * operation, or -EINVAL for an invalid VF identifier.
 **/
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
				  bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* This operation is currently supported only for 82599 and x540
	 * devices.
	 */
	if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
	    adapter->hw.mac.type >= ixgbe_mac_X550)
		return -EOPNOTSUPP;

	/* reject negative identifiers too; vfinfo[] is indexed by vf */
	if (vf < 0 || vf >= adapter->num_vfs)
		return -EINVAL;

	adapter->vfinfo[vf].rss_query_enabled = setting;

	return 0;
}
1814e65ce0d3SVlad Zolotarov 
/**
 * ixgbe_ndo_set_vf_trust - Set the trusted state of a VF
 * @netdev: network interface device structure
 * @vf: VF identifier
 * @setting: true to mark the VF as trusted
 *
 * A change of trust resets the VF (clear_to_send is dropped and the VF is
 * pinged) so it reconfigures its features under the new privilege level.
 *
 * Return: 0 on success, -EINVAL for an invalid VF identifier.
 **/
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* reject negative identifiers too; vfinfo[] is indexed by vf */
	if (vf < 0 || vf >= adapter->num_vfs)
		return -EINVAL;

	/* nothing to do */
	if (adapter->vfinfo[vf].trusted == setting)
		return 0;

	adapter->vfinfo[vf].trusted = setting;

	/* reset VF to reconfigure features */
	adapter->vfinfo[vf].clear_to_send = false;
	ixgbe_ping_vf(adapter, vf);

	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");

	return 0;
}
183654011e4dSHiroshi Shimamoto 
ixgbe_ndo_get_vf_config(struct net_device * netdev,int vf,struct ifla_vf_info * ivi)1837dee1ad47SJeff Kirsher int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1838dee1ad47SJeff Kirsher 			    int vf, struct ifla_vf_info *ivi)
1839dee1ad47SJeff Kirsher {
1840dee1ad47SJeff Kirsher 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1841dee1ad47SJeff Kirsher 	if (vf >= adapter->num_vfs)
1842dee1ad47SJeff Kirsher 		return -EINVAL;
1843dee1ad47SJeff Kirsher 	ivi->vf = vf;
1844dee1ad47SJeff Kirsher 	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1845ed616689SSucheta Chakraborty 	ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1846ed616689SSucheta Chakraborty 	ivi->min_tx_rate = 0;
1847dee1ad47SJeff Kirsher 	ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1848dee1ad47SJeff Kirsher 	ivi->qos = adapter->vfinfo[vf].pf_qos;
1849de4c7f65SGreg Rose 	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
1850e65ce0d3SVlad Zolotarov 	ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
185154011e4dSHiroshi Shimamoto 	ivi->trusted = adapter->vfinfo[vf].trusted;
1852dee1ad47SJeff Kirsher 	return 0;
1853dee1ad47SJeff Kirsher }
1854