// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;
	/* verify we have SR-IOV enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* if we are beyond the indices for this pool, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
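		/* only pool 0's rings belong to the PF netdev; rings in
		 * other pools are claimed later (e.g. by offloaded
		 * macvlans), so their netdev stays NULL for now
		 */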
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* if we are beyond the indices for this pool, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

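	/* rings are laid out per TC: TC n owns the contiguous block
	 * [n * rss_i, (n + 1) * rss_i) of the ring array
	 */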
	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* if we are beyond the indices for this pool, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* if we are beyond the indices for this pool, move to the next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
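	/* one XDP Tx queue per CPU, capped by the device limit; none at
	 * all unless an XDP program is attached
	 */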
	int queues;

	queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
	return adapter->xdp_prog ? queues : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have SR-IOV enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

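	/* the 82599/X540-class parts expose 128 queues, so 8 TCs per pool
	 * allows at most 128 / 8 = 16 pools and 4 TCs per pool allows
	 * 128 / 4 = 32 pools
	 */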
	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
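	/* (128 is the total queue count, so whatever the VMDq pools do
	 *  not claim is left over for FCoE)
	 */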
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set RSS indices and mask based on the device's queue limit */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw->mac.max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
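	/* next_update paces the adaptive ITR rescheduling logic */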
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
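		/* Tx ring indices are strided by v_count so the rings of
		 * the adapter's vectors interleave in the ring array
		 */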
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);
		spin_lock_init(&ring->tx_lock);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition, if
 * NAPI is enabled, it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
ixgbe_free_q_vector(struct ixgbe_adapter * adapter,int v_idx)10208af3c33fSJeff Kirsher static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
10218af3c33fSJeff Kirsher {
10228af3c33fSJeff Kirsher 	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
10238af3c33fSJeff Kirsher 	struct ixgbe_ring *ring;
10248af3c33fSJeff Kirsher 
102590382dcaSJohn Fastabend 	ixgbe_for_each_ring(ring, q_vector->tx) {
102690382dcaSJohn Fastabend 		if (ring_is_xdp(ring))
1027f140ad9fSCiara Loftus 			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
102890382dcaSJohn Fastabend 		else
1029f140ad9fSCiara Loftus 			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
103090382dcaSJohn Fastabend 	}
10318af3c33fSJeff Kirsher 
10328af3c33fSJeff Kirsher 	ixgbe_for_each_ring(ring, q_vector->rx)
1033f140ad9fSCiara Loftus 		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
10348af3c33fSJeff Kirsher 
10358af3c33fSJeff Kirsher 	adapter->q_vector[v_idx] = NULL;
10365198d545SJakub Kicinski 	__netif_napi_del(&q_vector->napi);
10378af3c33fSJeff Kirsher 
10388af3c33fSJeff Kirsher 	/*
10395198d545SJakub Kicinski 	 * after a call to __netif_napi_del() napi may still be used and
10408af3c33fSJeff Kirsher 	 * ixgbe_get_stats64() might access the rings on this vector,
10418af3c33fSJeff Kirsher 	 * we must wait a grace period before freeing it.
10428af3c33fSJeff Kirsher 	 */
10438af3c33fSJeff Kirsher 	kfree_rcu(q_vector, rcu);
10448af3c33fSJeff Kirsher }
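
/*
 * The WRITE_ONCE() stores above pair with READ_ONCE() on the reader side
 * (e.g. ixgbe_get_stats64() walking adapter->rx_ring[]/tx_ring[] under
 * rcu_read_lock()), so readers either see a valid ring pointer or NULL,
 * never a pointer to memory that has already been freed.
 */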

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

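	/*
	 * If there are at least as many vectors as rings, give every Rx
	 * ring a dedicated vector up front; Tx and XDP rings are then
	 * spread over the remaining vectors by the loop that follows.
	 */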
	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}
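
	/*
	 * The DIV_ROUND_UP() recalculation on each iteration spreads the
	 * remaining rings as evenly as possible.  Illustrative example
	 * (not from the source): with 10 Rx rings over 4 vectors the loop
	 * assigns DIV_ROUND_UP(10,4)=3, then 7/3->3, 4/2->2, 2/1->2,
	 * i.e. a 3/3/2/2 split.
	 */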

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}
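
	/*
	 * ring_idx caches each ring's index within the adapter's ring
	 * arrays so that code holding only a ring pointer (the AF_XDP
	 * zero-copy paths, for instance) can recover the queue index
	 * without searching the arrays.
	 */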

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition, if
 * NAPI is enabled, it will delete any references to the NAPI struct prior
 * to freeing the q_vectors.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

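/**
 * ixgbe_reset_interrupt_capability - release MSI-X/MSI resources
 * @adapter: board private structure
 *
 * Disables MSI-X or MSI, frees the MSI-X entry table if one was
 * allocated, and clears the corresponding feature flags so the driver
 * falls back to legacy interrupt behavior.
 */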
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB if more than one traffic class is configured */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
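
/*
 * Note that there is no explicit "legacy" setup step here: if MSI-X and
 * MSI both fail, neither IXGBE_FLAG_MSIX_ENABLED nor IXGBE_FLAG_MSI_ENABLED
 * is set, and the rest of the driver treats that as legacy INTx mode.
 */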

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

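	/*
	 * Mark the adapter as down; the interface stays in this state
	 * until ixgbe_open() brings it up and enables the interrupts
	 * configured above.
	 */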
	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

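	/* advance next_to_use, wrapping back to the start of the ring */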
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
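
/*
 * Callers (the checksum, TSO, FCoE and IPsec offload paths) build the
 * vlan_macip_lens, fceof_saidx, type_tucmd and mss_l4len_idx words and
 * use this helper to write an advanced context descriptor ahead of the
 * data descriptors that reference it.
 */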