// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
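	/* __ALIGN_MASK(1, ~vmdq->mask) is the stride between pools, i.e. the
	 * number of queues each pool owns.  Worked example (assuming the
	 * usual ixgbe_type.h value IXGBE_82599_VMDQ_8Q_MASK == 0x78):
	 * (1 + ~0x78) & 0x78 == 8, so pool n starts at register n * 8.
	 */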
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
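			/* e.g. tc = 2 lands here: *rx = 2 << 5 = 64 and
			 * *tx = (2 + 4) << 4 = 96, the first of the
			 * 96, 112 pair noted above.
			 */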
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}
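	/* Ring (offset + i) of TC tc now maps to base(tc) + i; e.g. with
	 * 4 TCs and rss_i = 16, Tx ring 17 (TC1, slot 1) gets reg_idx
	 * 64 + 1 = 65.
	 */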

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;
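	/* XDP rings are stacked directly after the regular Tx rings in
	 * register space; e.g. with 8 Tx rings, xdp_ring[0] gets reg_idx 8.
	 */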

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}
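
/* One XDP Tx ring per possible CPU lets the XDP transmit path run without
 * ring locking (each CPU owns its own ring); e.g. an 8-CPU system gets 8
 * XDP rings once a program is attached and none otherwise.
 */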

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}
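	/* The 128 hardware queues split as either 16 pools x 8 queues or
	 * 32 pools x 4 queues, so e.g. with 8 TCs a request for 64 pools
	 * is trimmed to 16 here.
	 */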

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}
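	/* e.g. a 64-queue netdev with 8 TCs starts from rss_i = 64 / 8 = 8,
	 * which the tcs > 4 branch leaves capped at 8 queues per TC.
	 */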

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}
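	/* rss_i rounds down to a supported pool width above; e.g. a request
	 * for 3 queues per pool becomes 2.
	 */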

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;
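	/* Worked example (assuming a 16-CPU host and NON_Q_VECTORS == 1):
	 * 24 Rx / 24 Tx queues -> 24 -> capped to 16 by the CPU count ->
	 * 17 after adding the non-queue vector, requested below with the
	 * two-vector floor described above.
	 */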

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

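/* Rings chain through ring->next; ixgbe_add_ring() inserts at the head of
 * the container's singly linked list, so ixgbe_for_each_ring() visits the
 * most recently added ring first.
 */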
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}
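	/* txr_idx advanced by v_count above, so Tx queues interleave across
	 * vectors; e.g. 8 Tx queues over 4 vectors leaves vector 0 with
	 * rings 0 and 4.  XDP indices below advance by 1 since each vector
	 * owns a contiguous slice of XDP rings.
	 */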
9358af3c33fSJeff Kirsher 
93633fdc82fSJohn Fastabend 	while (xdp_count) {
93733fdc82fSJohn Fastabend 		/* assign generic ring traits */
93833fdc82fSJohn Fastabend 		ring->dev = &adapter->pdev->dev;
93933fdc82fSJohn Fastabend 		ring->netdev = adapter->netdev;
94033fdc82fSJohn Fastabend 
94133fdc82fSJohn Fastabend 		/* configure backlink on ring */
94233fdc82fSJohn Fastabend 		ring->q_vector = q_vector;
94333fdc82fSJohn Fastabend 
94433fdc82fSJohn Fastabend 		/* update q_vector Tx values */
94533fdc82fSJohn Fastabend 		ixgbe_add_ring(ring, &q_vector->tx);
94633fdc82fSJohn Fastabend 
94733fdc82fSJohn Fastabend 		/* apply Tx specific ring traits */
94833fdc82fSJohn Fastabend 		ring->count = adapter->tx_ring_count;
94933fdc82fSJohn Fastabend 		ring->queue_index = xdp_idx;
95033fdc82fSJohn Fastabend 		set_ring_xdp(ring);
95133fdc82fSJohn Fastabend 
95233fdc82fSJohn Fastabend 		/* assign ring to adapter */
95333fdc82fSJohn Fastabend 		adapter->xdp_ring[xdp_idx] = ring;
95433fdc82fSJohn Fastabend 
95533fdc82fSJohn Fastabend 		/* update count and index */
95633fdc82fSJohn Fastabend 		xdp_count--;
95733fdc82fSJohn Fastabend 		xdp_idx++;
95833fdc82fSJohn Fastabend 
95933fdc82fSJohn Fastabend 		/* push pointer to next ring */
96033fdc82fSJohn Fastabend 		ring++;
96133fdc82fSJohn Fastabend 	}
96233fdc82fSJohn Fastabend 
9638af3c33fSJeff Kirsher 	while (rxr_count) {
9648af3c33fSJeff Kirsher 		/* assign generic ring traits */
9658af3c33fSJeff Kirsher 		ring->dev = &adapter->pdev->dev;
9668af3c33fSJeff Kirsher 		ring->netdev = adapter->netdev;
9678af3c33fSJeff Kirsher 
9688af3c33fSJeff Kirsher 		/* configure backlink on ring */
9698af3c33fSJeff Kirsher 		ring->q_vector = q_vector;
9708af3c33fSJeff Kirsher 
9718af3c33fSJeff Kirsher 		/* update q_vector Rx values */
9728af3c33fSJeff Kirsher 		ixgbe_add_ring(ring, &q_vector->rx);
9738af3c33fSJeff Kirsher 
9748af3c33fSJeff Kirsher 		/*
9758af3c33fSJeff Kirsher 		 * 82599 errata, UDP frames with a 0 checksum
9768af3c33fSJeff Kirsher 		 * can be marked as checksum errors.
9778af3c33fSJeff Kirsher 		 */
9788af3c33fSJeff Kirsher 		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
9798af3c33fSJeff Kirsher 			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
9808af3c33fSJeff Kirsher 
981b2db497eSAlexander Duyck #ifdef IXGBE_FCOE
982b2db497eSAlexander Duyck 		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
983b2db497eSAlexander Duyck 			struct ixgbe_ring_feature *f;
984b2db497eSAlexander Duyck 			f = &adapter->ring_feature[RING_F_FCOE];
985e4b317e9SAlexander Duyck 			if ((rxr_idx >= f->offset) &&
986e4b317e9SAlexander Duyck 			    (rxr_idx < f->offset + f->indices))
98757efd44cSAlexander Duyck 				set_bit(__IXGBE_RX_FCOE, &ring->state);
988b2db497eSAlexander Duyck 		}
989b2db497eSAlexander Duyck 
990b2db497eSAlexander Duyck #endif /* IXGBE_FCOE */
9918af3c33fSJeff Kirsher 		/* apply Rx specific ring traits */
9928af3c33fSJeff Kirsher 		ring->count = adapter->rx_ring_count;
9938af3c33fSJeff Kirsher 		ring->queue_index = rxr_idx;
9948af3c33fSJeff Kirsher 
9958af3c33fSJeff Kirsher 		/* assign ring to adapter */
9968af3c33fSJeff Kirsher 		adapter->rx_ring[rxr_idx] = ring;
9978af3c33fSJeff Kirsher 
9988af3c33fSJeff Kirsher 		/* update count and index */
9998af3c33fSJeff Kirsher 		rxr_count--;
1000d0bfcdfdSAlexander Duyck 		rxr_idx += v_count;
10018af3c33fSJeff Kirsher 
10028af3c33fSJeff Kirsher 		/* push pointer to next ring */
10038af3c33fSJeff Kirsher 		ring++;
10048af3c33fSJeff Kirsher 	}
10058af3c33fSJeff Kirsher 
10068af3c33fSJeff Kirsher 	return 0;
10078af3c33fSJeff Kirsher }
10088af3c33fSJeff Kirsher 
10098af3c33fSJeff Kirsher /**
10108af3c33fSJeff Kirsher  * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
10118af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10128af3c33fSJeff Kirsher  * @v_idx: Index of vector to be freed
10138af3c33fSJeff Kirsher  *
10148af3c33fSJeff Kirsher  * This function frees the memory allocated to the q_vector.  In addition if
10158af3c33fSJeff Kirsher  * NAPI is enabled it will delete any references to the NAPI struct prior
10168af3c33fSJeff Kirsher  * to freeing the q_vector.
10178af3c33fSJeff Kirsher  **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * so we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

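	/* If we have at least as many vectors as queues, the first loop
	 * gives every Rx queue a dedicated vector; the second loop then
	 * spreads the Tx/XDP queues over the remaining vectors.
	 */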
	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

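		/* DIV_ROUND_UP spreads the remaining queues as evenly as
		 * possible over the remaining vectors: e.g. 5 Tx queues
		 * over 3 vectors come out as 2, 2 and 1 as v_idx advances.
		 */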
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure
 *
 * This function frees the memory allocated to the q_vectors.  In addition, if
 * NAPI is enabled, it will delete any references to the NAPI structs prior
 * to freeing each q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

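	/* Try MSI next; if that also fails, neither IXGBE_FLAG_MSIX_ENABLED
	 * nor IXGBE_FLAG_MSI_ENABLED is set and the driver falls back to
	 * legacy INTx interrupts.
	 */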
	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

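	/* Leave the adapter marked down; the __IXGBE_DOWN bit is cleared
	 * when the interface is later brought up.
	 */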
	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt-specific resources and reset the
 * structure to pre-load conditions.
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

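/**
 * ixgbe_tx_ctxtdesc - write an advanced Tx context descriptor to the ring
 * @tx_ring: Tx ring to place the context descriptor on
 * @vlan_macip_lens: VLAN tag plus MAC and IP header length fields
 * @fceof_saidx: FCoE SOF/EOF or IPsec SA index field
 * @type_tucmd: descriptor type and TUCMD option flags
 * @mss_l4len_idx: MSS and L4 length fields
 **/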
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

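	/* advance next_to_use, wrapping back to 0 at the end of the ring */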
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}