/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

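/* A worked example of the pool math above (illustrative, using the VMDq
 * mask values from ixgbe.h of this era): __ALIGN_MASK(x, mask) expands to
 * ((x + mask) & ~mask), so __ALIGN_MASK(1, ~vmdq->mask) yields the number
 * of queues per pool.  With vmdq->mask == IXGBE_82599_VMDQ_4Q_MASK (0x7C)
 * that is 4 queues per pool; with IXGBE_82599_VMDQ_8Q_MASK (0x78) it is 8.
 * A pool offset of 2 at 8 queues per pool therefore starts caching at
 * reg_idx 16, and each time (reg_idx & ~vmdq->mask) reaches tcs the index
 * is realigned to the next pool boundary.
 */
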
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

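/* Illustrative check of the mapping above: with eight TCs on an 82599,
 * tc = 6 falls in the "tc + 8" bucket, so *tx = (6 + 8) << 3 = 112 and
 * *rx = 6 << 4 = 96; with four TCs, tc = 1 gives *tx = 1 << 6 = 64 and
 * *rx = 1 << 5 = 32.  These agree with the per-bucket comments in the
 * switch statement.
 */
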
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
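
/* Example of the resulting layout (illustrative): with num_tcs = 4 and
 * rss_i = 16, software rings 16..31 belong to TC1 and are cached against
 * Tx register indices 64..79 and Rx register indices 32..47, per the
 * first-reg-idx table above.
 */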

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

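/* Note the XDP rings continue numbering where the regular Tx rings stop;
 * e.g. (illustrative) with 8 Tx rings and 4 XDP rings, xdp_ring[0] is
 * cached against Tx register index 8 and xdp_ring[3] against index 11.
 */
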
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

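/* One XDP Tx ring is reserved per possible CPU (nr_cpu_ids) so that
 * XDP_TX can transmit from any CPU without contending on a shared ring;
 * when no XDP program is attached, no XDP rings are allocated at all.
 */
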
#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

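/* These masks wrap RSS queue indices when features share register space,
 * e.g. the (reg_idx & rss->mask) test in ixgbe_cache_ring_sriov() above.
 * The 64Q mask exists because X550 parts support up to 64 RSS queues,
 * while earlier parts are limited to 16 (see ixgbe_set_rss_queues()).
 */
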
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

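/* Worked sizing example (illustrative): with tcs = 8 the hardware is
 * carved into 128 / 8 = 16 pools of 8 queues, so a VMDq limit of 10
 * pools leaves fcoe_i = 16 - 10 = 6 for FCoE; with tcs <= 4 the same
 * math yields 32 pools of 4 queues each.
 */
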
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
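
/* Example (illustrative): a non-82598 device with 64 netdev Tx queues and
 * tcs = 8 gets rss_i = min(64 / 8, 8) = 8, so 64 Tx/Rx queues total, and
 * netdev_set_tc_queue() maps TC i onto the queue range [8 * i, 8 * i + 8).
 */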

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

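/* Sizing example (illustrative): requesting more than 32 pools selects
 * 64-pool mode, so each pool gets at most 2 queues; 40 VMDq pools with
 * rss_i = 2 yields num_rx_queues = num_tx_queues = 80, leaving
 * fcoe_i = 128 - 40 * 2 = 48 queues in the unclaimed pools.
 */
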
/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

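/* Worked example (illustrative, assuming a NON_Q_VECTORS of one for the
 * "other"/link interrupt): on a 12-CPU system with 16 Rx and 16 Tx queues
 * and no XDP, vectors = min(max(16, 16), 12) + 1 = 13, so
 * pci_enable_msix_range() is asked for between MIN_MSIX_COUNT and 13
 * vectors and may legitimately return fewer than requested.
 */
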
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

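/* Ring interleaving (illustrative): because txr_idx and rxr_idx advance
 * by v_count, rings are striped across the q_vectors.  With v_count = 4
 * and 8 Tx rings, the vector created with v_idx = 1 owns tx_ring[1] and
 * tx_ring[5]; XDP rings instead advance by 1 and stay contiguous.
 */
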
10278af3c33fSJeff Kirsher /**
10288af3c33fSJeff Kirsher  * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
10298af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10308af3c33fSJeff Kirsher  * @v_idx: Index of vector to be freed
10318af3c33fSJeff Kirsher  *
10328af3c33fSJeff Kirsher  * This function frees the memory allocated to the q_vector.  In addition if
10338af3c33fSJeff Kirsher  * NAPI is enabled it will delete any references to the NAPI struct prior
10348af3c33fSJeff Kirsher  * to freeing the q_vector.
10358af3c33fSJeff Kirsher  **/
10368af3c33fSJeff Kirsher static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
10378af3c33fSJeff Kirsher {
10388af3c33fSJeff Kirsher 	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
10398af3c33fSJeff Kirsher 	struct ixgbe_ring *ring;
10408af3c33fSJeff Kirsher 
104190382dcaSJohn Fastabend 	ixgbe_for_each_ring(ring, q_vector->tx) {
104290382dcaSJohn Fastabend 		if (ring_is_xdp(ring))
104390382dcaSJohn Fastabend 			adapter->xdp_ring[ring->queue_index] = NULL;
104490382dcaSJohn Fastabend 		else
10458af3c33fSJeff Kirsher 			adapter->tx_ring[ring->queue_index] = NULL;
104690382dcaSJohn Fastabend 	}
10478af3c33fSJeff Kirsher 
10488af3c33fSJeff Kirsher 	ixgbe_for_each_ring(ring, q_vector->rx)
10498af3c33fSJeff Kirsher 		adapter->rx_ring[ring->queue_index] = NULL;
10508af3c33fSJeff Kirsher 
10518af3c33fSJeff Kirsher 	adapter->q_vector[v_idx] = NULL;
10525a85e737SEliezer Tamir 	napi_hash_del(&q_vector->napi);
10538af3c33fSJeff Kirsher 	netif_napi_del(&q_vector->napi);
10548af3c33fSJeff Kirsher 
10558af3c33fSJeff Kirsher 	/*
10568af3c33fSJeff Kirsher 	 * ixgbe_get_stats64() might access the rings on this vector,
10578af3c33fSJeff Kirsher 	 * we must wait a grace period before freeing it.
10588af3c33fSJeff Kirsher 	 */
10598af3c33fSJeff Kirsher 	kfree_rcu(q_vector, rcu);
10608af3c33fSJeff Kirsher }
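
/*
 * Illustrative sketch (not compiled into the driver): kfree_rcu() above
 * defers the free until all RCU readers are done, so a stats reader such
 * as ixgbe_get_stats64() can walk the rings under rcu_read_lock() without
 * locking against this teardown path.  The loop below is a simplified
 * reader, not the driver's exact stats code; packets/bytes are
 * hypothetical accumulators.
 */
#if 0
	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);

		/* ring memory stays valid until the grace period that
		 * follows kfree_rcu() has elapsed
		 */
		if (ring) {
			packets += ring->stats.packets;
			bytes += ring->stats.bytes;
		}
	}
	rcu_read_unlock();
#endif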

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
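
/*
 * Illustrative sketch (not compiled into the driver): the DIV_ROUND_UP()
 * loop above hands the remaining queues out as evenly as possible, with
 * earlier vectors absorbing the rounding excess.  The standalone demo
 * below uses hypothetical counts (10 Tx queues over 4 vectors) to show
 * the resulting split.
 */
#if 0
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, v_idx;
	int txr_remaining = 10;	/* example values only */

	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		/* prints 3, 3, 2, 2 - shares never differ by more than one */
		printf("vector %d gets %d Tx queues\n", v_idx, tqpv);
		txr_remaining -= tqpv;
	}
	return 0;
}
#endif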

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition, if
 * NAPI is enabled, it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
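
/*
 * Illustrative sketch (not compiled into the driver): callers are expected
 * to have released the per-vector IRQ handlers before this teardown runs,
 * since disabling MSI-X with handlers still requested would be a bug.
 * A rough ordering, assuming the usual remove/reconfigure path:
 */
#if 0
	ixgbe_free_irq(adapter);			/* free_irq() per vector */
	ixgbe_free_q_vectors(adapter);			/* drop ring/NAPI state */
	ixgbe_reset_interrupt_capability(adapter);	/* now safe to disable MSI-X/MSI */
#endif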

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
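
/*
 * Illustrative sketch (not compiled into the driver): the interrupt mode
 * chosen above degrades gracefully.  Pseudo-logic of the fallback chain,
 * with hypothetical condition and mode names:
 */
#if 0
	if (msix_acquired)
		mode = MSIX;		/* one vector per q_vector */
	else if (pci_enable_msi(pdev) == 0)
		mode = MSI;		/* single vector; DCB/SR-IOV/RSS disabled */
	else
		mode = LEGACY_INTX;	/* shared line interrupt */
#endif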

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}
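
/*
 * Illustrative sketch (not compiled into the driver): callers pair
 * ixgbe_init_interrupt_scheme() with ixgbe_clear_interrupt_scheme()
 * around any reconfiguration that changes queue counts, roughly as in
 * the simplified flow below (the error label is hypothetical).
 */
#if 0
	ixgbe_clear_interrupt_scheme(adapter);	/* tear down old vectors */

	/* ... adjust adapter->ring_feature[] limits, flags, etc. ... */

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_init;			/* device remains in DOWN state */
#endif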

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
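
/*
 * Illustrative sketch (not compiled into the driver): a transmit path
 * packs the offload fields and emits one context descriptor ahead of the
 * data descriptors.  The field values below are examples only; the exact
 * encodings live in the driver's TSO/checksum routines, and ip_hdrlen and
 * mss are hypothetical locals.
 */
#if 0
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;

	/* MAC header length in the MACLEN field, IP header length in the
	 * low bits
	 */
	vlan_macip_lens = (skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT) |
			  ip_hdrlen;
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;	/* TCP checksum offload */
	mss_l4len_idx = mss << IXGBE_ADVTXD_MSS_SHIFT;	/* TSO segment size */

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
			  mss_l4len_idx);
#endif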