/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

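	/* Note: __ALIGN_MASK(1, ~vmdq->mask) is the number of queues in each
	 * VMDq pool, and __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds reg_idx
	 * up to the first queue of the next pool.
	 */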
	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, so we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

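		/* FCoE rings are placed in the pools immediately following
		 * the VMDq range: each ring gets its own pool, offset to the
		 * queue belonging to the FCoE traffic class.
		 */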
		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
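	/* Tx and XDP rings share one 1:1 register index space; the XDP
	 * rings are mapped directly after the Tx rings.
	 */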
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note: the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

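/* When an XDP program is attached we allocate one XDP Tx ring per CPU so
 * XDP transmits do not have to contend for a ring across CPUs.
 */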
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

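/* RSS redirection masks: an n-queue mask is n - 1, and the DISABLED mask
 * forces every hash result onto queue 0.
 */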
#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS-like
	 * behavior.  To account for this we need to add the FCoE indices
	 * to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for RSS queue limit: 16 queues before X550, 64 from
	 * X550 onward
	 */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default; it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic;
	 * however, it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the largest number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw->mac.max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error
		 * acquiring vectors within the specified range of MSI-X
		 * vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = adapter->hw_tcs;

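	/* the ring array is embedded at the end of the q_vector structure,
	 * so one allocation covers the vector and all of its rings
	 */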
	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition, if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
10268af3c33fSJeff Kirsher static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
10278af3c33fSJeff Kirsher {
10288af3c33fSJeff Kirsher 	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
10298af3c33fSJeff Kirsher 	struct ixgbe_ring *ring;
10308af3c33fSJeff Kirsher 
103190382dcaSJohn Fastabend 	ixgbe_for_each_ring(ring, q_vector->tx) {
103290382dcaSJohn Fastabend 		if (ring_is_xdp(ring))
103390382dcaSJohn Fastabend 			adapter->xdp_ring[ring->queue_index] = NULL;
103490382dcaSJohn Fastabend 		else
10358af3c33fSJeff Kirsher 			adapter->tx_ring[ring->queue_index] = NULL;
103690382dcaSJohn Fastabend 	}
10378af3c33fSJeff Kirsher 
10388af3c33fSJeff Kirsher 	ixgbe_for_each_ring(ring, q_vector->rx)
10398af3c33fSJeff Kirsher 		adapter->rx_ring[ring->queue_index] = NULL;
10408af3c33fSJeff Kirsher 
10418af3c33fSJeff Kirsher 	adapter->q_vector[v_idx] = NULL;
10425a85e737SEliezer Tamir 	napi_hash_del(&q_vector->napi);
10438af3c33fSJeff Kirsher 	netif_napi_del(&q_vector->napi);
10448af3c33fSJeff Kirsher 
10458af3c33fSJeff Kirsher 	/*
10468af3c33fSJeff Kirsher 	 * ixgbe_get_stats64() might access the rings on this vector,
10478af3c33fSJeff Kirsher 	 * so we must wait a grace period before freeing it.
10488af3c33fSJeff Kirsher 	 */
10498af3c33fSJeff Kirsher 	kfree_rcu(q_vector, rcu);
10508af3c33fSJeff Kirsher }
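/*
 * Illustrative sketch, not driver code: the lockless reader that the
 * kfree_rcu() above protects.  The real ixgbe_get_stats64() additionally
 * uses u64_stats_fetch_begin()/retry() on each ring's syncp; that detail
 * is omitted here, and the function name is hypothetical.
 */
static u64 ixgbe_example_sum_rx_packets(struct ixgbe_adapter *adapter)
{
	u64 packets = 0;
	int i;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);

		/* may be NULLed concurrently by ixgbe_free_q_vector() */
		if (ring)
			packets += ring->stats.packets;
	}
	rcu_read_unlock();

	return packets;
}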
10518af3c33fSJeff Kirsher 
10528af3c33fSJeff Kirsher /**
10538af3c33fSJeff Kirsher  * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
10548af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10558af3c33fSJeff Kirsher  *
10568af3c33fSJeff Kirsher  * We allocate one q_vector per queue interrupt.  If allocation fails, we
10578af3c33fSJeff Kirsher  * return -ENOMEM.
10588af3c33fSJeff Kirsher  **/
10598af3c33fSJeff Kirsher static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
10608af3c33fSJeff Kirsher {
106149c7ffbeSAlexander Duyck 	int q_vectors = adapter->num_q_vectors;
10628af3c33fSJeff Kirsher 	int rxr_remaining = adapter->num_rx_queues;
10638af3c33fSJeff Kirsher 	int txr_remaining = adapter->num_tx_queues;
106433fdc82fSJohn Fastabend 	int xdp_remaining = adapter->num_xdp_queues;
106533fdc82fSJohn Fastabend 	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
10668af3c33fSJeff Kirsher 	int err;
10678af3c33fSJeff Kirsher 
10688af3c33fSJeff Kirsher 	/* only one q_vector if MSI-X is disabled. */
10698af3c33fSJeff Kirsher 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
10708af3c33fSJeff Kirsher 		q_vectors = 1;
10718af3c33fSJeff Kirsher 
107233fdc82fSJohn Fastabend 	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
1073d0bfcdfdSAlexander Duyck 		for (; rxr_remaining; v_idx++) {
1074d0bfcdfdSAlexander Duyck 			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
107533fdc82fSJohn Fastabend 						   0, 0, 0, 0, 1, rxr_idx);
10768af3c33fSJeff Kirsher 
10778af3c33fSJeff Kirsher 			if (err)
10788af3c33fSJeff Kirsher 				goto err_out;
10798af3c33fSJeff Kirsher 
10808af3c33fSJeff Kirsher 			/* update counts and index */
1081d0bfcdfdSAlexander Duyck 			rxr_remaining--;
1082d0bfcdfdSAlexander Duyck 			rxr_idx++;
10838af3c33fSJeff Kirsher 		}
10848af3c33fSJeff Kirsher 	}
10858af3c33fSJeff Kirsher 
1086d0bfcdfdSAlexander Duyck 	for (; v_idx < q_vectors; v_idx++) {
1087d0bfcdfdSAlexander Duyck 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1088d0bfcdfdSAlexander Duyck 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
108933fdc82fSJohn Fastabend 		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
109033fdc82fSJohn Fastabend 
1091d0bfcdfdSAlexander Duyck 		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
10928af3c33fSJeff Kirsher 					   tqpv, txr_idx,
109333fdc82fSJohn Fastabend 					   xqpv, xdp_idx,
10948af3c33fSJeff Kirsher 					   rqpv, rxr_idx);
10958af3c33fSJeff Kirsher 
10968af3c33fSJeff Kirsher 		if (err)
10978af3c33fSJeff Kirsher 			goto err_out;
10988af3c33fSJeff Kirsher 
10998af3c33fSJeff Kirsher 		/* update counts and index */
11008af3c33fSJeff Kirsher 		rxr_remaining -= rqpv;
11018af3c33fSJeff Kirsher 		txr_remaining -= tqpv;
110233fdc82fSJohn Fastabend 		xdp_remaining -= xqpv;
1103d0bfcdfdSAlexander Duyck 		rxr_idx++;
1104d0bfcdfdSAlexander Duyck 		txr_idx++;
110533fdc82fSJohn Fastabend 		xdp_idx += xqpv;
11068af3c33fSJeff Kirsher 	}
11078af3c33fSJeff Kirsher 
11088af3c33fSJeff Kirsher 	return 0;
11098af3c33fSJeff Kirsher 
11108af3c33fSJeff Kirsher err_out:
111149c7ffbeSAlexander Duyck 	adapter->num_tx_queues = 0;
111233fdc82fSJohn Fastabend 	adapter->num_xdp_queues = 0;
111349c7ffbeSAlexander Duyck 	adapter->num_rx_queues = 0;
111449c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 0;
111549c7ffbeSAlexander Duyck 
111649c7ffbeSAlexander Duyck 	while (v_idx--)
11178af3c33fSJeff Kirsher 		ixgbe_free_q_vector(adapter, v_idx);
11188af3c33fSJeff Kirsher 
11198af3c33fSJeff Kirsher 	return -ENOMEM;
11208af3c33fSJeff Kirsher }
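/*
 * Illustrative sketch, not driver code: the DIV_ROUND_UP() loop above
 * splits the remaining queues as evenly as possible over the remaining
 * vectors.  Worked example with 16 Rx queues and 9 vectors: vectors 0-6
 * each take DIV_ROUND_UP(remaining, vectors_left) = 2 queues, leaving 2,
 * so vectors 7 and 8 take 1 queue each (7 * 2 + 2 * 1 = 16).  The helper
 * below just prints that distribution; its name is hypothetical.
 */
static void ixgbe_example_split(int queues, int vectors)
{
	int v;

	for (v = 0; v < vectors; v++) {
		int per_vector = DIV_ROUND_UP(queues, vectors - v);

		pr_info("vector %d gets %d queue(s)\n", v, per_vector);
		queues -= per_vector;
	}
}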
11218af3c33fSJeff Kirsher 
11228af3c33fSJeff Kirsher /**
11238af3c33fSJeff Kirsher  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
11248af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
11258af3c33fSJeff Kirsher  *
11268af3c33fSJeff Kirsher  * This function frees the memory allocated to the q_vectors.  In addition, if
11278af3c33fSJeff Kirsher  * NAPI is enabled, it will delete any references to the NAPI struct prior
11288af3c33fSJeff Kirsher  * to freeing the q_vector.
11298af3c33fSJeff Kirsher  **/
11308af3c33fSJeff Kirsher static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
11318af3c33fSJeff Kirsher {
113249c7ffbeSAlexander Duyck 	int v_idx = adapter->num_q_vectors;
11338af3c33fSJeff Kirsher 
113449c7ffbeSAlexander Duyck 	adapter->num_tx_queues = 0;
113533fdc82fSJohn Fastabend 	adapter->num_xdp_queues = 0;
113649c7ffbeSAlexander Duyck 	adapter->num_rx_queues = 0;
113749c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 0;
11388af3c33fSJeff Kirsher 
113949c7ffbeSAlexander Duyck 	while (v_idx--)
11408af3c33fSJeff Kirsher 		ixgbe_free_q_vector(adapter, v_idx);
11418af3c33fSJeff Kirsher }
11428af3c33fSJeff Kirsher 
11438af3c33fSJeff Kirsher static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
11448af3c33fSJeff Kirsher {
11458af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
11468af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
11478af3c33fSJeff Kirsher 		pci_disable_msix(adapter->pdev);
11488af3c33fSJeff Kirsher 		kfree(adapter->msix_entries);
11498af3c33fSJeff Kirsher 		adapter->msix_entries = NULL;
11508af3c33fSJeff Kirsher 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
11518af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
11528af3c33fSJeff Kirsher 		pci_disable_msi(adapter->pdev);
11538af3c33fSJeff Kirsher 	}
11548af3c33fSJeff Kirsher }
11558af3c33fSJeff Kirsher 
11568af3c33fSJeff Kirsher /**
11578af3c33fSJeff Kirsher  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
11588af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
11598af3c33fSJeff Kirsher  *
11608af3c33fSJeff Kirsher  * Attempt to configure the interrupts using the best available
11618af3c33fSJeff Kirsher  * capabilities of the hardware and the kernel.
11628af3c33fSJeff Kirsher  **/
1163ac802f5dSAlexander Duyck static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
11648af3c33fSJeff Kirsher {
11653bcf3446SJacob Keller 	int err;
11668af3c33fSJeff Kirsher 
11673bcf3446SJacob Keller 	/* We will try to get MSI-X interrupts first */
11683bcf3446SJacob Keller 	if (!ixgbe_acquire_msix_vectors(adapter))
1169ac802f5dSAlexander Duyck 		return;
11708af3c33fSJeff Kirsher 
1171eec66731SJacob Keller 	/* At this point, we do not have MSI-X capabilities. We need to
1172eec66731SJacob Keller 	 * reconfigure or disable any features that require MSI-X
1173eec66731SJacob Keller 	 * capability.
1174eec66731SJacob Keller 	 */
1175eec66731SJacob Keller 
1176c1c55f63SJacob Keller 	/* Disable DCB if we have more than one traffic class */
11770efbf12bSAlexander Duyck 	if (adapter->hw_tcs > 1) {
1178c1c55f63SJacob Keller 		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
1179b724e9f2SAlexander Duyck 		netdev_reset_tc(adapter->netdev);
118039cb681bSAlexander Duyck 
1181b724e9f2SAlexander Duyck 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1182b724e9f2SAlexander Duyck 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1183b724e9f2SAlexander Duyck 
1184b724e9f2SAlexander Duyck 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1185b724e9f2SAlexander Duyck 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
1186b724e9f2SAlexander Duyck 		adapter->dcb_cfg.pfc_mode_enable = false;
1187b724e9f2SAlexander Duyck 	}
1188d786cf7bSJacob Keller 
11890efbf12bSAlexander Duyck 	adapter->hw_tcs = 0;
1190b724e9f2SAlexander Duyck 	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1191b724e9f2SAlexander Duyck 	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
1192b724e9f2SAlexander Duyck 
1193d786cf7bSJacob Keller 	/* Disable SR-IOV support */
1194d786cf7bSJacob Keller 	e_dev_warn("Disabling SR-IOV support\n");
11958af3c33fSJeff Kirsher 	ixgbe_disable_sriov(adapter);
11968af3c33fSJeff Kirsher 
1197d786cf7bSJacob Keller 	/* Disable RSS */
1198d786cf7bSJacob Keller 	e_dev_warn("Disabling RSS support\n");
1199fbe7ca7fSAlexander Duyck 	adapter->ring_feature[RING_F_RSS].limit = 1;
1200b724e9f2SAlexander Duyck 
1201eec66731SJacob Keller 	/* recalculate number of queues now that many features have been
1202eec66731SJacob Keller 	 * changed or disabled.
1203eec66731SJacob Keller 	 */
1204ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
120549c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 1;
120649c7ffbeSAlexander Duyck 
12078af3c33fSJeff Kirsher 	err = pci_enable_msi(adapter->pdev);
12085d31b48aSJacob Keller 	if (err)
12095d31b48aSJacob Keller 		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
12106ec1b71fSJacob Keller 			   err);
12115d31b48aSJacob Keller 	else
1212ac802f5dSAlexander Duyck 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
12138af3c33fSJeff Kirsher }
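/*
 * Illustrative sketch, not driver code: after
 * ixgbe_set_interrupt_capability() returns, exactly one of three
 * interrupt modes is in effect (MSI-X, MSI, or legacy INTx), and the
 * adapter flags record which.  The helper name below is hypothetical.
 */
static const char *ixgbe_example_irq_mode(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		return "MSI-X";
	if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
		return "MSI";
	return "legacy INTx";
}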
12148af3c33fSJeff Kirsher 
12158af3c33fSJeff Kirsher /**
12168af3c33fSJeff Kirsher  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
12178af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
12188af3c33fSJeff Kirsher  *
12198af3c33fSJeff Kirsher  * We determine which interrupt scheme to use based on...
12208af3c33fSJeff Kirsher  * - Kernel support (MSI, MSI-X)
12218af3c33fSJeff Kirsher  *   - which can be user-defined (via MODULE_PARAM)
12228af3c33fSJeff Kirsher  * - Hardware queue count (num_*_queues)
12238af3c33fSJeff Kirsher  *   - defined by miscellaneous hardware support/features (RSS, etc.)
12248af3c33fSJeff Kirsher  **/
12258af3c33fSJeff Kirsher int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
12268af3c33fSJeff Kirsher {
12278af3c33fSJeff Kirsher 	int err;
12288af3c33fSJeff Kirsher 
12298af3c33fSJeff Kirsher 	/* Number of supported queues */
1230ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
12318af3c33fSJeff Kirsher 
1232ac802f5dSAlexander Duyck 	/* Set interrupt mode */
1233ac802f5dSAlexander Duyck 	ixgbe_set_interrupt_capability(adapter);
12348af3c33fSJeff Kirsher 
12358af3c33fSJeff Kirsher 	err = ixgbe_alloc_q_vectors(adapter);
12368af3c33fSJeff Kirsher 	if (err) {
12378af3c33fSJeff Kirsher 		e_dev_err("Unable to allocate memory for queue vectors\n");
12388af3c33fSJeff Kirsher 		goto err_alloc_q_vectors;
12398af3c33fSJeff Kirsher 	}
12408af3c33fSJeff Kirsher 
12418af3c33fSJeff Kirsher 	ixgbe_cache_ring_register(adapter);
12428af3c33fSJeff Kirsher 
124333fdc82fSJohn Fastabend 	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
12448af3c33fSJeff Kirsher 		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
124533fdc82fSJohn Fastabend 		   adapter->num_rx_queues, adapter->num_tx_queues,
124633fdc82fSJohn Fastabend 		   adapter->num_xdp_queues);
12478af3c33fSJeff Kirsher 
12488af3c33fSJeff Kirsher 	set_bit(__IXGBE_DOWN, &adapter->state);
12498af3c33fSJeff Kirsher 
12508af3c33fSJeff Kirsher 	return 0;
12518af3c33fSJeff Kirsher 
12528af3c33fSJeff Kirsher err_alloc_q_vectors:
12538af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
12548af3c33fSJeff Kirsher 	return err;
12558af3c33fSJeff Kirsher }
12568af3c33fSJeff Kirsher 
12578af3c33fSJeff Kirsher /**
12588af3c33fSJeff Kirsher  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
12598af3c33fSJeff Kirsher  * @adapter: board private structure to clear interrupt scheme on
12608af3c33fSJeff Kirsher  *
12618af3c33fSJeff Kirsher  * We go through and clear interrupt-specific resources and reset the structure
12628af3c33fSJeff Kirsher  * to pre-load conditions.
12638af3c33fSJeff Kirsher  **/
12648af3c33fSJeff Kirsher void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
12658af3c33fSJeff Kirsher {
12668af3c33fSJeff Kirsher 	adapter->num_tx_queues = 0;
126733fdc82fSJohn Fastabend 	adapter->num_xdp_queues = 0;
12688af3c33fSJeff Kirsher 	adapter->num_rx_queues = 0;
12698af3c33fSJeff Kirsher 
12708af3c33fSJeff Kirsher 	ixgbe_free_q_vectors(adapter);
12718af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
12728af3c33fSJeff Kirsher }
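/*
 * Illustrative sketch, not driver code: ixgbe_init_interrupt_scheme()
 * and ixgbe_clear_interrupt_scheme() are expected to be paired, e.g.
 * around a reconfiguration that changes the queue layout.  The function
 * name below is hypothetical and error handling is simplified.
 */
static int ixgbe_example_reinit(struct ixgbe_adapter *adapter)
{
	int err;

	ixgbe_clear_interrupt_scheme(adapter);

	/* ... change feature flags, queue counts, etc. ... */

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		e_dev_err("interrupt scheme re-init failed: %d\n", err);

	return err;
}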
12738af3c33fSJeff Kirsher 
12748af3c33fSJeff Kirsher void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
12758af3c33fSJeff Kirsher 		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
12768af3c33fSJeff Kirsher {
12778af3c33fSJeff Kirsher 	struct ixgbe_adv_tx_context_desc *context_desc;
12788af3c33fSJeff Kirsher 	u16 i = tx_ring->next_to_use;
12798af3c33fSJeff Kirsher 
12808af3c33fSJeff Kirsher 	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
12818af3c33fSJeff Kirsher 
12828af3c33fSJeff Kirsher 	i++;
12838af3c33fSJeff Kirsher 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
12848af3c33fSJeff Kirsher 
12858af3c33fSJeff Kirsher 	/* set bits to identify this as an advanced context descriptor */
12868af3c33fSJeff Kirsher 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
12878af3c33fSJeff Kirsher 
12888af3c33fSJeff Kirsher 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
12898af3c33fSJeff Kirsher 	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
12908af3c33fSJeff Kirsher 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
12918af3c33fSJeff Kirsher 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
12928af3c33fSJeff Kirsher }
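/*
 * Illustrative sketch, not driver code: how a transmit path might pack
 * the three descriptor words before calling ixgbe_tx_ctxtdesc().  This
 * is a simplified TCP/IPv4 TSO case; the real ixgbe_tso() also handles
 * IPv6, encapsulation, and VLAN tags.  The function name is
 * hypothetical; the IXGBE_ADVTXD_* values come from the hardware
 * descriptor layout in ixgbe_type.h.
 */
static void ixgbe_example_tso_ctxt(struct ixgbe_ring *tx_ring,
				   struct sk_buff *skb, u16 mss)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;

	/* advanced descriptor: TCP over IPv4 */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP | IXGBE_ADVTXD_TUCMD_IPV4;

	/* MAC header length (shifted) and IP header length share a word */
	vlan_macip_lens = skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= skb_network_header_len(skb);

	/* MSS and L4 header length drive segmentation */
	mss_l4len_idx = mss << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= tcp_hdrlen(skb) << IXGBE_ADVTXD_L4LEN_SHIFT;

	/* seqnum_seed (FCoE SOF/EOF) is unused for TSO, hence 0 */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
			  mss_l4len_idx);
}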
12938af3c33fSJeff Kirsher 
1294