/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
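
/*
 * Worked example for the pool math above (illustrative only; it assumes
 * the 82599 4-queue pool mask, IXGBE_82599_VMDQ_4Q_MASK = 0x7C, and a
 * hypothetical configuration of tcs = 3 with vmdq->offset = 0):
 *
 *   __ALIGN_MASK(x, mask) is ((x + mask) & ~mask), so
 *   __ALIGN_MASK(1, ~vmdq->mask) = 4 is the pool stride (queues per pool),
 *   reg_idx & ~vmdq->mask is the queue index within the current pool, and
 *   __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds reg_idx up to the next pool.
 *
 *   Rings 0-2 then map to registers 0-2.  At i = 3, reg_idx = 3 and
 *   (3 & 0x3) = 3 >= tcs, so reg_idx aligns up to 4; rings 3-5 map to
 *   registers 4-6, leaving the unused fourth queue of every pool skipped.
 */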

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
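
/*
 * Illustrative mapping (not from the original source): on an 82599 with
 * num_tcs = 4 and rss_i = 16, ixgbe_get_first_reg_idx() yields Rx bases
 * 0/32/64/96 (tc << 5) and Tx bases 0/64/96/112, so TC1's sixteen rings
 * (array indices 16-31) land on Rx registers 32-47 and Tx registers 64-79.
 */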

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV/VMDq to the assigned rings,
 * starting at the configured VMDq pool offset.  FCoE rings, when allocated
 * separately, are mapped 1:1 after the VMDq pools.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
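
/*
 * Hypothetical walk-through of the skip logic above (illustrative only):
 * with the 2-queue pool mask (IXGBE_82599_VMDQ_2Q_MASK) and the RSS feature
 * limited to rss->indices = 1, ring 0 maps to register 0; at ring 1,
 * reg_idx = 1 and (1 & ~vmdq->mask) = 1 >= 1, so reg_idx aligns up to 2.
 * The rings therefore occupy only the first queue of each 2-queue pool:
 * registers 0, 2, 4, and so on.
 */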

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
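
/*
 * Example sizing (illustrative, with assumed values): a request for 12
 * pools (vmdq limit plus offset) with tcs = 8 selects the 16-pool mode and
 * the 8-queue pool mask, so each pool carries one queue per TC and
 * num_rx_queues = num_tx_queues = 12 * 8 = 96.  That leaves
 * fcoe_i = (128 / 8) - 12 = 4 available for a separate FCoE allocation,
 * capped later by fcoe->limit.
 */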

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
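
/*
 * Example sizing (illustrative, with assumed values): on an 82599 with
 * dev->num_tx_queues = 64, tcs = 4, and an RSS feature limit of 16,
 * rss_i = min(64 / 4, 16, f->limit) = 16, so each TC is advertised as a
 * 16-queue range via netdev_set_tc_queue() (offsets 0/16/32/48) and
 * num_rx_queues = num_tx_queues = 64.
 */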

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
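
/*
 * Example sizing (illustrative, with assumed values): with 40 pools
 * requested (vmdq_i = 40 > 32), the 64-pool mode is selected, so each pool
 * gets 2 queues and rss_i = 2, giving num_rx_queues = num_tx_queues =
 * 40 * 2 = 80.  With vmdq_i = 16, rss_i >= 4, and no extra L2 forwarding
 * (macvlan) pools in use, the 32-pool mode applies instead: 4 queues per
 * pool and 16 * 4 = 64 queues total.
 */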

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic;
	 * however, it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
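
/*
 * Example sizing (illustrative, with assumed values): with an RSS limit of
 * 16, ATR sampling enabled, and a Flow Director feature limit of 32, the
 * FDIR branch raises rss_i to 32 and sets IXGBE_FLAG_FDIR_HASH_CAPABLE
 * (assuming perfect filters are not in use), so the adapter ends up with
 * 32 Rx and 32 Tx queues rather than the 16 that RSS alone would request.
 */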

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
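
/*
 * Illustrative retry sequence for the loop above: pci_enable_msix() returns
 * 0 on success, a negative errno on hard failure, or a positive count of
 * vectors that could be supported.  So a request for 17 vectors that
 * returns 9 is simply retried with 9; on success, NON_Q_VECTORS is
 * subtracted for the non-queue (link status, etc.) interrupt and the
 * remainder become queue vectors, capped at adapter->max_q_vectors.
 */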

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}
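
/*
 * Note: rings are pushed onto the head of the container's singly linked
 * list, so ixgbe_for_each_ring() visits them in reverse order of addition;
 * head->count simply tracks the list length.
 */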

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);
	napi_hash_add(&q_vector->napi);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_10K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
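
/*
 * Interleaving example (illustrative): with v_count = 4 q_vectors and
 * 8 Tx rings, vector 0 is created with txr_count = 2, txr_idx = 0 and owns
 * rings 0 and 4 (txr_idx advances by v_count each pass), vector 1 owns
 * rings 1 and 5, and so on, so consecutive ring indices land on different
 * vectors (and, with the Flow Director CPU hint above, different CPUs).
 */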

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
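
/*
 * Distribution example (illustrative): with 24 Rx and 24 Tx queues over
 * 9 q_vectors, DIV_ROUND_UP spreads the remainders evenly; vectors 0-5
 * each take 3 Rx + 3 Tx rings and vectors 6-8 each take 2 Rx + 2 Tx,
 * accounting for all 24 of each.  When q_vectors >= rx + tx, the first
 * loop instead gives every Rx ring a dedicated vector of its own.
 */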
10168af3c33fSJeff Kirsher 
10178af3c33fSJeff Kirsher /**
10188af3c33fSJeff Kirsher  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
10198af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10208af3c33fSJeff Kirsher  *
10218af3c33fSJeff Kirsher  * This function frees the memory allocated to the q_vectors.  In addition if
10228af3c33fSJeff Kirsher  * NAPI is enabled it will delete any references to the NAPI struct prior
10238af3c33fSJeff Kirsher  * to freeing the q_vector.
10248af3c33fSJeff Kirsher  **/
10258af3c33fSJeff Kirsher static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
10268af3c33fSJeff Kirsher {
102749c7ffbeSAlexander Duyck 	int v_idx = adapter->num_q_vectors;
10288af3c33fSJeff Kirsher 
102949c7ffbeSAlexander Duyck 	adapter->num_tx_queues = 0;
103049c7ffbeSAlexander Duyck 	adapter->num_rx_queues = 0;
103149c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 0;
10328af3c33fSJeff Kirsher 
103349c7ffbeSAlexander Duyck 	while (v_idx--)
10348af3c33fSJeff Kirsher 		ixgbe_free_q_vector(adapter, v_idx);
10358af3c33fSJeff Kirsher }
10368af3c33fSJeff Kirsher 
10378af3c33fSJeff Kirsher static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
10388af3c33fSJeff Kirsher {
10398af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
10408af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
10418af3c33fSJeff Kirsher 		pci_disable_msix(adapter->pdev);
10428af3c33fSJeff Kirsher 		kfree(adapter->msix_entries);
10438af3c33fSJeff Kirsher 		adapter->msix_entries = NULL;
10448af3c33fSJeff Kirsher 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
10458af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
10468af3c33fSJeff Kirsher 		pci_disable_msi(adapter->pdev);
10478af3c33fSJeff Kirsher 	}
10488af3c33fSJeff Kirsher }
10498af3c33fSJeff Kirsher 
10508af3c33fSJeff Kirsher /**
10518af3c33fSJeff Kirsher  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
10528af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10538af3c33fSJeff Kirsher  *
10548af3c33fSJeff Kirsher  * Attempt to configure the interrupts using the best available
10558af3c33fSJeff Kirsher  * capabilities of the hardware and the kernel.
10568af3c33fSJeff Kirsher  **/
1057ac802f5dSAlexander Duyck static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
10588af3c33fSJeff Kirsher {
10598af3c33fSJeff Kirsher 	struct ixgbe_hw *hw = &adapter->hw;
1060ac802f5dSAlexander Duyck 	int vector, v_budget, err;
10618af3c33fSJeff Kirsher 
10628af3c33fSJeff Kirsher 	/*
10638af3c33fSJeff Kirsher 	 * It's easy to be greedy for MSI-X vectors, but it really
10648af3c33fSJeff Kirsher 	 * doesn't do us much good if we have a lot more vectors
10658af3c33fSJeff Kirsher 	 * than CPUs.  So let's be conservative and only ask for
10668af3c33fSJeff Kirsher 	 * (roughly) the same number of vectors as there are CPUs.
10678af3c33fSJeff Kirsher 	 * The default is to use pairs of vectors.
10688af3c33fSJeff Kirsher 	 */
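	/*
	 * Worked example (hypothetical numbers): with 8 Rx and 8 Tx queues
	 * on a 4-CPU system, v_budget = max(8, 8) = 8, clamped to the 4
	 * online CPUs, plus NON_Q_VECTORS for the non-queue interrupt.
	 */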
10698af3c33fSJeff Kirsher 	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
10708af3c33fSJeff Kirsher 	v_budget = min_t(int, v_budget, num_online_cpus());
10718af3c33fSJeff Kirsher 	v_budget += NON_Q_VECTORS;
10728af3c33fSJeff Kirsher 
10738af3c33fSJeff Kirsher 	/*
10748af3c33fSJeff Kirsher 	 * At the same time, hardware can only support a maximum of
10758af3c33fSJeff Kirsher 	 * hw->mac.max_msix_vectors vectors.  With features
10768af3c33fSJeff Kirsher 	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
10778af3c33fSJeff Kirsher 	 * descriptor queues supported by our device.  Thus, we cap it off in
10788af3c33fSJeff Kirsher 	 * those rare cases where the CPU count also exceeds our vector limit.
10798af3c33fSJeff Kirsher 	 */
10808af3c33fSJeff Kirsher 	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
10818af3c33fSJeff Kirsher 
10828af3c33fSJeff Kirsher 	/* A failure in MSI-X entry allocation isn't fatal, but it does
10838af3c33fSJeff Kirsher 	 * mean we disable MSI-X capabilities of the adapter. */
10848af3c33fSJeff Kirsher 	adapter->msix_entries = kcalloc(v_budget,
10858af3c33fSJeff Kirsher 					sizeof(struct msix_entry), GFP_KERNEL);
10868af3c33fSJeff Kirsher 	if (adapter->msix_entries) {
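		/* each entry's .entry index names the MSI-X table slot we
		 * are requesting from the PCI core
		 */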
10878af3c33fSJeff Kirsher 		for (vector = 0; vector < v_budget; vector++)
10888af3c33fSJeff Kirsher 			adapter->msix_entries[vector].entry = vector;
10898af3c33fSJeff Kirsher 
10908af3c33fSJeff Kirsher 		ixgbe_acquire_msix_vectors(adapter, v_budget);
10918af3c33fSJeff Kirsher 
10928af3c33fSJeff Kirsher 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1093ac802f5dSAlexander Duyck 			return;
10948af3c33fSJeff Kirsher 	}
10958af3c33fSJeff Kirsher 
1096b724e9f2SAlexander Duyck 	/* disable DCB if number of TCs exceeds 1 */
1097b724e9f2SAlexander Duyck 	if (netdev_get_num_tc(adapter->netdev) > 1) {
1098b724e9f2SAlexander Duyck 		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
1099b724e9f2SAlexander Duyck 		netdev_reset_tc(adapter->netdev);
110039cb681bSAlexander Duyck 
1101b724e9f2SAlexander Duyck 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1102b724e9f2SAlexander Duyck 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1103b724e9f2SAlexander Duyck 
1104b724e9f2SAlexander Duyck 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1105b724e9f2SAlexander Duyck 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
1106b724e9f2SAlexander Duyck 		adapter->dcb_cfg.pfc_mode_enable = false;
1107b724e9f2SAlexander Duyck 	}
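	/* fall back to a single traffic class for priority groups and PFC */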
1108b724e9f2SAlexander Duyck 	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1109b724e9f2SAlexander Duyck 	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
1110b724e9f2SAlexander Duyck 
1111b724e9f2SAlexander Duyck 	/* disable SR-IOV */
11128af3c33fSJeff Kirsher 	ixgbe_disable_sriov(adapter);
11138af3c33fSJeff Kirsher 
1114b724e9f2SAlexander Duyck 	/* disable RSS */
1115fbe7ca7fSAlexander Duyck 	adapter->ring_feature[RING_F_RSS].limit = 1;
1116b724e9f2SAlexander Duyck 
1117ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
111849c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 1;
111949c7ffbeSAlexander Duyck 
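	/* try MSI; if this fails too we end up with legacy (INTx) interrupts */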
11208af3c33fSJeff Kirsher 	err = pci_enable_msi(adapter->pdev);
1121ac802f5dSAlexander Duyck 	if (err) {
11228af3c33fSJeff Kirsher 		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
11238af3c33fSJeff Kirsher 			     "Unable to allocate MSI interrupt, falling back to legacy.  Error: %d\n",
11248af3c33fSJeff Kirsher 			     err);
1125ac802f5dSAlexander Duyck 		return;
11268af3c33fSJeff Kirsher 	}
1127ac802f5dSAlexander Duyck 	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
11288af3c33fSJeff Kirsher }
11298af3c33fSJeff Kirsher 
11308af3c33fSJeff Kirsher /**
11318af3c33fSJeff Kirsher  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
11328af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
11338af3c33fSJeff Kirsher  *
11348af3c33fSJeff Kirsher  * We determine which interrupt scheme to use based on...
11358af3c33fSJeff Kirsher  * - Kernel support (MSI, MSI-X)
11368af3c33fSJeff Kirsher  *   - which can be user-defined (via MODULE_PARAM)
11378af3c33fSJeff Kirsher  * - Hardware queue count (num_*_queues)
11388af3c33fSJeff Kirsher  *   - defined by miscellaneous hardware support/features (RSS, etc.)
11398af3c33fSJeff Kirsher  **/
11408af3c33fSJeff Kirsher int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
11418af3c33fSJeff Kirsher {
11428af3c33fSJeff Kirsher 	int err;
11438af3c33fSJeff Kirsher 
11448af3c33fSJeff Kirsher 	/* Number of supported queues */
1145ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
11468af3c33fSJeff Kirsher 
1147ac802f5dSAlexander Duyck 	/* Set interrupt mode */
1148ac802f5dSAlexander Duyck 	ixgbe_set_interrupt_capability(adapter);
11498af3c33fSJeff Kirsher 
11508af3c33fSJeff Kirsher 	err = ixgbe_alloc_q_vectors(adapter);
11518af3c33fSJeff Kirsher 	if (err) {
11528af3c33fSJeff Kirsher 		e_dev_err("Unable to allocate memory for queue vectors\n");
11538af3c33fSJeff Kirsher 		goto err_alloc_q_vectors;
11548af3c33fSJeff Kirsher 	}
11558af3c33fSJeff Kirsher 
11568af3c33fSJeff Kirsher 	ixgbe_cache_ring_register(adapter);
11578af3c33fSJeff Kirsher 
11588af3c33fSJeff Kirsher 	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
11598af3c33fSJeff Kirsher 		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
11608af3c33fSJeff Kirsher 		   adapter->num_rx_queues, adapter->num_tx_queues);
11618af3c33fSJeff Kirsher 
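	/* keep the adapter marked down until the interface is brought up */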
11628af3c33fSJeff Kirsher 	set_bit(__IXGBE_DOWN, &adapter->state);
11638af3c33fSJeff Kirsher 
11648af3c33fSJeff Kirsher 	return 0;
11658af3c33fSJeff Kirsher 
11668af3c33fSJeff Kirsher err_alloc_q_vectors:
11678af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
11688af3c33fSJeff Kirsher 	return err;
11698af3c33fSJeff Kirsher }
11708af3c33fSJeff Kirsher 
11718af3c33fSJeff Kirsher /**
11728af3c33fSJeff Kirsher  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
11738af3c33fSJeff Kirsher  * @adapter: board private structure to clear interrupt scheme on
11748af3c33fSJeff Kirsher  *
11758af3c33fSJeff Kirsher  * We go through and clear interrupt specific resources and reset the structure
11768af3c33fSJeff Kirsher  * to pre-load conditions
11778af3c33fSJeff Kirsher  **/
11788af3c33fSJeff Kirsher void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
11798af3c33fSJeff Kirsher {
11808af3c33fSJeff Kirsher 	adapter->num_tx_queues = 0;
11818af3c33fSJeff Kirsher 	adapter->num_rx_queues = 0;
11828af3c33fSJeff Kirsher 
11838af3c33fSJeff Kirsher 	ixgbe_free_q_vectors(adapter);
11848af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
11858af3c33fSJeff Kirsher }
11868af3c33fSJeff Kirsher 
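/**
 * ixgbe_tx_ctxtdesc - write an advanced Tx context descriptor to the ring
 * @tx_ring: ring to place the descriptor on
 * @vlan_macip_lens: packed VLAN tag, MAC header length, and IP header length
 * @fcoe_sof_eof: FCoE SOF/EOF state (written to the seqnum_seed field)
 * @type_tucmd: descriptor type and offload command bits
 * @mss_l4len_idx: packed MSS, L4 header length, and context index
 *
 * Writes the four dwords of a context descriptor at next_to_use and then
 * advances next_to_use, wrapping back to the start of the ring as needed.
 *
 * A caller typically packs vlan_macip_lens along these lines (a sketch,
 * using the shifts and masks from the ixgbe headers):
 *
 *	vlan_macip_lens  = skb_network_header_len(skb);
 *	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 *	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 **/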
11878af3c33fSJeff Kirsher void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
11888af3c33fSJeff Kirsher 		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
11898af3c33fSJeff Kirsher {
11908af3c33fSJeff Kirsher 	struct ixgbe_adv_tx_context_desc *context_desc;
11918af3c33fSJeff Kirsher 	u16 i = tx_ring->next_to_use;
11928af3c33fSJeff Kirsher 
11938af3c33fSJeff Kirsher 	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
11948af3c33fSJeff Kirsher 
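	/* advance next_to_use, wrapping back to the start of the ring */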
11958af3c33fSJeff Kirsher 	i++;
11968af3c33fSJeff Kirsher 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
11978af3c33fSJeff Kirsher 
11988af3c33fSJeff Kirsher 	/* set bits to identify this as an advanced context descriptor */
11998af3c33fSJeff Kirsher 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
12008af3c33fSJeff Kirsher 
12018af3c33fSJeff Kirsher 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
12028af3c33fSJeff Kirsher 	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
12038af3c33fSJeff Kirsher 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
12048af3c33fSJeff Kirsher 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
12058af3c33fSJeff Kirsher }
12068af3c33fSJeff Kirsher 