/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

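	/*
	 * Worked example of the pool math below (illustrative only; it
	 * assumes __ALIGN_MASK(x, m) == ((x + m) & ~(m)) from
	 * linux/kernel.h and an 82599-style 8-queues-per-pool layout
	 * where vmdq->mask == 0x78, so the low bits of ~vmdq->mask
	 * are 0x07):
	 *
	 *   __ALIGN_MASK(1, ~vmdq->mask)  == (1 + 7) & ~7  ==  8
	 *   __ALIGN_MASK(19, ~vmdq->mask) == (19 + 7) & ~7 == 24
	 *
	 * The first expression yields the pool stride (queues per pool);
	 * the second rounds a register index up to the start of the
	 * next pool.
	 */
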
	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If this pool's TC indices are used up, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If this pool's TC indices are used up, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, so we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
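
/*
 * Sample of the resulting mapping (hypothetical numbers: vmdq->offset == 4,
 * an 8-queues-per-pool mask, tcs == 4): ring 0 starts at reg_idx
 * 4 * 8 == 32, rings 0-3 take registers 32-35, and ring 4 would land on
 * register 36, but (36 & 0x7) == 4 >= tcs, so it is realigned to 40, the
 * first queue of the next pool.
 */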

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
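
/*
 * A quick check of the math above: with 8 TCs on 82599/X540, tc == 4
 * falls in the "tc < 5" branch, so *tx == (4 + 2) << 4 == 96 and
 * *rx == 4 << 4 == 64; TC4 then owns Tx registers 96-103 (8 per TC) and
 * Rx registers 64-79 (16 per TC), consistent with the tables in the
 * comments.
 */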

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
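
/*
 * Example (hypothetical configuration): with num_tcs == 4 and rss_i == 16,
 * rings 16-31 belong to TC1, so tx_ring[16]->reg_idx starts at the TC1 Tx
 * base (1 << 6 == 64) and rx_ring[16]->reg_idx at the TC1 Rx base
 * (1 << 5 == 32), each advancing by one per ring.
 */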

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV/VMDq to the assigned rings,
 * placing each pool's queues at its VMDq register offset and mapping any
 * dedicated FCoE rings 1:1 after the pools.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If this pool's RSS indices are used up, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If this pool's RSS indices are used up, move to the next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
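
/*
 * Illustration (hypothetical numbers): in 64-pool mode (2 queues per pool)
 * with rss->indices limited to 1, a PF at vmdq->offset == 16 starts at
 * reg_idx 32; ring 1 would take register 33, but (33 & ~vmdq->mask) == 1
 * is >= rss->indices, so it is realigned to 34. Every ring then lands on
 * the first queue of its own pool.
 */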

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return false;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
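
/*
 * These masks cover the low bits of a ring's register index: for example,
 * with IXGBE_RSS_4Q_MASK (0x3), (reg_idx & mask) gives the ring's position
 * within a 4-queue RSS set, which is how the caching code above decides
 * when an index has run past the queues a pool actually owns.
 */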

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously,
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
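
/*
 * Plugging in numbers (hypothetical): with 8 TCs and 4 active pools
 * starting at offset 0, vmdq_i stays 4 under the 16-pool cap and the
 * 8-queue mask is used, so fcoe_i = (128 / 8) - 4 == 12 leftover pools,
 * one candidate FCoE queue per pool before the CPU and feature-limit
 * clamps; the adapter itself gets 4 * 8 == 32 Tx and 32 Rx queues.
 */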

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
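
/*
 * Concretely (hypothetical numbers): an 82599 with 4 TCs and 64 netdev
 * Tx queues gives rss_i = 64 / 4 == 16, capped at 16 with the 16Q mask;
 * each TC i is then advertised as 16 queues starting at 16 * i via
 * netdev_set_tc_queue(), for 64 Tx and 64 Rx queues overall.
 */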

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS-like
	 * behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* reserve no more than number of CPUs */
			fcoe_i = min_t(u16, fcoe_i, num_online_cpus());

			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
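
/*
 * Rough numbers (hypothetical): asking for 40 pools trips the "> 32"
 * branch, so each pool gets 2 queues (rss_i clamped to 2) and, before
 * any limits, fcoe_i = 128 - 40 * 2 == 48 queues remain for FCoE; the
 * PF itself ends up with vmdq_i * rss_i Tx and Rx queues once its
 * starting offset is subtracted back out.
 */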

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		return false;
	}

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		f = &adapter->ring_feature[RING_F_FDIR];

		f->indices = min_t(u16, num_online_cpus(), f->limit);
		rss_i = max_t(u16, rss_i, f->indices);
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic,
	 * but it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
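
/*
 * For example (hypothetical): with an RSS limit of 4 on an 8-CPU system
 * that has ATR enabled and an FDIR limit of 64, the FDIR indices become
 * min(8, 64) == 8 and rss_i is raised to 8, so flow steering still has a
 * ring per CPU even though plain RSS asked for only 4.
 */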

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
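
/*
 * A sample negotiation (illustrative; pci_enable_msix() of this era
 * returns 0 on success, a negative errno on hard failure, or the number
 * of vectors that could be allocated): asking for 10 when only 6 exist
 * returns 6, the loop retries with 6 and gets 0, and, assuming
 * NON_Q_VECTORS is 1 for the "other" interrupt, the driver is left with
 * min(5, max_q_vectors) queue vectors.
 */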

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = -1;
	int cpu = -1;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		if (cpu_online(v_idx)) {
			cpu = v_idx;
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	else
		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
	q_vector->numa_node = node;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
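
/*
 * The v_count stride interleaves rings across vectors: e.g. with 4
 * q_vectors, the vector at v_idx == 1 that is given txr_count == 3 and
 * txr_idx == 1 owns Tx rings 1, 5 and 9, so consecutive ring indices
 * land on different vectors (numbers are illustrative only).
 */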

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * so we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
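
/*
 * Distribution example (illustrative): 6 Rx and 6 Tx queues over 4
 * vectors gives DIV_ROUND_UP shares of 2/2, 2/2, 1/1 and 1/1, so the
 * earlier vectors soak up the remainder and nothing is left over once
 * v_idx reaches q_vectors.
 */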
9848af3c33fSJeff Kirsher 
9858af3c33fSJeff Kirsher /**
9868af3c33fSJeff Kirsher  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
9878af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
9888af3c33fSJeff Kirsher  *
9898af3c33fSJeff Kirsher  * This function frees the memory allocated to the q_vectors.  In addition if
9908af3c33fSJeff Kirsher  * NAPI is enabled it will delete any references to the NAPI struct prior
9918af3c33fSJeff Kirsher  * to freeing the q_vector.
9928af3c33fSJeff Kirsher  **/
9938af3c33fSJeff Kirsher static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
9948af3c33fSJeff Kirsher {
99549c7ffbeSAlexander Duyck 	int v_idx = adapter->num_q_vectors;
9968af3c33fSJeff Kirsher 
99749c7ffbeSAlexander Duyck 	adapter->num_tx_queues = 0;
99849c7ffbeSAlexander Duyck 	adapter->num_rx_queues = 0;
99949c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 0;
10008af3c33fSJeff Kirsher 
100149c7ffbeSAlexander Duyck 	while (v_idx--)
10028af3c33fSJeff Kirsher 		ixgbe_free_q_vector(adapter, v_idx);
10038af3c33fSJeff Kirsher }
10048af3c33fSJeff Kirsher 
10058af3c33fSJeff Kirsher static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
10068af3c33fSJeff Kirsher {
10078af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
10088af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
10098af3c33fSJeff Kirsher 		pci_disable_msix(adapter->pdev);
10108af3c33fSJeff Kirsher 		kfree(adapter->msix_entries);
10118af3c33fSJeff Kirsher 		adapter->msix_entries = NULL;
10128af3c33fSJeff Kirsher 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
10138af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
10148af3c33fSJeff Kirsher 		pci_disable_msi(adapter->pdev);
10158af3c33fSJeff Kirsher 	}
10168af3c33fSJeff Kirsher }
10178af3c33fSJeff Kirsher 
10188af3c33fSJeff Kirsher /**
10198af3c33fSJeff Kirsher  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
10208af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10218af3c33fSJeff Kirsher  *
10228af3c33fSJeff Kirsher  * Attempt to configure the interrupts using the best available
10238af3c33fSJeff Kirsher  * capabilities of the hardware and the kernel.
10248af3c33fSJeff Kirsher  **/
1025ac802f5dSAlexander Duyck static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
10268af3c33fSJeff Kirsher {
10278af3c33fSJeff Kirsher 	struct ixgbe_hw *hw = &adapter->hw;
1028ac802f5dSAlexander Duyck 	int vector, v_budget, err;
10298af3c33fSJeff Kirsher 
10308af3c33fSJeff Kirsher 	/*
10318af3c33fSJeff Kirsher 	 * It's easy to be greedy for MSI-X vectors, but it really
10328af3c33fSJeff Kirsher 	 * doesn't do us much good if we have a lot more vectors
10338af3c33fSJeff Kirsher 	 * than CPU's.  So let's be conservative and only ask for
10348af3c33fSJeff Kirsher 	 * (roughly) the same number of vectors as there are CPU's.
10358af3c33fSJeff Kirsher 	 * The default is to use pairs of vectors.
10368af3c33fSJeff Kirsher 	 */
10378af3c33fSJeff Kirsher 	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
10388af3c33fSJeff Kirsher 	v_budget = min_t(int, v_budget, num_online_cpus());
10398af3c33fSJeff Kirsher 	v_budget += NON_Q_VECTORS;
10408af3c33fSJeff Kirsher 
10418af3c33fSJeff Kirsher 	/*
10428af3c33fSJeff Kirsher 	 * At the same time, hardware can only support a maximum of
10438af3c33fSJeff Kirsher 	 * hw->mac.max_msix_vectors vectors.  With features
10448af3c33fSJeff Kirsher 	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
10458af3c33fSJeff Kirsher 	 * descriptor queues supported by our device.  Thus, we cap it off in
10468af3c33fSJeff Kirsher 	 * those rare cases where the CPU count also exceeds our vector limit.
10478af3c33fSJeff Kirsher 	 */
10488af3c33fSJeff Kirsher 	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
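	/*
	 * Editor's worked example (values assumed): with 16 online CPUs,
	 * 16 Rx and 16 Tx queues, and NON_Q_VECTORS == 1, v_budget is
	 * min(max(16, 16), 16) + 1 = 17; an 82599-class part typically
	 * reports max_msix_vectors == 64, so the cap above leaves 17.
	 */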
10498af3c33fSJeff Kirsher 
10508af3c33fSJeff Kirsher 	/* A failure in MSI-X entry allocation isn't fatal, but it does
10518af3c33fSJeff Kirsher 	 * mean we disable MSI-X capabilities of the adapter. */
10528af3c33fSJeff Kirsher 	adapter->msix_entries = kcalloc(v_budget,
10538af3c33fSJeff Kirsher 					sizeof(struct msix_entry), GFP_KERNEL);
10548af3c33fSJeff Kirsher 	if (adapter->msix_entries) {
10558af3c33fSJeff Kirsher 		for (vector = 0; vector < v_budget; vector++)
10568af3c33fSJeff Kirsher 			adapter->msix_entries[vector].entry = vector;
10578af3c33fSJeff Kirsher 
10588af3c33fSJeff Kirsher 		ixgbe_acquire_msix_vectors(adapter, v_budget);
10598af3c33fSJeff Kirsher 
10608af3c33fSJeff Kirsher 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1061ac802f5dSAlexander Duyck 			return;
10628af3c33fSJeff Kirsher 	}
10638af3c33fSJeff Kirsher 
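	/* MSI-X is unavailable: fall back to a single-queue setup and clear
	 * every feature flag that depends on multiple queues or vectors.
	 */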
10648af3c33fSJeff Kirsher 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
10658af3c33fSJeff Kirsher 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
10668af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
10678af3c33fSJeff Kirsher 		e_err(probe,
10688af3c33fSJeff Kirsher 		      "ATR is not supported while multiple queues are disabled.  Disabling Flow Director\n");
10708af3c33fSJeff Kirsher 	}
10718af3c33fSJeff Kirsher 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
10728af3c33fSJeff Kirsher 	adapter->atr_sample_rate = 0;
10738af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10748af3c33fSJeff Kirsher 		ixgbe_disable_sriov(adapter);
10758af3c33fSJeff Kirsher 
1076ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
107749c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 1;
107849c7ffbeSAlexander Duyck 
10798af3c33fSJeff Kirsher 	err = pci_enable_msi(adapter->pdev);
1080ac802f5dSAlexander Duyck 	if (err) {
10818af3c33fSJeff Kirsher 		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
10828af3c33fSJeff Kirsher 			     "Unable to allocate MSI interrupt, falling back to legacy.  Error: %d\n",
10838af3c33fSJeff Kirsher 			     err);
1084ac802f5dSAlexander Duyck 		return;
10858af3c33fSJeff Kirsher 	}
1086ac802f5dSAlexander Duyck 	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
10878af3c33fSJeff Kirsher }
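
/* Editor's sketch (not part of the original file): the retry pattern behind
 * ixgbe_acquire_msix_vectors().  With the pre-3.14 pci_enable_msix(), a
 * positive return value is the number of vectors the platform could actually
 * provide, so the request shrinks until it succeeds, fails hard, or drops
 * below the driver's usable floor (MIN_MSIX_COUNT is assumed here):
 */
static void example_acquire_msix_vectors(struct ixgbe_adapter *adapter,
					 int vectors)
{
	while (vectors >= MIN_MSIX_COUNT) {
		int err = pci_enable_msix(adapter->pdev,
					  adapter->msix_entries, vectors);

		if (!err) {		/* got everything we asked for */
			adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
			adapter->num_q_vectors = vectors - NON_Q_VECTORS;
			return;
		}
		if (err < 0)		/* hard failure, give up on MSI-X */
			return;
		vectors = err;		/* retry with what is available */
	}
}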
10888af3c33fSJeff Kirsher 
10898af3c33fSJeff Kirsher /**
10908af3c33fSJeff Kirsher  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
10918af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10928af3c33fSJeff Kirsher  *
10938af3c33fSJeff Kirsher  * We determine which interrupt scheme to use based on...
10948af3c33fSJeff Kirsher  * - Kernel support (MSI, MSI-X)
10958af3c33fSJeff Kirsher  *   - which can be user-defined (via module parameters)
10968af3c33fSJeff Kirsher  * - Hardware queue count (num_*_queues)
10978af3c33fSJeff Kirsher  *   - defined by miscellaneous hardware support/features (RSS, etc.)
10988af3c33fSJeff Kirsher  **/
10998af3c33fSJeff Kirsher int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
11008af3c33fSJeff Kirsher {
11018af3c33fSJeff Kirsher 	int err;
11028af3c33fSJeff Kirsher 
11038af3c33fSJeff Kirsher 	/* Number of supported queues */
1104ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
11058af3c33fSJeff Kirsher 
1106ac802f5dSAlexander Duyck 	/* Set interrupt mode */
1107ac802f5dSAlexander Duyck 	ixgbe_set_interrupt_capability(adapter);
11088af3c33fSJeff Kirsher 
11098af3c33fSJeff Kirsher 	err = ixgbe_alloc_q_vectors(adapter);
11108af3c33fSJeff Kirsher 	if (err) {
11118af3c33fSJeff Kirsher 		e_dev_err("Unable to allocate memory for queue vectors\n");
11128af3c33fSJeff Kirsher 		goto err_alloc_q_vectors;
11138af3c33fSJeff Kirsher 	}
11148af3c33fSJeff Kirsher 
11158af3c33fSJeff Kirsher 	ixgbe_cache_ring_register(adapter);
11168af3c33fSJeff Kirsher 
11178af3c33fSJeff Kirsher 	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
11188af3c33fSJeff Kirsher 		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
11198af3c33fSJeff Kirsher 		   adapter->num_rx_queues, adapter->num_tx_queues);
11208af3c33fSJeff Kirsher 
11218af3c33fSJeff Kirsher 	set_bit(__IXGBE_DOWN, &adapter->state);
11228af3c33fSJeff Kirsher 
11238af3c33fSJeff Kirsher 	return 0;
11248af3c33fSJeff Kirsher 
11258af3c33fSJeff Kirsher err_alloc_q_vectors:
11268af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
11278af3c33fSJeff Kirsher 	return err;
11288af3c33fSJeff Kirsher }
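
/* Editor's sketch (hypothetical caller, modeled on the probe-time flow):
 * a non-zero return is propagated and earlier probe work is unwound.
 */
static int example_setup_interrupts(struct ixgbe_adapter *adapter)
{
	int err = ixgbe_init_interrupt_scheme(adapter);

	if (err) {
		/* a real caller would unwind its earlier setup here */
		return err;
	}

	return 0;
}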
11298af3c33fSJeff Kirsher 
11308af3c33fSJeff Kirsher /**
11318af3c33fSJeff Kirsher  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
11328af3c33fSJeff Kirsher  * @adapter: board private structure to clear interrupt scheme on
11338af3c33fSJeff Kirsher  *
11348af3c33fSJeff Kirsher  * We go through and clear interrupt-specific resources and reset the
11358af3c33fSJeff Kirsher  * structure to pre-load conditions.
11368af3c33fSJeff Kirsher  **/
11378af3c33fSJeff Kirsher void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
11388af3c33fSJeff Kirsher {
11398af3c33fSJeff Kirsher 	adapter->num_tx_queues = 0;
11408af3c33fSJeff Kirsher 	adapter->num_rx_queues = 0;
11418af3c33fSJeff Kirsher 
11428af3c33fSJeff Kirsher 	ixgbe_free_q_vectors(adapter);
11438af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
11448af3c33fSJeff Kirsher }
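
/* Editor's sketch (hypothetical helper): runtime reconfiguration paths
 * (suspend/resume, changing the TC count, toggling FCoE, ...) pair the two
 * entry points: clear the current scheme, adjust the configuration, then
 * rebuild it.
 */
static int example_reinit_interrupts(struct ixgbe_adapter *adapter)
{
	int err;

	ixgbe_clear_interrupt_scheme(adapter);

	/* ...adjust feature flags, TC count, ring counts, etc. here... */

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		e_dev_err("Unable to re-initialize interrupt scheme\n");

	return err;
}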
11458af3c33fSJeff Kirsher 
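/**
 * ixgbe_tx_ctxtdesc - write an advanced Tx context descriptor to a ring
 * @tx_ring: Tx ring to place the context descriptor on
 * @vlan_macip_lens: VLAN tag plus MAC and IP header length fields
 * @fcoe_sof_eof: FCoE SOF/EOF fields (stored in seqnum_seed; 0 when unused)
 * @type_tucmd: descriptor type and TUCMD offload control bits
 * @mss_l4len_idx: MSS and L4 header length fields
 *
 * Grabs the next free context descriptor, advances next_to_use (wrapping at
 * the end of the ring) and fills in the four descriptor words, marking the
 * descriptor as an advanced context descriptor.
 **/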
11468af3c33fSJeff Kirsher void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
11478af3c33fSJeff Kirsher 		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
11488af3c33fSJeff Kirsher {
11498af3c33fSJeff Kirsher 	struct ixgbe_adv_tx_context_desc *context_desc;
11508af3c33fSJeff Kirsher 	u16 i = tx_ring->next_to_use;
11518af3c33fSJeff Kirsher 
11528af3c33fSJeff Kirsher 	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
11538af3c33fSJeff Kirsher 
11548af3c33fSJeff Kirsher 	i++;
11558af3c33fSJeff Kirsher 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
11568af3c33fSJeff Kirsher 
11578af3c33fSJeff Kirsher 	/* set bits to identify this as an advanced context descriptor */
11588af3c33fSJeff Kirsher 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
11598af3c33fSJeff Kirsher 
11608af3c33fSJeff Kirsher 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
11618af3c33fSJeff Kirsher 	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
11628af3c33fSJeff Kirsher 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
11638af3c33fSJeff Kirsher 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
11648af3c33fSJeff Kirsher }
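
/* Editor's sketch (hypothetical caller): a checksum-offload path assembles
 * the descriptor words and hands them off in one call; the FCoE SOF/EOF word
 * is simply 0 outside the FCoE path.  The TCP-only handling and the macro
 * choices below are assumptions made for brevity.
 */
static void example_tx_csum_ctxt(struct ixgbe_ring *tx_ring,
				 struct sk_buff *skb)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;

	vlan_macip_lens  = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) <<
			   IXGBE_ADVTXD_MACLEN_SHIFT;

	type_tucmd = IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP;
	mss_l4len_idx = tcp_hdrlen(skb) << IXGBE_ADVTXD_L4LEN_SHIFT;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
			  type_tucmd, mss_l4len_idx);
}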
11658af3c33fSJeff Kirsher 