/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

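	/*
	 * vmdq->mask covers the pool bits of a queue index, so ~vmdq->mask
	 * selects the queue-within-pool bits.  Since __ALIGN_MASK(x, mask)
	 * is ((x) + (mask)) & ~(mask), __ALIGN_MASK(1, ~vmdq->mask) gives
	 * the number of queues per pool, and __ALIGN_MASK(reg_idx,
	 * ~vmdq->mask) rounds reg_idx up to the next pool boundary.
	 */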
	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are past the pool's last index, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are past the pool's last index, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, so we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

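	/*
	 * Example: on 82599/X540 in 4 TC mode (see ixgbe_get_first_reg_idx)
	 * the Rx register blocks for TC0-TC3 start at 0, 32, 64 and 96, so
	 * with an rss_i of 16 each TC fills half of its 32-queue block.
	 */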
	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for VMDq/SR-IOV to the assigned rings,
 * spreading the RSS queues across the VMDq pools and, when FCoE is enabled,
 * mapping the dedicated FCoE queues 1:1 behind them.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

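	/*
	 * Mapping happens in two phases: queues up to and including the
	 * first FCoE queue are spread across the VMDq pools on pool
	 * boundaries, and any remaining FCoE queues are then simply
	 * assigned register indices 1:1 below.
	 */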
	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are past the pool's last index, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are past the pool's last index, move to the next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

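/*
 * Each RSS mask above is (indices - 1) for the corresponding power-of-two
 * ring count; the mask chosen for a mode is cached in
 * ring_feature[RING_F_RSS].mask alongside the ring count itself.
 */
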
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

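	/*
	 * 82599/X540 expose 128 queues in total, so DCB w/ VMDq trades
	 * pools for TCs: 16 pools of 8 queues when more than 4 TCs are
	 * in use, otherwise 32 pools of 4 queues.
	 */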
	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use the queue belonging to the FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}
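
	/*
	 * Example: with 8 TCs on 82599 and 64 Tx queues allocated to the
	 * netdev, rss_i starts at 64 / 8 = 8 and survives the min_t()
	 * against the 8-queues-per-TC layout; the user limit f->limit may
	 * still reduce it below.
	 */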

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try to
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

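	/*
	 * As in the DCB case, the 128 hardware queues split into either
	 * 64 pools of 2 queues or 32 pools of 4 queues, depending on the
	 * requested pool count and RSS depth.
	 */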
	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent pools to allow RSS-like
	 * behavior.  To account for this we need to add the FCoE
	 * indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);
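
	/*
	 * The q_vector and its rings come from a single allocation: the
	 * rings are laid out immediately after the q_vector itself, which
	 * keeps a vector and all of its rings on the same NUMA node.
	 */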

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);
	napi_hash_add(&q_vector->napi);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_10K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
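		/*
		 * Split the remaining rings evenly over the remaining
		 * vectors: rounding up lets earlier vectors take one
		 * extra ring while the shrinking remainders keep later
		 * vectors from overrunning.
		 */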
981d0bfcdfdSAlexander Duyck 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
982d0bfcdfdSAlexander Duyck 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
983d0bfcdfdSAlexander Duyck 		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
9848af3c33fSJeff Kirsher 					   tqpv, txr_idx,
9858af3c33fSJeff Kirsher 					   rqpv, rxr_idx);
9868af3c33fSJeff Kirsher 
9878af3c33fSJeff Kirsher 		if (err)
9888af3c33fSJeff Kirsher 			goto err_out;
9898af3c33fSJeff Kirsher 
9908af3c33fSJeff Kirsher 		/* update counts and index */
9918af3c33fSJeff Kirsher 		rxr_remaining -= rqpv;
9928af3c33fSJeff Kirsher 		txr_remaining -= tqpv;
993d0bfcdfdSAlexander Duyck 		rxr_idx++;
994d0bfcdfdSAlexander Duyck 		txr_idx++;
9958af3c33fSJeff Kirsher 	}
9968af3c33fSJeff Kirsher 
9978af3c33fSJeff Kirsher 	return 0;
9988af3c33fSJeff Kirsher 
9998af3c33fSJeff Kirsher err_out:
100049c7ffbeSAlexander Duyck 	adapter->num_tx_queues = 0;
100149c7ffbeSAlexander Duyck 	adapter->num_rx_queues = 0;
100249c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 0;
100349c7ffbeSAlexander Duyck 
100449c7ffbeSAlexander Duyck 	while (v_idx--)
10058af3c33fSJeff Kirsher 		ixgbe_free_q_vector(adapter, v_idx);
10068af3c33fSJeff Kirsher 
10078af3c33fSJeff Kirsher 	return -ENOMEM;
10088af3c33fSJeff Kirsher }
10098af3c33fSJeff Kirsher 
10108af3c33fSJeff Kirsher /**
10118af3c33fSJeff Kirsher  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
10128af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10138af3c33fSJeff Kirsher  *
10148af3c33fSJeff Kirsher  * This function frees the memory allocated to the q_vectors.  In addition if
10158af3c33fSJeff Kirsher  * NAPI is enabled it will delete any references to the NAPI struct prior
10168af3c33fSJeff Kirsher  * to freeing the q_vector.
10178af3c33fSJeff Kirsher  **/
10188af3c33fSJeff Kirsher static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
10198af3c33fSJeff Kirsher {
102049c7ffbeSAlexander Duyck 	int v_idx = adapter->num_q_vectors;
10218af3c33fSJeff Kirsher 
102249c7ffbeSAlexander Duyck 	adapter->num_tx_queues = 0;
102349c7ffbeSAlexander Duyck 	adapter->num_rx_queues = 0;
102449c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 0;
10258af3c33fSJeff Kirsher 
102649c7ffbeSAlexander Duyck 	while (v_idx--)
10278af3c33fSJeff Kirsher 		ixgbe_free_q_vector(adapter, v_idx);
10288af3c33fSJeff Kirsher }
10298af3c33fSJeff Kirsher 
10308af3c33fSJeff Kirsher static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
10318af3c33fSJeff Kirsher {
10328af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
10338af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
10348af3c33fSJeff Kirsher 		pci_disable_msix(adapter->pdev);
10358af3c33fSJeff Kirsher 		kfree(adapter->msix_entries);
10368af3c33fSJeff Kirsher 		adapter->msix_entries = NULL;
10378af3c33fSJeff Kirsher 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
10388af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
10398af3c33fSJeff Kirsher 		pci_disable_msi(adapter->pdev);
10408af3c33fSJeff Kirsher 	}
10418af3c33fSJeff Kirsher }
10428af3c33fSJeff Kirsher 
10438af3c33fSJeff Kirsher /**
10448af3c33fSJeff Kirsher  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
10458af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
10468af3c33fSJeff Kirsher  *
10478af3c33fSJeff Kirsher  * Attempt to configure the interrupts using the best available
10488af3c33fSJeff Kirsher  * capabilities of the hardware and the kernel.
10498af3c33fSJeff Kirsher  **/
1050ac802f5dSAlexander Duyck static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
10518af3c33fSJeff Kirsher {
10528af3c33fSJeff Kirsher 	struct ixgbe_hw *hw = &adapter->hw;
1053ac802f5dSAlexander Duyck 	int vector, v_budget, err;
10548af3c33fSJeff Kirsher 
10558af3c33fSJeff Kirsher 	/*
10568af3c33fSJeff Kirsher 	 * It's easy to be greedy for MSI-X vectors, but it really
10578af3c33fSJeff Kirsher 	 * doesn't do us much good if we have a lot more vectors
10588af3c33fSJeff Kirsher 	 * than CPUs.  So let's be conservative and only ask for
10598af3c33fSJeff Kirsher 	 * (roughly) the same number of vectors as there are CPUs.
10608af3c33fSJeff Kirsher 	 * The default is to use pairs of vectors.
10618af3c33fSJeff Kirsher 	 */
10628af3c33fSJeff Kirsher 	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
10638af3c33fSJeff Kirsher 	v_budget = min_t(int, v_budget, num_online_cpus());
10648af3c33fSJeff Kirsher 	v_budget += NON_Q_VECTORS;
10658af3c33fSJeff Kirsher 
10668af3c33fSJeff Kirsher 	/*
10678af3c33fSJeff Kirsher 	 * At the same time, hardware can only support a maximum of
10688af3c33fSJeff Kirsher 	 * hw->mac.max_msix_vectors vectors.  With features
10698af3c33fSJeff Kirsher 	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
10708af3c33fSJeff Kirsher 	 * descriptor queues supported by our device.  Thus, we cap it off in
10718af3c33fSJeff Kirsher 	 * those rare cases where the CPU count also exceeds our vector limit.
10728af3c33fSJeff Kirsher 	 */
10738af3c33fSJeff Kirsher 	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
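
	/* Worked example (illustrative numbers, not from the original
	 * comments): with 16 Rx queues, 16 Tx queues and 8 online CPUs,
	 * v_budget = min(max(16, 16), 8) + NON_Q_VECTORS; assuming
	 * NON_Q_VECTORS is 1, that gives 9, which a typical 64-vector
	 * max_msix_vectors limit leaves untouched.
	 */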
10748af3c33fSJeff Kirsher 
10758af3c33fSJeff Kirsher 	/* A failure in MSI-X entry allocation isn't fatal, but it does
10768af3c33fSJeff Kirsher 	 * mean we disable MSI-X capabilities of the adapter.
	 */
10778af3c33fSJeff Kirsher 	adapter->msix_entries = kcalloc(v_budget,
10788af3c33fSJeff Kirsher 					sizeof(struct msix_entry), GFP_KERNEL);
10798af3c33fSJeff Kirsher 	if (adapter->msix_entries) {
10808af3c33fSJeff Kirsher 		for (vector = 0; vector < v_budget; vector++)
10818af3c33fSJeff Kirsher 			adapter->msix_entries[vector].entry = vector;
10828af3c33fSJeff Kirsher 
10838af3c33fSJeff Kirsher 		ixgbe_acquire_msix_vectors(adapter, v_budget);
10848af3c33fSJeff Kirsher 
10858af3c33fSJeff Kirsher 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1086ac802f5dSAlexander Duyck 			return;
10878af3c33fSJeff Kirsher 	}
10888af3c33fSJeff Kirsher 
1089b724e9f2SAlexander Duyck 	/* disable DCB if number of TCs exceeds 1 */
1090b724e9f2SAlexander Duyck 	if (netdev_get_num_tc(adapter->netdev) > 1) {
1091b724e9f2SAlexander Duyck 		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
1092b724e9f2SAlexander Duyck 		netdev_reset_tc(adapter->netdev);
109339cb681bSAlexander Duyck 
1094b724e9f2SAlexander Duyck 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1095b724e9f2SAlexander Duyck 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1096b724e9f2SAlexander Duyck 
1097b724e9f2SAlexander Duyck 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1098b724e9f2SAlexander Duyck 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
1099b724e9f2SAlexander Duyck 		adapter->dcb_cfg.pfc_mode_enable = false;
1100b724e9f2SAlexander Duyck 	}
1101b724e9f2SAlexander Duyck 	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1102b724e9f2SAlexander Duyck 	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
1103b724e9f2SAlexander Duyck 
1104b724e9f2SAlexander Duyck 	/* disable SR-IOV */
11058af3c33fSJeff Kirsher 	ixgbe_disable_sriov(adapter);
11068af3c33fSJeff Kirsher 
1107b724e9f2SAlexander Duyck 	/* disable RSS */
1108fbe7ca7fSAlexander Duyck 	adapter->ring_feature[RING_F_RSS].limit = 1;
1109b724e9f2SAlexander Duyck 
1110ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
111149c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 1;
111249c7ffbeSAlexander Duyck 
11138af3c33fSJeff Kirsher 	err = pci_enable_msi(adapter->pdev);
1114ac802f5dSAlexander Duyck 	if (err) {
11158af3c33fSJeff Kirsher 		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
11168af3c33fSJeff Kirsher 			     "Unable to allocate MSI interrupt, falling back to legacy.  Error: %d\n",
11178af3c33fSJeff Kirsher 			     err);
1118ac802f5dSAlexander Duyck 		return;
11198af3c33fSJeff Kirsher 	}
1120ac802f5dSAlexander Duyck 	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
11218af3c33fSJeff Kirsher }
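
/* Note on fallback order (summary of the logic above): MSI-X is attempted
 * first; if it cannot be enabled, the driver tears down DCB, SR-IOV and RSS
 * so that a single queue/vector suffices, then tries MSI, and finally falls
 * back to legacy INTx if pci_enable_msi() also fails.
 */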
11228af3c33fSJeff Kirsher 
11238af3c33fSJeff Kirsher /**
11248af3c33fSJeff Kirsher  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
11258af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
11268af3c33fSJeff Kirsher  *
11278af3c33fSJeff Kirsher  * We determine which interrupt scheme to use based on...
11288af3c33fSJeff Kirsher  * - Kernel support (MSI, MSI-X)
11298af3c33fSJeff Kirsher  *   - which can be user-defined (via module parameters)
11308af3c33fSJeff Kirsher  * - Hardware queue count (num_*_queues)
11318af3c33fSJeff Kirsher  *   - defined by miscellaneous hardware support/features (RSS, etc.)
11328af3c33fSJeff Kirsher  **/
11338af3c33fSJeff Kirsher int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
11348af3c33fSJeff Kirsher {
11358af3c33fSJeff Kirsher 	int err;
11368af3c33fSJeff Kirsher 
11378af3c33fSJeff Kirsher 	/* Number of supported queues */
1138ac802f5dSAlexander Duyck 	ixgbe_set_num_queues(adapter);
11398af3c33fSJeff Kirsher 
1140ac802f5dSAlexander Duyck 	/* Set interrupt mode */
1141ac802f5dSAlexander Duyck 	ixgbe_set_interrupt_capability(adapter);
11428af3c33fSJeff Kirsher 
11438af3c33fSJeff Kirsher 	err = ixgbe_alloc_q_vectors(adapter);
11448af3c33fSJeff Kirsher 	if (err) {
11458af3c33fSJeff Kirsher 		e_dev_err("Unable to allocate memory for queue vectors\n");
11468af3c33fSJeff Kirsher 		goto err_alloc_q_vectors;
11478af3c33fSJeff Kirsher 	}
11488af3c33fSJeff Kirsher 
11498af3c33fSJeff Kirsher 	ixgbe_cache_ring_register(adapter);
11508af3c33fSJeff Kirsher 
11518af3c33fSJeff Kirsher 	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
11528af3c33fSJeff Kirsher 		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
11538af3c33fSJeff Kirsher 		   adapter->num_rx_queues, adapter->num_tx_queues);
11548af3c33fSJeff Kirsher 
11558af3c33fSJeff Kirsher 	set_bit(__IXGBE_DOWN, &adapter->state);
11568af3c33fSJeff Kirsher 
11578af3c33fSJeff Kirsher 	return 0;
11588af3c33fSJeff Kirsher 
11598af3c33fSJeff Kirsher err_alloc_q_vectors:
11608af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
11618af3c33fSJeff Kirsher 	return err;
11628af3c33fSJeff Kirsher }
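
/* Usage sketch (hypothetical caller, shown only to illustrate the expected
 * pairing with ixgbe_clear_interrupt_scheme() below):
 *
 *	err = ixgbe_init_interrupt_scheme(adapter);
 *	if (err)
 *		return err;
 *	...
 *	ixgbe_clear_interrupt_scheme(adapter);
 */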
11638af3c33fSJeff Kirsher 
11648af3c33fSJeff Kirsher /**
11658af3c33fSJeff Kirsher  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
11668af3c33fSJeff Kirsher  * @adapter: board private structure to clear interrupt scheme on
11678af3c33fSJeff Kirsher  *
11688af3c33fSJeff Kirsher  * We go through and clear interrupt specific resources and reset the structure
11698af3c33fSJeff Kirsher  * to pre-load conditions
11708af3c33fSJeff Kirsher  **/
11718af3c33fSJeff Kirsher void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
11728af3c33fSJeff Kirsher {
11738af3c33fSJeff Kirsher 	adapter->num_tx_queues = 0;
11748af3c33fSJeff Kirsher 	adapter->num_rx_queues = 0;
11758af3c33fSJeff Kirsher 
11768af3c33fSJeff Kirsher 	ixgbe_free_q_vectors(adapter);
11778af3c33fSJeff Kirsher 	ixgbe_reset_interrupt_capability(adapter);
11788af3c33fSJeff Kirsher }
11798af3c33fSJeff Kirsher 
11808af3c33fSJeff Kirsher void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
11818af3c33fSJeff Kirsher 		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
11828af3c33fSJeff Kirsher {
11838af3c33fSJeff Kirsher 	struct ixgbe_adv_tx_context_desc *context_desc;
11848af3c33fSJeff Kirsher 	u16 i = tx_ring->next_to_use;
11858af3c33fSJeff Kirsher 
11868af3c33fSJeff Kirsher 	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
11878af3c33fSJeff Kirsher 
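	/* advance next_to_use, wrapping back to descriptor 0 once the end of
	 * the ring is reached (e.g. with count == 512, index 511 wraps to 0)
	 */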
11888af3c33fSJeff Kirsher 	i++;
11898af3c33fSJeff Kirsher 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
11908af3c33fSJeff Kirsher 
11918af3c33fSJeff Kirsher 	/* set bits to identify this as an advanced context descriptor */
11928af3c33fSJeff Kirsher 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
11938af3c33fSJeff Kirsher 
11948af3c33fSJeff Kirsher 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
11958af3c33fSJeff Kirsher 	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
11968af3c33fSJeff Kirsher 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
11978af3c33fSJeff Kirsher 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
11988af3c33fSJeff Kirsher }
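
/* Usage sketch (illustrative values under assumed offload settings; not
 * copied from a real call site):
 *
 *	u32 vlan_macip_lens = skb_network_header_len(skb);
 *	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 *	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
 *			  IXGBE_ADVTXD_TUCMD_L4T_TCP, mss_l4len_idx);
 *
 * fcoe_sof_eof is only meaningful for FCoE frames and is passed as zero for
 * ordinary LAN traffic.
 */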
11998af3c33fSJeff Kirsher 