18af3c33fSJeff Kirsher /*******************************************************************************
28af3c33fSJeff Kirsher 
38af3c33fSJeff Kirsher   Intel 10 Gigabit PCI Express Linux driver
48af3c33fSJeff Kirsher   Copyright(c) 1999 - 2012 Intel Corporation.
58af3c33fSJeff Kirsher 
68af3c33fSJeff Kirsher   This program is free software; you can redistribute it and/or modify it
78af3c33fSJeff Kirsher   under the terms and conditions of the GNU General Public License,
88af3c33fSJeff Kirsher   version 2, as published by the Free Software Foundation.
98af3c33fSJeff Kirsher 
108af3c33fSJeff Kirsher   This program is distributed in the hope it will be useful, but WITHOUT
118af3c33fSJeff Kirsher   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
128af3c33fSJeff Kirsher   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
138af3c33fSJeff Kirsher   more details.
148af3c33fSJeff Kirsher 
158af3c33fSJeff Kirsher   You should have received a copy of the GNU General Public License along with
168af3c33fSJeff Kirsher   this program; if not, write to the Free Software Foundation, Inc.,
178af3c33fSJeff Kirsher   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
188af3c33fSJeff Kirsher 
198af3c33fSJeff Kirsher   The full GNU General Public License is included in this distribution in
208af3c33fSJeff Kirsher   the file called "COPYING".
218af3c33fSJeff Kirsher 
228af3c33fSJeff Kirsher   Contact Information:
238af3c33fSJeff Kirsher   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
248af3c33fSJeff Kirsher   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
258af3c33fSJeff Kirsher 
268af3c33fSJeff Kirsher *******************************************************************************/
278af3c33fSJeff Kirsher 
288af3c33fSJeff Kirsher #include "ixgbe.h"
298af3c33fSJeff Kirsher #include "ixgbe_sriov.h"
308af3c33fSJeff Kirsher 
318af3c33fSJeff Kirsher /**
328af3c33fSJeff Kirsher  * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
338af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
348af3c33fSJeff Kirsher  *
358af3c33fSJeff Kirsher  * Cache the descriptor ring offsets for RSS to the assigned rings.
368af3c33fSJeff Kirsher  *
378af3c33fSJeff Kirsher  **/
388af3c33fSJeff Kirsher static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
398af3c33fSJeff Kirsher {
408af3c33fSJeff Kirsher 	int i;
418af3c33fSJeff Kirsher 
428af3c33fSJeff Kirsher 	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
438af3c33fSJeff Kirsher 		return false;
448af3c33fSJeff Kirsher 
458af3c33fSJeff Kirsher 	for (i = 0; i < adapter->num_rx_queues; i++)
468af3c33fSJeff Kirsher 		adapter->rx_ring[i]->reg_idx = i;
478af3c33fSJeff Kirsher 	for (i = 0; i < adapter->num_tx_queues; i++)
488af3c33fSJeff Kirsher 		adapter->tx_ring[i]->reg_idx = i;
498af3c33fSJeff Kirsher 
508af3c33fSJeff Kirsher 	return true;
518af3c33fSJeff Kirsher }
528af3c33fSJeff Kirsher #ifdef CONFIG_IXGBE_DCB
538af3c33fSJeff Kirsher 
548af3c33fSJeff Kirsher /* ixgbe_get_first_reg_idx - Return first register index associated with ring */
558af3c33fSJeff Kirsher static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
568af3c33fSJeff Kirsher 				    unsigned int *tx, unsigned int *rx)
578af3c33fSJeff Kirsher {
588af3c33fSJeff Kirsher 	struct net_device *dev = adapter->netdev;
598af3c33fSJeff Kirsher 	struct ixgbe_hw *hw = &adapter->hw;
608af3c33fSJeff Kirsher 	u8 num_tcs = netdev_get_num_tc(dev);
618af3c33fSJeff Kirsher 
628af3c33fSJeff Kirsher 	*tx = 0;
638af3c33fSJeff Kirsher 	*rx = 0;
648af3c33fSJeff Kirsher 
658af3c33fSJeff Kirsher 	switch (hw->mac.type) {
668af3c33fSJeff Kirsher 	case ixgbe_mac_82598EB:
678af3c33fSJeff Kirsher 		*tx = tc << 2;
688af3c33fSJeff Kirsher 		*rx = tc << 3;
698af3c33fSJeff Kirsher 		break;
708af3c33fSJeff Kirsher 	case ixgbe_mac_82599EB:
718af3c33fSJeff Kirsher 	case ixgbe_mac_X540:
728af3c33fSJeff Kirsher 		if (num_tcs > 4) {
738af3c33fSJeff Kirsher 			if (tc < 3) {
748af3c33fSJeff Kirsher 				*tx = tc << 5;
758af3c33fSJeff Kirsher 				*rx = tc << 4;
768af3c33fSJeff Kirsher 			} else if (tc <  5) {
778af3c33fSJeff Kirsher 				*tx = ((tc + 2) << 4);
788af3c33fSJeff Kirsher 				*rx = tc << 4;
798af3c33fSJeff Kirsher 			} else if (tc < num_tcs) {
808af3c33fSJeff Kirsher 				*tx = ((tc + 8) << 3);
818af3c33fSJeff Kirsher 				*rx = tc << 4;
828af3c33fSJeff Kirsher 			}
838af3c33fSJeff Kirsher 		} else {
848af3c33fSJeff Kirsher 			*rx =  tc << 5;
858af3c33fSJeff Kirsher 			switch (tc) {
868af3c33fSJeff Kirsher 			case 0:
878af3c33fSJeff Kirsher 				*tx =  0;
888af3c33fSJeff Kirsher 				break;
898af3c33fSJeff Kirsher 			case 1:
908af3c33fSJeff Kirsher 				*tx = 64;
918af3c33fSJeff Kirsher 				break;
928af3c33fSJeff Kirsher 			case 2:
938af3c33fSJeff Kirsher 				*tx = 96;
948af3c33fSJeff Kirsher 				break;
958af3c33fSJeff Kirsher 			case 3:
968af3c33fSJeff Kirsher 				*tx = 112;
978af3c33fSJeff Kirsher 				break;
988af3c33fSJeff Kirsher 			default:
998af3c33fSJeff Kirsher 				break;
1008af3c33fSJeff Kirsher 			}
1018af3c33fSJeff Kirsher 		}
1028af3c33fSJeff Kirsher 		break;
1038af3c33fSJeff Kirsher 	default:
1048af3c33fSJeff Kirsher 		break;
1058af3c33fSJeff Kirsher 	}
1068af3c33fSJeff Kirsher }
1078af3c33fSJeff Kirsher 
1088af3c33fSJeff Kirsher /**
1098af3c33fSJeff Kirsher  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
1108af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
1118af3c33fSJeff Kirsher  *
1128af3c33fSJeff Kirsher  * Cache the descriptor ring offsets for DCB to the assigned rings.
1138af3c33fSJeff Kirsher  *
1148af3c33fSJeff Kirsher  **/
1158af3c33fSJeff Kirsher static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
1168af3c33fSJeff Kirsher {
1178af3c33fSJeff Kirsher 	struct net_device *dev = adapter->netdev;
1188af3c33fSJeff Kirsher 	int i, j, k;
1198af3c33fSJeff Kirsher 	u8 num_tcs = netdev_get_num_tc(dev);
1208af3c33fSJeff Kirsher 
1218af3c33fSJeff Kirsher 	if (!num_tcs)
1228af3c33fSJeff Kirsher 		return false;
1238af3c33fSJeff Kirsher 
1248af3c33fSJeff Kirsher 	for (i = 0, k = 0; i < num_tcs; i++) {
1258af3c33fSJeff Kirsher 		unsigned int tx_s, rx_s;
1268af3c33fSJeff Kirsher 		u16 count = dev->tc_to_txq[i].count;
1278af3c33fSJeff Kirsher 
1288af3c33fSJeff Kirsher 		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
1298af3c33fSJeff Kirsher 		for (j = 0; j < count; j++, k++) {
1308af3c33fSJeff Kirsher 			adapter->tx_ring[k]->reg_idx = tx_s + j;
1318af3c33fSJeff Kirsher 			adapter->rx_ring[k]->reg_idx = rx_s + j;
1328af3c33fSJeff Kirsher 			adapter->tx_ring[k]->dcb_tc = i;
1338af3c33fSJeff Kirsher 			adapter->rx_ring[k]->dcb_tc = i;
1348af3c33fSJeff Kirsher 		}
1358af3c33fSJeff Kirsher 	}
1368af3c33fSJeff Kirsher 
1378af3c33fSJeff Kirsher 	return true;
1388af3c33fSJeff Kirsher }
1398af3c33fSJeff Kirsher #endif
1408af3c33fSJeff Kirsher 
1418af3c33fSJeff Kirsher /**
1428af3c33fSJeff Kirsher  * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
1438af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
1448af3c33fSJeff Kirsher  *
1458af3c33fSJeff Kirsher  * Cache the descriptor ring offsets for Flow Director to the assigned rings.
1468af3c33fSJeff Kirsher  *
1478af3c33fSJeff Kirsher  **/
1488af3c33fSJeff Kirsher static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
1498af3c33fSJeff Kirsher {
1508af3c33fSJeff Kirsher 	int i;
1518af3c33fSJeff Kirsher 	bool ret = false;
1528af3c33fSJeff Kirsher 
1538af3c33fSJeff Kirsher 	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
1548af3c33fSJeff Kirsher 	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
1558af3c33fSJeff Kirsher 		for (i = 0; i < adapter->num_rx_queues; i++)
1568af3c33fSJeff Kirsher 			adapter->rx_ring[i]->reg_idx = i;
1578af3c33fSJeff Kirsher 		for (i = 0; i < adapter->num_tx_queues; i++)
1588af3c33fSJeff Kirsher 			adapter->tx_ring[i]->reg_idx = i;
1598af3c33fSJeff Kirsher 		ret = true;
1608af3c33fSJeff Kirsher 	}
1618af3c33fSJeff Kirsher 
1628af3c33fSJeff Kirsher 	return ret;
1638af3c33fSJeff Kirsher }
1648af3c33fSJeff Kirsher 
1658af3c33fSJeff Kirsher #ifdef IXGBE_FCOE
1668af3c33fSJeff Kirsher /**
1678af3c33fSJeff Kirsher  * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
1688af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
1698af3c33fSJeff Kirsher  *
1708af3c33fSJeff Kirsher  * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
1718af3c33fSJeff Kirsher  *
1728af3c33fSJeff Kirsher  */
1738af3c33fSJeff Kirsher static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
1748af3c33fSJeff Kirsher {
1758af3c33fSJeff Kirsher 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
1768af3c33fSJeff Kirsher 	int i;
1778af3c33fSJeff Kirsher 	u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
1788af3c33fSJeff Kirsher 
1798af3c33fSJeff Kirsher 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
1808af3c33fSJeff Kirsher 		return false;
1818af3c33fSJeff Kirsher 
1828af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1838af3c33fSJeff Kirsher 		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
1848af3c33fSJeff Kirsher 			ixgbe_cache_ring_fdir(adapter);
1858af3c33fSJeff Kirsher 		else
1868af3c33fSJeff Kirsher 			ixgbe_cache_ring_rss(adapter);
1878af3c33fSJeff Kirsher 
1888af3c33fSJeff Kirsher 		fcoe_rx_i = f->mask;
1898af3c33fSJeff Kirsher 		fcoe_tx_i = f->mask;
1908af3c33fSJeff Kirsher 	}
1918af3c33fSJeff Kirsher 	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
1928af3c33fSJeff Kirsher 		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
1938af3c33fSJeff Kirsher 		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
1948af3c33fSJeff Kirsher 	}
1958af3c33fSJeff Kirsher 	return true;
1968af3c33fSJeff Kirsher }
1978af3c33fSJeff Kirsher 
1988af3c33fSJeff Kirsher #endif /* IXGBE_FCOE */
1998af3c33fSJeff Kirsher /**
2008af3c33fSJeff Kirsher  * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
2018af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
2028af3c33fSJeff Kirsher  *
2038af3c33fSJeff Kirsher  * SR-IOV doesn't use any descriptor rings but changes the default if
2048af3c33fSJeff Kirsher  * no other mapping is used.
2058af3c33fSJeff Kirsher  *
2068af3c33fSJeff Kirsher  */
2078af3c33fSJeff Kirsher static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
2088af3c33fSJeff Kirsher {
2098af3c33fSJeff Kirsher 	adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
2108af3c33fSJeff Kirsher 	adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
2118af3c33fSJeff Kirsher 	if (adapter->num_vfs)
2128af3c33fSJeff Kirsher 		return true;
2138af3c33fSJeff Kirsher 	else
2148af3c33fSJeff Kirsher 		return false;
2158af3c33fSJeff Kirsher }
2168af3c33fSJeff Kirsher 
2178af3c33fSJeff Kirsher /**
2188af3c33fSJeff Kirsher  * ixgbe_cache_ring_register - Descriptor ring to register mapping
2198af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
2208af3c33fSJeff Kirsher  *
2218af3c33fSJeff Kirsher  * Once we know the feature-set enabled for the device, we'll cache
2228af3c33fSJeff Kirsher  * the register offset the descriptor ring is assigned to.
2238af3c33fSJeff Kirsher  *
2248af3c33fSJeff Kirsher  * Note, the order the various feature calls is important.  It must start with
2258af3c33fSJeff Kirsher  * the "most" features enabled at the same time, then trickle down to the
2268af3c33fSJeff Kirsher  * least amount of features turned on at once.
2278af3c33fSJeff Kirsher  **/
2288af3c33fSJeff Kirsher static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2298af3c33fSJeff Kirsher {
2308af3c33fSJeff Kirsher 	/* start with default case */
2318af3c33fSJeff Kirsher 	adapter->rx_ring[0]->reg_idx = 0;
2328af3c33fSJeff Kirsher 	adapter->tx_ring[0]->reg_idx = 0;
2338af3c33fSJeff Kirsher 
2348af3c33fSJeff Kirsher 	if (ixgbe_cache_ring_sriov(adapter))
2358af3c33fSJeff Kirsher 		return;
2368af3c33fSJeff Kirsher 
2378af3c33fSJeff Kirsher #ifdef CONFIG_IXGBE_DCB
2388af3c33fSJeff Kirsher 	if (ixgbe_cache_ring_dcb(adapter))
2398af3c33fSJeff Kirsher 		return;
2408af3c33fSJeff Kirsher #endif
2418af3c33fSJeff Kirsher 
2428af3c33fSJeff Kirsher #ifdef IXGBE_FCOE
2438af3c33fSJeff Kirsher 	if (ixgbe_cache_ring_fcoe(adapter))
2448af3c33fSJeff Kirsher 		return;
2458af3c33fSJeff Kirsher #endif /* IXGBE_FCOE */
2468af3c33fSJeff Kirsher 
2478af3c33fSJeff Kirsher 	if (ixgbe_cache_ring_fdir(adapter))
2488af3c33fSJeff Kirsher 		return;
2498af3c33fSJeff Kirsher 
2508af3c33fSJeff Kirsher 	if (ixgbe_cache_ring_rss(adapter))
2518af3c33fSJeff Kirsher 		return;
2528af3c33fSJeff Kirsher }
2538af3c33fSJeff Kirsher 
/**
 * ixgbe_set_sriov_queues - Allocate queues for IOV use
 * @adapter: board private structure to initialize
 *
 * IOV doesn't actually use anything, so just NAK the
 * request for now and let the other queue routines
 * figure out what to do.
 */
static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	/* intentionally always defer: the single-queue base case set up by
	 * the caller is what SR-IOV mode relies on
	 */
	return false;
}
2668af3c33fSJeff Kirsher 
2678af3c33fSJeff Kirsher /**
26849ce9c2cSBen Hutchings  * ixgbe_set_rss_queues - Allocate queues for RSS
2698af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
2708af3c33fSJeff Kirsher  *
2718af3c33fSJeff Kirsher  * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
2728af3c33fSJeff Kirsher  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
2738af3c33fSJeff Kirsher  *
2748af3c33fSJeff Kirsher  **/
2758af3c33fSJeff Kirsher static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
2768af3c33fSJeff Kirsher {
2778af3c33fSJeff Kirsher 	bool ret = false;
2788af3c33fSJeff Kirsher 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
2798af3c33fSJeff Kirsher 
2808af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2818af3c33fSJeff Kirsher 		f->mask = 0xF;
2828af3c33fSJeff Kirsher 		adapter->num_rx_queues = f->indices;
2838af3c33fSJeff Kirsher 		adapter->num_tx_queues = f->indices;
2848af3c33fSJeff Kirsher 		ret = true;
2858af3c33fSJeff Kirsher 	}
2868af3c33fSJeff Kirsher 
2878af3c33fSJeff Kirsher 	return ret;
2888af3c33fSJeff Kirsher }
2898af3c33fSJeff Kirsher 
2908af3c33fSJeff Kirsher /**
29149ce9c2cSBen Hutchings  * ixgbe_set_fdir_queues - Allocate queues for Flow Director
2928af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
2938af3c33fSJeff Kirsher  *
2948af3c33fSJeff Kirsher  * Flow Director is an advanced Rx filter, attempting to get Rx flows back
2958af3c33fSJeff Kirsher  * to the original CPU that initiated the Tx session.  This runs in addition
2968af3c33fSJeff Kirsher  * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
2978af3c33fSJeff Kirsher  * Rx load across CPUs using RSS.
2988af3c33fSJeff Kirsher  *
2998af3c33fSJeff Kirsher  **/
3008af3c33fSJeff Kirsher static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3018af3c33fSJeff Kirsher {
3028af3c33fSJeff Kirsher 	bool ret = false;
3038af3c33fSJeff Kirsher 	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3048af3c33fSJeff Kirsher 
3058af3c33fSJeff Kirsher 	f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
3068af3c33fSJeff Kirsher 	f_fdir->mask = 0;
3078af3c33fSJeff Kirsher 
3088af3c33fSJeff Kirsher 	/*
3098af3c33fSJeff Kirsher 	 * Use RSS in addition to Flow Director to ensure the best
3108af3c33fSJeff Kirsher 	 * distribution of flows across cores, even when an FDIR flow
3118af3c33fSJeff Kirsher 	 * isn't matched.
3128af3c33fSJeff Kirsher 	 */
3138af3c33fSJeff Kirsher 	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
3148af3c33fSJeff Kirsher 	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
3158af3c33fSJeff Kirsher 		adapter->num_tx_queues = f_fdir->indices;
3168af3c33fSJeff Kirsher 		adapter->num_rx_queues = f_fdir->indices;
3178af3c33fSJeff Kirsher 		ret = true;
3188af3c33fSJeff Kirsher 	} else {
3198af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3208af3c33fSJeff Kirsher 	}
3218af3c33fSJeff Kirsher 	return ret;
3228af3c33fSJeff Kirsher }
3238af3c33fSJeff Kirsher 
3248af3c33fSJeff Kirsher #ifdef IXGBE_FCOE
3258af3c33fSJeff Kirsher /**
32649ce9c2cSBen Hutchings  * ixgbe_set_fcoe_queues - Allocate queues for Fiber Channel over Ethernet (FCoE)
3278af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
3288af3c33fSJeff Kirsher  *
3298af3c33fSJeff Kirsher  * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
3308af3c33fSJeff Kirsher  * The ring feature mask is not used as a mask for FCoE, as it can take any 8
3318af3c33fSJeff Kirsher  * rx queues out of the max number of rx queues, instead, it is used as the
3328af3c33fSJeff Kirsher  * index of the first rx queue used by FCoE.
3338af3c33fSJeff Kirsher  *
3348af3c33fSJeff Kirsher  **/
3358af3c33fSJeff Kirsher static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3368af3c33fSJeff Kirsher {
3378af3c33fSJeff Kirsher 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3388af3c33fSJeff Kirsher 
3398af3c33fSJeff Kirsher 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
3408af3c33fSJeff Kirsher 		return false;
3418af3c33fSJeff Kirsher 
3428af3c33fSJeff Kirsher 	f->indices = min_t(int, num_online_cpus(), f->indices);
3438af3c33fSJeff Kirsher 
3448af3c33fSJeff Kirsher 	adapter->num_rx_queues = 1;
3458af3c33fSJeff Kirsher 	adapter->num_tx_queues = 1;
3468af3c33fSJeff Kirsher 
3478af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3488af3c33fSJeff Kirsher 		e_info(probe, "FCoE enabled with RSS\n");
3498af3c33fSJeff Kirsher 		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
3508af3c33fSJeff Kirsher 			ixgbe_set_fdir_queues(adapter);
3518af3c33fSJeff Kirsher 		else
3528af3c33fSJeff Kirsher 			ixgbe_set_rss_queues(adapter);
3538af3c33fSJeff Kirsher 	}
3548af3c33fSJeff Kirsher 
3558af3c33fSJeff Kirsher 	/* adding FCoE rx rings to the end */
3568af3c33fSJeff Kirsher 	f->mask = adapter->num_rx_queues;
3578af3c33fSJeff Kirsher 	adapter->num_rx_queues += f->indices;
3588af3c33fSJeff Kirsher 	adapter->num_tx_queues += f->indices;
3598af3c33fSJeff Kirsher 
3608af3c33fSJeff Kirsher 	return true;
3618af3c33fSJeff Kirsher }
3628af3c33fSJeff Kirsher #endif /* IXGBE_FCOE */
3638af3c33fSJeff Kirsher 
3648af3c33fSJeff Kirsher /* Artificial max queue cap per traffic class in DCB mode */
3658af3c33fSJeff Kirsher #define DCB_QUEUE_CAP 8
3668af3c33fSJeff Kirsher 
3678af3c33fSJeff Kirsher #ifdef CONFIG_IXGBE_DCB
3688af3c33fSJeff Kirsher static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
3698af3c33fSJeff Kirsher {
3708af3c33fSJeff Kirsher 	int per_tc_q, q, i, offset = 0;
3718af3c33fSJeff Kirsher 	struct net_device *dev = adapter->netdev;
3728af3c33fSJeff Kirsher 	int tcs = netdev_get_num_tc(dev);
3738af3c33fSJeff Kirsher 
3748af3c33fSJeff Kirsher 	if (!tcs)
3758af3c33fSJeff Kirsher 		return false;
3768af3c33fSJeff Kirsher 
3778af3c33fSJeff Kirsher 	/* Map queue offset and counts onto allocated tx queues */
3788af3c33fSJeff Kirsher 	per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
3798af3c33fSJeff Kirsher 	q = min_t(int, num_online_cpus(), per_tc_q);
3808af3c33fSJeff Kirsher 
3818af3c33fSJeff Kirsher 	for (i = 0; i < tcs; i++) {
3828af3c33fSJeff Kirsher 		netdev_set_tc_queue(dev, i, q, offset);
3838af3c33fSJeff Kirsher 		offset += q;
3848af3c33fSJeff Kirsher 	}
3858af3c33fSJeff Kirsher 
3868af3c33fSJeff Kirsher 	adapter->num_tx_queues = q * tcs;
3878af3c33fSJeff Kirsher 	adapter->num_rx_queues = q * tcs;
3888af3c33fSJeff Kirsher 
3898af3c33fSJeff Kirsher #ifdef IXGBE_FCOE
3908af3c33fSJeff Kirsher 	/* FCoE enabled queues require special configuration indexed
3918af3c33fSJeff Kirsher 	 * by feature specific indices and mask. Here we map FCoE
3928af3c33fSJeff Kirsher 	 * indices onto the DCB queue pairs allowing FCoE to own
3938af3c33fSJeff Kirsher 	 * configuration later.
3948af3c33fSJeff Kirsher 	 */
3958af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3968af3c33fSJeff Kirsher 		u8 prio_tc[MAX_USER_PRIORITY] = {0};
3978af3c33fSJeff Kirsher 		int tc;
3988af3c33fSJeff Kirsher 		struct ixgbe_ring_feature *f =
3998af3c33fSJeff Kirsher 					&adapter->ring_feature[RING_F_FCOE];
4008af3c33fSJeff Kirsher 
4018af3c33fSJeff Kirsher 		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
4028af3c33fSJeff Kirsher 		tc = prio_tc[adapter->fcoe.up];
4038af3c33fSJeff Kirsher 		f->indices = dev->tc_to_txq[tc].count;
4048af3c33fSJeff Kirsher 		f->mask = dev->tc_to_txq[tc].offset;
4058af3c33fSJeff Kirsher 	}
4068af3c33fSJeff Kirsher #endif
4078af3c33fSJeff Kirsher 
4088af3c33fSJeff Kirsher 	return true;
4098af3c33fSJeff Kirsher }
4108af3c33fSJeff Kirsher #endif
4118af3c33fSJeff Kirsher 
4128af3c33fSJeff Kirsher /**
41349ce9c2cSBen Hutchings  * ixgbe_set_num_queues - Allocate queues for device, feature dependent
4148af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
4158af3c33fSJeff Kirsher  *
4168af3c33fSJeff Kirsher  * This is the top level queue allocation routine.  The order here is very
4178af3c33fSJeff Kirsher  * important, starting with the "most" number of features turned on at once,
4188af3c33fSJeff Kirsher  * and ending with the smallest set of features.  This way large combinations
4198af3c33fSJeff Kirsher  * can be allocated if they're turned on, and smaller combinations are the
4208af3c33fSJeff Kirsher  * fallthrough conditions.
4218af3c33fSJeff Kirsher  *
4228af3c33fSJeff Kirsher  **/
4238af3c33fSJeff Kirsher static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4248af3c33fSJeff Kirsher {
4258af3c33fSJeff Kirsher 	/* Start with base case */
4268af3c33fSJeff Kirsher 	adapter->num_rx_queues = 1;
4278af3c33fSJeff Kirsher 	adapter->num_tx_queues = 1;
4288af3c33fSJeff Kirsher 	adapter->num_rx_pools = adapter->num_rx_queues;
4298af3c33fSJeff Kirsher 	adapter->num_rx_queues_per_pool = 1;
4308af3c33fSJeff Kirsher 
4318af3c33fSJeff Kirsher 	if (ixgbe_set_sriov_queues(adapter))
4328af3c33fSJeff Kirsher 		goto done;
4338af3c33fSJeff Kirsher 
4348af3c33fSJeff Kirsher #ifdef CONFIG_IXGBE_DCB
4358af3c33fSJeff Kirsher 	if (ixgbe_set_dcb_queues(adapter))
4368af3c33fSJeff Kirsher 		goto done;
4378af3c33fSJeff Kirsher 
4388af3c33fSJeff Kirsher #endif
4398af3c33fSJeff Kirsher #ifdef IXGBE_FCOE
4408af3c33fSJeff Kirsher 	if (ixgbe_set_fcoe_queues(adapter))
4418af3c33fSJeff Kirsher 		goto done;
4428af3c33fSJeff Kirsher 
4438af3c33fSJeff Kirsher #endif /* IXGBE_FCOE */
4448af3c33fSJeff Kirsher 	if (ixgbe_set_fdir_queues(adapter))
4458af3c33fSJeff Kirsher 		goto done;
4468af3c33fSJeff Kirsher 
4478af3c33fSJeff Kirsher 	if (ixgbe_set_rss_queues(adapter))
4488af3c33fSJeff Kirsher 		goto done;
4498af3c33fSJeff Kirsher 
4508af3c33fSJeff Kirsher 	/* fallback to base case */
4518af3c33fSJeff Kirsher 	adapter->num_rx_queues = 1;
4528af3c33fSJeff Kirsher 	adapter->num_tx_queues = 1;
4538af3c33fSJeff Kirsher 
4548af3c33fSJeff Kirsher done:
4558af3c33fSJeff Kirsher 	if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
4568af3c33fSJeff Kirsher 	    (adapter->netdev->reg_state == NETREG_UNREGISTERING))
4578af3c33fSJeff Kirsher 		return 0;
4588af3c33fSJeff Kirsher 
4598af3c33fSJeff Kirsher 	/* Notify the stack of the (possibly) reduced queue counts. */
4608af3c33fSJeff Kirsher 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4618af3c33fSJeff Kirsher 	return netif_set_real_num_rx_queues(adapter->netdev,
4628af3c33fSJeff Kirsher 					    adapter->num_rx_queues);
4638af3c33fSJeff Kirsher }
4648af3c33fSJeff Kirsher 
/**
 * ixgbe_acquire_msix_vectors - try to enable as many MSI-X vectors as possible
 * @adapter: board private structure
 * @vectors: number of vectors to start requesting
 *
 * Repeatedly asks the PCI core for MSI-X vectors, shrinking the request
 * to whatever the core says is available, until either the request
 * succeeds or it drops below the usable minimum.  On success the
 * MSIX_ENABLED flag is set and adapter->num_q_vectors is sized; on
 * failure the msix_entries table is freed and the driver will fall back
 * to MSI or legacy interrupts.
 **/
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		/* pci_enable_msix() returns 0 on success, a negative errno
		 * on hard failure, or a positive count of vectors that
		 * could be enabled instead — retry with that count
		 */
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
5148af3c33fSJeff Kirsher 
5158af3c33fSJeff Kirsher static void ixgbe_add_ring(struct ixgbe_ring *ring,
5168af3c33fSJeff Kirsher 			   struct ixgbe_ring_container *head)
5178af3c33fSJeff Kirsher {
5188af3c33fSJeff Kirsher 	ring->next = head->ring;
5198af3c33fSJeff Kirsher 	head->ring = ring;
5208af3c33fSJeff Kirsher 	head->count++;
5218af3c33fSJeff Kirsher }
5228af3c33fSJeff Kirsher 
5238af3c33fSJeff Kirsher /**
5248af3c33fSJeff Kirsher  * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
5258af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
526d0bfcdfdSAlexander Duyck  * @v_count: q_vectors allocated on adapter, used for ring interleaving
5278af3c33fSJeff Kirsher  * @v_idx: index of vector in adapter struct
528d0bfcdfdSAlexander Duyck  * @txr_count: total number of Tx rings to allocate
529d0bfcdfdSAlexander Duyck  * @txr_idx: index of first Tx ring to allocate
530d0bfcdfdSAlexander Duyck  * @rxr_count: total number of Rx rings to allocate
531d0bfcdfdSAlexander Duyck  * @rxr_idx: index of first Rx ring to allocate
5328af3c33fSJeff Kirsher  *
5338af3c33fSJeff Kirsher  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
5348af3c33fSJeff Kirsher  **/
535d0bfcdfdSAlexander Duyck static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
536d0bfcdfdSAlexander Duyck 				int v_count, int v_idx,
5378af3c33fSJeff Kirsher 				int txr_count, int txr_idx,
5388af3c33fSJeff Kirsher 				int rxr_count, int rxr_idx)
5398af3c33fSJeff Kirsher {
5408af3c33fSJeff Kirsher 	struct ixgbe_q_vector *q_vector;
5418af3c33fSJeff Kirsher 	struct ixgbe_ring *ring;
5428af3c33fSJeff Kirsher 	int node = -1;
5438af3c33fSJeff Kirsher 	int cpu = -1;
5448af3c33fSJeff Kirsher 	int ring_count, size;
5458af3c33fSJeff Kirsher 
5468af3c33fSJeff Kirsher 	ring_count = txr_count + rxr_count;
5478af3c33fSJeff Kirsher 	size = sizeof(struct ixgbe_q_vector) +
5488af3c33fSJeff Kirsher 	       (sizeof(struct ixgbe_ring) * ring_count);
5498af3c33fSJeff Kirsher 
5508af3c33fSJeff Kirsher 	/* customize cpu for Flow Director mapping */
5518af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5528af3c33fSJeff Kirsher 		if (cpu_online(v_idx)) {
5538af3c33fSJeff Kirsher 			cpu = v_idx;
5548af3c33fSJeff Kirsher 			node = cpu_to_node(cpu);
5558af3c33fSJeff Kirsher 		}
5568af3c33fSJeff Kirsher 	}
5578af3c33fSJeff Kirsher 
5588af3c33fSJeff Kirsher 	/* allocate q_vector and rings */
5598af3c33fSJeff Kirsher 	q_vector = kzalloc_node(size, GFP_KERNEL, node);
5608af3c33fSJeff Kirsher 	if (!q_vector)
5618af3c33fSJeff Kirsher 		q_vector = kzalloc(size, GFP_KERNEL);
5628af3c33fSJeff Kirsher 	if (!q_vector)
5638af3c33fSJeff Kirsher 		return -ENOMEM;
5648af3c33fSJeff Kirsher 
5658af3c33fSJeff Kirsher 	/* setup affinity mask and node */
5668af3c33fSJeff Kirsher 	if (cpu != -1)
5678af3c33fSJeff Kirsher 		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
5688af3c33fSJeff Kirsher 	else
5698af3c33fSJeff Kirsher 		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
5708af3c33fSJeff Kirsher 	q_vector->numa_node = node;
5718af3c33fSJeff Kirsher 
5728af3c33fSJeff Kirsher 	/* initialize NAPI */
5738af3c33fSJeff Kirsher 	netif_napi_add(adapter->netdev, &q_vector->napi,
5748af3c33fSJeff Kirsher 		       ixgbe_poll, 64);
5758af3c33fSJeff Kirsher 
5768af3c33fSJeff Kirsher 	/* tie q_vector and adapter together */
5778af3c33fSJeff Kirsher 	adapter->q_vector[v_idx] = q_vector;
5788af3c33fSJeff Kirsher 	q_vector->adapter = adapter;
5798af3c33fSJeff Kirsher 	q_vector->v_idx = v_idx;
5808af3c33fSJeff Kirsher 
5818af3c33fSJeff Kirsher 	/* initialize work limits */
5828af3c33fSJeff Kirsher 	q_vector->tx.work_limit = adapter->tx_work_limit;
5838af3c33fSJeff Kirsher 
5848af3c33fSJeff Kirsher 	/* initialize pointer to rings */
5858af3c33fSJeff Kirsher 	ring = q_vector->ring;
5868af3c33fSJeff Kirsher 
5878af3c33fSJeff Kirsher 	while (txr_count) {
5888af3c33fSJeff Kirsher 		/* assign generic ring traits */
5898af3c33fSJeff Kirsher 		ring->dev = &adapter->pdev->dev;
5908af3c33fSJeff Kirsher 		ring->netdev = adapter->netdev;
5918af3c33fSJeff Kirsher 
5928af3c33fSJeff Kirsher 		/* configure backlink on ring */
5938af3c33fSJeff Kirsher 		ring->q_vector = q_vector;
5948af3c33fSJeff Kirsher 
5958af3c33fSJeff Kirsher 		/* update q_vector Tx values */
5968af3c33fSJeff Kirsher 		ixgbe_add_ring(ring, &q_vector->tx);
5978af3c33fSJeff Kirsher 
5988af3c33fSJeff Kirsher 		/* apply Tx specific ring traits */
5998af3c33fSJeff Kirsher 		ring->count = adapter->tx_ring_count;
6008af3c33fSJeff Kirsher 		ring->queue_index = txr_idx;
6018af3c33fSJeff Kirsher 
6028af3c33fSJeff Kirsher 		/* assign ring to adapter */
6038af3c33fSJeff Kirsher 		adapter->tx_ring[txr_idx] = ring;
6048af3c33fSJeff Kirsher 
6058af3c33fSJeff Kirsher 		/* update count and index */
6068af3c33fSJeff Kirsher 		txr_count--;
607d0bfcdfdSAlexander Duyck 		txr_idx += v_count;
6088af3c33fSJeff Kirsher 
6098af3c33fSJeff Kirsher 		/* push pointer to next ring */
6108af3c33fSJeff Kirsher 		ring++;
6118af3c33fSJeff Kirsher 	}
6128af3c33fSJeff Kirsher 
6138af3c33fSJeff Kirsher 	while (rxr_count) {
6148af3c33fSJeff Kirsher 		/* assign generic ring traits */
6158af3c33fSJeff Kirsher 		ring->dev = &adapter->pdev->dev;
6168af3c33fSJeff Kirsher 		ring->netdev = adapter->netdev;
6178af3c33fSJeff Kirsher 
6188af3c33fSJeff Kirsher 		/* configure backlink on ring */
6198af3c33fSJeff Kirsher 		ring->q_vector = q_vector;
6208af3c33fSJeff Kirsher 
6218af3c33fSJeff Kirsher 		/* update q_vector Rx values */
6228af3c33fSJeff Kirsher 		ixgbe_add_ring(ring, &q_vector->rx);
6238af3c33fSJeff Kirsher 
6248af3c33fSJeff Kirsher 		/*
6258af3c33fSJeff Kirsher 		 * 82599 errata, UDP frames with a 0 checksum
6268af3c33fSJeff Kirsher 		 * can be marked as checksum errors.
6278af3c33fSJeff Kirsher 		 */
6288af3c33fSJeff Kirsher 		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6298af3c33fSJeff Kirsher 			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
6308af3c33fSJeff Kirsher 
631b2db497eSAlexander Duyck #ifdef IXGBE_FCOE
632b2db497eSAlexander Duyck 		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
633b2db497eSAlexander Duyck 			struct ixgbe_ring_feature *f;
634b2db497eSAlexander Duyck 			f = &adapter->ring_feature[RING_F_FCOE];
635b2db497eSAlexander Duyck 			if ((rxr_idx >= f->mask) &&
636b2db497eSAlexander Duyck 			    (rxr_idx < f->mask + f->indices))
63757efd44cSAlexander Duyck 				set_bit(__IXGBE_RX_FCOE, &ring->state);
638b2db497eSAlexander Duyck 		}
639b2db497eSAlexander Duyck 
640b2db497eSAlexander Duyck #endif /* IXGBE_FCOE */
6418af3c33fSJeff Kirsher 		/* apply Rx specific ring traits */
6428af3c33fSJeff Kirsher 		ring->count = adapter->rx_ring_count;
6438af3c33fSJeff Kirsher 		ring->queue_index = rxr_idx;
6448af3c33fSJeff Kirsher 
6458af3c33fSJeff Kirsher 		/* assign ring to adapter */
6468af3c33fSJeff Kirsher 		adapter->rx_ring[rxr_idx] = ring;
6478af3c33fSJeff Kirsher 
6488af3c33fSJeff Kirsher 		/* update count and index */
6498af3c33fSJeff Kirsher 		rxr_count--;
650d0bfcdfdSAlexander Duyck 		rxr_idx += v_count;
6518af3c33fSJeff Kirsher 
6528af3c33fSJeff Kirsher 		/* push pointer to next ring */
6538af3c33fSJeff Kirsher 		ring++;
6548af3c33fSJeff Kirsher 	}
6558af3c33fSJeff Kirsher 
6568af3c33fSJeff Kirsher 	return 0;
6578af3c33fSJeff Kirsher }
6588af3c33fSJeff Kirsher 
/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	/* drop the adapter's references to every ring owned by this vector */
	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	/* unhook the vector itself before tearing down its NAPI context */
	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}
6888af3c33fSJeff Kirsher 
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	/*
	 * When there are at least as many vectors as total queues, give each
	 * Rx queue a dedicated vector first; the Tx queues are then spread
	 * over the vectors that remain in the loop below.
	 */
	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			/* one Rx ring, zero Tx rings on this vector */
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		/*
		 * Divide the remaining rings evenly among the remaining
		 * vectors; DIV_ROUND_UP front-loads any remainder.
		 */
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	/* zero the counts so nothing else tries to use half-built queues */
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	/* unwind every vector allocated before the failure */
	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
7518af3c33fSJeff Kirsher 
7528af3c33fSJeff Kirsher /**
7538af3c33fSJeff Kirsher  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
7548af3c33fSJeff Kirsher  * @adapter: board private structure to initialize
7558af3c33fSJeff Kirsher  *
7568af3c33fSJeff Kirsher  * This function frees the memory allocated to the q_vectors.  In addition if
7578af3c33fSJeff Kirsher  * NAPI is enabled it will delete any references to the NAPI struct prior
7588af3c33fSJeff Kirsher  * to freeing the q_vector.
7598af3c33fSJeff Kirsher  **/
7608af3c33fSJeff Kirsher static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
7618af3c33fSJeff Kirsher {
76249c7ffbeSAlexander Duyck 	int v_idx = adapter->num_q_vectors;
7638af3c33fSJeff Kirsher 
76449c7ffbeSAlexander Duyck 	adapter->num_tx_queues = 0;
76549c7ffbeSAlexander Duyck 	adapter->num_rx_queues = 0;
76649c7ffbeSAlexander Duyck 	adapter->num_q_vectors = 0;
7678af3c33fSJeff Kirsher 
76849c7ffbeSAlexander Duyck 	while (v_idx--)
7698af3c33fSJeff Kirsher 		ixgbe_free_q_vector(adapter, v_idx);
7708af3c33fSJeff Kirsher }
7718af3c33fSJeff Kirsher 
7728af3c33fSJeff Kirsher static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
7738af3c33fSJeff Kirsher {
7748af3c33fSJeff Kirsher 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
7758af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
7768af3c33fSJeff Kirsher 		pci_disable_msix(adapter->pdev);
7778af3c33fSJeff Kirsher 		kfree(adapter->msix_entries);
7788af3c33fSJeff Kirsher 		adapter->msix_entries = NULL;
7798af3c33fSJeff Kirsher 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
7808af3c33fSJeff Kirsher 		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
7818af3c33fSJeff Kirsher 		pci_disable_msi(adapter->pdev);
7828af3c33fSJeff Kirsher 	}
7838af3c33fSJeff Kirsher }
7848af3c33fSJeff Kirsher 
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 *
 * Tries MSI-X first; on failure falls back to a single-vector setup
 * (MSI, or legacy interrupts if MSI also fails), which requires
 * disabling the multiqueue features (DCB, RSS, Flow Director, SR-IOV).
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		/* MSI-X acquired: nothing left to do on the fast path */
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	/* MSI-X unavailable: drop every feature that needs multiple vectors */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		e_err(probe,
		      "ATR is not supported while multiple "
		      "queues are disabled.  Disabling Flow Director\n");
	}
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->atr_sample_rate = 0;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	/* recompute queue counts now that the feature flags have changed */
	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	/* single vector shared by all queues in MSI/legacy mode */
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}
8648af3c33fSJeff Kirsher 
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 *
 * Returns 0 on success, negative errno on failure; on failure any
 * acquired interrupt resources are released before returning.
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	/* pick MSI-X/MSI/legacy; may shrink the queue counts chosen above */
	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		e_dev_err("Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* map the descriptor rings to hardware registers */
	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	/* interface starts in the down state until explicitly brought up */
	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
9118af3c33fSJeff Kirsher 
/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	/* free the q_vectors first, then tear down MSI-X/MSI itself */
	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
9278af3c33fSJeff Kirsher 
9288af3c33fSJeff Kirsher void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
9298af3c33fSJeff Kirsher 		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
9308af3c33fSJeff Kirsher {
9318af3c33fSJeff Kirsher 	struct ixgbe_adv_tx_context_desc *context_desc;
9328af3c33fSJeff Kirsher 	u16 i = tx_ring->next_to_use;
9338af3c33fSJeff Kirsher 
9348af3c33fSJeff Kirsher 	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
9358af3c33fSJeff Kirsher 
9368af3c33fSJeff Kirsher 	i++;
9378af3c33fSJeff Kirsher 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
9388af3c33fSJeff Kirsher 
9398af3c33fSJeff Kirsher 	/* set bits to identify this as an advanced context descriptor */
9408af3c33fSJeff Kirsher 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
9418af3c33fSJeff Kirsher 
9428af3c33fSJeff Kirsher 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
9438af3c33fSJeff Kirsher 	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
9448af3c33fSJeff Kirsher 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
9458af3c33fSJeff Kirsher 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
9468af3c33fSJeff Kirsher }
9478af3c33fSJeff Kirsher 
948