18af3c33fSJeff Kirsher /******************************************************************************* 28af3c33fSJeff Kirsher 38af3c33fSJeff Kirsher Intel 10 Gigabit PCI Express Linux driver 449425dfcSMark Rustad Copyright(c) 1999 - 2016 Intel Corporation. 58af3c33fSJeff Kirsher 68af3c33fSJeff Kirsher This program is free software; you can redistribute it and/or modify it 78af3c33fSJeff Kirsher under the terms and conditions of the GNU General Public License, 88af3c33fSJeff Kirsher version 2, as published by the Free Software Foundation. 98af3c33fSJeff Kirsher 108af3c33fSJeff Kirsher This program is distributed in the hope it will be useful, but WITHOUT 118af3c33fSJeff Kirsher ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 128af3c33fSJeff Kirsher FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 138af3c33fSJeff Kirsher more details. 148af3c33fSJeff Kirsher 158af3c33fSJeff Kirsher You should have received a copy of the GNU General Public License along with 168af3c33fSJeff Kirsher this program; if not, write to the Free Software Foundation, Inc., 178af3c33fSJeff Kirsher 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 188af3c33fSJeff Kirsher 198af3c33fSJeff Kirsher The full GNU General Public License is included in this distribution in 208af3c33fSJeff Kirsher the file called "COPYING". 218af3c33fSJeff Kirsher 228af3c33fSJeff Kirsher Contact Information: 23b89aae71SJacob Keller Linux NICS <linux.nics@intel.com> 248af3c33fSJeff Kirsher e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 258af3c33fSJeff Kirsher Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 268af3c33fSJeff Kirsher 278af3c33fSJeff Kirsher *******************************************************************************/ 288af3c33fSJeff Kirsher 298af3c33fSJeff Kirsher #include "ixgbe.h" 308af3c33fSJeff Kirsher #include "ixgbe_sriov.h" 318af3c33fSJeff Kirsher 32800bd607SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 3373079ea0SAlexander Duyck /** 3473079ea0SAlexander Duyck * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV 3573079ea0SAlexander Duyck * @adapter: board private structure to initialize 3673079ea0SAlexander Duyck * 3773079ea0SAlexander Duyck * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It 3873079ea0SAlexander Duyck * will also try to cache the proper offsets if RSS/FCoE are enabled along 3973079ea0SAlexander Duyck * with VMDq. 4073079ea0SAlexander Duyck * 4173079ea0SAlexander Duyck **/ 4273079ea0SAlexander Duyck static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) 4373079ea0SAlexander Duyck { 4473079ea0SAlexander Duyck #ifdef IXGBE_FCOE 4573079ea0SAlexander Duyck struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 4673079ea0SAlexander Duyck #endif /* IXGBE_FCOE */ 4773079ea0SAlexander Duyck struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 4873079ea0SAlexander Duyck int i; 4973079ea0SAlexander Duyck u16 reg_idx; 5073079ea0SAlexander Duyck u8 tcs = netdev_get_num_tc(adapter->netdev); 5173079ea0SAlexander Duyck 5273079ea0SAlexander Duyck /* verify we have DCB queueing enabled before proceeding */ 5373079ea0SAlexander Duyck if (tcs <= 1) 5473079ea0SAlexander Duyck return false; 5573079ea0SAlexander Duyck 5673079ea0SAlexander Duyck /* verify we have VMDq enabled before proceeding */ 5773079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 5873079ea0SAlexander Duyck return false; 5973079ea0SAlexander Duyck 6073079ea0SAlexander Duyck /* start at VMDq register offset for SR-IOV enabled setups */ 6173079ea0SAlexander Duyck reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 6273079ea0SAlexander Duyck for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { 6373079ea0SAlexander Duyck /* If we are greater than indices move to next pool */ 6473079ea0SAlexander Duyck if ((reg_idx & ~vmdq->mask) >= tcs) 6573079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 6673079ea0SAlexander Duyck adapter->rx_ring[i]->reg_idx = reg_idx; 6773079ea0SAlexander Duyck } 6873079ea0SAlexander Duyck 6973079ea0SAlexander Duyck reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 7073079ea0SAlexander Duyck for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { 7173079ea0SAlexander Duyck /* If we are greater than indices move to next pool */ 7273079ea0SAlexander Duyck if ((reg_idx & ~vmdq->mask) >= tcs) 7373079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 7473079ea0SAlexander Duyck adapter->tx_ring[i]->reg_idx = reg_idx; 7573079ea0SAlexander Duyck } 7673079ea0SAlexander Duyck 7773079ea0SAlexander Duyck #ifdef IXGBE_FCOE 7873079ea0SAlexander Duyck /* nothing to do if FCoE is disabled */ 7973079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 8073079ea0SAlexander Duyck return true; 8173079ea0SAlexander Duyck 8273079ea0SAlexander Duyck /* The work is already done if the FCoE ring is shared */ 8373079ea0SAlexander Duyck if (fcoe->offset < tcs) 8473079ea0SAlexander Duyck return true; 8573079ea0SAlexander Duyck 8673079ea0SAlexander Duyck /* The FCoE rings exist separately, we need to move their reg_idx */ 8773079ea0SAlexander Duyck if (fcoe->indices) { 8873079ea0SAlexander Duyck u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); 8973079ea0SAlexander Duyck u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); 9073079ea0SAlexander Duyck 9173079ea0SAlexander Duyck reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; 9273079ea0SAlexander Duyck for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { 9373079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; 9473079ea0SAlexander Duyck adapter->rx_ring[i]->reg_idx = reg_idx; 9573079ea0SAlexander Duyck reg_idx++; 9673079ea0SAlexander Duyck } 9773079ea0SAlexander Duyck 9873079ea0SAlexander Duyck reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; 9973079ea0SAlexander Duyck for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { 10073079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; 10173079ea0SAlexander Duyck adapter->tx_ring[i]->reg_idx = reg_idx; 10273079ea0SAlexander Duyck reg_idx++; 10373079ea0SAlexander Duyck } 10473079ea0SAlexander Duyck } 10573079ea0SAlexander Duyck 10673079ea0SAlexander Duyck #endif /* IXGBE_FCOE */ 10773079ea0SAlexander Duyck return true; 10873079ea0SAlexander Duyck } 10973079ea0SAlexander Duyck 1108af3c33fSJeff Kirsher /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ 1118af3c33fSJeff Kirsher static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, 1128af3c33fSJeff Kirsher unsigned int *tx, unsigned int *rx) 1138af3c33fSJeff Kirsher { 1148af3c33fSJeff Kirsher struct net_device *dev = adapter->netdev; 1158af3c33fSJeff Kirsher struct ixgbe_hw *hw = &adapter->hw; 1168af3c33fSJeff Kirsher u8 num_tcs = netdev_get_num_tc(dev); 1178af3c33fSJeff Kirsher 1188af3c33fSJeff Kirsher *tx = 0; 1198af3c33fSJeff Kirsher *rx = 0; 1208af3c33fSJeff Kirsher 1218af3c33fSJeff Kirsher switch (hw->mac.type) { 1228af3c33fSJeff Kirsher case ixgbe_mac_82598EB: 1234ae63730SAlexander Duyck /* TxQs/TC: 4 RxQs/TC: 8 */ 1244ae63730SAlexander Duyck *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ 1254ae63730SAlexander Duyck *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ 1268af3c33fSJeff Kirsher break; 1278af3c33fSJeff Kirsher case ixgbe_mac_82599EB: 1288af3c33fSJeff Kirsher case ixgbe_mac_X540: 1299a75a1acSDon Skidmore case ixgbe_mac_X550: 1309a75a1acSDon Skidmore case ixgbe_mac_X550EM_x: 13149425dfcSMark Rustad case ixgbe_mac_x550em_a: 1328af3c33fSJeff Kirsher if (num_tcs > 4) { 1334ae63730SAlexander Duyck /* 1344ae63730SAlexander Duyck * TCs : TC0/1 TC2/3 TC4-7 1354ae63730SAlexander Duyck * TxQs/TC: 32 16 8 1364ae63730SAlexander Duyck * RxQs/TC: 16 16 16 1374ae63730SAlexander Duyck */ 1388af3c33fSJeff Kirsher *rx = tc << 4; 1394ae63730SAlexander Duyck if (tc < 3) 1404ae63730SAlexander Duyck *tx = tc << 5; /* 0, 32, 64 */ 1414ae63730SAlexander Duyck else if (tc < 5) 1424ae63730SAlexander Duyck *tx = (tc + 2) << 4; /* 80, 96 */ 1434ae63730SAlexander Duyck else 1444ae63730SAlexander Duyck *tx = (tc + 8) << 3; /* 104, 112, 120 */ 1458af3c33fSJeff Kirsher } else { 1464ae63730SAlexander Duyck /* 1474ae63730SAlexander Duyck * TCs : TC0 TC1 TC2/3 1484ae63730SAlexander Duyck * TxQs/TC: 64 32 16 1494ae63730SAlexander Duyck * RxQs/TC: 32 32 32 1504ae63730SAlexander Duyck */ 1518af3c33fSJeff Kirsher *rx = tc << 5; 1524ae63730SAlexander Duyck if (tc < 2) 1534ae63730SAlexander Duyck *tx = tc << 6; /* 0, 64 */ 1544ae63730SAlexander Duyck else 1554ae63730SAlexander Duyck *tx = (tc + 4) << 4; /* 96, 112 */ 1568af3c33fSJeff Kirsher } 1578af3c33fSJeff Kirsher default: 1588af3c33fSJeff Kirsher break; 1598af3c33fSJeff Kirsher } 1608af3c33fSJeff Kirsher } 1618af3c33fSJeff Kirsher 1628af3c33fSJeff Kirsher /** 1638af3c33fSJeff Kirsher * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB 1648af3c33fSJeff Kirsher * @adapter: board private structure to initialize 1658af3c33fSJeff Kirsher * 1668af3c33fSJeff Kirsher * Cache the descriptor ring offsets for DCB to the assigned rings. 1678af3c33fSJeff Kirsher * 1688af3c33fSJeff Kirsher **/ 1694ae63730SAlexander Duyck static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) 1708af3c33fSJeff Kirsher { 1718af3c33fSJeff Kirsher struct net_device *dev = adapter->netdev; 1724ae63730SAlexander Duyck unsigned int tx_idx, rx_idx; 1734ae63730SAlexander Duyck int tc, offset, rss_i, i; 1748af3c33fSJeff Kirsher u8 num_tcs = netdev_get_num_tc(dev); 1758af3c33fSJeff Kirsher 1764ae63730SAlexander Duyck /* verify we have DCB queueing enabled before proceeding */ 1774ae63730SAlexander Duyck if (num_tcs <= 1) 1788af3c33fSJeff Kirsher return false; 1798af3c33fSJeff Kirsher 1804ae63730SAlexander Duyck rss_i = adapter->ring_feature[RING_F_RSS].indices; 1818af3c33fSJeff Kirsher 1824ae63730SAlexander Duyck for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { 1834ae63730SAlexander Duyck ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); 1844ae63730SAlexander Duyck for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { 1854ae63730SAlexander Duyck adapter->tx_ring[offset + i]->reg_idx = tx_idx; 1864ae63730SAlexander Duyck adapter->rx_ring[offset + i]->reg_idx = rx_idx; 1874ae63730SAlexander Duyck adapter->tx_ring[offset + i]->dcb_tc = tc; 1884ae63730SAlexander Duyck adapter->rx_ring[offset + i]->dcb_tc = tc; 1898af3c33fSJeff Kirsher } 1908af3c33fSJeff Kirsher } 1918af3c33fSJeff Kirsher 1928af3c33fSJeff Kirsher return true; 1938af3c33fSJeff Kirsher } 194d411a936SAlexander Duyck 1958af3c33fSJeff Kirsher #endif 1968af3c33fSJeff Kirsher /** 1978af3c33fSJeff Kirsher * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov 1988af3c33fSJeff Kirsher * @adapter: board private structure to initialize 1998af3c33fSJeff Kirsher * 2008af3c33fSJeff Kirsher * SR-IOV doesn't use any descriptor rings but changes the default if 2018af3c33fSJeff Kirsher * no other mapping is used. 2028af3c33fSJeff Kirsher * 2038af3c33fSJeff Kirsher */ 20473079ea0SAlexander Duyck static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) 2058af3c33fSJeff Kirsher { 20673079ea0SAlexander Duyck #ifdef IXGBE_FCOE 20773079ea0SAlexander Duyck struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 20873079ea0SAlexander Duyck #endif /* IXGBE_FCOE */ 20973079ea0SAlexander Duyck struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 21073079ea0SAlexander Duyck struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; 21173079ea0SAlexander Duyck int i; 21273079ea0SAlexander Duyck u16 reg_idx; 21373079ea0SAlexander Duyck 21473079ea0SAlexander Duyck /* only proceed if VMDq is enabled */ 21573079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) 2168af3c33fSJeff Kirsher return false; 21773079ea0SAlexander Duyck 21873079ea0SAlexander Duyck /* start at VMDq register offset for SR-IOV enabled setups */ 21973079ea0SAlexander Duyck reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 22073079ea0SAlexander Duyck for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { 22173079ea0SAlexander Duyck #ifdef IXGBE_FCOE 22273079ea0SAlexander Duyck /* Allow first FCoE queue to be mapped as RSS */ 22373079ea0SAlexander Duyck if (fcoe->offset && (i > fcoe->offset)) 22473079ea0SAlexander Duyck break; 22573079ea0SAlexander Duyck #endif 22673079ea0SAlexander Duyck /* If we are greater than indices move to next pool */ 22773079ea0SAlexander Duyck if ((reg_idx & ~vmdq->mask) >= rss->indices) 22873079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 22973079ea0SAlexander Duyck adapter->rx_ring[i]->reg_idx = reg_idx; 23073079ea0SAlexander Duyck } 23173079ea0SAlexander Duyck 23273079ea0SAlexander Duyck #ifdef IXGBE_FCOE 23373079ea0SAlexander Duyck /* FCoE uses a linear block of queues so just assigning 1:1 */ 23473079ea0SAlexander Duyck for (; i < adapter->num_rx_queues; i++, reg_idx++) 23573079ea0SAlexander Duyck adapter->rx_ring[i]->reg_idx = reg_idx; 23673079ea0SAlexander Duyck 23773079ea0SAlexander Duyck #endif 23873079ea0SAlexander Duyck reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 23973079ea0SAlexander Duyck for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { 24073079ea0SAlexander Duyck #ifdef IXGBE_FCOE 24173079ea0SAlexander Duyck /* Allow first FCoE queue to be mapped as RSS */ 24273079ea0SAlexander Duyck if (fcoe->offset && (i > fcoe->offset)) 24373079ea0SAlexander Duyck break; 24473079ea0SAlexander Duyck #endif 24573079ea0SAlexander Duyck /* If we are greater than indices move to next pool */ 24673079ea0SAlexander Duyck if ((reg_idx & rss->mask) >= rss->indices) 24773079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 24873079ea0SAlexander Duyck adapter->tx_ring[i]->reg_idx = reg_idx; 24973079ea0SAlexander Duyck } 25073079ea0SAlexander Duyck 25173079ea0SAlexander Duyck #ifdef IXGBE_FCOE 25273079ea0SAlexander Duyck /* FCoE uses a linear block of queues so just assigning 1:1 */ 25373079ea0SAlexander Duyck for (; i < adapter->num_tx_queues; i++, reg_idx++) 25473079ea0SAlexander Duyck adapter->tx_ring[i]->reg_idx = reg_idx; 25573079ea0SAlexander Duyck 25673079ea0SAlexander Duyck #endif 25773079ea0SAlexander Duyck 25873079ea0SAlexander Duyck return true; 2598af3c33fSJeff Kirsher } 2608af3c33fSJeff Kirsher 2618af3c33fSJeff Kirsher /** 262d411a936SAlexander Duyck * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS 263d411a936SAlexander Duyck * @adapter: board private structure to initialize 264d411a936SAlexander Duyck * 265d411a936SAlexander Duyck * Cache the descriptor ring offsets for RSS to the assigned rings. 266d411a936SAlexander Duyck * 267d411a936SAlexander Duyck **/ 268d411a936SAlexander Duyck static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 269d411a936SAlexander Duyck { 270d411a936SAlexander Duyck int i; 271d411a936SAlexander Duyck 272d411a936SAlexander Duyck for (i = 0; i < adapter->num_rx_queues; i++) 273d411a936SAlexander Duyck adapter->rx_ring[i]->reg_idx = i; 274d411a936SAlexander Duyck for (i = 0; i < adapter->num_tx_queues; i++) 275d411a936SAlexander Duyck adapter->tx_ring[i]->reg_idx = i; 276d411a936SAlexander Duyck 277d411a936SAlexander Duyck return true; 278d411a936SAlexander Duyck } 279d411a936SAlexander Duyck 280d411a936SAlexander Duyck /** 2818af3c33fSJeff Kirsher * ixgbe_cache_ring_register - Descriptor ring to register mapping 2828af3c33fSJeff Kirsher * @adapter: board private structure to initialize 2838af3c33fSJeff Kirsher * 2848af3c33fSJeff Kirsher * Once we know the feature-set enabled for the device, we'll cache 2858af3c33fSJeff Kirsher * the register offset the descriptor ring is assigned to. 2868af3c33fSJeff Kirsher * 2878af3c33fSJeff Kirsher * Note, the order the various feature calls is important. It must start with 2888af3c33fSJeff Kirsher * the "most" features enabled at the same time, then trickle down to the 2898af3c33fSJeff Kirsher * least amount of features turned on at once. 2908af3c33fSJeff Kirsher **/ 2918af3c33fSJeff Kirsher static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2928af3c33fSJeff Kirsher { 2938af3c33fSJeff Kirsher /* start with default case */ 2948af3c33fSJeff Kirsher adapter->rx_ring[0]->reg_idx = 0; 2958af3c33fSJeff Kirsher adapter->tx_ring[0]->reg_idx = 0; 2968af3c33fSJeff Kirsher 29773079ea0SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 29873079ea0SAlexander Duyck if (ixgbe_cache_ring_dcb_sriov(adapter)) 29973079ea0SAlexander Duyck return; 30073079ea0SAlexander Duyck 30173079ea0SAlexander Duyck if (ixgbe_cache_ring_dcb(adapter)) 30273079ea0SAlexander Duyck return; 30373079ea0SAlexander Duyck 30473079ea0SAlexander Duyck #endif 3058af3c33fSJeff Kirsher if (ixgbe_cache_ring_sriov(adapter)) 3068af3c33fSJeff Kirsher return; 3078af3c33fSJeff Kirsher 308d411a936SAlexander Duyck ixgbe_cache_ring_rss(adapter); 3098af3c33fSJeff Kirsher } 3108af3c33fSJeff Kirsher 311d411a936SAlexander Duyck #define IXGBE_RSS_16Q_MASK 0xF 312d411a936SAlexander Duyck #define IXGBE_RSS_8Q_MASK 0x7 313d411a936SAlexander Duyck #define IXGBE_RSS_4Q_MASK 0x3 314d411a936SAlexander Duyck #define IXGBE_RSS_2Q_MASK 0x1 315d411a936SAlexander Duyck #define IXGBE_RSS_DISABLED_MASK 0x0 316d411a936SAlexander Duyck 317d411a936SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 31873079ea0SAlexander Duyck /** 31973079ea0SAlexander Duyck * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB 32073079ea0SAlexander Duyck * @adapter: board private structure to initialize 32173079ea0SAlexander Duyck * 32273079ea0SAlexander Duyck * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues 32373079ea0SAlexander Duyck * and VM pools where appropriate. Also assign queues based on DCB 32473079ea0SAlexander Duyck * priorities and map accordingly.. 32573079ea0SAlexander Duyck * 32673079ea0SAlexander Duyck **/ 32773079ea0SAlexander Duyck static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) 32873079ea0SAlexander Duyck { 32973079ea0SAlexander Duyck int i; 33073079ea0SAlexander Duyck u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; 33173079ea0SAlexander Duyck u16 vmdq_m = 0; 33273079ea0SAlexander Duyck #ifdef IXGBE_FCOE 33373079ea0SAlexander Duyck u16 fcoe_i = 0; 33473079ea0SAlexander Duyck #endif 33573079ea0SAlexander Duyck u8 tcs = netdev_get_num_tc(adapter->netdev); 33673079ea0SAlexander Duyck 33773079ea0SAlexander Duyck /* verify we have DCB queueing enabled before proceeding */ 33873079ea0SAlexander Duyck if (tcs <= 1) 33973079ea0SAlexander Duyck return false; 34073079ea0SAlexander Duyck 34173079ea0SAlexander Duyck /* verify we have VMDq enabled before proceeding */ 34273079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 34373079ea0SAlexander Duyck return false; 34473079ea0SAlexander Duyck 34573079ea0SAlexander Duyck /* Add starting offset to total pool count */ 34673079ea0SAlexander Duyck vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; 34773079ea0SAlexander Duyck 34873079ea0SAlexander Duyck /* 16 pools w/ 8 TC per pool */ 34973079ea0SAlexander Duyck if (tcs > 4) { 35073079ea0SAlexander Duyck vmdq_i = min_t(u16, vmdq_i, 16); 35173079ea0SAlexander Duyck vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; 35273079ea0SAlexander Duyck /* 32 pools w/ 4 TC per pool */ 35373079ea0SAlexander Duyck } else { 35473079ea0SAlexander Duyck vmdq_i = min_t(u16, vmdq_i, 32); 35573079ea0SAlexander Duyck vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; 35673079ea0SAlexander Duyck } 35773079ea0SAlexander Duyck 35873079ea0SAlexander Duyck #ifdef IXGBE_FCOE 35973079ea0SAlexander Duyck /* queues in the remaining pools are available for FCoE */ 36073079ea0SAlexander Duyck fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; 36173079ea0SAlexander Duyck 36273079ea0SAlexander Duyck #endif 36373079ea0SAlexander Duyck /* remove the starting offset from the pool count */ 36473079ea0SAlexander Duyck vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; 36573079ea0SAlexander Duyck 36673079ea0SAlexander Duyck /* save features for later use */ 36773079ea0SAlexander Duyck adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; 36873079ea0SAlexander Duyck adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; 36973079ea0SAlexander Duyck 37073079ea0SAlexander Duyck /* 37173079ea0SAlexander Duyck * We do not support DCB, VMDq, and RSS all simultaneously 37273079ea0SAlexander Duyck * so we will disable RSS since it is the lowest priority 37373079ea0SAlexander Duyck */ 37473079ea0SAlexander Duyck adapter->ring_feature[RING_F_RSS].indices = 1; 37573079ea0SAlexander Duyck adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; 37673079ea0SAlexander Duyck 37739cb681bSAlexander Duyck /* disable ATR as it is not supported when VMDq is enabled */ 37839cb681bSAlexander Duyck adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 37939cb681bSAlexander Duyck 38073079ea0SAlexander Duyck adapter->num_rx_pools = vmdq_i; 38173079ea0SAlexander Duyck adapter->num_rx_queues_per_pool = tcs; 38273079ea0SAlexander Duyck 38373079ea0SAlexander Duyck adapter->num_tx_queues = vmdq_i * tcs; 38473079ea0SAlexander Duyck adapter->num_rx_queues = vmdq_i * tcs; 38573079ea0SAlexander Duyck 38673079ea0SAlexander Duyck #ifdef IXGBE_FCOE 38773079ea0SAlexander Duyck if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 38873079ea0SAlexander Duyck struct ixgbe_ring_feature *fcoe; 38973079ea0SAlexander Duyck 39073079ea0SAlexander Duyck fcoe = &adapter->ring_feature[RING_F_FCOE]; 39173079ea0SAlexander Duyck 39273079ea0SAlexander Duyck /* limit ourselves based on feature limits */ 39373079ea0SAlexander Duyck fcoe_i = min_t(u16, fcoe_i, fcoe->limit); 39473079ea0SAlexander Duyck 39573079ea0SAlexander Duyck if (fcoe_i) { 39673079ea0SAlexander Duyck /* alloc queues for FCoE separately */ 39773079ea0SAlexander Duyck fcoe->indices = fcoe_i; 39873079ea0SAlexander Duyck fcoe->offset = vmdq_i * tcs; 39973079ea0SAlexander Duyck 40073079ea0SAlexander Duyck /* add queues to adapter */ 40173079ea0SAlexander Duyck adapter->num_tx_queues += fcoe_i; 40273079ea0SAlexander Duyck adapter->num_rx_queues += fcoe_i; 40373079ea0SAlexander Duyck } else if (tcs > 1) { 40473079ea0SAlexander Duyck /* use queue belonging to FcoE TC */ 40573079ea0SAlexander Duyck fcoe->indices = 1; 40673079ea0SAlexander Duyck fcoe->offset = ixgbe_fcoe_get_tc(adapter); 40773079ea0SAlexander Duyck } else { 40873079ea0SAlexander Duyck adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 40973079ea0SAlexander Duyck 41073079ea0SAlexander Duyck fcoe->indices = 0; 41173079ea0SAlexander Duyck fcoe->offset = 0; 41273079ea0SAlexander Duyck } 41373079ea0SAlexander Duyck } 41473079ea0SAlexander Duyck 41573079ea0SAlexander Duyck #endif /* IXGBE_FCOE */ 41673079ea0SAlexander Duyck /* configure TC to queue mapping */ 41773079ea0SAlexander Duyck for (i = 0; i < tcs; i++) 41873079ea0SAlexander Duyck netdev_set_tc_queue(adapter->netdev, i, 1, i); 41973079ea0SAlexander Duyck 42073079ea0SAlexander Duyck return true; 42173079ea0SAlexander Duyck } 42273079ea0SAlexander Duyck 423d411a936SAlexander Duyck static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) 424d411a936SAlexander Duyck { 425d411a936SAlexander Duyck struct net_device *dev = adapter->netdev; 426d411a936SAlexander Duyck struct ixgbe_ring_feature *f; 427d411a936SAlexander Duyck int rss_i, rss_m, i; 428d411a936SAlexander Duyck int tcs; 429d411a936SAlexander Duyck 430d411a936SAlexander Duyck /* Map queue offset and counts onto allocated tx queues */ 431d411a936SAlexander Duyck tcs = netdev_get_num_tc(dev); 432d411a936SAlexander Duyck 433d411a936SAlexander Duyck /* verify we have DCB queueing enabled before proceeding */ 434d411a936SAlexander Duyck if (tcs <= 1) 435d411a936SAlexander Duyck return false; 436d411a936SAlexander Duyck 437d411a936SAlexander Duyck /* determine the upper limit for our current DCB mode */ 438d411a936SAlexander Duyck rss_i = dev->num_tx_queues / tcs; 439d411a936SAlexander Duyck if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 440d411a936SAlexander Duyck /* 8 TC w/ 4 queues per TC */ 441d411a936SAlexander Duyck rss_i = min_t(u16, rss_i, 4); 442d411a936SAlexander Duyck rss_m = IXGBE_RSS_4Q_MASK; 443d411a936SAlexander Duyck } else if (tcs > 4) { 444d411a936SAlexander Duyck /* 8 TC w/ 8 queues per TC */ 445d411a936SAlexander Duyck rss_i = min_t(u16, rss_i, 8); 446d411a936SAlexander Duyck rss_m = IXGBE_RSS_8Q_MASK; 447d411a936SAlexander Duyck } else { 448d411a936SAlexander Duyck /* 4 TC w/ 16 queues per TC */ 449d411a936SAlexander Duyck rss_i = min_t(u16, rss_i, 16); 450d411a936SAlexander Duyck rss_m = IXGBE_RSS_16Q_MASK; 451d411a936SAlexander Duyck } 452d411a936SAlexander Duyck 453d411a936SAlexander Duyck /* set RSS mask and indices */ 454d411a936SAlexander Duyck f = &adapter->ring_feature[RING_F_RSS]; 455d411a936SAlexander Duyck rss_i = min_t(int, rss_i, f->limit); 456d411a936SAlexander Duyck f->indices = rss_i; 457d411a936SAlexander Duyck f->mask = rss_m; 458d411a936SAlexander Duyck 45939cb681bSAlexander Duyck /* disable ATR as it is not supported when multiple TCs are enabled */ 46039cb681bSAlexander Duyck adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 46139cb681bSAlexander Duyck 462d411a936SAlexander Duyck #ifdef IXGBE_FCOE 463d411a936SAlexander Duyck /* FCoE enabled queues require special configuration indexed 464d411a936SAlexander Duyck * by feature specific indices and offset. Here we map FCoE 465d411a936SAlexander Duyck * indices onto the DCB queue pairs allowing FCoE to own 466d411a936SAlexander Duyck * configuration later. 467d411a936SAlexander Duyck */ 468d411a936SAlexander Duyck if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 469d411a936SAlexander Duyck u8 tc = ixgbe_fcoe_get_tc(adapter); 470d411a936SAlexander Duyck 471d411a936SAlexander Duyck f = &adapter->ring_feature[RING_F_FCOE]; 472d411a936SAlexander Duyck f->indices = min_t(u16, rss_i, f->limit); 473d411a936SAlexander Duyck f->offset = rss_i * tc; 474d411a936SAlexander Duyck } 475d411a936SAlexander Duyck 476d411a936SAlexander Duyck #endif /* IXGBE_FCOE */ 477d411a936SAlexander Duyck for (i = 0; i < tcs; i++) 478d411a936SAlexander Duyck netdev_set_tc_queue(dev, i, rss_i, rss_i * i); 479d411a936SAlexander Duyck 480d411a936SAlexander Duyck adapter->num_tx_queues = rss_i * tcs; 481d411a936SAlexander Duyck adapter->num_rx_queues = rss_i * tcs; 482d411a936SAlexander Duyck 483d411a936SAlexander Duyck return true; 484d411a936SAlexander Duyck } 485d411a936SAlexander Duyck 486d411a936SAlexander Duyck #endif 4878af3c33fSJeff Kirsher /** 48873079ea0SAlexander Duyck * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices 48973079ea0SAlexander Duyck * @adapter: board private structure to initialize 49073079ea0SAlexander Duyck * 49173079ea0SAlexander Duyck * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues 49273079ea0SAlexander Duyck * and VM pools where appropriate. If RSS is available, then also try and 49373079ea0SAlexander Duyck * enable RSS and map accordingly. 49473079ea0SAlexander Duyck * 49573079ea0SAlexander Duyck **/ 49673079ea0SAlexander Duyck static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) 49773079ea0SAlexander Duyck { 49873079ea0SAlexander Duyck u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; 49973079ea0SAlexander Duyck u16 vmdq_m = 0; 50073079ea0SAlexander Duyck u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; 50173079ea0SAlexander Duyck u16 rss_m = IXGBE_RSS_DISABLED_MASK; 50273079ea0SAlexander Duyck #ifdef IXGBE_FCOE 50373079ea0SAlexander Duyck u16 fcoe_i = 0; 50473079ea0SAlexander Duyck #endif 5052a47fa45SJohn Fastabend bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); 50673079ea0SAlexander Duyck 50773079ea0SAlexander Duyck /* only proceed if SR-IOV is enabled */ 50873079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 50973079ea0SAlexander Duyck return false; 51073079ea0SAlexander Duyck 51173079ea0SAlexander Duyck /* Add starting offset to total pool count */ 51273079ea0SAlexander Duyck vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; 51373079ea0SAlexander Duyck 51473079ea0SAlexander Duyck /* double check we are limited to maximum pools */ 51573079ea0SAlexander Duyck vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); 51673079ea0SAlexander Duyck 51773079ea0SAlexander Duyck /* 64 pool mode with 2 queues per pool */ 5182a47fa45SJohn Fastabend if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) { 51973079ea0SAlexander Duyck vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; 52073079ea0SAlexander Duyck rss_m = IXGBE_RSS_2Q_MASK; 52173079ea0SAlexander Duyck rss_i = min_t(u16, rss_i, 2); 52273079ea0SAlexander Duyck /* 32 pool mode with 4 queues per pool */ 52373079ea0SAlexander Duyck } else { 52473079ea0SAlexander Duyck vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; 52573079ea0SAlexander Duyck rss_m = IXGBE_RSS_4Q_MASK; 52673079ea0SAlexander Duyck rss_i = 4; 52773079ea0SAlexander Duyck } 52873079ea0SAlexander Duyck 52973079ea0SAlexander Duyck #ifdef IXGBE_FCOE 53073079ea0SAlexander Duyck /* queues in the remaining pools are available for FCoE */ 53173079ea0SAlexander Duyck fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); 53273079ea0SAlexander Duyck 53373079ea0SAlexander Duyck #endif 53473079ea0SAlexander Duyck /* remove the starting offset from the pool count */ 53573079ea0SAlexander Duyck vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; 53673079ea0SAlexander Duyck 53773079ea0SAlexander Duyck /* save features for later use */ 53873079ea0SAlexander Duyck adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; 53973079ea0SAlexander Duyck adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; 54073079ea0SAlexander Duyck 54173079ea0SAlexander Duyck /* limit RSS based on user input and save for later use */ 54273079ea0SAlexander Duyck adapter->ring_feature[RING_F_RSS].indices = rss_i; 54373079ea0SAlexander Duyck adapter->ring_feature[RING_F_RSS].mask = rss_m; 54473079ea0SAlexander Duyck 54573079ea0SAlexander Duyck adapter->num_rx_pools = vmdq_i; 54673079ea0SAlexander Duyck adapter->num_rx_queues_per_pool = rss_i; 54773079ea0SAlexander Duyck 54873079ea0SAlexander Duyck adapter->num_rx_queues = vmdq_i * rss_i; 54973079ea0SAlexander Duyck adapter->num_tx_queues = vmdq_i * rss_i; 55073079ea0SAlexander Duyck 55173079ea0SAlexander Duyck /* disable ATR as it is not supported when VMDq is enabled */ 55273079ea0SAlexander Duyck adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 55373079ea0SAlexander Duyck 55473079ea0SAlexander Duyck #ifdef IXGBE_FCOE 55573079ea0SAlexander Duyck /* 55673079ea0SAlexander Duyck * FCoE can use rings from adjacent buffers to allow RSS 55773079ea0SAlexander Duyck * like behavior. To account for this we need to add the 55873079ea0SAlexander Duyck * FCoE indices to the total ring count. 55973079ea0SAlexander Duyck */ 56073079ea0SAlexander Duyck if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 56173079ea0SAlexander Duyck struct ixgbe_ring_feature *fcoe; 56273079ea0SAlexander Duyck 56373079ea0SAlexander Duyck fcoe = &adapter->ring_feature[RING_F_FCOE]; 56473079ea0SAlexander Duyck 56573079ea0SAlexander Duyck /* limit ourselves based on feature limits */ 56673079ea0SAlexander Duyck fcoe_i = min_t(u16, fcoe_i, fcoe->limit); 56773079ea0SAlexander Duyck 56873079ea0SAlexander Duyck if (vmdq_i > 1 && fcoe_i) { 56973079ea0SAlexander Duyck /* alloc queues for FCoE separately */ 57073079ea0SAlexander Duyck fcoe->indices = fcoe_i; 57173079ea0SAlexander Duyck fcoe->offset = vmdq_i * rss_i; 57273079ea0SAlexander Duyck } else { 57373079ea0SAlexander Duyck /* merge FCoE queues with RSS queues */ 57473079ea0SAlexander Duyck fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); 57573079ea0SAlexander Duyck 57673079ea0SAlexander Duyck /* limit indices to rss_i if MSI-X is disabled */ 57773079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 57873079ea0SAlexander Duyck fcoe_i = rss_i; 57973079ea0SAlexander Duyck 58073079ea0SAlexander Duyck /* attempt to reserve some queues for just FCoE */ 58173079ea0SAlexander Duyck fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); 58273079ea0SAlexander Duyck fcoe->offset = fcoe_i - fcoe->indices; 58373079ea0SAlexander Duyck 58473079ea0SAlexander Duyck fcoe_i -= rss_i; 58573079ea0SAlexander Duyck } 58673079ea0SAlexander Duyck 58773079ea0SAlexander Duyck /* add queues to adapter */ 58873079ea0SAlexander Duyck adapter->num_tx_queues += fcoe_i; 58973079ea0SAlexander Duyck adapter->num_rx_queues += fcoe_i; 59073079ea0SAlexander Duyck } 59173079ea0SAlexander Duyck 59273079ea0SAlexander Duyck #endif 59373079ea0SAlexander Duyck return true; 59473079ea0SAlexander Duyck } 59573079ea0SAlexander Duyck 59673079ea0SAlexander Duyck /** 59749ce9c2cSBen Hutchings * ixgbe_set_rss_queues - Allocate queues for RSS 5988af3c33fSJeff Kirsher * @adapter: board private structure to initialize 5998af3c33fSJeff Kirsher * 6008af3c33fSJeff Kirsher * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 6018af3c33fSJeff Kirsher * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 6028af3c33fSJeff Kirsher * 6038af3c33fSJeff Kirsher **/ 6040b7f5d0bSAlexander Duyck static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) 6058af3c33fSJeff Kirsher { 6060b7f5d0bSAlexander Duyck struct ixgbe_ring_feature *f; 6070b7f5d0bSAlexander Duyck u16 rss_i; 6088af3c33fSJeff Kirsher 6090b7f5d0bSAlexander Duyck /* set mask for 16 queue limit of RSS */ 6100b7f5d0bSAlexander Duyck f = &adapter->ring_feature[RING_F_RSS]; 6110b7f5d0bSAlexander Duyck rss_i = f->limit; 6120b7f5d0bSAlexander Duyck 6130b7f5d0bSAlexander Duyck f->indices = rss_i; 614d411a936SAlexander Duyck f->mask = IXGBE_RSS_16Q_MASK; 6158af3c33fSJeff Kirsher 61639cb681bSAlexander Duyck /* disable ATR by default, it will be configured below */ 61739cb681bSAlexander Duyck adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 61839cb681bSAlexander Duyck 6198af3c33fSJeff Kirsher /* 6200b7f5d0bSAlexander Duyck * Use Flow Director in addition to RSS to ensure the best 6218af3c33fSJeff Kirsher * distribution of flows across cores, even when an FDIR flow 6228af3c33fSJeff Kirsher * isn't matched. 6238af3c33fSJeff Kirsher */ 62439cb681bSAlexander Duyck if (rss_i > 1 && adapter->atr_sample_rate) { 6250b7f5d0bSAlexander Duyck f = &adapter->ring_feature[RING_F_FDIR]; 6260b7f5d0bSAlexander Duyck 627d3cb9869SAlexander Duyck rss_i = f->indices = f->limit; 62839cb681bSAlexander Duyck 62939cb681bSAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 63039cb681bSAlexander Duyck adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 6318af3c33fSJeff Kirsher } 6320b7f5d0bSAlexander Duyck 633d411a936SAlexander Duyck #ifdef IXGBE_FCOE 634d411a936SAlexander Duyck /* 635d411a936SAlexander Duyck * FCoE can exist on the same rings as standard network traffic 636d411a936SAlexander Duyck * however it is preferred to avoid that if possible. In order 637d411a936SAlexander Duyck * to get the best performance we allocate as many FCoE queues 638d411a936SAlexander Duyck * as we can and we place them at the end of the ring array to 639d411a936SAlexander Duyck * avoid sharing queues with standard RSS on systems with 24 or 640d411a936SAlexander Duyck * more CPUs. 641d411a936SAlexander Duyck */ 642d411a936SAlexander Duyck if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 643d411a936SAlexander Duyck struct net_device *dev = adapter->netdev; 644d411a936SAlexander Duyck u16 fcoe_i; 645d411a936SAlexander Duyck 646d411a936SAlexander Duyck f = &adapter->ring_feature[RING_F_FCOE]; 647d411a936SAlexander Duyck 648d411a936SAlexander Duyck /* merge FCoE queues with RSS queues */ 649d411a936SAlexander Duyck fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); 650d411a936SAlexander Duyck fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); 651d411a936SAlexander Duyck 652d411a936SAlexander Duyck /* limit indices to rss_i if MSI-X is disabled */ 653d411a936SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 654d411a936SAlexander Duyck fcoe_i = rss_i; 655d411a936SAlexander Duyck 656d411a936SAlexander Duyck /* attempt to reserve some queues for just FCoE */ 657d411a936SAlexander Duyck f->indices = min_t(u16, fcoe_i, f->limit); 658d411a936SAlexander Duyck f->offset = fcoe_i - f->indices; 659d411a936SAlexander Duyck rss_i = max_t(u16, fcoe_i, rss_i); 660d411a936SAlexander Duyck } 661d411a936SAlexander Duyck 662d411a936SAlexander Duyck #endif /* IXGBE_FCOE */ 6630b7f5d0bSAlexander Duyck adapter->num_rx_queues = rss_i; 6640b7f5d0bSAlexander Duyck adapter->num_tx_queues = rss_i; 6650b7f5d0bSAlexander Duyck 6660b7f5d0bSAlexander Duyck return true; 6678af3c33fSJeff Kirsher } 6688af3c33fSJeff Kirsher 6698af3c33fSJeff Kirsher /** 67049ce9c2cSBen Hutchings * ixgbe_set_num_queues - Allocate queues for device, feature dependent 6718af3c33fSJeff Kirsher * @adapter: board private structure to initialize 6728af3c33fSJeff Kirsher * 6738af3c33fSJeff Kirsher * This is the top level queue allocation routine. The order here is very 6748af3c33fSJeff Kirsher * important, starting with the "most" number of features turned on at once, 6758af3c33fSJeff Kirsher * and ending with the smallest set of features. This way large combinations 6768af3c33fSJeff Kirsher * can be allocated if they're turned on, and smaller combinations are the 6778af3c33fSJeff Kirsher * fallthrough conditions. 6788af3c33fSJeff Kirsher * 6798af3c33fSJeff Kirsher **/ 680ac802f5dSAlexander Duyck static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 6818af3c33fSJeff Kirsher { 6828af3c33fSJeff Kirsher /* Start with base case */ 6838af3c33fSJeff Kirsher adapter->num_rx_queues = 1; 6848af3c33fSJeff Kirsher adapter->num_tx_queues = 1; 6858af3c33fSJeff Kirsher adapter->num_rx_pools = adapter->num_rx_queues; 6868af3c33fSJeff Kirsher adapter->num_rx_queues_per_pool = 1; 6878af3c33fSJeff Kirsher 68873079ea0SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 68973079ea0SAlexander Duyck if (ixgbe_set_dcb_sriov_queues(adapter)) 690ac802f5dSAlexander Duyck return; 6918af3c33fSJeff Kirsher 6928af3c33fSJeff Kirsher if (ixgbe_set_dcb_queues(adapter)) 693ac802f5dSAlexander Duyck return; 6948af3c33fSJeff Kirsher 6958af3c33fSJeff Kirsher #endif 69673079ea0SAlexander Duyck if (ixgbe_set_sriov_queues(adapter)) 69773079ea0SAlexander Duyck return; 69873079ea0SAlexander Duyck 699ac802f5dSAlexander Duyck ixgbe_set_rss_queues(adapter); 7008af3c33fSJeff Kirsher } 7018af3c33fSJeff Kirsher 7023bcf3446SJacob Keller /** 7033bcf3446SJacob Keller * ixgbe_acquire_msix_vectors - acquire MSI-X vectors 7043bcf3446SJacob Keller * @adapter: board private structure 7053bcf3446SJacob Keller * 7063bcf3446SJacob Keller * Attempts to acquire a suitable range of MSI-X vector interrupts. Will 7073bcf3446SJacob Keller * return a negative error code if unable to acquire MSI-X vectors for any 7083bcf3446SJacob Keller * reason. 7093bcf3446SJacob Keller */ 7103bcf3446SJacob Keller static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) 7118af3c33fSJeff Kirsher { 7123bcf3446SJacob Keller struct ixgbe_hw *hw = &adapter->hw; 7133bcf3446SJacob Keller int i, vectors, vector_threshold; 7148af3c33fSJeff Kirsher 7153bcf3446SJacob Keller /* We start by asking for one vector per queue pair */ 7163bcf3446SJacob Keller vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); 7173bcf3446SJacob Keller 7183bcf3446SJacob Keller /* It is easy to be greedy for MSI-X vectors. However, it really 7193bcf3446SJacob Keller * doesn't do much good if we have a lot more vectors than CPUs. We'll 7203bcf3446SJacob Keller * be somewhat conservative and only ask for (roughly) the same number 7213bcf3446SJacob Keller * of vectors as there are CPUs. 7223bcf3446SJacob Keller */ 7233bcf3446SJacob Keller vectors = min_t(int, vectors, num_online_cpus()); 7243bcf3446SJacob Keller 7253bcf3446SJacob Keller /* Some vectors are necessary for non-queue interrupts */ 7263bcf3446SJacob Keller vectors += NON_Q_VECTORS; 7273bcf3446SJacob Keller 7283bcf3446SJacob Keller /* Hardware can only support a maximum of hw.mac->max_msix_vectors. 7293bcf3446SJacob Keller * With features such as RSS and VMDq, we can easily surpass the 7303bcf3446SJacob Keller * number of Rx and Tx descriptor queues supported by our device. 7313bcf3446SJacob Keller * Thus, we cap the maximum in the rare cases where the CPU count also 7323bcf3446SJacob Keller * exceeds our vector limit 7333bcf3446SJacob Keller */ 7343bcf3446SJacob Keller vectors = min_t(int, vectors, hw->mac.max_msix_vectors); 7353bcf3446SJacob Keller 7363bcf3446SJacob Keller /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] 7373bcf3446SJacob Keller * handler, and (2) an Other (Link Status Change, etc.) handler. 7388af3c33fSJeff Kirsher */ 7398af3c33fSJeff Kirsher vector_threshold = MIN_MSIX_COUNT; 7408af3c33fSJeff Kirsher 741027bb561SJacob Keller adapter->msix_entries = kcalloc(vectors, 742027bb561SJacob Keller sizeof(struct msix_entry), 743027bb561SJacob Keller GFP_KERNEL); 744027bb561SJacob Keller if (!adapter->msix_entries) 745027bb561SJacob Keller return -ENOMEM; 746027bb561SJacob Keller 747027bb561SJacob Keller for (i = 0; i < vectors; i++) 748027bb561SJacob Keller adapter->msix_entries[i].entry = i; 749027bb561SJacob Keller 750b45e620cSAlexander Gordeev vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 751b45e620cSAlexander Gordeev vector_threshold, vectors); 7528af3c33fSJeff Kirsher 753b45e620cSAlexander Gordeev if (vectors < 0) { 754493043e5SJacob Keller /* A negative count of allocated vectors indicates an error in 755493043e5SJacob Keller * acquiring within the specified range of MSI-X vectors 7568af3c33fSJeff Kirsher */ 757493043e5SJacob Keller e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", 758493043e5SJacob Keller vectors); 759493043e5SJacob Keller 7608af3c33fSJeff Kirsher adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 7618af3c33fSJeff Kirsher kfree(adapter->msix_entries); 7628af3c33fSJeff Kirsher adapter->msix_entries = NULL; 763d7de3c6eSJacob Keller 764d7de3c6eSJacob Keller return vectors; 765d7de3c6eSJacob Keller } 766d7de3c6eSJacob Keller 767d7de3c6eSJacob Keller /* we successfully allocated some number of vectors within our 768d7de3c6eSJacob Keller * requested range. 769d7de3c6eSJacob Keller */ 770d7de3c6eSJacob Keller adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; 771d7de3c6eSJacob Keller 772d7de3c6eSJacob Keller /* Adjust for only the vectors we'll use, which is minimum 773d7de3c6eSJacob Keller * of max_q_vectors, or the number of vectors we were allocated. 7748af3c33fSJeff Kirsher */ 77549c7ffbeSAlexander Duyck vectors -= NON_Q_VECTORS; 776d7de3c6eSJacob Keller adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); 777d7de3c6eSJacob Keller 778d7de3c6eSJacob Keller return 0; 7798af3c33fSJeff Kirsher } 7808af3c33fSJeff Kirsher 7818af3c33fSJeff Kirsher static void ixgbe_add_ring(struct ixgbe_ring *ring, 7828af3c33fSJeff Kirsher struct ixgbe_ring_container *head) 7838af3c33fSJeff Kirsher { 7848af3c33fSJeff Kirsher ring->next = head->ring; 7858af3c33fSJeff Kirsher head->ring = ring; 7868af3c33fSJeff Kirsher head->count++; 7878af3c33fSJeff Kirsher } 7888af3c33fSJeff Kirsher 7898af3c33fSJeff Kirsher /** 7908af3c33fSJeff Kirsher * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector 7918af3c33fSJeff Kirsher * @adapter: board private structure to initialize 792d0bfcdfdSAlexander Duyck * @v_count: q_vectors allocated on adapter, used for ring interleaving 7938af3c33fSJeff Kirsher * @v_idx: index of vector in adapter struct 794d0bfcdfdSAlexander Duyck * @txr_count: total number of Tx rings to allocate 795d0bfcdfdSAlexander Duyck * @txr_idx: index of first Tx ring to allocate 796d0bfcdfdSAlexander Duyck * @rxr_count: total number of Rx rings to allocate 797d0bfcdfdSAlexander Duyck * @rxr_idx: index of first Rx ring to allocate 7988af3c33fSJeff Kirsher * 7998af3c33fSJeff Kirsher * We allocate one q_vector. If allocation fails we return -ENOMEM. 8008af3c33fSJeff Kirsher **/ 801d0bfcdfdSAlexander Duyck static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, 802d0bfcdfdSAlexander Duyck int v_count, int v_idx, 8038af3c33fSJeff Kirsher int txr_count, int txr_idx, 8048af3c33fSJeff Kirsher int rxr_count, int rxr_idx) 8058af3c33fSJeff Kirsher { 8068af3c33fSJeff Kirsher struct ixgbe_q_vector *q_vector; 8078af3c33fSJeff Kirsher struct ixgbe_ring *ring; 808fd786b7bSAlexander Duyck int node = NUMA_NO_NODE; 8098af3c33fSJeff Kirsher int cpu = -1; 8108af3c33fSJeff Kirsher int ring_count, size; 811fd786b7bSAlexander Duyck u8 tcs = netdev_get_num_tc(adapter->netdev); 8128af3c33fSJeff Kirsher 8138af3c33fSJeff Kirsher ring_count = txr_count + rxr_count; 8148af3c33fSJeff Kirsher size = sizeof(struct ixgbe_q_vector) + 8158af3c33fSJeff Kirsher (sizeof(struct ixgbe_ring) * ring_count); 8168af3c33fSJeff Kirsher 8178af3c33fSJeff Kirsher /* customize cpu for Flow Director mapping */ 818fd786b7bSAlexander Duyck if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { 819fd786b7bSAlexander Duyck u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 820fd786b7bSAlexander Duyck if (rss_i > 1 && adapter->atr_sample_rate) { 8218af3c33fSJeff Kirsher if (cpu_online(v_idx)) { 8228af3c33fSJeff Kirsher cpu = v_idx; 8238af3c33fSJeff Kirsher node = cpu_to_node(cpu); 8248af3c33fSJeff Kirsher } 8258af3c33fSJeff Kirsher } 826fd786b7bSAlexander Duyck } 8278af3c33fSJeff Kirsher 8288af3c33fSJeff Kirsher /* allocate q_vector and rings */ 8298af3c33fSJeff Kirsher q_vector = kzalloc_node(size, GFP_KERNEL, node); 8308af3c33fSJeff Kirsher if (!q_vector) 8318af3c33fSJeff Kirsher q_vector = kzalloc(size, GFP_KERNEL); 8328af3c33fSJeff Kirsher if (!q_vector) 8338af3c33fSJeff Kirsher return -ENOMEM; 8348af3c33fSJeff Kirsher 8358af3c33fSJeff Kirsher /* setup affinity mask and node */ 8368af3c33fSJeff Kirsher if (cpu != -1) 8378af3c33fSJeff Kirsher cpumask_set_cpu(cpu, &q_vector->affinity_mask); 8388af3c33fSJeff Kirsher q_vector->numa_node = node; 8398af3c33fSJeff Kirsher 840245f292dSAlexander Duyck #ifdef CONFIG_IXGBE_DCA 841245f292dSAlexander Duyck /* initialize CPU for DCA */ 842245f292dSAlexander Duyck q_vector->cpu = -1; 843245f292dSAlexander Duyck 844245f292dSAlexander Duyck #endif 8458af3c33fSJeff Kirsher /* initialize NAPI */ 8468af3c33fSJeff Kirsher netif_napi_add(adapter->netdev, &q_vector->napi, 8478af3c33fSJeff Kirsher ixgbe_poll, 64); 8488af3c33fSJeff Kirsher 849adc81090SAlexander Duyck #ifdef CONFIG_NET_RX_BUSY_POLL 850adc81090SAlexander Duyck /* initialize busy poll */ 851adc81090SAlexander Duyck atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); 852adc81090SAlexander Duyck 853adc81090SAlexander Duyck #endif 8548af3c33fSJeff Kirsher /* tie q_vector and adapter together */ 8558af3c33fSJeff Kirsher adapter->q_vector[v_idx] = q_vector; 8568af3c33fSJeff Kirsher q_vector->adapter = adapter; 8578af3c33fSJeff Kirsher q_vector->v_idx = v_idx; 8588af3c33fSJeff Kirsher 8598af3c33fSJeff Kirsher /* initialize work limits */ 8608af3c33fSJeff Kirsher q_vector->tx.work_limit = adapter->tx_work_limit; 8618af3c33fSJeff Kirsher 8628af3c33fSJeff Kirsher /* initialize pointer to rings */ 8638af3c33fSJeff Kirsher ring = q_vector->ring; 8648af3c33fSJeff Kirsher 8653af3361eSEmil Tantilov /* intialize ITR */ 8663af3361eSEmil Tantilov if (txr_count && !rxr_count) { 8673af3361eSEmil Tantilov /* tx only vector */ 8683af3361eSEmil Tantilov if (adapter->tx_itr_setting == 1) 8698ac34f10SAlexander Duyck q_vector->itr = IXGBE_12K_ITR; 8703af3361eSEmil Tantilov else 8713af3361eSEmil Tantilov q_vector->itr = adapter->tx_itr_setting; 8723af3361eSEmil Tantilov } else { 8733af3361eSEmil Tantilov /* rx or rx/tx vector */ 8743af3361eSEmil Tantilov if (adapter->rx_itr_setting == 1) 8753af3361eSEmil Tantilov q_vector->itr = IXGBE_20K_ITR; 8763af3361eSEmil Tantilov else 8773af3361eSEmil Tantilov q_vector->itr = adapter->rx_itr_setting; 8783af3361eSEmil Tantilov } 8793af3361eSEmil Tantilov 8808af3c33fSJeff Kirsher while (txr_count) { 8818af3c33fSJeff Kirsher /* assign generic ring traits */ 8828af3c33fSJeff Kirsher ring->dev = &adapter->pdev->dev; 8838af3c33fSJeff Kirsher ring->netdev = adapter->netdev; 8848af3c33fSJeff Kirsher 8858af3c33fSJeff Kirsher /* configure backlink on ring */ 8868af3c33fSJeff Kirsher ring->q_vector = q_vector; 8878af3c33fSJeff Kirsher 8888af3c33fSJeff Kirsher /* update q_vector Tx values */ 8898af3c33fSJeff Kirsher ixgbe_add_ring(ring, &q_vector->tx); 8908af3c33fSJeff Kirsher 8918af3c33fSJeff Kirsher /* apply Tx specific ring traits */ 8928af3c33fSJeff Kirsher ring->count = adapter->tx_ring_count; 8932a47fa45SJohn Fastabend if (adapter->num_rx_pools > 1) 8942a47fa45SJohn Fastabend ring->queue_index = 8952a47fa45SJohn Fastabend txr_idx % adapter->num_rx_queues_per_pool; 8962a47fa45SJohn Fastabend else 8978af3c33fSJeff Kirsher ring->queue_index = txr_idx; 8988af3c33fSJeff Kirsher 8998af3c33fSJeff Kirsher /* assign ring to adapter */ 9008af3c33fSJeff Kirsher adapter->tx_ring[txr_idx] = ring; 9018af3c33fSJeff Kirsher 9028af3c33fSJeff Kirsher /* update count and index */ 9038af3c33fSJeff Kirsher txr_count--; 904d0bfcdfdSAlexander Duyck txr_idx += v_count; 9058af3c33fSJeff Kirsher 9068af3c33fSJeff Kirsher /* push pointer to next ring */ 9078af3c33fSJeff Kirsher ring++; 9088af3c33fSJeff Kirsher } 9098af3c33fSJeff Kirsher 9108af3c33fSJeff Kirsher while (rxr_count) { 9118af3c33fSJeff Kirsher /* assign generic ring traits */ 9128af3c33fSJeff Kirsher ring->dev = &adapter->pdev->dev; 9138af3c33fSJeff Kirsher ring->netdev = adapter->netdev; 9148af3c33fSJeff Kirsher 9158af3c33fSJeff Kirsher /* configure backlink on ring */ 9168af3c33fSJeff Kirsher ring->q_vector = q_vector; 9178af3c33fSJeff Kirsher 9188af3c33fSJeff Kirsher /* update q_vector Rx values */ 9198af3c33fSJeff Kirsher ixgbe_add_ring(ring, &q_vector->rx); 9208af3c33fSJeff Kirsher 9218af3c33fSJeff Kirsher /* 9228af3c33fSJeff Kirsher * 82599 errata, UDP frames with a 0 checksum 9238af3c33fSJeff Kirsher * can be marked as checksum errors. 9248af3c33fSJeff Kirsher */ 9258af3c33fSJeff Kirsher if (adapter->hw.mac.type == ixgbe_mac_82599EB) 9268af3c33fSJeff Kirsher set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 9278af3c33fSJeff Kirsher 928b2db497eSAlexander Duyck #ifdef IXGBE_FCOE 929b2db497eSAlexander Duyck if (adapter->netdev->features & NETIF_F_FCOE_MTU) { 930b2db497eSAlexander Duyck struct ixgbe_ring_feature *f; 931b2db497eSAlexander Duyck f = &adapter->ring_feature[RING_F_FCOE]; 932e4b317e9SAlexander Duyck if ((rxr_idx >= f->offset) && 933e4b317e9SAlexander Duyck (rxr_idx < f->offset + f->indices)) 93457efd44cSAlexander Duyck set_bit(__IXGBE_RX_FCOE, &ring->state); 935b2db497eSAlexander Duyck } 936b2db497eSAlexander Duyck 937b2db497eSAlexander Duyck #endif /* IXGBE_FCOE */ 9388af3c33fSJeff Kirsher /* apply Rx specific ring traits */ 9398af3c33fSJeff Kirsher ring->count = adapter->rx_ring_count; 9402a47fa45SJohn Fastabend if (adapter->num_rx_pools > 1) 9412a47fa45SJohn Fastabend ring->queue_index = 9422a47fa45SJohn Fastabend rxr_idx % adapter->num_rx_queues_per_pool; 9432a47fa45SJohn Fastabend else 9448af3c33fSJeff Kirsher ring->queue_index = rxr_idx; 9458af3c33fSJeff Kirsher 9468af3c33fSJeff Kirsher /* assign ring to adapter */ 9478af3c33fSJeff Kirsher adapter->rx_ring[rxr_idx] = ring; 9488af3c33fSJeff Kirsher 9498af3c33fSJeff Kirsher /* update count and index */ 9508af3c33fSJeff Kirsher rxr_count--; 951d0bfcdfdSAlexander Duyck rxr_idx += v_count; 9528af3c33fSJeff Kirsher 9538af3c33fSJeff Kirsher /* push pointer to next ring */ 9548af3c33fSJeff Kirsher ring++; 9558af3c33fSJeff Kirsher } 9568af3c33fSJeff Kirsher 9578af3c33fSJeff Kirsher return 0; 9588af3c33fSJeff Kirsher } 9598af3c33fSJeff Kirsher 9608af3c33fSJeff Kirsher /** 9618af3c33fSJeff Kirsher * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector 9628af3c33fSJeff Kirsher * @adapter: board private structure to initialize 9638af3c33fSJeff Kirsher * @v_idx: Index of vector to be freed 9648af3c33fSJeff Kirsher * 9658af3c33fSJeff Kirsher * This function frees the memory allocated to the q_vector. In addition if 9668af3c33fSJeff Kirsher * NAPI is enabled it will delete any references to the NAPI struct prior 9678af3c33fSJeff Kirsher * to freeing the q_vector. 9688af3c33fSJeff Kirsher **/ 9698af3c33fSJeff Kirsher static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) 9708af3c33fSJeff Kirsher { 9718af3c33fSJeff Kirsher struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; 9728af3c33fSJeff Kirsher struct ixgbe_ring *ring; 9738af3c33fSJeff Kirsher 9748af3c33fSJeff Kirsher ixgbe_for_each_ring(ring, q_vector->tx) 9758af3c33fSJeff Kirsher adapter->tx_ring[ring->queue_index] = NULL; 9768af3c33fSJeff Kirsher 9778af3c33fSJeff Kirsher ixgbe_for_each_ring(ring, q_vector->rx) 9788af3c33fSJeff Kirsher adapter->rx_ring[ring->queue_index] = NULL; 9798af3c33fSJeff Kirsher 9808af3c33fSJeff Kirsher adapter->q_vector[v_idx] = NULL; 9815a85e737SEliezer Tamir napi_hash_del(&q_vector->napi); 9828af3c33fSJeff Kirsher netif_napi_del(&q_vector->napi); 9838af3c33fSJeff Kirsher 9848af3c33fSJeff Kirsher /* 9858af3c33fSJeff Kirsher * ixgbe_get_stats64() might access the rings on this vector, 9868af3c33fSJeff Kirsher * we must wait a grace period before freeing it. 9878af3c33fSJeff Kirsher */ 9888af3c33fSJeff Kirsher kfree_rcu(q_vector, rcu); 9898af3c33fSJeff Kirsher } 9908af3c33fSJeff Kirsher 9918af3c33fSJeff Kirsher /** 9928af3c33fSJeff Kirsher * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors 9938af3c33fSJeff Kirsher * @adapter: board private structure to initialize 9948af3c33fSJeff Kirsher * 9958af3c33fSJeff Kirsher * We allocate one q_vector per queue interrupt. If allocation fails we 9968af3c33fSJeff Kirsher * return -ENOMEM. 9978af3c33fSJeff Kirsher **/ 9988af3c33fSJeff Kirsher static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) 9998af3c33fSJeff Kirsher { 100049c7ffbeSAlexander Duyck int q_vectors = adapter->num_q_vectors; 10018af3c33fSJeff Kirsher int rxr_remaining = adapter->num_rx_queues; 10028af3c33fSJeff Kirsher int txr_remaining = adapter->num_tx_queues; 10038af3c33fSJeff Kirsher int rxr_idx = 0, txr_idx = 0, v_idx = 0; 10048af3c33fSJeff Kirsher int err; 10058af3c33fSJeff Kirsher 10068af3c33fSJeff Kirsher /* only one q_vector if MSI-X is disabled. */ 10078af3c33fSJeff Kirsher if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 10088af3c33fSJeff Kirsher q_vectors = 1; 10098af3c33fSJeff Kirsher 10108af3c33fSJeff Kirsher if (q_vectors >= (rxr_remaining + txr_remaining)) { 1011d0bfcdfdSAlexander Duyck for (; rxr_remaining; v_idx++) { 1012d0bfcdfdSAlexander Duyck err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 1013d0bfcdfdSAlexander Duyck 0, 0, 1, rxr_idx); 10148af3c33fSJeff Kirsher 10158af3c33fSJeff Kirsher if (err) 10168af3c33fSJeff Kirsher goto err_out; 10178af3c33fSJeff Kirsher 10188af3c33fSJeff Kirsher /* update counts and index */ 1019d0bfcdfdSAlexander Duyck rxr_remaining--; 1020d0bfcdfdSAlexander Duyck rxr_idx++; 10218af3c33fSJeff Kirsher } 10228af3c33fSJeff Kirsher } 10238af3c33fSJeff Kirsher 1024d0bfcdfdSAlexander Duyck for (; v_idx < q_vectors; v_idx++) { 1025d0bfcdfdSAlexander Duyck int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 1026d0bfcdfdSAlexander Duyck int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 1027d0bfcdfdSAlexander Duyck err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 10288af3c33fSJeff Kirsher tqpv, txr_idx, 10298af3c33fSJeff Kirsher rqpv, rxr_idx); 10308af3c33fSJeff Kirsher 10318af3c33fSJeff Kirsher if (err) 10328af3c33fSJeff Kirsher goto err_out; 10338af3c33fSJeff Kirsher 10348af3c33fSJeff Kirsher /* update counts and index */ 10358af3c33fSJeff Kirsher rxr_remaining -= rqpv; 10368af3c33fSJeff Kirsher txr_remaining -= tqpv; 1037d0bfcdfdSAlexander Duyck rxr_idx++; 1038d0bfcdfdSAlexander Duyck txr_idx++; 10398af3c33fSJeff Kirsher } 10408af3c33fSJeff Kirsher 10418af3c33fSJeff Kirsher return 0; 10428af3c33fSJeff Kirsher 10438af3c33fSJeff Kirsher err_out: 104449c7ffbeSAlexander Duyck adapter->num_tx_queues = 0; 104549c7ffbeSAlexander Duyck adapter->num_rx_queues = 0; 104649c7ffbeSAlexander Duyck adapter->num_q_vectors = 0; 104749c7ffbeSAlexander Duyck 104849c7ffbeSAlexander Duyck while (v_idx--) 10498af3c33fSJeff Kirsher ixgbe_free_q_vector(adapter, v_idx); 10508af3c33fSJeff Kirsher 10518af3c33fSJeff Kirsher return -ENOMEM; 10528af3c33fSJeff Kirsher } 10538af3c33fSJeff Kirsher 10548af3c33fSJeff Kirsher /** 10558af3c33fSJeff Kirsher * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors 10568af3c33fSJeff Kirsher * @adapter: board private structure to initialize 10578af3c33fSJeff Kirsher * 10588af3c33fSJeff Kirsher * This function frees the memory allocated to the q_vectors. In addition if 10598af3c33fSJeff Kirsher * NAPI is enabled it will delete any references to the NAPI struct prior 10608af3c33fSJeff Kirsher * to freeing the q_vector. 10618af3c33fSJeff Kirsher **/ 10628af3c33fSJeff Kirsher static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) 10638af3c33fSJeff Kirsher { 106449c7ffbeSAlexander Duyck int v_idx = adapter->num_q_vectors; 10658af3c33fSJeff Kirsher 106649c7ffbeSAlexander Duyck adapter->num_tx_queues = 0; 106749c7ffbeSAlexander Duyck adapter->num_rx_queues = 0; 106849c7ffbeSAlexander Duyck adapter->num_q_vectors = 0; 10698af3c33fSJeff Kirsher 107049c7ffbeSAlexander Duyck while (v_idx--) 10718af3c33fSJeff Kirsher ixgbe_free_q_vector(adapter, v_idx); 10728af3c33fSJeff Kirsher } 10738af3c33fSJeff Kirsher 10748af3c33fSJeff Kirsher static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) 10758af3c33fSJeff Kirsher { 10768af3c33fSJeff Kirsher if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 10778af3c33fSJeff Kirsher adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 10788af3c33fSJeff Kirsher pci_disable_msix(adapter->pdev); 10798af3c33fSJeff Kirsher kfree(adapter->msix_entries); 10808af3c33fSJeff Kirsher adapter->msix_entries = NULL; 10818af3c33fSJeff Kirsher } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 10828af3c33fSJeff Kirsher adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; 10838af3c33fSJeff Kirsher pci_disable_msi(adapter->pdev); 10848af3c33fSJeff Kirsher } 10858af3c33fSJeff Kirsher } 10868af3c33fSJeff Kirsher 10878af3c33fSJeff Kirsher /** 10888af3c33fSJeff Kirsher * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported 10898af3c33fSJeff Kirsher * @adapter: board private structure to initialize 10908af3c33fSJeff Kirsher * 10918af3c33fSJeff Kirsher * Attempt to configure the interrupts using the best available 10928af3c33fSJeff Kirsher * capabilities of the hardware and the kernel. 10938af3c33fSJeff Kirsher **/ 1094ac802f5dSAlexander Duyck static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) 10958af3c33fSJeff Kirsher { 10963bcf3446SJacob Keller int err; 10978af3c33fSJeff Kirsher 10983bcf3446SJacob Keller /* We will try to get MSI-X interrupts first */ 10993bcf3446SJacob Keller if (!ixgbe_acquire_msix_vectors(adapter)) 1100ac802f5dSAlexander Duyck return; 11018af3c33fSJeff Kirsher 1102eec66731SJacob Keller /* At this point, we do not have MSI-X capabilities. We need to 1103eec66731SJacob Keller * reconfigure or disable various features which require MSI-X 1104eec66731SJacob Keller * capability. 1105eec66731SJacob Keller */ 1106eec66731SJacob Keller 1107c1c55f63SJacob Keller /* Disable DCB unless we only have a single traffic class */ 1108b724e9f2SAlexander Duyck if (netdev_get_num_tc(adapter->netdev) > 1) { 1109c1c55f63SJacob Keller e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n"); 1110b724e9f2SAlexander Duyck netdev_reset_tc(adapter->netdev); 111139cb681bSAlexander Duyck 1112b724e9f2SAlexander Duyck if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1113b724e9f2SAlexander Duyck adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 1114b724e9f2SAlexander Duyck 1115b724e9f2SAlexander Duyck adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 1116b724e9f2SAlexander Duyck adapter->temp_dcb_cfg.pfc_mode_enable = false; 1117b724e9f2SAlexander Duyck adapter->dcb_cfg.pfc_mode_enable = false; 1118b724e9f2SAlexander Duyck } 1119d786cf7bSJacob Keller 1120b724e9f2SAlexander Duyck adapter->dcb_cfg.num_tcs.pg_tcs = 1; 1121b724e9f2SAlexander Duyck adapter->dcb_cfg.num_tcs.pfc_tcs = 1; 1122b724e9f2SAlexander Duyck 1123d786cf7bSJacob Keller /* Disable SR-IOV support */ 1124d786cf7bSJacob Keller e_dev_warn("Disabling SR-IOV support\n"); 11258af3c33fSJeff Kirsher ixgbe_disable_sriov(adapter); 11268af3c33fSJeff Kirsher 1127d786cf7bSJacob Keller /* Disable RSS */ 1128d786cf7bSJacob Keller e_dev_warn("Disabling RSS support\n"); 1129fbe7ca7fSAlexander Duyck adapter->ring_feature[RING_F_RSS].limit = 1; 1130b724e9f2SAlexander Duyck 1131eec66731SJacob Keller /* recalculate number of queues now that many features have been 1132eec66731SJacob Keller * changed or disabled. 1133eec66731SJacob Keller */ 1134ac802f5dSAlexander Duyck ixgbe_set_num_queues(adapter); 113549c7ffbeSAlexander Duyck adapter->num_q_vectors = 1; 113649c7ffbeSAlexander Duyck 11378af3c33fSJeff Kirsher err = pci_enable_msi(adapter->pdev); 11385d31b48aSJacob Keller if (err) 11395d31b48aSJacob Keller e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", 11406ec1b71fSJacob Keller err); 11415d31b48aSJacob Keller else 1142ac802f5dSAlexander Duyck adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 11438af3c33fSJeff Kirsher } 11448af3c33fSJeff Kirsher 11458af3c33fSJeff Kirsher /** 11468af3c33fSJeff Kirsher * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme 11478af3c33fSJeff Kirsher * @adapter: board private structure to initialize 11488af3c33fSJeff Kirsher * 11498af3c33fSJeff Kirsher * We determine which interrupt scheme to use based on... 11508af3c33fSJeff Kirsher * - Kernel support (MSI, MSI-X) 11518af3c33fSJeff Kirsher * - which can be user-defined (via MODULE_PARAM) 11528af3c33fSJeff Kirsher * - Hardware queue count (num_*_queues) 11538af3c33fSJeff Kirsher * - defined by miscellaneous hardware support/features (RSS, etc.) 11548af3c33fSJeff Kirsher **/ 11558af3c33fSJeff Kirsher int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 11568af3c33fSJeff Kirsher { 11578af3c33fSJeff Kirsher int err; 11588af3c33fSJeff Kirsher 11598af3c33fSJeff Kirsher /* Number of supported queues */ 1160ac802f5dSAlexander Duyck ixgbe_set_num_queues(adapter); 11618af3c33fSJeff Kirsher 1162ac802f5dSAlexander Duyck /* Set interrupt mode */ 1163ac802f5dSAlexander Duyck ixgbe_set_interrupt_capability(adapter); 11648af3c33fSJeff Kirsher 11658af3c33fSJeff Kirsher err = ixgbe_alloc_q_vectors(adapter); 11668af3c33fSJeff Kirsher if (err) { 11678af3c33fSJeff Kirsher e_dev_err("Unable to allocate memory for queue vectors\n"); 11688af3c33fSJeff Kirsher goto err_alloc_q_vectors; 11698af3c33fSJeff Kirsher } 11708af3c33fSJeff Kirsher 11718af3c33fSJeff Kirsher ixgbe_cache_ring_register(adapter); 11728af3c33fSJeff Kirsher 11738af3c33fSJeff Kirsher e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", 11748af3c33fSJeff Kirsher (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", 11758af3c33fSJeff Kirsher adapter->num_rx_queues, adapter->num_tx_queues); 11768af3c33fSJeff Kirsher 11778af3c33fSJeff Kirsher set_bit(__IXGBE_DOWN, &adapter->state); 11788af3c33fSJeff Kirsher 11798af3c33fSJeff Kirsher return 0; 11808af3c33fSJeff Kirsher 11818af3c33fSJeff Kirsher err_alloc_q_vectors: 11828af3c33fSJeff Kirsher ixgbe_reset_interrupt_capability(adapter); 11838af3c33fSJeff Kirsher return err; 11848af3c33fSJeff Kirsher } 11858af3c33fSJeff Kirsher 11868af3c33fSJeff Kirsher /** 11878af3c33fSJeff Kirsher * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings 11888af3c33fSJeff Kirsher * @adapter: board private structure to clear interrupt scheme on 11898af3c33fSJeff Kirsher * 11908af3c33fSJeff Kirsher * We go through and clear interrupt specific resources and reset the structure 11918af3c33fSJeff Kirsher * to pre-load conditions 11928af3c33fSJeff Kirsher **/ 11938af3c33fSJeff Kirsher void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) 11948af3c33fSJeff Kirsher { 11958af3c33fSJeff Kirsher adapter->num_tx_queues = 0; 11968af3c33fSJeff Kirsher adapter->num_rx_queues = 0; 11978af3c33fSJeff Kirsher 11988af3c33fSJeff Kirsher ixgbe_free_q_vectors(adapter); 11998af3c33fSJeff Kirsher ixgbe_reset_interrupt_capability(adapter); 12008af3c33fSJeff Kirsher } 12018af3c33fSJeff Kirsher 12028af3c33fSJeff Kirsher void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, 12038af3c33fSJeff Kirsher u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) 12048af3c33fSJeff Kirsher { 12058af3c33fSJeff Kirsher struct ixgbe_adv_tx_context_desc *context_desc; 12068af3c33fSJeff Kirsher u16 i = tx_ring->next_to_use; 12078af3c33fSJeff Kirsher 12088af3c33fSJeff Kirsher context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); 12098af3c33fSJeff Kirsher 12108af3c33fSJeff Kirsher i++; 12118af3c33fSJeff Kirsher tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 12128af3c33fSJeff Kirsher 12138af3c33fSJeff Kirsher /* set bits to identify this as an advanced context descriptor */ 12148af3c33fSJeff Kirsher type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 12158af3c33fSJeff Kirsher 12168af3c33fSJeff Kirsher context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 12178af3c33fSJeff Kirsher context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); 12188af3c33fSJeff Kirsher context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 12198af3c33fSJeff Kirsher context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 12208af3c33fSJeff Kirsher } 12218af3c33fSJeff Kirsher 1222