18af3c33fSJeff Kirsher /******************************************************************************* 28af3c33fSJeff Kirsher 38af3c33fSJeff Kirsher Intel 10 Gigabit PCI Express Linux driver 449425dfcSMark Rustad Copyright(c) 1999 - 2016 Intel Corporation. 58af3c33fSJeff Kirsher 68af3c33fSJeff Kirsher This program is free software; you can redistribute it and/or modify it 78af3c33fSJeff Kirsher under the terms and conditions of the GNU General Public License, 88af3c33fSJeff Kirsher version 2, as published by the Free Software Foundation. 98af3c33fSJeff Kirsher 108af3c33fSJeff Kirsher This program is distributed in the hope it will be useful, but WITHOUT 118af3c33fSJeff Kirsher ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 128af3c33fSJeff Kirsher FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 138af3c33fSJeff Kirsher more details. 148af3c33fSJeff Kirsher 158af3c33fSJeff Kirsher You should have received a copy of the GNU General Public License along with 168af3c33fSJeff Kirsher this program; if not, write to the Free Software Foundation, Inc., 178af3c33fSJeff Kirsher 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 188af3c33fSJeff Kirsher 198af3c33fSJeff Kirsher The full GNU General Public License is included in this distribution in 208af3c33fSJeff Kirsher the file called "COPYING". 218af3c33fSJeff Kirsher 228af3c33fSJeff Kirsher Contact Information: 23b89aae71SJacob Keller Linux NICS <linux.nics@intel.com> 248af3c33fSJeff Kirsher e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 258af3c33fSJeff Kirsher Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 268af3c33fSJeff Kirsher 278af3c33fSJeff Kirsher *******************************************************************************/ 288af3c33fSJeff Kirsher 298af3c33fSJeff Kirsher #include "ixgbe.h" 308af3c33fSJeff Kirsher #include "ixgbe_sriov.h" 318af3c33fSJeff Kirsher 32800bd607SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 3373079ea0SAlexander Duyck /** 3473079ea0SAlexander Duyck * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV 3573079ea0SAlexander Duyck * @adapter: board private structure to initialize 3673079ea0SAlexander Duyck * 3773079ea0SAlexander Duyck * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It 3873079ea0SAlexander Duyck * will also try to cache the proper offsets if RSS/FCoE are enabled along 3973079ea0SAlexander Duyck * with VMDq. 4073079ea0SAlexander Duyck * 4173079ea0SAlexander Duyck **/ 4273079ea0SAlexander Duyck static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) 4373079ea0SAlexander Duyck { 4473079ea0SAlexander Duyck #ifdef IXGBE_FCOE 4573079ea0SAlexander Duyck struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 4673079ea0SAlexander Duyck #endif /* IXGBE_FCOE */ 4773079ea0SAlexander Duyck struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 4873079ea0SAlexander Duyck int i; 49b5f69ccfSAlexander Duyck u16 reg_idx, pool; 500efbf12bSAlexander Duyck u8 tcs = adapter->hw_tcs; 5173079ea0SAlexander Duyck 5273079ea0SAlexander Duyck /* verify we have DCB queueing enabled before proceeding */ 5373079ea0SAlexander Duyck if (tcs <= 1) 5473079ea0SAlexander Duyck return false; 5573079ea0SAlexander Duyck 5673079ea0SAlexander Duyck /* verify we have VMDq enabled before proceeding */ 5773079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 5873079ea0SAlexander Duyck return false; 5973079ea0SAlexander Duyck 6073079ea0SAlexander Duyck /* start at VMDq register offset for SR-IOV enabled setups */ 
61b5f69ccfSAlexander Duyck pool = 0; 6273079ea0SAlexander Duyck reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 63b5f69ccfSAlexander Duyck for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) { 6473079ea0SAlexander Duyck /* If we are greater than indices move to next pool */ 65b5f69ccfSAlexander Duyck if ((reg_idx & ~vmdq->mask) >= tcs) { 66b5f69ccfSAlexander Duyck pool++; 6773079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 68b5f69ccfSAlexander Duyck } 6973079ea0SAlexander Duyck adapter->rx_ring[i]->reg_idx = reg_idx; 70b5f69ccfSAlexander Duyck adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev; 7173079ea0SAlexander Duyck } 7273079ea0SAlexander Duyck 7373079ea0SAlexander Duyck reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 7473079ea0SAlexander Duyck for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { 7573079ea0SAlexander Duyck /* If we are greater than indices move to next pool */ 7673079ea0SAlexander Duyck if ((reg_idx & ~vmdq->mask) >= tcs) 7773079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 7873079ea0SAlexander Duyck adapter->tx_ring[i]->reg_idx = reg_idx; 7973079ea0SAlexander Duyck } 8073079ea0SAlexander Duyck 8173079ea0SAlexander Duyck #ifdef IXGBE_FCOE 8273079ea0SAlexander Duyck /* nothing to do if FCoE is disabled */ 8373079ea0SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 8473079ea0SAlexander Duyck return true; 8573079ea0SAlexander Duyck 8673079ea0SAlexander Duyck /* The work is already done if the FCoE ring is shared */ 8773079ea0SAlexander Duyck if (fcoe->offset < tcs) 8873079ea0SAlexander Duyck return true; 8973079ea0SAlexander Duyck 9073079ea0SAlexander Duyck /* The FCoE rings exist separately, we need to move their reg_idx */ 9173079ea0SAlexander Duyck if (fcoe->indices) { 9273079ea0SAlexander Duyck u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); 9373079ea0SAlexander Duyck u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); 9473079ea0SAlexander Duyck 
9573079ea0SAlexander Duyck reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; 9673079ea0SAlexander Duyck for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { 9773079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; 9873079ea0SAlexander Duyck adapter->rx_ring[i]->reg_idx = reg_idx; 99b5f69ccfSAlexander Duyck adapter->rx_ring[i]->netdev = adapter->netdev; 10073079ea0SAlexander Duyck reg_idx++; 10173079ea0SAlexander Duyck } 10273079ea0SAlexander Duyck 10373079ea0SAlexander Duyck reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; 10473079ea0SAlexander Duyck for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { 10573079ea0SAlexander Duyck reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; 10673079ea0SAlexander Duyck adapter->tx_ring[i]->reg_idx = reg_idx; 10773079ea0SAlexander Duyck reg_idx++; 10873079ea0SAlexander Duyck } 10973079ea0SAlexander Duyck } 11073079ea0SAlexander Duyck 11173079ea0SAlexander Duyck #endif /* IXGBE_FCOE */ 11273079ea0SAlexander Duyck return true; 11373079ea0SAlexander Duyck } 11473079ea0SAlexander Duyck 1158af3c33fSJeff Kirsher /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ 1168af3c33fSJeff Kirsher static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, 1178af3c33fSJeff Kirsher unsigned int *tx, unsigned int *rx) 1188af3c33fSJeff Kirsher { 1198af3c33fSJeff Kirsher struct ixgbe_hw *hw = &adapter->hw; 1200efbf12bSAlexander Duyck u8 num_tcs = adapter->hw_tcs; 1218af3c33fSJeff Kirsher 1228af3c33fSJeff Kirsher *tx = 0; 1238af3c33fSJeff Kirsher *rx = 0; 1248af3c33fSJeff Kirsher 1258af3c33fSJeff Kirsher switch (hw->mac.type) { 1268af3c33fSJeff Kirsher case ixgbe_mac_82598EB: 1274ae63730SAlexander Duyck /* TxQs/TC: 4 RxQs/TC: 8 */ 1284ae63730SAlexander Duyck *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ 1294ae63730SAlexander Duyck *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ 1308af3c33fSJeff Kirsher break; 1318af3c33fSJeff Kirsher case 
ixgbe_mac_82599EB: 1328af3c33fSJeff Kirsher case ixgbe_mac_X540: 1339a75a1acSDon Skidmore case ixgbe_mac_X550: 1349a75a1acSDon Skidmore case ixgbe_mac_X550EM_x: 13549425dfcSMark Rustad case ixgbe_mac_x550em_a: 1368af3c33fSJeff Kirsher if (num_tcs > 4) { 1374ae63730SAlexander Duyck /* 1384ae63730SAlexander Duyck * TCs : TC0/1 TC2/3 TC4-7 1394ae63730SAlexander Duyck * TxQs/TC: 32 16 8 1404ae63730SAlexander Duyck * RxQs/TC: 16 16 16 1414ae63730SAlexander Duyck */ 1428af3c33fSJeff Kirsher *rx = tc << 4; 1434ae63730SAlexander Duyck if (tc < 3) 1444ae63730SAlexander Duyck *tx = tc << 5; /* 0, 32, 64 */ 1454ae63730SAlexander Duyck else if (tc < 5) 1464ae63730SAlexander Duyck *tx = (tc + 2) << 4; /* 80, 96 */ 1474ae63730SAlexander Duyck else 1484ae63730SAlexander Duyck *tx = (tc + 8) << 3; /* 104, 112, 120 */ 1498af3c33fSJeff Kirsher } else { 1504ae63730SAlexander Duyck /* 1514ae63730SAlexander Duyck * TCs : TC0 TC1 TC2/3 1524ae63730SAlexander Duyck * TxQs/TC: 64 32 16 1534ae63730SAlexander Duyck * RxQs/TC: 32 32 32 1544ae63730SAlexander Duyck */ 1558af3c33fSJeff Kirsher *rx = tc << 5; 1564ae63730SAlexander Duyck if (tc < 2) 1574ae63730SAlexander Duyck *tx = tc << 6; /* 0, 64 */ 1584ae63730SAlexander Duyck else 1594ae63730SAlexander Duyck *tx = (tc + 4) << 4; /* 96, 112 */ 1608af3c33fSJeff Kirsher } 1618af3c33fSJeff Kirsher default: 1628af3c33fSJeff Kirsher break; 1638af3c33fSJeff Kirsher } 1648af3c33fSJeff Kirsher } 1658af3c33fSJeff Kirsher 1668af3c33fSJeff Kirsher /** 1678af3c33fSJeff Kirsher * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB 1688af3c33fSJeff Kirsher * @adapter: board private structure to initialize 1698af3c33fSJeff Kirsher * 1708af3c33fSJeff Kirsher * Cache the descriptor ring offsets for DCB to the assigned rings. 
1718af3c33fSJeff Kirsher * 1728af3c33fSJeff Kirsher **/ 1734ae63730SAlexander Duyck static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) 1748af3c33fSJeff Kirsher { 1750efbf12bSAlexander Duyck u8 num_tcs = adapter->hw_tcs; 1764ae63730SAlexander Duyck unsigned int tx_idx, rx_idx; 1774ae63730SAlexander Duyck int tc, offset, rss_i, i; 1788af3c33fSJeff Kirsher 1794ae63730SAlexander Duyck /* verify we have DCB queueing enabled before proceeding */ 1804ae63730SAlexander Duyck if (num_tcs <= 1) 1818af3c33fSJeff Kirsher return false; 1828af3c33fSJeff Kirsher 1834ae63730SAlexander Duyck rss_i = adapter->ring_feature[RING_F_RSS].indices; 1848af3c33fSJeff Kirsher 1854ae63730SAlexander Duyck for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { 1864ae63730SAlexander Duyck ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); 1874ae63730SAlexander Duyck for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { 1884ae63730SAlexander Duyck adapter->tx_ring[offset + i]->reg_idx = tx_idx; 1894ae63730SAlexander Duyck adapter->rx_ring[offset + i]->reg_idx = rx_idx; 190b5f69ccfSAlexander Duyck adapter->rx_ring[offset + i]->netdev = adapter->netdev; 1914ae63730SAlexander Duyck adapter->tx_ring[offset + i]->dcb_tc = tc; 1924ae63730SAlexander Duyck adapter->rx_ring[offset + i]->dcb_tc = tc; 1938af3c33fSJeff Kirsher } 1948af3c33fSJeff Kirsher } 1958af3c33fSJeff Kirsher 1968af3c33fSJeff Kirsher return true; 1978af3c33fSJeff Kirsher } 198d411a936SAlexander Duyck 1998af3c33fSJeff Kirsher #endif 2008af3c33fSJeff Kirsher /** 2018af3c33fSJeff Kirsher * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov 2028af3c33fSJeff Kirsher * @adapter: board private structure to initialize 2038af3c33fSJeff Kirsher * 2048af3c33fSJeff Kirsher * SR-IOV doesn't use any descriptor rings but changes the default if 2058af3c33fSJeff Kirsher * no other mapping is used. 
 *
 * Returns true when VMDq is enabled (offsets cached), false so the caller
 * falls back to the plain RSS mapping.
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		/* only pool 0 rings are owned by the PF netdev */
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool.
		 * NOTE(review): the Rx loop above tests (reg_idx & ~vmdq->mask)
		 * while this Tx loop tests (reg_idx & rss->mask) — confirm the
		 * asymmetry is intentional before changing either side.
		 */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
274d411a936SAlexander Duyck * 275d411a936SAlexander Duyck * Cache the descriptor ring offsets for RSS to the assigned rings. 276d411a936SAlexander Duyck * 277d411a936SAlexander Duyck **/ 278d411a936SAlexander Duyck static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 279d411a936SAlexander Duyck { 28033fdc82fSJohn Fastabend int i, reg_idx; 281d411a936SAlexander Duyck 282b5f69ccfSAlexander Duyck for (i = 0; i < adapter->num_rx_queues; i++) { 283d411a936SAlexander Duyck adapter->rx_ring[i]->reg_idx = i; 284b5f69ccfSAlexander Duyck adapter->rx_ring[i]->netdev = adapter->netdev; 285b5f69ccfSAlexander Duyck } 28633fdc82fSJohn Fastabend for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) 28733fdc82fSJohn Fastabend adapter->tx_ring[i]->reg_idx = reg_idx; 28833fdc82fSJohn Fastabend for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) 28933fdc82fSJohn Fastabend adapter->xdp_ring[i]->reg_idx = reg_idx; 290d411a936SAlexander Duyck 291d411a936SAlexander Duyck return true; 292d411a936SAlexander Duyck } 293d411a936SAlexander Duyck 294d411a936SAlexander Duyck /** 2958af3c33fSJeff Kirsher * ixgbe_cache_ring_register - Descriptor ring to register mapping 2968af3c33fSJeff Kirsher * @adapter: board private structure to initialize 2978af3c33fSJeff Kirsher * 2988af3c33fSJeff Kirsher * Once we know the feature-set enabled for the device, we'll cache 2998af3c33fSJeff Kirsher * the register offset the descriptor ring is assigned to. 3008af3c33fSJeff Kirsher * 3018af3c33fSJeff Kirsher * Note, the order the various feature calls is important. It must start with 3028af3c33fSJeff Kirsher * the "most" features enabled at the same time, then trickle down to the 3038af3c33fSJeff Kirsher * least amount of features turned on at once. 
3048af3c33fSJeff Kirsher **/ 3058af3c33fSJeff Kirsher static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 3068af3c33fSJeff Kirsher { 3078af3c33fSJeff Kirsher /* start with default case */ 3088af3c33fSJeff Kirsher adapter->rx_ring[0]->reg_idx = 0; 3098af3c33fSJeff Kirsher adapter->tx_ring[0]->reg_idx = 0; 3108af3c33fSJeff Kirsher 31173079ea0SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 31273079ea0SAlexander Duyck if (ixgbe_cache_ring_dcb_sriov(adapter)) 31373079ea0SAlexander Duyck return; 31473079ea0SAlexander Duyck 31573079ea0SAlexander Duyck if (ixgbe_cache_ring_dcb(adapter)) 31673079ea0SAlexander Duyck return; 31773079ea0SAlexander Duyck 31873079ea0SAlexander Duyck #endif 3198af3c33fSJeff Kirsher if (ixgbe_cache_ring_sriov(adapter)) 3208af3c33fSJeff Kirsher return; 3218af3c33fSJeff Kirsher 322d411a936SAlexander Duyck ixgbe_cache_ring_rss(adapter); 3238af3c33fSJeff Kirsher } 3248af3c33fSJeff Kirsher 32533fdc82fSJohn Fastabend static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) 32633fdc82fSJohn Fastabend { 32733fdc82fSJohn Fastabend return adapter->xdp_prog ? 
nr_cpu_ids : 0; 32833fdc82fSJohn Fastabend } 32933fdc82fSJohn Fastabend 3302bf1a87bSEmil Tantilov #define IXGBE_RSS_64Q_MASK 0x3F 331d411a936SAlexander Duyck #define IXGBE_RSS_16Q_MASK 0xF 332d411a936SAlexander Duyck #define IXGBE_RSS_8Q_MASK 0x7 333d411a936SAlexander Duyck #define IXGBE_RSS_4Q_MASK 0x3 334d411a936SAlexander Duyck #define IXGBE_RSS_2Q_MASK 0x1 335d411a936SAlexander Duyck #define IXGBE_RSS_DISABLED_MASK 0x0 336d411a936SAlexander Duyck 337d411a936SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 33873079ea0SAlexander Duyck /** 33973079ea0SAlexander Duyck * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB 34073079ea0SAlexander Duyck * @adapter: board private structure to initialize 34173079ea0SAlexander Duyck * 34273079ea0SAlexander Duyck * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues 34373079ea0SAlexander Duyck * and VM pools where appropriate. Also assign queues based on DCB 34473079ea0SAlexander Duyck * priorities and map accordingly.. 
 *
 * Returns true when DCB + SR-IOV queue counts were configured, false so the
 * caller tries the next allocation scheme.
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	/* one queue per TC per pool; XDP is not used in this mode */
	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FcoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			/* no spare queues and no FCoE TC: give up on FCoE */
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

/**
 * ixgbe_set_dcb_queues - Allocate queues for a DCB-enabled device
 * @adapter: board private structure to initialize
 *
 * Size the per-TC RSS indices to the hardware limits for the current DCB
 * mode, disable ATR, map FCoE onto its TC's queue pairs, and program the
 * netdev TC-to-queue mapping.  Returns false when DCB is not active.
 **/
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 * Returns true when SR-IOV queue counts were configured, false so the
 * caller falls through to the plain RSS allocation.
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	/* XDP is not used when SR-IOV owns the queue layout */
	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior. To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
Kirsher * @adapter: board private structure to initialize 6288af3c33fSJeff Kirsher * 6298af3c33fSJeff Kirsher * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 6308af3c33fSJeff Kirsher * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 6318af3c33fSJeff Kirsher * 6328af3c33fSJeff Kirsher **/ 6330b7f5d0bSAlexander Duyck static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) 6348af3c33fSJeff Kirsher { 6352bf1a87bSEmil Tantilov struct ixgbe_hw *hw = &adapter->hw; 6360b7f5d0bSAlexander Duyck struct ixgbe_ring_feature *f; 6370b7f5d0bSAlexander Duyck u16 rss_i; 6388af3c33fSJeff Kirsher 6390b7f5d0bSAlexander Duyck /* set mask for 16 queue limit of RSS */ 6400b7f5d0bSAlexander Duyck f = &adapter->ring_feature[RING_F_RSS]; 6410b7f5d0bSAlexander Duyck rss_i = f->limit; 6420b7f5d0bSAlexander Duyck 6430b7f5d0bSAlexander Duyck f->indices = rss_i; 6442bf1a87bSEmil Tantilov 6452bf1a87bSEmil Tantilov if (hw->mac.type < ixgbe_mac_X550) 646d411a936SAlexander Duyck f->mask = IXGBE_RSS_16Q_MASK; 6472bf1a87bSEmil Tantilov else 6482bf1a87bSEmil Tantilov f->mask = IXGBE_RSS_64Q_MASK; 6498af3c33fSJeff Kirsher 65039cb681bSAlexander Duyck /* disable ATR by default, it will be configured below */ 65139cb681bSAlexander Duyck adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 65239cb681bSAlexander Duyck 6538af3c33fSJeff Kirsher /* 6540b7f5d0bSAlexander Duyck * Use Flow Director in addition to RSS to ensure the best 6558af3c33fSJeff Kirsher * distribution of flows across cores, even when an FDIR flow 6568af3c33fSJeff Kirsher * isn't matched. 
6578af3c33fSJeff Kirsher */ 65839cb681bSAlexander Duyck if (rss_i > 1 && adapter->atr_sample_rate) { 6590b7f5d0bSAlexander Duyck f = &adapter->ring_feature[RING_F_FDIR]; 6600b7f5d0bSAlexander Duyck 661d3cb9869SAlexander Duyck rss_i = f->indices = f->limit; 66239cb681bSAlexander Duyck 66339cb681bSAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 66439cb681bSAlexander Duyck adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 6658af3c33fSJeff Kirsher } 6660b7f5d0bSAlexander Duyck 667d411a936SAlexander Duyck #ifdef IXGBE_FCOE 668d411a936SAlexander Duyck /* 669d411a936SAlexander Duyck * FCoE can exist on the same rings as standard network traffic 670d411a936SAlexander Duyck * however it is preferred to avoid that if possible. In order 671d411a936SAlexander Duyck * to get the best performance we allocate as many FCoE queues 672d411a936SAlexander Duyck * as we can and we place them at the end of the ring array to 673d411a936SAlexander Duyck * avoid sharing queues with standard RSS on systems with 24 or 674d411a936SAlexander Duyck * more CPUs. 
675d411a936SAlexander Duyck */ 676d411a936SAlexander Duyck if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 677d411a936SAlexander Duyck struct net_device *dev = adapter->netdev; 678d411a936SAlexander Duyck u16 fcoe_i; 679d411a936SAlexander Duyck 680d411a936SAlexander Duyck f = &adapter->ring_feature[RING_F_FCOE]; 681d411a936SAlexander Duyck 682d411a936SAlexander Duyck /* merge FCoE queues with RSS queues */ 683d411a936SAlexander Duyck fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); 684d411a936SAlexander Duyck fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); 685d411a936SAlexander Duyck 686d411a936SAlexander Duyck /* limit indices to rss_i if MSI-X is disabled */ 687d411a936SAlexander Duyck if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 688d411a936SAlexander Duyck fcoe_i = rss_i; 689d411a936SAlexander Duyck 690d411a936SAlexander Duyck /* attempt to reserve some queues for just FCoE */ 691d411a936SAlexander Duyck f->indices = min_t(u16, fcoe_i, f->limit); 692d411a936SAlexander Duyck f->offset = fcoe_i - f->indices; 693d411a936SAlexander Duyck rss_i = max_t(u16, fcoe_i, rss_i); 694d411a936SAlexander Duyck } 695d411a936SAlexander Duyck 696d411a936SAlexander Duyck #endif /* IXGBE_FCOE */ 6970b7f5d0bSAlexander Duyck adapter->num_rx_queues = rss_i; 6980b7f5d0bSAlexander Duyck adapter->num_tx_queues = rss_i; 69933fdc82fSJohn Fastabend adapter->num_xdp_queues = ixgbe_xdp_queues(adapter); 7000b7f5d0bSAlexander Duyck 7010b7f5d0bSAlexander Duyck return true; 7028af3c33fSJeff Kirsher } 7038af3c33fSJeff Kirsher 7048af3c33fSJeff Kirsher /** 70549ce9c2cSBen Hutchings * ixgbe_set_num_queues - Allocate queues for device, feature dependent 7068af3c33fSJeff Kirsher * @adapter: board private structure to initialize 7078af3c33fSJeff Kirsher * 7088af3c33fSJeff Kirsher * This is the top level queue allocation routine. 
The order here is very 7098af3c33fSJeff Kirsher * important, starting with the "most" number of features turned on at once, 7108af3c33fSJeff Kirsher * and ending with the smallest set of features. This way large combinations 7118af3c33fSJeff Kirsher * can be allocated if they're turned on, and smaller combinations are the 7128af3c33fSJeff Kirsher * fallthrough conditions. 7138af3c33fSJeff Kirsher * 7148af3c33fSJeff Kirsher **/ 715ac802f5dSAlexander Duyck static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 7168af3c33fSJeff Kirsher { 7178af3c33fSJeff Kirsher /* Start with base case */ 7188af3c33fSJeff Kirsher adapter->num_rx_queues = 1; 7198af3c33fSJeff Kirsher adapter->num_tx_queues = 1; 72033fdc82fSJohn Fastabend adapter->num_xdp_queues = 0; 721ff815fb2SAlexander Duyck adapter->num_rx_pools = 1; 7228af3c33fSJeff Kirsher adapter->num_rx_queues_per_pool = 1; 7238af3c33fSJeff Kirsher 72473079ea0SAlexander Duyck #ifdef CONFIG_IXGBE_DCB 72573079ea0SAlexander Duyck if (ixgbe_set_dcb_sriov_queues(adapter)) 726ac802f5dSAlexander Duyck return; 7278af3c33fSJeff Kirsher 7288af3c33fSJeff Kirsher if (ixgbe_set_dcb_queues(adapter)) 729ac802f5dSAlexander Duyck return; 7308af3c33fSJeff Kirsher 7318af3c33fSJeff Kirsher #endif 73273079ea0SAlexander Duyck if (ixgbe_set_sriov_queues(adapter)) 73373079ea0SAlexander Duyck return; 73473079ea0SAlexander Duyck 735ac802f5dSAlexander Duyck ixgbe_set_rss_queues(adapter); 7368af3c33fSJeff Kirsher } 7378af3c33fSJeff Kirsher 7383bcf3446SJacob Keller /** 7393bcf3446SJacob Keller * ixgbe_acquire_msix_vectors - acquire MSI-X vectors 7403bcf3446SJacob Keller * @adapter: board private structure 7413bcf3446SJacob Keller * 7423bcf3446SJacob Keller * Attempts to acquire a suitable range of MSI-X vector interrupts. Will 7433bcf3446SJacob Keller * return a negative error code if unable to acquire MSI-X vectors for any 7443bcf3446SJacob Keller * reason. 
7453bcf3446SJacob Keller */ 7463bcf3446SJacob Keller static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) 7478af3c33fSJeff Kirsher { 7483bcf3446SJacob Keller struct ixgbe_hw *hw = &adapter->hw; 7493bcf3446SJacob Keller int i, vectors, vector_threshold; 7508af3c33fSJeff Kirsher 75133fdc82fSJohn Fastabend /* We start by asking for one vector per queue pair with XDP queues 75233fdc82fSJohn Fastabend * being stacked with TX queues. 75333fdc82fSJohn Fastabend */ 7543bcf3446SJacob Keller vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); 75533fdc82fSJohn Fastabend vectors = max(vectors, adapter->num_xdp_queues); 7563bcf3446SJacob Keller 7573bcf3446SJacob Keller /* It is easy to be greedy for MSI-X vectors. However, it really 7583bcf3446SJacob Keller * doesn't do much good if we have a lot more vectors than CPUs. We'll 7593bcf3446SJacob Keller * be somewhat conservative and only ask for (roughly) the same number 7603bcf3446SJacob Keller * of vectors as there are CPUs. 7613bcf3446SJacob Keller */ 7623bcf3446SJacob Keller vectors = min_t(int, vectors, num_online_cpus()); 7633bcf3446SJacob Keller 7643bcf3446SJacob Keller /* Some vectors are necessary for non-queue interrupts */ 7653bcf3446SJacob Keller vectors += NON_Q_VECTORS; 7663bcf3446SJacob Keller 7673bcf3446SJacob Keller /* Hardware can only support a maximum of hw.mac->max_msix_vectors. 7683bcf3446SJacob Keller * With features such as RSS and VMDq, we can easily surpass the 7693bcf3446SJacob Keller * number of Rx and Tx descriptor queues supported by our device. 
7703bcf3446SJacob Keller * Thus, we cap the maximum in the rare cases where the CPU count also 7713bcf3446SJacob Keller * exceeds our vector limit 7723bcf3446SJacob Keller */ 7733bcf3446SJacob Keller vectors = min_t(int, vectors, hw->mac.max_msix_vectors); 7743bcf3446SJacob Keller 7753bcf3446SJacob Keller /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] 7763bcf3446SJacob Keller * handler, and (2) an Other (Link Status Change, etc.) handler. 7778af3c33fSJeff Kirsher */ 7788af3c33fSJeff Kirsher vector_threshold = MIN_MSIX_COUNT; 7798af3c33fSJeff Kirsher 780027bb561SJacob Keller adapter->msix_entries = kcalloc(vectors, 781027bb561SJacob Keller sizeof(struct msix_entry), 782027bb561SJacob Keller GFP_KERNEL); 783027bb561SJacob Keller if (!adapter->msix_entries) 784027bb561SJacob Keller return -ENOMEM; 785027bb561SJacob Keller 786027bb561SJacob Keller for (i = 0; i < vectors; i++) 787027bb561SJacob Keller adapter->msix_entries[i].entry = i; 788027bb561SJacob Keller 789b45e620cSAlexander Gordeev vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 790b45e620cSAlexander Gordeev vector_threshold, vectors); 7918af3c33fSJeff Kirsher 792b45e620cSAlexander Gordeev if (vectors < 0) { 793493043e5SJacob Keller /* A negative count of allocated vectors indicates an error in 794493043e5SJacob Keller * acquiring within the specified range of MSI-X vectors 7958af3c33fSJeff Kirsher */ 796493043e5SJacob Keller e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", 797493043e5SJacob Keller vectors); 798493043e5SJacob Keller 7998af3c33fSJeff Kirsher adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 8008af3c33fSJeff Kirsher kfree(adapter->msix_entries); 8018af3c33fSJeff Kirsher adapter->msix_entries = NULL; 802d7de3c6eSJacob Keller 803d7de3c6eSJacob Keller return vectors; 804d7de3c6eSJacob Keller } 805d7de3c6eSJacob Keller 806d7de3c6eSJacob Keller /* we successfully allocated some number of vectors within our 807d7de3c6eSJacob Keller * requested range. 
808d7de3c6eSJacob Keller */ 809d7de3c6eSJacob Keller adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; 810d7de3c6eSJacob Keller 811d7de3c6eSJacob Keller /* Adjust for only the vectors we'll use, which is minimum 812d7de3c6eSJacob Keller * of max_q_vectors, or the number of vectors we were allocated. 8138af3c33fSJeff Kirsher */ 81449c7ffbeSAlexander Duyck vectors -= NON_Q_VECTORS; 815d7de3c6eSJacob Keller adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); 816d7de3c6eSJacob Keller 817d7de3c6eSJacob Keller return 0; 8188af3c33fSJeff Kirsher } 8198af3c33fSJeff Kirsher 8208af3c33fSJeff Kirsher static void ixgbe_add_ring(struct ixgbe_ring *ring, 8218af3c33fSJeff Kirsher struct ixgbe_ring_container *head) 8228af3c33fSJeff Kirsher { 8238af3c33fSJeff Kirsher ring->next = head->ring; 8248af3c33fSJeff Kirsher head->ring = ring; 8258af3c33fSJeff Kirsher head->count++; 826b4ded832SAlexander Duyck head->next_update = jiffies + 1; 8278af3c33fSJeff Kirsher } 8288af3c33fSJeff Kirsher 8298af3c33fSJeff Kirsher /** 8308af3c33fSJeff Kirsher * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector 8318af3c33fSJeff Kirsher * @adapter: board private structure to initialize 832d0bfcdfdSAlexander Duyck * @v_count: q_vectors allocated on adapter, used for ring interleaving 8338af3c33fSJeff Kirsher * @v_idx: index of vector in adapter struct 834d0bfcdfdSAlexander Duyck * @txr_count: total number of Tx rings to allocate 835d0bfcdfdSAlexander Duyck * @txr_idx: index of first Tx ring to allocate 83633fdc82fSJohn Fastabend * @xdp_count: total number of XDP rings to allocate 83733fdc82fSJohn Fastabend * @xdp_idx: index of first XDP ring to allocate 838d0bfcdfdSAlexander Duyck * @rxr_count: total number of Rx rings to allocate 839d0bfcdfdSAlexander Duyck * @rxr_idx: index of first Rx ring to allocate 8408af3c33fSJeff Kirsher * 8418af3c33fSJeff Kirsher * We allocate one q_vector. If allocation fails we return -ENOMEM. 
8428af3c33fSJeff Kirsher **/ 843d0bfcdfdSAlexander Duyck static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, 844d0bfcdfdSAlexander Duyck int v_count, int v_idx, 8458af3c33fSJeff Kirsher int txr_count, int txr_idx, 84633fdc82fSJohn Fastabend int xdp_count, int xdp_idx, 8478af3c33fSJeff Kirsher int rxr_count, int rxr_idx) 8488af3c33fSJeff Kirsher { 8498af3c33fSJeff Kirsher struct ixgbe_q_vector *q_vector; 8508af3c33fSJeff Kirsher struct ixgbe_ring *ring; 851fd786b7bSAlexander Duyck int node = NUMA_NO_NODE; 8528af3c33fSJeff Kirsher int cpu = -1; 8538af3c33fSJeff Kirsher int ring_count, size; 8540efbf12bSAlexander Duyck u8 tcs = adapter->hw_tcs; 8558af3c33fSJeff Kirsher 85633fdc82fSJohn Fastabend ring_count = txr_count + rxr_count + xdp_count; 8578af3c33fSJeff Kirsher size = sizeof(struct ixgbe_q_vector) + 8588af3c33fSJeff Kirsher (sizeof(struct ixgbe_ring) * ring_count); 8598af3c33fSJeff Kirsher 8608af3c33fSJeff Kirsher /* customize cpu for Flow Director mapping */ 861fd786b7bSAlexander Duyck if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { 862fd786b7bSAlexander Duyck u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; 863fd786b7bSAlexander Duyck if (rss_i > 1 && adapter->atr_sample_rate) { 8648af3c33fSJeff Kirsher if (cpu_online(v_idx)) { 8658af3c33fSJeff Kirsher cpu = v_idx; 8668af3c33fSJeff Kirsher node = cpu_to_node(cpu); 8678af3c33fSJeff Kirsher } 8688af3c33fSJeff Kirsher } 869fd786b7bSAlexander Duyck } 8708af3c33fSJeff Kirsher 8718af3c33fSJeff Kirsher /* allocate q_vector and rings */ 8728af3c33fSJeff Kirsher q_vector = kzalloc_node(size, GFP_KERNEL, node); 8738af3c33fSJeff Kirsher if (!q_vector) 8748af3c33fSJeff Kirsher q_vector = kzalloc(size, GFP_KERNEL); 8758af3c33fSJeff Kirsher if (!q_vector) 8768af3c33fSJeff Kirsher return -ENOMEM; 8778af3c33fSJeff Kirsher 8788af3c33fSJeff Kirsher /* setup affinity mask and node */ 8798af3c33fSJeff Kirsher if (cpu != -1) 8808af3c33fSJeff Kirsher cpumask_set_cpu(cpu, &q_vector->affinity_mask); 
8818af3c33fSJeff Kirsher q_vector->numa_node = node; 8828af3c33fSJeff Kirsher 883245f292dSAlexander Duyck #ifdef CONFIG_IXGBE_DCA 884245f292dSAlexander Duyck /* initialize CPU for DCA */ 885245f292dSAlexander Duyck q_vector->cpu = -1; 886245f292dSAlexander Duyck 887245f292dSAlexander Duyck #endif 8888af3c33fSJeff Kirsher /* initialize NAPI */ 8898af3c33fSJeff Kirsher netif_napi_add(adapter->netdev, &q_vector->napi, 8908af3c33fSJeff Kirsher ixgbe_poll, 64); 8918af3c33fSJeff Kirsher 8928af3c33fSJeff Kirsher /* tie q_vector and adapter together */ 8938af3c33fSJeff Kirsher adapter->q_vector[v_idx] = q_vector; 8948af3c33fSJeff Kirsher q_vector->adapter = adapter; 8958af3c33fSJeff Kirsher q_vector->v_idx = v_idx; 8968af3c33fSJeff Kirsher 8978af3c33fSJeff Kirsher /* initialize work limits */ 8988af3c33fSJeff Kirsher q_vector->tx.work_limit = adapter->tx_work_limit; 8998af3c33fSJeff Kirsher 900b4ded832SAlexander Duyck /* Initialize setting for adaptive ITR */ 901b4ded832SAlexander Duyck q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | 902b4ded832SAlexander Duyck IXGBE_ITR_ADAPTIVE_LATENCY; 903b4ded832SAlexander Duyck q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | 904b4ded832SAlexander Duyck IXGBE_ITR_ADAPTIVE_LATENCY; 9058af3c33fSJeff Kirsher 9063af3361eSEmil Tantilov /* intialize ITR */ 9073af3361eSEmil Tantilov if (txr_count && !rxr_count) { 9083af3361eSEmil Tantilov /* tx only vector */ 9093af3361eSEmil Tantilov if (adapter->tx_itr_setting == 1) 9108ac34f10SAlexander Duyck q_vector->itr = IXGBE_12K_ITR; 9113af3361eSEmil Tantilov else 9123af3361eSEmil Tantilov q_vector->itr = adapter->tx_itr_setting; 9133af3361eSEmil Tantilov } else { 9143af3361eSEmil Tantilov /* rx or rx/tx vector */ 9153af3361eSEmil Tantilov if (adapter->rx_itr_setting == 1) 9163af3361eSEmil Tantilov q_vector->itr = IXGBE_20K_ITR; 9173af3361eSEmil Tantilov else 9183af3361eSEmil Tantilov q_vector->itr = adapter->rx_itr_setting; 9193af3361eSEmil Tantilov } 9203af3361eSEmil Tantilov 
921b4ded832SAlexander Duyck /* initialize pointer to rings */ 922b4ded832SAlexander Duyck ring = q_vector->ring; 923b4ded832SAlexander Duyck 9248af3c33fSJeff Kirsher while (txr_count) { 9258af3c33fSJeff Kirsher /* assign generic ring traits */ 9268af3c33fSJeff Kirsher ring->dev = &adapter->pdev->dev; 9278af3c33fSJeff Kirsher ring->netdev = adapter->netdev; 9288af3c33fSJeff Kirsher 9298af3c33fSJeff Kirsher /* configure backlink on ring */ 9308af3c33fSJeff Kirsher ring->q_vector = q_vector; 9318af3c33fSJeff Kirsher 9328af3c33fSJeff Kirsher /* update q_vector Tx values */ 9338af3c33fSJeff Kirsher ixgbe_add_ring(ring, &q_vector->tx); 9348af3c33fSJeff Kirsher 9358af3c33fSJeff Kirsher /* apply Tx specific ring traits */ 9368af3c33fSJeff Kirsher ring->count = adapter->tx_ring_count; 9378af3c33fSJeff Kirsher ring->queue_index = txr_idx; 9388af3c33fSJeff Kirsher 9398af3c33fSJeff Kirsher /* assign ring to adapter */ 9408af3c33fSJeff Kirsher adapter->tx_ring[txr_idx] = ring; 9418af3c33fSJeff Kirsher 9428af3c33fSJeff Kirsher /* update count and index */ 9438af3c33fSJeff Kirsher txr_count--; 944d0bfcdfdSAlexander Duyck txr_idx += v_count; 9458af3c33fSJeff Kirsher 9468af3c33fSJeff Kirsher /* push pointer to next ring */ 9478af3c33fSJeff Kirsher ring++; 9488af3c33fSJeff Kirsher } 9498af3c33fSJeff Kirsher 95033fdc82fSJohn Fastabend while (xdp_count) { 95133fdc82fSJohn Fastabend /* assign generic ring traits */ 95233fdc82fSJohn Fastabend ring->dev = &adapter->pdev->dev; 95333fdc82fSJohn Fastabend ring->netdev = adapter->netdev; 95433fdc82fSJohn Fastabend 95533fdc82fSJohn Fastabend /* configure backlink on ring */ 95633fdc82fSJohn Fastabend ring->q_vector = q_vector; 95733fdc82fSJohn Fastabend 95833fdc82fSJohn Fastabend /* update q_vector Tx values */ 95933fdc82fSJohn Fastabend ixgbe_add_ring(ring, &q_vector->tx); 96033fdc82fSJohn Fastabend 96133fdc82fSJohn Fastabend /* apply Tx specific ring traits */ 96233fdc82fSJohn Fastabend ring->count = adapter->tx_ring_count; 96333fdc82fSJohn 
Fastabend ring->queue_index = xdp_idx; 96433fdc82fSJohn Fastabend set_ring_xdp(ring); 96533fdc82fSJohn Fastabend 96633fdc82fSJohn Fastabend /* assign ring to adapter */ 96733fdc82fSJohn Fastabend adapter->xdp_ring[xdp_idx] = ring; 96833fdc82fSJohn Fastabend 96933fdc82fSJohn Fastabend /* update count and index */ 97033fdc82fSJohn Fastabend xdp_count--; 97133fdc82fSJohn Fastabend xdp_idx++; 97233fdc82fSJohn Fastabend 97333fdc82fSJohn Fastabend /* push pointer to next ring */ 97433fdc82fSJohn Fastabend ring++; 97533fdc82fSJohn Fastabend } 97633fdc82fSJohn Fastabend 9778af3c33fSJeff Kirsher while (rxr_count) { 9788af3c33fSJeff Kirsher /* assign generic ring traits */ 9798af3c33fSJeff Kirsher ring->dev = &adapter->pdev->dev; 9808af3c33fSJeff Kirsher ring->netdev = adapter->netdev; 9818af3c33fSJeff Kirsher 9828af3c33fSJeff Kirsher /* configure backlink on ring */ 9838af3c33fSJeff Kirsher ring->q_vector = q_vector; 9848af3c33fSJeff Kirsher 9858af3c33fSJeff Kirsher /* update q_vector Rx values */ 9868af3c33fSJeff Kirsher ixgbe_add_ring(ring, &q_vector->rx); 9878af3c33fSJeff Kirsher 9888af3c33fSJeff Kirsher /* 9898af3c33fSJeff Kirsher * 82599 errata, UDP frames with a 0 checksum 9908af3c33fSJeff Kirsher * can be marked as checksum errors. 
9918af3c33fSJeff Kirsher */ 9928af3c33fSJeff Kirsher if (adapter->hw.mac.type == ixgbe_mac_82599EB) 9938af3c33fSJeff Kirsher set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 9948af3c33fSJeff Kirsher 995b2db497eSAlexander Duyck #ifdef IXGBE_FCOE 996b2db497eSAlexander Duyck if (adapter->netdev->features & NETIF_F_FCOE_MTU) { 997b2db497eSAlexander Duyck struct ixgbe_ring_feature *f; 998b2db497eSAlexander Duyck f = &adapter->ring_feature[RING_F_FCOE]; 999e4b317e9SAlexander Duyck if ((rxr_idx >= f->offset) && 1000e4b317e9SAlexander Duyck (rxr_idx < f->offset + f->indices)) 100157efd44cSAlexander Duyck set_bit(__IXGBE_RX_FCOE, &ring->state); 1002b2db497eSAlexander Duyck } 1003b2db497eSAlexander Duyck 1004b2db497eSAlexander Duyck #endif /* IXGBE_FCOE */ 10058af3c33fSJeff Kirsher /* apply Rx specific ring traits */ 10068af3c33fSJeff Kirsher ring->count = adapter->rx_ring_count; 10078af3c33fSJeff Kirsher ring->queue_index = rxr_idx; 10088af3c33fSJeff Kirsher 10098af3c33fSJeff Kirsher /* assign ring to adapter */ 10108af3c33fSJeff Kirsher adapter->rx_ring[rxr_idx] = ring; 10118af3c33fSJeff Kirsher 10128af3c33fSJeff Kirsher /* update count and index */ 10138af3c33fSJeff Kirsher rxr_count--; 1014d0bfcdfdSAlexander Duyck rxr_idx += v_count; 10158af3c33fSJeff Kirsher 10168af3c33fSJeff Kirsher /* push pointer to next ring */ 10178af3c33fSJeff Kirsher ring++; 10188af3c33fSJeff Kirsher } 10198af3c33fSJeff Kirsher 10208af3c33fSJeff Kirsher return 0; 10218af3c33fSJeff Kirsher } 10228af3c33fSJeff Kirsher 10238af3c33fSJeff Kirsher /** 10248af3c33fSJeff Kirsher * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector 10258af3c33fSJeff Kirsher * @adapter: board private structure to initialize 10268af3c33fSJeff Kirsher * @v_idx: Index of vector to be freed 10278af3c33fSJeff Kirsher * 10288af3c33fSJeff Kirsher * This function frees the memory allocated to the q_vector. 
In addition if 10298af3c33fSJeff Kirsher * NAPI is enabled it will delete any references to the NAPI struct prior 10308af3c33fSJeff Kirsher * to freeing the q_vector. 10318af3c33fSJeff Kirsher **/ 10328af3c33fSJeff Kirsher static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) 10338af3c33fSJeff Kirsher { 10348af3c33fSJeff Kirsher struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; 10358af3c33fSJeff Kirsher struct ixgbe_ring *ring; 10368af3c33fSJeff Kirsher 103790382dcaSJohn Fastabend ixgbe_for_each_ring(ring, q_vector->tx) { 103890382dcaSJohn Fastabend if (ring_is_xdp(ring)) 103990382dcaSJohn Fastabend adapter->xdp_ring[ring->queue_index] = NULL; 104090382dcaSJohn Fastabend else 10418af3c33fSJeff Kirsher adapter->tx_ring[ring->queue_index] = NULL; 104290382dcaSJohn Fastabend } 10438af3c33fSJeff Kirsher 10448af3c33fSJeff Kirsher ixgbe_for_each_ring(ring, q_vector->rx) 10458af3c33fSJeff Kirsher adapter->rx_ring[ring->queue_index] = NULL; 10468af3c33fSJeff Kirsher 10478af3c33fSJeff Kirsher adapter->q_vector[v_idx] = NULL; 10485a85e737SEliezer Tamir napi_hash_del(&q_vector->napi); 10498af3c33fSJeff Kirsher netif_napi_del(&q_vector->napi); 10508af3c33fSJeff Kirsher 10518af3c33fSJeff Kirsher /* 10528af3c33fSJeff Kirsher * ixgbe_get_stats64() might access the rings on this vector, 10538af3c33fSJeff Kirsher * we must wait a grace period before freeing it. 10548af3c33fSJeff Kirsher */ 10558af3c33fSJeff Kirsher kfree_rcu(q_vector, rcu); 10568af3c33fSJeff Kirsher } 10578af3c33fSJeff Kirsher 10588af3c33fSJeff Kirsher /** 10598af3c33fSJeff Kirsher * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors 10608af3c33fSJeff Kirsher * @adapter: board private structure to initialize 10618af3c33fSJeff Kirsher * 10628af3c33fSJeff Kirsher * We allocate one q_vector per queue interrupt. If allocation fails we 10638af3c33fSJeff Kirsher * return -ENOMEM. 
10648af3c33fSJeff Kirsher **/ 10658af3c33fSJeff Kirsher static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) 10668af3c33fSJeff Kirsher { 106749c7ffbeSAlexander Duyck int q_vectors = adapter->num_q_vectors; 10688af3c33fSJeff Kirsher int rxr_remaining = adapter->num_rx_queues; 10698af3c33fSJeff Kirsher int txr_remaining = adapter->num_tx_queues; 107033fdc82fSJohn Fastabend int xdp_remaining = adapter->num_xdp_queues; 107133fdc82fSJohn Fastabend int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; 10728af3c33fSJeff Kirsher int err; 10738af3c33fSJeff Kirsher 10748af3c33fSJeff Kirsher /* only one q_vector if MSI-X is disabled. */ 10758af3c33fSJeff Kirsher if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 10768af3c33fSJeff Kirsher q_vectors = 1; 10778af3c33fSJeff Kirsher 107833fdc82fSJohn Fastabend if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { 1079d0bfcdfdSAlexander Duyck for (; rxr_remaining; v_idx++) { 1080d0bfcdfdSAlexander Duyck err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 108133fdc82fSJohn Fastabend 0, 0, 0, 0, 1, rxr_idx); 10828af3c33fSJeff Kirsher 10838af3c33fSJeff Kirsher if (err) 10848af3c33fSJeff Kirsher goto err_out; 10858af3c33fSJeff Kirsher 10868af3c33fSJeff Kirsher /* update counts and index */ 1087d0bfcdfdSAlexander Duyck rxr_remaining--; 1088d0bfcdfdSAlexander Duyck rxr_idx++; 10898af3c33fSJeff Kirsher } 10908af3c33fSJeff Kirsher } 10918af3c33fSJeff Kirsher 1092d0bfcdfdSAlexander Duyck for (; v_idx < q_vectors; v_idx++) { 1093d0bfcdfdSAlexander Duyck int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); 1094d0bfcdfdSAlexander Duyck int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); 109533fdc82fSJohn Fastabend int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); 109633fdc82fSJohn Fastabend 1097d0bfcdfdSAlexander Duyck err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, 10988af3c33fSJeff Kirsher tqpv, txr_idx, 109933fdc82fSJohn Fastabend xqpv, xdp_idx, 11008af3c33fSJeff Kirsher rqpv, rxr_idx); 
11018af3c33fSJeff Kirsher 11028af3c33fSJeff Kirsher if (err) 11038af3c33fSJeff Kirsher goto err_out; 11048af3c33fSJeff Kirsher 11058af3c33fSJeff Kirsher /* update counts and index */ 11068af3c33fSJeff Kirsher rxr_remaining -= rqpv; 11078af3c33fSJeff Kirsher txr_remaining -= tqpv; 110833fdc82fSJohn Fastabend xdp_remaining -= xqpv; 1109d0bfcdfdSAlexander Duyck rxr_idx++; 1110d0bfcdfdSAlexander Duyck txr_idx++; 111133fdc82fSJohn Fastabend xdp_idx += xqpv; 11128af3c33fSJeff Kirsher } 11138af3c33fSJeff Kirsher 11148af3c33fSJeff Kirsher return 0; 11158af3c33fSJeff Kirsher 11168af3c33fSJeff Kirsher err_out: 111749c7ffbeSAlexander Duyck adapter->num_tx_queues = 0; 111833fdc82fSJohn Fastabend adapter->num_xdp_queues = 0; 111949c7ffbeSAlexander Duyck adapter->num_rx_queues = 0; 112049c7ffbeSAlexander Duyck adapter->num_q_vectors = 0; 112149c7ffbeSAlexander Duyck 112249c7ffbeSAlexander Duyck while (v_idx--) 11238af3c33fSJeff Kirsher ixgbe_free_q_vector(adapter, v_idx); 11248af3c33fSJeff Kirsher 11258af3c33fSJeff Kirsher return -ENOMEM; 11268af3c33fSJeff Kirsher } 11278af3c33fSJeff Kirsher 11288af3c33fSJeff Kirsher /** 11298af3c33fSJeff Kirsher * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors 11308af3c33fSJeff Kirsher * @adapter: board private structure to initialize 11318af3c33fSJeff Kirsher * 11328af3c33fSJeff Kirsher * This function frees the memory allocated to the q_vectors. In addition if 11338af3c33fSJeff Kirsher * NAPI is enabled it will delete any references to the NAPI struct prior 11348af3c33fSJeff Kirsher * to freeing the q_vector. 
11358af3c33fSJeff Kirsher **/ 11368af3c33fSJeff Kirsher static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) 11378af3c33fSJeff Kirsher { 113849c7ffbeSAlexander Duyck int v_idx = adapter->num_q_vectors; 11398af3c33fSJeff Kirsher 114049c7ffbeSAlexander Duyck adapter->num_tx_queues = 0; 114133fdc82fSJohn Fastabend adapter->num_xdp_queues = 0; 114249c7ffbeSAlexander Duyck adapter->num_rx_queues = 0; 114349c7ffbeSAlexander Duyck adapter->num_q_vectors = 0; 11448af3c33fSJeff Kirsher 114549c7ffbeSAlexander Duyck while (v_idx--) 11468af3c33fSJeff Kirsher ixgbe_free_q_vector(adapter, v_idx); 11478af3c33fSJeff Kirsher } 11488af3c33fSJeff Kirsher 11498af3c33fSJeff Kirsher static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) 11508af3c33fSJeff Kirsher { 11518af3c33fSJeff Kirsher if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 11528af3c33fSJeff Kirsher adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 11538af3c33fSJeff Kirsher pci_disable_msix(adapter->pdev); 11548af3c33fSJeff Kirsher kfree(adapter->msix_entries); 11558af3c33fSJeff Kirsher adapter->msix_entries = NULL; 11568af3c33fSJeff Kirsher } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 11578af3c33fSJeff Kirsher adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; 11588af3c33fSJeff Kirsher pci_disable_msi(adapter->pdev); 11598af3c33fSJeff Kirsher } 11608af3c33fSJeff Kirsher } 11618af3c33fSJeff Kirsher 11628af3c33fSJeff Kirsher /** 11638af3c33fSJeff Kirsher * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported 11648af3c33fSJeff Kirsher * @adapter: board private structure to initialize 11658af3c33fSJeff Kirsher * 11668af3c33fSJeff Kirsher * Attempt to configure the interrupts using the best available 11678af3c33fSJeff Kirsher * capabilities of the hardware and the kernel. 
11688af3c33fSJeff Kirsher **/ 1169ac802f5dSAlexander Duyck static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) 11708af3c33fSJeff Kirsher { 11713bcf3446SJacob Keller int err; 11728af3c33fSJeff Kirsher 11733bcf3446SJacob Keller /* We will try to get MSI-X interrupts first */ 11743bcf3446SJacob Keller if (!ixgbe_acquire_msix_vectors(adapter)) 1175ac802f5dSAlexander Duyck return; 11768af3c33fSJeff Kirsher 1177eec66731SJacob Keller /* At this point, we do not have MSI-X capabilities. We need to 1178eec66731SJacob Keller * reconfigure or disable various features which require MSI-X 1179eec66731SJacob Keller * capability. 1180eec66731SJacob Keller */ 1181eec66731SJacob Keller 1182c1c55f63SJacob Keller /* Disable DCB unless we only have a single traffic class */ 11830efbf12bSAlexander Duyck if (adapter->hw_tcs > 1) { 1184c1c55f63SJacob Keller e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n"); 1185b724e9f2SAlexander Duyck netdev_reset_tc(adapter->netdev); 118639cb681bSAlexander Duyck 1187b724e9f2SAlexander Duyck if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1188b724e9f2SAlexander Duyck adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 1189b724e9f2SAlexander Duyck 1190b724e9f2SAlexander Duyck adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 1191b724e9f2SAlexander Duyck adapter->temp_dcb_cfg.pfc_mode_enable = false; 1192b724e9f2SAlexander Duyck adapter->dcb_cfg.pfc_mode_enable = false; 1193b724e9f2SAlexander Duyck } 1194d786cf7bSJacob Keller 11950efbf12bSAlexander Duyck adapter->hw_tcs = 0; 1196b724e9f2SAlexander Duyck adapter->dcb_cfg.num_tcs.pg_tcs = 1; 1197b724e9f2SAlexander Duyck adapter->dcb_cfg.num_tcs.pfc_tcs = 1; 1198b724e9f2SAlexander Duyck 1199d786cf7bSJacob Keller /* Disable SR-IOV support */ 1200d786cf7bSJacob Keller e_dev_warn("Disabling SR-IOV support\n"); 12018af3c33fSJeff Kirsher ixgbe_disable_sriov(adapter); 12028af3c33fSJeff Kirsher 1203d786cf7bSJacob Keller /* Disable RSS */ 
1204d786cf7bSJacob Keller e_dev_warn("Disabling RSS support\n"); 1205fbe7ca7fSAlexander Duyck adapter->ring_feature[RING_F_RSS].limit = 1; 1206b724e9f2SAlexander Duyck 1207eec66731SJacob Keller /* recalculate number of queues now that many features have been 1208eec66731SJacob Keller * changed or disabled. 1209eec66731SJacob Keller */ 1210ac802f5dSAlexander Duyck ixgbe_set_num_queues(adapter); 121149c7ffbeSAlexander Duyck adapter->num_q_vectors = 1; 121249c7ffbeSAlexander Duyck 12138af3c33fSJeff Kirsher err = pci_enable_msi(adapter->pdev); 12145d31b48aSJacob Keller if (err) 12155d31b48aSJacob Keller e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", 12166ec1b71fSJacob Keller err); 12175d31b48aSJacob Keller else 1218ac802f5dSAlexander Duyck adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 12198af3c33fSJeff Kirsher } 12208af3c33fSJeff Kirsher 12218af3c33fSJeff Kirsher /** 12228af3c33fSJeff Kirsher * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme 12238af3c33fSJeff Kirsher * @adapter: board private structure to initialize 12248af3c33fSJeff Kirsher * 12258af3c33fSJeff Kirsher * We determine which interrupt scheme to use based on... 12268af3c33fSJeff Kirsher * - Kernel support (MSI, MSI-X) 12278af3c33fSJeff Kirsher * - which can be user-defined (via MODULE_PARAM) 12288af3c33fSJeff Kirsher * - Hardware queue count (num_*_queues) 12298af3c33fSJeff Kirsher * - defined by miscellaneous hardware support/features (RSS, etc.) 
12308af3c33fSJeff Kirsher **/ 12318af3c33fSJeff Kirsher int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 12328af3c33fSJeff Kirsher { 12338af3c33fSJeff Kirsher int err; 12348af3c33fSJeff Kirsher 12358af3c33fSJeff Kirsher /* Number of supported queues */ 1236ac802f5dSAlexander Duyck ixgbe_set_num_queues(adapter); 12378af3c33fSJeff Kirsher 1238ac802f5dSAlexander Duyck /* Set interrupt mode */ 1239ac802f5dSAlexander Duyck ixgbe_set_interrupt_capability(adapter); 12408af3c33fSJeff Kirsher 12418af3c33fSJeff Kirsher err = ixgbe_alloc_q_vectors(adapter); 12428af3c33fSJeff Kirsher if (err) { 12438af3c33fSJeff Kirsher e_dev_err("Unable to allocate memory for queue vectors\n"); 12448af3c33fSJeff Kirsher goto err_alloc_q_vectors; 12458af3c33fSJeff Kirsher } 12468af3c33fSJeff Kirsher 12478af3c33fSJeff Kirsher ixgbe_cache_ring_register(adapter); 12488af3c33fSJeff Kirsher 124933fdc82fSJohn Fastabend e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n", 12508af3c33fSJeff Kirsher (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", 125133fdc82fSJohn Fastabend adapter->num_rx_queues, adapter->num_tx_queues, 125233fdc82fSJohn Fastabend adapter->num_xdp_queues); 12538af3c33fSJeff Kirsher 12548af3c33fSJeff Kirsher set_bit(__IXGBE_DOWN, &adapter->state); 12558af3c33fSJeff Kirsher 12568af3c33fSJeff Kirsher return 0; 12578af3c33fSJeff Kirsher 12588af3c33fSJeff Kirsher err_alloc_q_vectors: 12598af3c33fSJeff Kirsher ixgbe_reset_interrupt_capability(adapter); 12608af3c33fSJeff Kirsher return err; 12618af3c33fSJeff Kirsher } 12628af3c33fSJeff Kirsher 12638af3c33fSJeff Kirsher /** 12648af3c33fSJeff Kirsher * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings 12658af3c33fSJeff Kirsher * @adapter: board private structure to clear interrupt scheme on 12668af3c33fSJeff Kirsher * 12678af3c33fSJeff Kirsher * We go through and clear interrupt specific resources and reset the structure 12688af3c33fSJeff Kirsher * to pre-load conditions 12698af3c33fSJeff Kirsher **/ 12708af3c33fSJeff Kirsher void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) 12718af3c33fSJeff Kirsher { 12728af3c33fSJeff Kirsher adapter->num_tx_queues = 0; 127333fdc82fSJohn Fastabend adapter->num_xdp_queues = 0; 12748af3c33fSJeff Kirsher adapter->num_rx_queues = 0; 12758af3c33fSJeff Kirsher 12768af3c33fSJeff Kirsher ixgbe_free_q_vectors(adapter); 12778af3c33fSJeff Kirsher ixgbe_reset_interrupt_capability(adapter); 12788af3c33fSJeff Kirsher } 12798af3c33fSJeff Kirsher 12808af3c33fSJeff Kirsher void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, 12818af3c33fSJeff Kirsher u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) 12828af3c33fSJeff Kirsher { 12838af3c33fSJeff Kirsher struct ixgbe_adv_tx_context_desc *context_desc; 12848af3c33fSJeff Kirsher u16 i = tx_ring->next_to_use; 12858af3c33fSJeff Kirsher 12868af3c33fSJeff Kirsher context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); 12878af3c33fSJeff Kirsher 12888af3c33fSJeff Kirsher i++; 12898af3c33fSJeff Kirsher 
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 12908af3c33fSJeff Kirsher 12918af3c33fSJeff Kirsher /* set bits to identify this as an advanced context descriptor */ 12928af3c33fSJeff Kirsher type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 12938af3c33fSJeff Kirsher 12948af3c33fSJeff Kirsher context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 12958af3c33fSJeff Kirsher context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); 12968af3c33fSJeff Kirsher context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 12978af3c33fSJeff Kirsher context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 12988af3c33fSJeff Kirsher } 12998af3c33fSJeff Kirsher 1300