/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

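	/* vmdq->mask covers the pool-index bits of a queue register index, so
	 * __ALIGN_MASK(1, ~vmdq->mask) is the number of hardware queues per
	 * VMDq pool, (reg_idx & ~vmdq->mask) is the queue offset within the
	 * current pool, and __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds reg_idx
	 * up to the next pool boundary.  Rings that land in a pool other than
	 * the default pool 0 are not tied to the PF netdev (ring->netdev is
	 * left NULL).
	 */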
	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
	default:
		break;
	}
}

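/* Example (non-SR-IOV DCB on 82599/X540-class parts): with 8 TCs enabled,
 * ixgbe_get_first_reg_idx() places TC2's rings at Tx register offset 64
 * (tc << 5) and Rx register offset 32 (tc << 4); ixgbe_cache_ring_dcb()
 * below then assigns that TC's RSS rings consecutively from those bases.
 */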
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

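/* When an XDP program is attached, the driver sizes the XDP Tx ring set at
 * one ring per possible CPU so each CPU can transmit from XDP on a dedicated
 * ring; with no program attached no XDP rings are allocated.
 */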
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

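	/* The per-TC queue count is the netdev's Tx queue count split evenly
	 * across TCs, then clamped by the MAC's DCB layout; e.g. with 64 Tx
	 * queues and 4 TCs this yields min(64 / 4, 16) = 16 RSS queues per TC
	 * before the user-configured RSS limit (f->limit) is applied below.
	 */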
	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

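	/* More than 32 pools forces the hardware's 64-pool VMDq layout, which
	 * leaves only 2 queue pairs per pool; e.g. 40 pools means each pool
	 * (including the PF's) can run at most 2 RSS queues.  With 32 pools
	 * or fewer, the 32-pool layout provides 4 queue pairs per pool.
	 */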
	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior. To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

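	/* IXGBE_FLAG_FDIR_HASH_CAPABLE selects ATR, i.e. Flow Director running
	 * in hash (signature filter) mode; it is only restored below when
	 * perfect-filter mode (IXGBE_FLAG_FDIR_PERFECT_CAPABLE) is not in use.
	 */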
	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts.  Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

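	/* e.g. 16 Rx rings, 16 Tx rings and 8 XDP rings on a 12-CPU system
	 * start as max(16, 16, 8) = 16 vector candidates, which the CPU cap
	 * below reduces to 12 queue vectors plus the NON_Q_VECTORS overhead.
	 */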
	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw->mac.max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

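/* Ring indices handed to ixgbe_alloc_q_vector() advance by v_count for Tx
 * and Rx rings, so rings are interleaved across the q_vectors rather than
 * assigned in contiguous blocks; XDP ring indices advance by one and stay
 * contiguous on the vectors that carry them.
 */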
/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

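	/* If there are at least as many vectors as rings, the first loop below
	 * gives every Rx ring a dedicated vector; otherwise the remaining Rx,
	 * Tx and XDP rings are spread over the vectors with DIV_ROUND_UP so
	 * the earlier vectors absorb any remainder.
	 */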
	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

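/* Write an advanced Tx context descriptor into the ring.  Context descriptors
 * carry offload metadata (VLAN tag and MAC/IP header lengths, the FCoE EOF or
 * IPsec SA index, L4 type/command bits and MSS/L4 length) that the hardware
 * applies to the data descriptors that follow; the descriptor consumes a ring
 * slot, so next_to_use is advanced and wrapped here just like for data.
 */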
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}