/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
        struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
        int i;
        u16 reg_idx;
        u8 tcs = netdev_get_num_tc(adapter->netdev);

        /* verify we have DCB queueing enabled before proceeding */
        if (tcs <= 1)
                return false;

        /* verify we have VMDq enabled before proceeding */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return false;

        /* start at VMDq register offset for SR-IOV enabled setups */
        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
        for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
                /* If we are greater than indices move to next pool */
                if ((reg_idx & ~vmdq->mask) >= tcs)
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
                adapter->rx_ring[i]->reg_idx = reg_idx;
        }

        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
        for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
                /* If we are greater than indices move to next pool */
                if ((reg_idx & ~vmdq->mask) >= tcs)
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
                adapter->tx_ring[i]->reg_idx = reg_idx;
        }

#ifdef IXGBE_FCOE
        /* nothing to do if FCoE is disabled */
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                return true;

        /* The work is already done if the FCoE ring is shared */
        if (fcoe->offset < tcs)
                return true;

        /* The FCoE rings exist separately, we need to move their reg_idx */
        if (fcoe->indices) {
                u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
                u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

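                /*
                 * Note on the arithmetic: __ALIGN_MASK(1, ~vmdq->mask) is the
                 * pool stride, i.e. the number of queue registers per VMDq
                 * pool, and __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds reg_idx
                 * up to the start of the next pool when it is not already
                 * pool-aligned. The loops below therefore start at the first
                 * register past the SR-IOV pools and place each dedicated
                 * FCoE ring at a pool boundary plus the FCoE traffic class
                 * offset.
                 */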
                reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
                for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
                        adapter->rx_ring[i]->reg_idx = reg_idx;
                        reg_idx++;
                }

                reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
                for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
                        adapter->tx_ring[i]->reg_idx = reg_idx;
                        reg_idx++;
                }
        }

#endif /* IXGBE_FCOE */
        return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
                                    unsigned int *tx, unsigned int *rx)
{
        struct net_device *dev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u8 num_tcs = netdev_get_num_tc(dev);

        *tx = 0;
        *rx = 0;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                /* TxQs/TC: 4   RxQs/TC: 8 */
                *tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
                *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                if (num_tcs > 4) {
                        /*
                         * TCs    : TC0/1 TC2/3 TC4-7
                         * TxQs/TC:    32    16     8
                         * RxQs/TC:    16    16    16
                         */
                        *rx = tc << 4;
                        if (tc < 3)
                                *tx = tc << 5;          /*   0,  32,  64 */
                        else if (tc < 5)
                                *tx = (tc + 2) << 4;    /*  80,  96 */
                        else
                                *tx = (tc + 8) << 3;    /* 104, 112, 120 */
                } else {
                        /*
                         * TCs    : TC0 TC1 TC2/3
                         * TxQs/TC:  64  32    16
                         * RxQs/TC:  32  32    32
                         */
                        *rx = tc << 5;
                        if (tc < 2)
                                *tx = tc << 6;          /*  0,  64 */
                        else
                                *tx = (tc + 4) << 4;    /* 96, 112 */
                }
        default:
                break;
        }
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;
        unsigned int tx_idx, rx_idx;
        int tc, offset, rss_i, i;
        u8 num_tcs = netdev_get_num_tc(dev);

        /* verify we have DCB queueing enabled before proceeding */
        if (num_tcs <= 1)
                return false;

        rss_i = adapter->ring_feature[RING_F_RSS].indices;

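        /*
         * Worked example (82599/X540 with 4 TCs and rss_i of 16): TC 1 owns
         * ring array slots 16..31, which ixgbe_get_first_reg_idx() maps to
         * Tx registers 64..79 and Rx registers 32..47, matching the layout
         * tables above.
         */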
        for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
                ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
                for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
                        adapter->tx_ring[offset + i]->reg_idx = tx_idx;
                        adapter->rx_ring[offset + i]->reg_idx = rx_idx;
                        adapter->tx_ring[offset + i]->dcb_tc = tc;
                        adapter->rx_ring[offset + i]->dcb_tc = tc;
                }
        }

        return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
        struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
        struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
        int i;
        u16 reg_idx;

        /* only proceed if VMDq is enabled */
        if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
                return false;

        /* start at VMDq register offset for SR-IOV enabled setups */
        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
        for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
                /* Allow first FCoE queue to be mapped as RSS */
                if (fcoe->offset && (i > fcoe->offset))
                        break;
#endif
                /* If we are greater than indices move to next pool */
                if ((reg_idx & ~vmdq->mask) >= rss->indices)
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
                adapter->rx_ring[i]->reg_idx = reg_idx;
        }

#ifdef IXGBE_FCOE
        /* FCoE uses a linear block of queues so just assigning 1:1 */
        for (; i < adapter->num_rx_queues; i++, reg_idx++)
                adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
        for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
                /* Allow first FCoE queue to be mapped as RSS */
                if (fcoe->offset && (i > fcoe->offset))
                        break;
#endif
                /* If we are greater than indices move to next pool */
                if ((reg_idx & rss->mask) >= rss->indices)
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
                adapter->tx_ring[i]->reg_idx = reg_idx;
        }

#ifdef IXGBE_FCOE
        /* FCoE uses a linear block of queues so just assigning 1:1 */
        for (; i < adapter->num_tx_queues; i++, reg_idx++)
                adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

        return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i]->reg_idx = i;
        for (i = 0; i < adapter->num_tx_queues; i++)
                adapter->tx_ring[i]->reg_idx = i;

        return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important. It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
        /* start with default case */
        adapter->rx_ring[0]->reg_idx = 0;
        adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
        if (ixgbe_cache_ring_dcb_sriov(adapter))
                return;

        if (ixgbe_cache_ring_dcb(adapter))
                return;

#endif
        if (ixgbe_cache_ring_sriov(adapter))
                return;

        ixgbe_cache_ring_rss(adapter);
}

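/*
 * RSS queue masks: each value is the number of RSS queues in the layout
 * minus one (16 queues -> 0xF, ..., a single queue -> 0x0). The chosen
 * mask is stored in ring_feature[RING_F_RSS].mask and consulted above in
 * ixgbe_cache_ring_sriov() when deciding whether a register index has run
 * past the queues of its pool.
 */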
#define IXGBE_RSS_16Q_MASK      0xF
#define IXGBE_RSS_8Q_MASK       0x7
#define IXGBE_RSS_4Q_MASK       0x3
#define IXGBE_RSS_2Q_MASK       0x1
#define IXGBE_RSS_DISABLED_MASK 0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate. Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
        int i;
        u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
        u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
        u16 fcoe_i = 0;
#endif
        u8 tcs = netdev_get_num_tc(adapter->netdev);

        /* verify we have DCB queueing enabled before proceeding */
        if (tcs <= 1)
                return false;

        /* verify we have VMDq enabled before proceeding */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return false;

        /* Add starting offset to total pool count */
        vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

        /* 16 pools w/ 8 TC per pool */
        if (tcs > 4) {
                vmdq_i = min_t(u16, vmdq_i, 16);
                vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
        /* 32 pools w/ 4 TC per pool */
        } else {
                vmdq_i = min_t(u16, vmdq_i, 32);
                vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
        }

#ifdef IXGBE_FCOE
        /* queues in the remaining pools are available for FCoE */
        fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
        /* remove the starting offset from the pool count */
        vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

        /* save features for later use */
        adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
        adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

        /*
         * We do not support DCB, VMDq, and RSS all simultaneously
         * so we will disable RSS since it is the lowest priority
         */
        adapter->ring_feature[RING_F_RSS].indices = 1;
        adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

        /* disable ATR as it is not supported when VMDq is enabled */
        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

        adapter->num_rx_pools = vmdq_i;
        adapter->num_rx_queues_per_pool = tcs;

        adapter->num_tx_queues = vmdq_i * tcs;
        adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                struct ixgbe_ring_feature *fcoe;

                fcoe = &adapter->ring_feature[RING_F_FCOE];

                /* limit ourselves based on feature limits */
                fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

                if (fcoe_i) {
                        /* alloc queues for FCoE separately */
                        fcoe->indices = fcoe_i;
                        fcoe->offset = vmdq_i * tcs;

                        /* add queues to adapter */
                        adapter->num_tx_queues += fcoe_i;
                        adapter->num_rx_queues += fcoe_i;
                } else if (tcs > 1) {
                        /* use queue belonging to FCoE TC */
                        fcoe->indices = 1;
                        fcoe->offset = ixgbe_fcoe_get_tc(adapter);
                } else {
                        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

                        fcoe->indices = 0;
                        fcoe->offset = 0;
                }
        }

#endif /* IXGBE_FCOE */
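        /*
         * At this point num_rx_queues/num_tx_queues cover vmdq_i pools of
         * tcs queues each, plus any dedicated FCoE queues appended at ring
         * offset vmdq_i * tcs.
         */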
        /* configure TC to queue mapping */
        for (i = 0; i < tcs; i++)
                netdev_set_tc_queue(adapter->netdev, i, 1, i);

        return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;
        struct ixgbe_ring_feature *f;
        int rss_i, rss_m, i;
        int tcs;

        /* Map queue offset and counts onto allocated tx queues */
        tcs = netdev_get_num_tc(dev);

        /* verify we have DCB queueing enabled before proceeding */
        if (tcs <= 1)
                return false;

        /* determine the upper limit for our current DCB mode */
        rss_i = dev->num_tx_queues / tcs;
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                /* 8 TC w/ 4 queues per TC */
                rss_i = min_t(u16, rss_i, 4);
                rss_m = IXGBE_RSS_4Q_MASK;
        } else if (tcs > 4) {
                /* 8 TC w/ 8 queues per TC */
                rss_i = min_t(u16, rss_i, 8);
                rss_m = IXGBE_RSS_8Q_MASK;
        } else {
                /* 4 TC w/ 16 queues per TC */
                rss_i = min_t(u16, rss_i, 16);
                rss_m = IXGBE_RSS_16Q_MASK;
        }

        /* set RSS mask and indices */
        f = &adapter->ring_feature[RING_F_RSS];
        rss_i = min_t(int, rss_i, f->limit);
        f->indices = rss_i;
        f->mask = rss_m;

        /* disable ATR as it is not supported when multiple TCs are enabled */
        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
        /* FCoE enabled queues require special configuration indexed
         * by feature specific indices and offset. Here we map FCoE
         * indices onto the DCB queue pairs allowing FCoE to own
         * configuration later.
         */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                u8 tc = ixgbe_fcoe_get_tc(adapter);

                f = &adapter->ring_feature[RING_F_FCOE];
                f->indices = min_t(u16, rss_i, f->limit);
                f->offset = rss_i * tc;
        }

#endif /* IXGBE_FCOE */
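        /*
         * Expose each traffic class to the stack as a contiguous block of
         * rss_i queues starting at rss_i * tc, which matches the per-TC
         * ring offsets used by ixgbe_cache_ring_dcb().
         */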
        for (i = 0; i < tcs; i++)
                netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

        adapter->num_tx_queues = rss_i * tcs;
        adapter->num_rx_queues = rss_i * tcs;

        return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate. If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
        u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
        u16 vmdq_m = 0;
        u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
        u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
        u16 fcoe_i = 0;
#endif
        bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

        /* only proceed if SR-IOV is enabled */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return false;

        /* Add starting offset to total pool count */
        vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

        /* double check we are limited to maximum pools */
        vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

        /* 64 pool mode with 2 queues per pool */
        if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
                vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
                rss_m = IXGBE_RSS_2Q_MASK;
                rss_i = min_t(u16, rss_i, 2);
        /* 32 pool mode with 4 queues per pool */
        } else {
                vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
                rss_m = IXGBE_RSS_4Q_MASK;
                rss_i = 4;
        }

#ifdef IXGBE_FCOE
        /* queues in the remaining pools are available for FCoE */
        fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
        /* remove the starting offset from the pool count */
        vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

        /* save features for later use */
        adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
        adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

        /* limit RSS based on user input and save for later use */
        adapter->ring_feature[RING_F_RSS].indices = rss_i;
        adapter->ring_feature[RING_F_RSS].mask = rss_m;

        adapter->num_rx_pools = vmdq_i;
        adapter->num_rx_queues_per_pool = rss_i;

        adapter->num_rx_queues = vmdq_i * rss_i;
        adapter->num_tx_queues = vmdq_i * rss_i;

        /* disable ATR as it is not supported when VMDq is enabled */
        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
        /*
         * FCoE can use rings from adjacent buffers to allow RSS
         * like behavior. To account for this we need to add the
         * FCoE indices to the total ring count.
         */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                struct ixgbe_ring_feature *fcoe;

                fcoe = &adapter->ring_feature[RING_F_FCOE];

                /* limit ourselves based on feature limits */
                fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

                if (vmdq_i > 1 && fcoe_i) {
                        /* alloc queues for FCoE separately */
                        fcoe->indices = fcoe_i;
                        fcoe->offset = vmdq_i * rss_i;
                } else {
                        /* merge FCoE queues with RSS queues */
                        fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

                        /* limit indices to rss_i if MSI-X is disabled */
                        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                                fcoe_i = rss_i;

                        /* attempt to reserve some queues for just FCoE */
                        fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
                        fcoe->offset = fcoe_i - fcoe->indices;

                        fcoe_i -= rss_i;
                }

                /* add queues to adapter */
                adapter->num_tx_queues += fcoe_i;
                adapter->num_rx_queues += fcoe_i;
        }

#endif
        return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
        struct ixgbe_ring_feature *f;
        u16 rss_i;

        /* set mask for 16 queue limit of RSS */
        f = &adapter->ring_feature[RING_F_RSS];
        rss_i = f->limit;

        f->indices = rss_i;
        f->mask = IXGBE_RSS_16Q_MASK;

        /* disable ATR by default, it will be configured below */
        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

        /*
         * Use Flow Director in addition to RSS to ensure the best
         * distribution of flows across cores, even when an FDIR flow
         * isn't matched.
         */
        if (rss_i > 1 && adapter->atr_sample_rate) {
                f = &adapter->ring_feature[RING_F_FDIR];

                rss_i = f->indices = f->limit;

                if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
        }

#ifdef IXGBE_FCOE
        /*
         * FCoE can exist on the same rings as standard network traffic
         * however it is preferred to avoid that if possible. In order
         * to get the best performance we allocate as many FCoE queues
         * as we can and we place them at the end of the ring array to
         * avoid sharing queues with standard RSS on systems with 24 or
         * more CPUs.
         */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                struct net_device *dev = adapter->netdev;
                u16 fcoe_i;

                f = &adapter->ring_feature[RING_F_FCOE];

                /* merge FCoE queues with RSS queues */
                fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
                fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

                /* limit indices to rss_i if MSI-X is disabled */
                if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                        fcoe_i = rss_i;

                /* attempt to reserve some queues for just FCoE */
                f->indices = min_t(u16, fcoe_i, f->limit);
                f->offset = fcoe_i - f->indices;
                rss_i = max_t(u16, fcoe_i, rss_i);
        }

#endif /* IXGBE_FCOE */
        adapter->num_rx_queues = rss_i;
        adapter->num_tx_queues = rss_i;

        return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
        /* Start with base case */
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_rx_pools = adapter->num_rx_queues;
        adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
        if (ixgbe_set_dcb_sriov_queues(adapter))
                return;

        if (ixgbe_set_dcb_queues(adapter))
                return;

#endif
        if (ixgbe_set_sriov_queues(adapter))
                return;

        ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i, vectors, vector_threshold;

        /* We start by asking for one vector per queue pair */
        vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);

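        /*
         * Using the larger of the two counts (rather than their sum) works
         * because ixgbe_alloc_q_vectors() pairs Tx and Rx rings onto the
         * same q_vector whenever it can.
         */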
        /* It is easy to be greedy for MSI-X vectors. However, it really
         * doesn't do much good if we have a lot more vectors than CPUs. We'll
         * be somewhat conservative and only ask for (roughly) the same number
         * of vectors as there are CPUs.
         */
        vectors = min_t(int, vectors, num_online_cpus());

        /* Some vectors are necessary for non-queue interrupts */
        vectors += NON_Q_VECTORS;

        /* Hardware can only support a maximum of hw.mac->max_msix_vectors.
         * With features such as RSS and VMDq, we can easily surpass the
         * number of Rx and Tx descriptor queues supported by our device.
         * Thus, we cap the maximum in the rare cases where the CPU count also
         * exceeds our vector limit
         */
        vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

        /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
         * handler, and (2) an Other (Link Status Change, etc.) handler.
         */
        vector_threshold = MIN_MSIX_COUNT;

        adapter->msix_entries = kcalloc(vectors,
                                        sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                return -ENOMEM;

        for (i = 0; i < vectors; i++)
                adapter->msix_entries[i].entry = i;

        vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                        vector_threshold, vectors);

        if (vectors < 0) {
                /* A negative count of allocated vectors indicates an error in
                 * acquiring within the specified range of MSI-X vectors
                 */
                e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
                           vectors);

                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;

                return vectors;
        }

        /* we successfully allocated some number of vectors within our
         * requested range.
         */
        adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

        /* Adjust for only the vectors we'll use, which is minimum
         * of max_q_vectors, or the number of vectors we were allocated.
         */
        vectors -= NON_Q_VECTORS;
        adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

        return 0;
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
                           struct ixgbe_ring_container *head)
{
        ring->next = head->ring;
        head->ring = ring;
        head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                                int v_count, int v_idx,
                                int txr_count, int txr_idx,
                                int rxr_count, int rxr_idx)
{
        struct ixgbe_q_vector *q_vector;
        struct ixgbe_ring *ring;
        int node = NUMA_NO_NODE;
        int cpu = -1;
        int ring_count, size;
        u8 tcs = netdev_get_num_tc(adapter->netdev);

        ring_count = txr_count + rxr_count;
        size = sizeof(struct ixgbe_q_vector) +
               (sizeof(struct ixgbe_ring) * ring_count);

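        /*
         * The rings live in the same allocation as the q_vector itself, so
         * 'ring' below simply walks the trailing array. Ring indices are
         * interleaved across vectors: this vector takes txr_idx,
         * txr_idx + v_count, txr_idx + 2 * v_count, ... (and likewise for
         * rxr_idx), which is why the loops below advance by v_count.
         */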
        /* customize cpu for Flow Director mapping */
        if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
                u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
                if (rss_i > 1 && adapter->atr_sample_rate) {
                        if (cpu_online(v_idx)) {
                                cpu = v_idx;
                                node = cpu_to_node(cpu);
                        }
                }
        }

        /* allocate q_vector and rings */
        q_vector = kzalloc_node(size, GFP_KERNEL, node);
        if (!q_vector)
                q_vector = kzalloc(size, GFP_KERNEL);
        if (!q_vector)
                return -ENOMEM;

        /* setup affinity mask and node */
        if (cpu != -1)
                cpumask_set_cpu(cpu, &q_vector->affinity_mask);
        q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
        /* initialize CPU for DCA */
        q_vector->cpu = -1;

#endif
        /* initialize NAPI */
        netif_napi_add(adapter->netdev, &q_vector->napi,
                       ixgbe_poll, 64);
        napi_hash_add(&q_vector->napi);

#ifdef CONFIG_NET_RX_BUSY_POLL
        /* initialize busy poll */
        atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);

#endif
        /* tie q_vector and adapter together */
        adapter->q_vector[v_idx] = q_vector;
        q_vector->adapter = adapter;
        q_vector->v_idx = v_idx;

        /* initialize work limits */
        q_vector->tx.work_limit = adapter->tx_work_limit;

        /* initialize pointer to rings */
        ring = q_vector->ring;

        /* initialize ITR */
        if (txr_count && !rxr_count) {
                /* tx only vector */
                if (adapter->tx_itr_setting == 1)
                        q_vector->itr = IXGBE_10K_ITR;
                else
                        q_vector->itr = adapter->tx_itr_setting;
        } else {
                /* rx or rx/tx vector */
                if (adapter->rx_itr_setting == 1)
                        q_vector->itr = IXGBE_20K_ITR;
                else
                        q_vector->itr = adapter->rx_itr_setting;
        }

        while (txr_count) {
                /* assign generic ring traits */
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;

                /* configure backlink on ring */
                ring->q_vector = q_vector;

                /* update q_vector Tx values */
                ixgbe_add_ring(ring, &q_vector->tx);

                /* apply Tx specific ring traits */
                ring->count = adapter->tx_ring_count;
                if (adapter->num_rx_pools > 1)
                        ring->queue_index =
                                txr_idx % adapter->num_rx_queues_per_pool;
                else
                        ring->queue_index = txr_idx;

                /* assign ring to adapter */
                adapter->tx_ring[txr_idx] = ring;

                /* update count and index */
                txr_count--;
                txr_idx += v_count;

                /* push pointer to next ring */
                ring++;
        }

        while (rxr_count) {
                /* assign generic ring traits */
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;

                /* configure backlink on ring */
                ring->q_vector = q_vector;

                /* update q_vector Rx values */
                ixgbe_add_ring(ring, &q_vector->rx);

                /*
                 * 82599 errata, UDP frames with a 0 checksum
                 * can be marked as checksum errors.
                 */
                if (adapter->hw.mac.type == ixgbe_mac_82599EB)
                        set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

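                /*
                 * The FCoE offset/indices window was reserved earlier by the
                 * ixgbe_set_*_queues() helpers; any Rx ring that falls inside
                 * it is flagged here as an FCoE ring.
                 */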
#ifdef IXGBE_FCOE
                if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((rxr_idx >= f->offset) &&
                            (rxr_idx < f->offset + f->indices))
                                set_bit(__IXGBE_RX_FCOE, &ring->state);
                }

#endif /* IXGBE_FCOE */
                /* apply Rx specific ring traits */
                ring->count = adapter->rx_ring_count;
                if (adapter->num_rx_pools > 1)
                        ring->queue_index =
                                rxr_idx % adapter->num_rx_queues_per_pool;
                else
                        ring->queue_index = rxr_idx;

                /* assign ring to adapter */
                adapter->rx_ring[rxr_idx] = ring;

                /* update count and index */
                rxr_count--;
                rxr_idx += v_count;

                /* push pointer to next ring */
                ring++;
        }

        return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
        struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
        struct ixgbe_ring *ring;

        ixgbe_for_each_ring(ring, q_vector->tx)
                adapter->tx_ring[ring->queue_index] = NULL;

        ixgbe_for_each_ring(ring, q_vector->rx)
                adapter->rx_ring[ring->queue_index] = NULL;

        adapter->q_vector[v_idx] = NULL;
        napi_hash_del(&q_vector->napi);
        netif_napi_del(&q_vector->napi);

        /*
         * ixgbe_get_stats64() might access the rings on this vector,
         * we must wait a grace period before freeing it.
         */
        kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
        int q_vectors = adapter->num_q_vectors;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int rxr_idx = 0, txr_idx = 0, v_idx = 0;
        int err;

        /* only one q_vector if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                q_vectors = 1;

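        /*
         * Distribute the rings as evenly as possible: when there are enough
         * vectors, every ring gets its own; otherwise each remaining vector
         * takes DIV_ROUND_UP(remaining rings, remaining vectors) of each
         * type.
         */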
        if (q_vectors >= (rxr_remaining + txr_remaining)) {
                for (; rxr_remaining; v_idx++) {
                        err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
                                                   0, 0, 1, rxr_idx);

                        if (err)
                                goto err_out;

                        /* update counts and index */
                        rxr_remaining--;
                        rxr_idx++;
                }
        }

        for (; v_idx < q_vectors; v_idx++) {
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
                err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
                                           tqpv, txr_idx,
                                           rqpv, rxr_idx);

                if (err)
                        goto err_out;

                /* update counts and index */
                rxr_remaining -= rqpv;
                txr_remaining -= tqpv;
                rxr_idx++;
                txr_idx++;
        }

        return 0;

err_out:
        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;
        adapter->num_q_vectors = 0;

        while (v_idx--)
                ixgbe_free_q_vector(adapter, v_idx);

        return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
        int v_idx = adapter->num_q_vectors;

        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;
        adapter->num_q_vectors = 0;

        while (v_idx--)
                ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
                pci_disable_msi(adapter->pdev);
        }
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
        int err;

        /* We will try to get MSI-X interrupts first */
        if (!ixgbe_acquire_msix_vectors(adapter))
                return;

        /* At this point, we do not have MSI-X capabilities. We need to
         * reconfigure or disable various features which require MSI-X
         * capability.
         */

        /* Disable DCB unless we only have a single traffic class */
        if (netdev_get_num_tc(adapter->netdev) > 1) {
                e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
                netdev_reset_tc(adapter->netdev);

                if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                        adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

                adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
                adapter->temp_dcb_cfg.pfc_mode_enable = false;
                adapter->dcb_cfg.pfc_mode_enable = false;
        }

        adapter->dcb_cfg.num_tcs.pg_tcs = 1;
        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

        /* Disable SR-IOV support */
        e_dev_warn("Disabling SR-IOV support\n");
        ixgbe_disable_sriov(adapter);

        /* Disable RSS */
        e_dev_warn("Disabling RSS support\n");
        adapter->ring_feature[RING_F_RSS].limit = 1;

        /* recalculate number of queues now that many features have been
         * changed or disabled.
         */
        ixgbe_set_num_queues(adapter);
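        /* MSI and legacy interrupts provide a single vector, so collapse
         * everything onto one q_vector; ixgbe_alloc_q_vectors() will attach
         * all of the remaining rings to it.
         */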
        adapter->num_q_vectors = 1;

        err = pci_enable_msi(adapter->pdev);
        if (err)
                e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
                           err);
        else
                adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
        int err;

        /* Number of supported queues */
        ixgbe_set_num_queues(adapter);

        /* Set interrupt mode */
        ixgbe_set_interrupt_capability(adapter);

        err = ixgbe_alloc_q_vectors(adapter);
        if (err) {
                e_dev_err("Unable to allocate memory for queue vectors\n");
                goto err_alloc_q_vectors;
        }

        ixgbe_cache_ring_register(adapter);

        e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
                   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
                   adapter->num_rx_queues, adapter->num_tx_queues);

        set_bit(__IXGBE_DOWN, &adapter->state);

        return 0;

err_alloc_q_vectors:
        ixgbe_reset_interrupt_capability(adapter);
        return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;

        ixgbe_free_q_vectors(adapter);
        ixgbe_reset_interrupt_capability(adapter);
}

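/**
 * ixgbe_tx_ctxtdesc - write an advanced Tx context descriptor
 * @tx_ring: ring on which to place the descriptor
 * @vlan_macip_lens: VLAN tag and MAC/IP header length fields
 * @fcoe_sof_eof: FCoE SOF/EOF value, written to the seqnum_seed field
 * @type_tucmd: descriptor type and command flags
 * @mss_l4len_idx: MSS and L4 length fields
 *
 * The caller supplies the offload parameters; this helper claims the slot at
 * next_to_use, advances the ring (wrapping at tx_ring->count) and writes the
 * descriptor with the context-descriptor type bits set.
 **/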
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
                       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
        struct ixgbe_adv_tx_context_desc *context_desc;
        u16 i = tx_ring->next_to_use;

        context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

        i++;
        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        /* set bits to identify this as an advanced context descriptor */
        type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
        context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
        context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}