/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
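
/*
 * Worked example of the mapping above (values chosen purely for
 * illustration): with 4 queues per VMDq pool, __ALIGN_MASK(1, ~vmdq->mask)
 * evaluates to 4, so a pool offset of 2 starts reg_idx at 8.  Once the
 * within-pool index (reg_idx & ~vmdq->mask) reaches tcs, reg_idx is
 * rounded up to the next pool boundary; with tcs = 2 the rings would be
 * cached to register indices 8, 9, 12, 13, 16, 17, and so on.
 */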

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
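
/*
 * Illustrative result of the DCB-only mapping above: on 82599/X540 with
 * 4 traffic classes and rss_i = 16, ixgbe_get_first_reg_idx() returns
 * Tx 96 / Rx 64 for TC2, so the 16 rings of that TC are cached to Tx
 * register indices 96..111 and Rx register indices 64..79.
 */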

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
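
/*
 * Example of the VMDq mapping above (illustrative values): with two
 * queues per pool and rss->indices = 1, each ring lands on the first
 * queue of a pool, i.e. register indices 0, 2, 4, ... counted from the
 * starting offset vmdq->offset * queues-per-pool; FCoE rings beyond
 * fcoe->offset then continue linearly from wherever the loop stopped.
 */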

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
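
/*
 * Illustrative budget for the DCB + SR-IOV sizing above: with 8 traffic
 * classes the device is carved into 16 pools of 8 queues each, so
 * __ALIGN_MASK(1, ~vmdq_m) is 8 and fcoe_i = 128/8 - vmdq_i, i.e. the
 * pools not claimed for VMDq/SR-IOV are what remains available to FCoE.
 */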

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
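
/*
 * Example of the DCB-only sizing above: a non-82598 part with 4 traffic
 * classes allows up to 16 RSS queues per TC (further clamped by the user
 * RSS limit and by dev->num_tx_queues / tcs), for at most 64 Tx and
 * 64 Rx queues spread evenly across the TCs.
 */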

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior. To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
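
/*
 * Example of the SR-IOV sizing above (illustrative pool counts): asking
 * for 40 pools falls into 64-pool mode, so each pool gets 2 queues and
 * RSS per pool is capped at 2; asking for 20 pools with an RSS limit of
 * at least 4, and with no additional forwarding pools in use, stays in
 * 32-pool mode with 4 queues per pool.
 */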

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
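
/*
 * Note on the RSS sizing above: when ATR sampling is active the ring
 * count is raised from the RSS limit to the Flow Director limit so that
 * hashed flows can spread across more rings.  The MSI-X budgeting done
 * later caps vectors at roughly one per CPU, so several of those rings
 * may end up sharing a vector.
 */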

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
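
/*
 * Illustrative request for the helper above: asking for 17 vectors
 * (16 queue pairs plus NON_Q_VECTORS, assuming that is a single vector
 * here) with a floor of MIN_MSIX_COUNT may legitimately be granted
 * fewer, e.g. 9; NON_Q_VECTORS is then subtracted and the remainder
 * becomes the queue vector count, clamped by max_q_vectors.
 */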

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);
	napi_hash_add(&q_vector->napi);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_10K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
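
/*
 * Ring interleaving example for the allocator above (hypothetical
 * layout): with v_count = 4 q_vectors and two Tx rings per vector, the
 * vector at v_idx = 1 starting at txr_idx = 1 owns tx_ring[1] and
 * tx_ring[5]; ring indices advance by v_count, so consecutive ring
 * indices land on consecutive vectors.
 */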

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
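
/*
 * Distribution example for the allocator above (Rx side of the mixed
 * loop): 16 Rx queues over 8 q_vectors gives rqpv = 2 for every vector,
 * while 10 Rx queues over 4 q_vectors gives 3, 3, 2, 2 thanks to
 * DIV_ROUND_UP() recomputing the share on each pass.
 */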

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, falling back to legacy.  Error: %d\n",
			     err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
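
/*
 * Fallback chain implemented above: MSI-X with roughly one vector per
 * queue pair (bounded by the CPU count and hw->mac.max_msix_vectors),
 * then single-vector MSI, then legacy INTx.  Losing MSI-X also means
 * giving up DCB, SR-IOV and multi-queue RSS, since those features depend
 * on queues that can no longer be serviced by separate vectors.
 */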
"Enabled" : "Disabled", 1153 adapter->num_rx_queues, adapter->num_tx_queues); 1154 1155 set_bit(__IXGBE_DOWN, &adapter->state); 1156 1157 return 0; 1158 1159 err_alloc_q_vectors: 1160 ixgbe_reset_interrupt_capability(adapter); 1161 return err; 1162 } 1163 1164 /** 1165 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings 1166 * @adapter: board private structure to clear interrupt scheme on 1167 * 1168 * We go through and clear interrupt specific resources and reset the structure 1169 * to pre-load conditions 1170 **/ 1171 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) 1172 { 1173 adapter->num_tx_queues = 0; 1174 adapter->num_rx_queues = 0; 1175 1176 ixgbe_free_q_vectors(adapter); 1177 ixgbe_reset_interrupt_capability(adapter); 1178 } 1179 1180 void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, 1181 u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) 1182 { 1183 struct ixgbe_adv_tx_context_desc *context_desc; 1184 u16 i = tx_ring->next_to_use; 1185 1186 context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); 1187 1188 i++; 1189 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 1190 1191 /* set bits to identify this as an advanced context descriptor */ 1192 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 1193 1194 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 1195 context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); 1196 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 1197 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1198 } 1199 1200