/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

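	/*
	 * A worked example of the pool arithmetic used above and below
	 * (illustrative values only): assuming the 8-queues-per-pool
	 * VMDq mask (0x78), ~vmdq->mask keeps just the queue-within-pool
	 * bits, so __ALIGN_MASK(1, ~vmdq->mask) evaluates to 8 queues per
	 * pool and __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds reg_idx up
	 * to the next pool boundary. With vmdq->offset == 2 the first
	 * ring is cached at register index 16, and once the low three
	 * bits of reg_idx reach tcs the index skips ahead to the next
	 * multiple of 8.
	 */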
#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

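/*
 * Illustrative example of the mapping above (hypothetical values): with
 * vmdq->offset == 4, four queues per pool and rss->indices == 2, the
 * first Rx ring is cached at register index 16, the second at 17, and
 * the third skips to 20 at the next pool boundary, since only two RSS
 * queues of each four-queue pool are in use.
 */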
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return false;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important. It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

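/*
 * Worked example for the DCB + SR-IOV sizing below (hypothetical
 * numbers): with tcs == 8 the hardware is carved into 16 pools of 8
 * queues each, so a request for vmdq_i == 10 pools leaves
 * (128 / 8) - 10 == 6 pool-sized blocks behind the VMDq pools, and
 * fcoe_i therefore starts at 6 before being clamped by the CPU count
 * and the FCoE feature limit.
 */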
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate. Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
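/*
 * Worked example for the pool sizing below (hypothetical numbers): a
 * request for vmdq_i == 40 pools forces 64-pool mode, so each pool
 * gets 2 queues and rss_i is clamped to 2; a request for vmdq_i == 20
 * pools with rss_i >= 4 selects 32-pool mode with 4 queues per pool.
 */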
488 * 489 **/ 490 static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) 491 { 492 u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; 493 u16 vmdq_m = 0; 494 u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; 495 u16 rss_m = IXGBE_RSS_DISABLED_MASK; 496 #ifdef IXGBE_FCOE 497 u16 fcoe_i = 0; 498 #endif 499 500 /* only proceed if SR-IOV is enabled */ 501 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 502 return false; 503 504 /* Add starting offset to total pool count */ 505 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; 506 507 /* double check we are limited to maximum pools */ 508 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); 509 510 /* 64 pool mode with 2 queues per pool */ 511 if ((vmdq_i > 32) || (rss_i < 4)) { 512 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; 513 rss_m = IXGBE_RSS_2Q_MASK; 514 rss_i = min_t(u16, rss_i, 2); 515 /* 32 pool mode with 4 queues per pool */ 516 } else { 517 vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; 518 rss_m = IXGBE_RSS_4Q_MASK; 519 rss_i = 4; 520 } 521 522 #ifdef IXGBE_FCOE 523 /* queues in the remaining pools are available for FCoE */ 524 fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); 525 526 #endif 527 /* remove the starting offset from the pool count */ 528 vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; 529 530 /* save features for later use */ 531 adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; 532 adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; 533 534 /* limit RSS based on user input and save for later use */ 535 adapter->ring_feature[RING_F_RSS].indices = rss_i; 536 adapter->ring_feature[RING_F_RSS].mask = rss_m; 537 538 adapter->num_rx_pools = vmdq_i; 539 adapter->num_rx_queues_per_pool = rss_i; 540 541 adapter->num_rx_queues = vmdq_i * rss_i; 542 adapter->num_tx_queues = vmdq_i * rss_i; 543 544 /* disable ATR as it is not supported when VMDq is enabled */ 545 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 546 547 #ifdef IXGBE_FCOE 548 /* 549 * FCoE can use rings from adjacent buffers to allow RSS 550 * like behavior. To account for this we need to add the 551 * FCoE indices to the total ring count. 552 */ 553 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 554 struct ixgbe_ring_feature *fcoe; 555 556 fcoe = &adapter->ring_feature[RING_F_FCOE]; 557 558 /* limit ourselves based on feature limits */ 559 fcoe_i = min_t(u16, fcoe_i, fcoe->limit); 560 561 if (vmdq_i > 1 && fcoe_i) { 562 /* reserve no more than number of CPUs */ 563 fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); 564 565 /* alloc queues for FCoE separately */ 566 fcoe->indices = fcoe_i; 567 fcoe->offset = vmdq_i * rss_i; 568 } else { 569 /* merge FCoE queues with RSS queues */ 570 fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); 571 572 /* limit indices to rss_i if MSI-X is disabled */ 573 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 574 fcoe_i = rss_i; 575 576 /* attempt to reserve some queues for just FCoE */ 577 fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); 578 fcoe->offset = fcoe_i - fcoe->indices; 579 580 fcoe_i -= rss_i; 581 } 582 583 /* add queues to adapter */ 584 adapter->num_tx_queues += fcoe_i; 585 adapter->num_rx_queues += fcoe_i; 586 } 587 588 #endif 589 return true; 590 } 591 592 /** 593 * ixgbe_set_rss_queues - Allocate queues for RSS 594 * @adapter: board private structure to initialize 595 * 596 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 597 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 
598 * 599 **/ 600 static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) 601 { 602 struct ixgbe_ring_feature *f; 603 u16 rss_i; 604 605 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { 606 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 607 return false; 608 } 609 610 /* set mask for 16 queue limit of RSS */ 611 f = &adapter->ring_feature[RING_F_RSS]; 612 rss_i = f->limit; 613 614 f->indices = rss_i; 615 f->mask = IXGBE_RSS_16Q_MASK; 616 617 /* 618 * Use Flow Director in addition to RSS to ensure the best 619 * distribution of flows across cores, even when an FDIR flow 620 * isn't matched. 621 */ 622 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 623 f = &adapter->ring_feature[RING_F_FDIR]; 624 625 f->indices = min_t(u16, num_online_cpus(), f->limit); 626 rss_i = max_t(u16, rss_i, f->indices); 627 } 628 629 #ifdef IXGBE_FCOE 630 /* 631 * FCoE can exist on the same rings as standard network traffic 632 * however it is preferred to avoid that if possible. In order 633 * to get the best performance we allocate as many FCoE queues 634 * as we can and we place them at the end of the ring array to 635 * avoid sharing queues with standard RSS on systems with 24 or 636 * more CPUs. 637 */ 638 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 639 struct net_device *dev = adapter->netdev; 640 u16 fcoe_i; 641 642 f = &adapter->ring_feature[RING_F_FCOE]; 643 644 /* merge FCoE queues with RSS queues */ 645 fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); 646 fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); 647 648 /* limit indices to rss_i if MSI-X is disabled */ 649 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 650 fcoe_i = rss_i; 651 652 /* attempt to reserve some queues for just FCoE */ 653 f->indices = min_t(u16, fcoe_i, f->limit); 654 f->offset = fcoe_i - f->indices; 655 rss_i = max_t(u16, fcoe_i, rss_i); 656 } 657 658 #endif /* IXGBE_FCOE */ 659 adapter->num_rx_queues = rss_i; 660 adapter->num_tx_queues = rss_i; 661 662 return true; 663 } 664 665 /** 666 * ixgbe_set_num_queues - Allocate queues for device, feature dependent 667 * @adapter: board private structure to initialize 668 * 669 * This is the top level queue allocation routine. The order here is very 670 * important, starting with the "most" number of features turned on at once, 671 * and ending with the smallest set of features. This way large combinations 672 * can be allocated if they're turned on, and smaller combinations are the 673 * fallthrough conditions. 674 * 675 **/ 676 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 677 { 678 /* Start with base case */ 679 adapter->num_rx_queues = 1; 680 adapter->num_tx_queues = 1; 681 adapter->num_rx_pools = adapter->num_rx_queues; 682 adapter->num_rx_queues_per_pool = 1; 683 684 #ifdef CONFIG_IXGBE_DCB 685 if (ixgbe_set_dcb_sriov_queues(adapter)) 686 return; 687 688 if (ixgbe_set_dcb_queues(adapter)) 689 return; 690 691 #endif 692 if (ixgbe_set_sriov_queues(adapter)) 693 return; 694 695 ixgbe_set_rss_queues(adapter); 696 } 697 698 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 699 int vectors) 700 { 701 int err, vector_threshold; 702 703 /* We'll want at least 2 (vector_threshold): 704 * 1) TxQ[0] + RxQ[0] handler 705 * 2) Other (Link Status Change, etc.) 706 */ 707 vector_threshold = MIN_MSIX_COUNT; 708 709 /* 710 * The more we get, the more we will assign to Tx/Rx Cleanup 711 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 
712 * Right now, we simply care about how many we'll get; we'll 713 * set them up later while requesting irq's. 714 */ 715 while (vectors >= vector_threshold) { 716 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 717 vectors); 718 if (!err) /* Success in acquiring all requested vectors. */ 719 break; 720 else if (err < 0) 721 vectors = 0; /* Nasty failure, quit now */ 722 else /* err == number of vectors we should try again with */ 723 vectors = err; 724 } 725 726 if (vectors < vector_threshold) { 727 /* Can't allocate enough MSI-X interrupts? Oh well. 728 * This just means we'll go with either a single MSI 729 * vector or fall back to legacy interrupts. 730 */ 731 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, 732 "Unable to allocate MSI-X interrupts\n"); 733 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 734 kfree(adapter->msix_entries); 735 adapter->msix_entries = NULL; 736 } else { 737 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ 738 /* 739 * Adjust for only the vectors we'll use, which is minimum 740 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 741 * vectors we were allocated. 742 */ 743 vectors -= NON_Q_VECTORS; 744 adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); 745 } 746 } 747 748 static void ixgbe_add_ring(struct ixgbe_ring *ring, 749 struct ixgbe_ring_container *head) 750 { 751 ring->next = head->ring; 752 head->ring = ring; 753 head->count++; 754 } 755 756 /** 757 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector 758 * @adapter: board private structure to initialize 759 * @v_count: q_vectors allocated on adapter, used for ring interleaving 760 * @v_idx: index of vector in adapter struct 761 * @txr_count: total number of Tx rings to allocate 762 * @txr_idx: index of first Tx ring to allocate 763 * @rxr_count: total number of Rx rings to allocate 764 * @rxr_idx: index of first Rx ring to allocate 765 * 766 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
767 **/ 768 static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, 769 int v_count, int v_idx, 770 int txr_count, int txr_idx, 771 int rxr_count, int rxr_idx) 772 { 773 struct ixgbe_q_vector *q_vector; 774 struct ixgbe_ring *ring; 775 int node = -1; 776 int cpu = -1; 777 int ring_count, size; 778 779 ring_count = txr_count + rxr_count; 780 size = sizeof(struct ixgbe_q_vector) + 781 (sizeof(struct ixgbe_ring) * ring_count); 782 783 /* customize cpu for Flow Director mapping */ 784 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 785 if (cpu_online(v_idx)) { 786 cpu = v_idx; 787 node = cpu_to_node(cpu); 788 } 789 } 790 791 /* allocate q_vector and rings */ 792 q_vector = kzalloc_node(size, GFP_KERNEL, node); 793 if (!q_vector) 794 q_vector = kzalloc(size, GFP_KERNEL); 795 if (!q_vector) 796 return -ENOMEM; 797 798 /* setup affinity mask and node */ 799 if (cpu != -1) 800 cpumask_set_cpu(cpu, &q_vector->affinity_mask); 801 else 802 cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); 803 q_vector->numa_node = node; 804 805 /* initialize NAPI */ 806 netif_napi_add(adapter->netdev, &q_vector->napi, 807 ixgbe_poll, 64); 808 809 /* tie q_vector and adapter together */ 810 adapter->q_vector[v_idx] = q_vector; 811 q_vector->adapter = adapter; 812 q_vector->v_idx = v_idx; 813 814 /* initialize work limits */ 815 q_vector->tx.work_limit = adapter->tx_work_limit; 816 817 /* initialize pointer to rings */ 818 ring = q_vector->ring; 819 820 while (txr_count) { 821 /* assign generic ring traits */ 822 ring->dev = &adapter->pdev->dev; 823 ring->netdev = adapter->netdev; 824 825 /* configure backlink on ring */ 826 ring->q_vector = q_vector; 827 828 /* update q_vector Tx values */ 829 ixgbe_add_ring(ring, &q_vector->tx); 830 831 /* apply Tx specific ring traits */ 832 ring->count = adapter->tx_ring_count; 833 ring->queue_index = txr_idx; 834 835 /* assign ring to adapter */ 836 adapter->tx_ring[txr_idx] = ring; 837 838 /* update count and index */ 839 txr_count--; 840 txr_idx += v_count; 841 842 /* push pointer to next ring */ 843 ring++; 844 } 845 846 while (rxr_count) { 847 /* assign generic ring traits */ 848 ring->dev = &adapter->pdev->dev; 849 ring->netdev = adapter->netdev; 850 851 /* configure backlink on ring */ 852 ring->q_vector = q_vector; 853 854 /* update q_vector Rx values */ 855 ixgbe_add_ring(ring, &q_vector->rx); 856 857 /* 858 * 82599 errata, UDP frames with a 0 checksum 859 * can be marked as checksum errors. 860 */ 861 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 862 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); 863 864 #ifdef IXGBE_FCOE 865 if (adapter->netdev->features & NETIF_F_FCOE_MTU) { 866 struct ixgbe_ring_feature *f; 867 f = &adapter->ring_feature[RING_F_FCOE]; 868 if ((rxr_idx >= f->offset) && 869 (rxr_idx < f->offset + f->indices)) 870 set_bit(__IXGBE_RX_FCOE, &ring->state); 871 } 872 873 #endif /* IXGBE_FCOE */ 874 /* apply Rx specific ring traits */ 875 ring->count = adapter->rx_ring_count; 876 ring->queue_index = rxr_idx; 877 878 /* assign ring to adapter */ 879 adapter->rx_ring[rxr_idx] = ring; 880 881 /* update count and index */ 882 rxr_count--; 883 rxr_idx += v_count; 884 885 /* push pointer to next ring */ 886 ring++; 887 } 888 889 return 0; 890 } 891 892 /** 893 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector 894 * @adapter: board private structure to initialize 895 * @v_idx: Index of vector to be freed 896 * 897 * This function frees the memory allocated to the q_vector. 
/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

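/*
 * Illustrative example of the ring distribution above (hypothetical
 * numbers): with 10 Tx rings left for 4 q_vectors, DIV_ROUND_UP()
 * hands out 3, 3, 2 and 2 rings, since the per-vector share is
 * recomputed from whatever remains after each allocation. Within
 * ixgbe_alloc_q_vector() the ring indices then advance by v_count,
 * so the vector at v_idx == 2 that receives 2 rings starting at
 * txr_idx == 2 owns Tx rings 2 and 6.
 */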
/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

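/*
 * Worked example of the vector budget below (hypothetical numbers):
 * with 16 Rx and 16 Tx queues on an 8-CPU system, v_budget is
 * min(max(16, 16), 8) plus NON_Q_VECTORS; assuming NON_Q_VECTORS is 1,
 * that is 9 MSI-X vectors requested, further capped by
 * hw->mac.max_msix_vectors.
 */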
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors. With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device. Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		e_err(probe,
		      "ATR is not supported while multiple "
		      "queues are disabled. Disabling Flow Director\n");
	}
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->atr_sample_rate = 0;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy. Error: %d\n", err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the
 * structure to pre-load conditions.
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}