/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}
		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
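
/* Illustrative pairing of the two helpers above (hypothetical values):
 * a VSI that needs four queue pairs takes them from the queue-pair pile
 * with something like
 *
 *	base = i40e_get_lump(pf, pf->qp_pile, 4, vsi->idx);
 *
 * and on teardown hands the same run of entries back with
 *
 *	i40e_put_lump(pf->qp_pile, base, vsi->idx);
 *
 * where the owner id ties each pile->list entry to vsi->idx.
 */
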
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		i40e_down(vsi);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		/* the Rx ring is allocated in the same block,
		 * immediately following its Tx partner
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings)
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_QEMU_DEVICE_ID) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
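
/* Worked example of the roll-over handling above (illustrative values):
 * if the saved offset is 0xFFFFFFFFF000 and the 48-bit counter has since
 * wrapped around to 0x10, then new_data < *offset and the else branch
 * computes (0x10 + (1ULL << 48)) - 0xFFFFFFFFF000 = 0x1010, the true
 * delta despite the wrap.
 */
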
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
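
/* Illustrative walk through i40e_update_prio_xoff_rx() above: with
 * dcb_cfg->etscfg.prioritytable[3] == 1, an XOFF pause frame counted
 * against user priority 3 sets xoff[1], and the final loop then disarms
 * the hang check on every Tx ring whose ring->dcb_tc is 1.
 */
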
/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	int i;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_rx_buff_failed;
		rx_page += p->rx_stats.alloc_rx_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);
		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors
				+ nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);
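
		/* The PRCxxx counters above (and PTCxxx counters below)
		 * bucket frames by size, per the register names: e.g.
		 * rx_size_127 counts 65-127 byte frames and rx_size_big
		 * everything larger than 1522 bytes.
		 */
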
		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);
	}

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only a vlan of -1 on every filter denotes not-in-vlan mode,
	 * so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		memcpy(f->macaddr, macaddr, ETH_ALEN);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
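
/* Example of the reference counting above (illustrative): a filter that
 * is in use by both a VF and a netdev has counter == 2 after two
 * i40e_add_filter() calls; i40e_del_filter() below must then be called
 * once per flavor before counter drops to 0 and the filter is flagged
 * for removal from the firmware's list.
 */
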
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in the non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			vsi->tc_config.tc_info[i].qoffset = offset;
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (i == 0)
					qcount = pf->rss_size;
				else
					qcount = pf->num_tc_qps;
				vsi->tc_config.tc_info[i].qcount = qcount;
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = vsi->alloc_queue_pairs;
				vsi->tc_config.tc_info[i].qcount = qcount;
				WARN_ON(i != 0);
				break;
			}

			/* find the power-of-2 of the number of queue pairs */
			num_qps = vsi->tc_config.tc_info[i].qcount;
			pow = 0;
			while (num_qps &&
			       ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += vsi->tc_config.tc_info[i].qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}
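
	/* Illustrative qmap encoding from the loop above: with offset = 0
	 * and qcount = 3, the power-of-2 search settles on pow = 2, so the
	 * TC advertises a region of 4 queues (rounded up to the next power
	 * of two) starting at queue offset 0.
	 */
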
	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list) {
			/* don't leave the config-busy bit set on the way out */
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;
			/* add to delete list */
			memcpy(del_list[num_del].mac_addr,
			       f->macaddr, ETH_ALEN);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY ||
			    (vsi->netdev && !(vsi->netdev->features &
					      NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del, NULL);
			num_del = 0;

			if (aq_ret)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list) {
			/* don't leave the config-busy bit set on the way out */
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;
			/* add to add array */
			memcpy(add_list[num_add].mac_addr,
			       f->macaddr, ETH_ALEN);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
			    !(vsi->netdev->features &
				 NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct i40e_vsi *vsi = np->vsi;
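
	/* ETH_HLEN (14) + ETH_FCS_LEN (4) of overhead: e.g. the default
	 * 1500 byte MTU yields a 1518 byte max_frame, which must fit in
	 * I40E_MAX_RXBUFFER.
	 */
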
	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;
	int ret;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Could not sync filters for vid %d\n", vid);
		return ret;
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}

		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
		ret = i40e_sync_vsi_filters(vsi);
	}

	return ret;
}

/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;
	int ret;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
		return ret;
	}
This signifies that we should from now 1882 * on accept any traffic (with any tag present, or untagged) 1883 */ 1884 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1885 if (is_netdev) { 1886 if (f->vlan && 1887 ether_addr_equal(netdev->dev_addr, f->macaddr)) 1888 filter_count++; 1889 } 1890 1891 if (f->vlan) 1892 filter_count++; 1893 } 1894 1895 if (!filter_count && is_netdev) { 1896 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 1897 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 1898 is_vf, is_netdev); 1899 if (!f) { 1900 dev_info(&vsi->back->pdev->dev, 1901 "Could not add filter %d for %pM\n", 1902 I40E_VLAN_ANY, netdev->dev_addr); 1903 return -ENOMEM; 1904 } 1905 } 1906 1907 if (!filter_count) { 1908 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1909 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 1910 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 1911 is_vf, is_netdev); 1912 if (!add_f) { 1913 dev_info(&vsi->back->pdev->dev, 1914 "Could not add filter %d for %pM\n", 1915 I40E_VLAN_ANY, f->macaddr); 1916 return -ENOMEM; 1917 } 1918 } 1919 } 1920 1921 return i40e_sync_vsi_filters(vsi); 1922 } 1923 1924 /** 1925 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1926 * @netdev: network interface to be adjusted 1927 * @vid: vlan id to be added 1928 * 1929 * net_device_ops implementation for adding vlan ids 1930 **/ 1931 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1932 __always_unused __be16 proto, u16 vid) 1933 { 1934 struct i40e_netdev_priv *np = netdev_priv(netdev); 1935 struct i40e_vsi *vsi = np->vsi; 1936 int ret = 0; 1937 1938 if (vid > 4095) 1939 return -EINVAL; 1940 1941 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 1942 1943 /* If the network stack called us with vid = 0, we should 1944 * indicate to i40e_vsi_add_vlan() that we want to receive 1945 * any traffic (i.e. with any vlan tag, or untagged) 1946 */ 1947 ret = i40e_vsi_add_vlan(vsi, vid ? 
vid : I40E_VLAN_ANY); 1948 1949 if (!ret && (vid < VLAN_N_VID)) 1950 set_bit(vid, vsi->active_vlans); 1951 1952 return ret; 1953 } 1954 1955 /** 1956 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1957 * @netdev: network interface to be adjusted 1958 * @vid: vlan id to be removed 1959 * 1960 * net_device_ops implementation for removing vlan ids 1961 **/ 1962 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1963 __always_unused __be16 proto, u16 vid) 1964 { 1965 struct i40e_netdev_priv *np = netdev_priv(netdev); 1966 struct i40e_vsi *vsi = np->vsi; 1967 1968 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); 1969 1970 /* return code is ignored as there is nothing a user 1971 * can do about failure to remove and a log message was 1972 * already printed from the other function 1973 */ 1974 i40e_vsi_kill_vlan(vsi, vid); 1975 1976 clear_bit(vid, vsi->active_vlans); 1977 1978 return 0; 1979 } 1980 1981 /** 1982 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 1983 * @vsi: the vsi being brought back up 1984 **/ 1985 static void i40e_restore_vlan(struct i40e_vsi *vsi) 1986 { 1987 u16 vid; 1988 1989 if (!vsi->netdev) 1990 return; 1991 1992 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 1993 1994 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 1995 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 1996 vid); 1997 } 1998 1999 /** 2000 * i40e_vsi_add_pvid - Add pvid for the VSI 2001 * @vsi: the vsi being adjusted 2002 * @vid: the vlan id to set as a PVID 2003 **/ 2004 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2005 { 2006 struct i40e_vsi_context ctxt; 2007 i40e_status aq_ret; 2008 2009 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2010 vsi->info.pvid = cpu_to_le16(vid); 2011 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID; 2012 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; 2013 2014 ctxt.seid = vsi->seid; 2015 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 2016 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2017 if (aq_ret) { 2018 dev_info(&vsi->back->pdev->dev, 2019 "%s: update vsi failed, aq_err=%d\n", 2020 __func__, vsi->back->hw.aq.asq_last_status); 2021 return -ENOENT; 2022 } 2023 2024 return 0; 2025 } 2026 2027 /** 2028 * i40e_vsi_remove_pvid - Remove the pvid from the VSI 2029 * @vsi: the vsi being adjusted 2030 * 2031 * Just use the vlan_rx_register() service to put it back to normal 2032 **/ 2033 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2034 { 2035 vsi->info.pvid = 0; 2036 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2037 } 2038 2039 /** 2040 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources 2041 * @vsi: ptr to the VSI 2042 * 2043 * If this function returns with an error, then it's possible one or 2044 * more of the rings is populated (while the rest are not). It is the 2045 * caller's duty to clean those orphaned rings.
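* For example, the error path in i40e_open() simply calls
* i40e_vsi_free_tx_resources(), which skips any ring whose descriptor
* memory was never allocated, so a partial allocation is cleaned up
* safely.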
2046 * 2047 * Return 0 on success, negative on failure 2048 **/ 2049 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) 2050 { 2051 int i, err = 0; 2052 2053 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2054 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); 2055 2056 return err; 2057 } 2058 2059 /** 2060 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues 2061 * @vsi: ptr to the VSI 2062 * 2063 * Free VSI's transmit software resources 2064 **/ 2065 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) 2066 { 2067 int i; 2068 2069 for (i = 0; i < vsi->num_queue_pairs; i++) 2070 if (vsi->tx_rings[i]->desc) 2071 i40e_free_tx_resources(vsi->tx_rings[i]); 2072 } 2073 2074 /** 2075 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources 2076 * @vsi: ptr to the VSI 2077 * 2078 * If this function returns with an error, then it's possible one or 2079 * more of the rings is populated (while the rest are not). It is the 2080 * caller's duty to clean those orphaned rings. 2081 * 2082 * Return 0 on success, negative on failure 2083 **/ 2084 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) 2085 { 2086 int i, err = 0; 2087 2088 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2089 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); 2090 return err; 2091 } 2092 2093 /** 2094 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues 2095 * @vsi: ptr to the VSI 2096 * 2097 * Free all receive software resources 2098 **/ 2099 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) 2100 { 2101 int i; 2102 2103 for (i = 0; i < vsi->num_queue_pairs; i++) 2104 if (vsi->rx_rings[i]->desc) 2105 i40e_free_rx_resources(vsi->rx_rings[i]); 2106 } 2107 2108 /** 2109 * i40e_configure_tx_ring - Configure a transmit ring context 2110 * @ring: The Tx ring to configure 2111 * 2112 * Configure the Tx descriptor ring in the HMC context. 2113 **/ 2114 static int i40e_configure_tx_ring(struct i40e_ring *ring) 2115 { 2116 struct i40e_vsi *vsi = ring->vsi; 2117 u16 pf_q = vsi->base_queue + ring->queue_index; 2118 struct i40e_hw *hw = &vsi->back->hw; 2119 struct i40e_hmc_obj_txq tx_ctx; 2120 i40e_status err = 0; 2121 u32 qtx_ctl = 0; 2122 2123 /* some ATR related tx ring init */ 2124 if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) { 2125 ring->atr_sample_rate = vsi->back->atr_sample_rate; 2126 ring->atr_count = 0; 2127 } else { 2128 ring->atr_sample_rate = 0; 2129 } 2130 2131 /* initialize XPS */ 2132 if (ring->q_vector && ring->netdev && 2133 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) 2134 netif_set_xps_queue(ring->netdev, 2135 &ring->q_vector->affinity_mask, 2136 ring->queue_index); 2137 2138 /* clear the context structure first */ 2139 memset(&tx_ctx, 0, sizeof(tx_ctx)); 2140 2141 tx_ctx.new_context = 1; 2142 tx_ctx.base = (ring->dma / 128); 2143 tx_ctx.qlen = ring->count; 2144 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED | 2145 I40E_FLAG_FDIR_ATR_ENABLED)); 2146 2147 /* As part of VSI creation/update, FW allocates certain 2148 * Tx arbitration queue sets for each TC enabled for 2149 * the VSI. The FW returns the handles to these queue 2150 * sets as part of the response buffer to Add VSI, 2151 * Update VSI, etc. AQ commands. It is expected that 2152 * these queue set handles be associated with the Tx 2153 * queues by the driver as part of the TX queue context 2154 * initialization. This has to be done regardless of 2155 * DCB as by default everything is mapped to TC0.
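* For example, with DCB disabled the rings keep dcb_tc == 0, so every
* Tx queue picks up vsi->info.qs_handle[0] as its rdylist below.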
2156 */ 2157 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); 2158 tx_ctx.rdylist_act = 0; 2159 2160 /* clear the context in the HMC */ 2161 err = i40e_clear_lan_tx_queue_context(hw, pf_q); 2162 if (err) { 2163 dev_info(&vsi->back->pdev->dev, 2164 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", 2165 ring->queue_index, pf_q, err); 2166 return -ENOMEM; 2167 } 2168 2169 /* set the context in the HMC */ 2170 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); 2171 if (err) { 2172 dev_info(&vsi->back->pdev->dev, 2173 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", 2174 ring->queue_index, pf_q, err); 2175 return -ENOMEM; 2176 } 2177 2178 /* Now associate this queue with this PCI function */ 2179 qtx_ctl = I40E_QTX_CTL_PF_QUEUE; 2180 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & 2181 I40E_QTX_CTL_PF_INDX_MASK); 2182 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); 2183 i40e_flush(hw); 2184 2185 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); 2186 2187 /* cache tail off for easier writes later */ 2188 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); 2189 2190 return 0; 2191 } 2192 2193 /** 2194 * i40e_configure_rx_ring - Configure a receive ring context 2195 * @ring: The Rx ring to configure 2196 * 2197 * Configure the Rx descriptor ring in the HMC context. 2198 **/ 2199 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2200 { 2201 struct i40e_vsi *vsi = ring->vsi; 2202 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2203 u16 pf_q = vsi->base_queue + ring->queue_index; 2204 struct i40e_hw *hw = &vsi->back->hw; 2205 struct i40e_hmc_obj_rxq rx_ctx; 2206 i40e_status err = 0; 2207 2208 ring->state = 0; 2209 2210 /* clear the context structure first */ 2211 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2212 2213 ring->rx_buf_len = vsi->rx_buf_len; 2214 ring->rx_hdr_len = vsi->rx_hdr_len; 2215 2216 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2217 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2218 2219 rx_ctx.base = (ring->dma / 128); 2220 rx_ctx.qlen = ring->count; 2221 2222 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2223 set_ring_16byte_desc_enabled(ring); 2224 rx_ctx.dsize = 0; 2225 } else { 2226 rx_ctx.dsize = 1; 2227 } 2228 2229 rx_ctx.dtype = vsi->dtype; 2230 if (vsi->dtype) { 2231 set_ring_ps_enabled(ring); 2232 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2233 I40E_RX_SPLIT_IP | 2234 I40E_RX_SPLIT_TCP_UDP | 2235 I40E_RX_SPLIT_SCTP; 2236 } else { 2237 rx_ctx.hsplit_0 = 0; 2238 } 2239 2240 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2241 (chain_len * ring->rx_buf_len)); 2242 rx_ctx.tphrdesc_ena = 1; 2243 rx_ctx.tphwdesc_ena = 1; 2244 rx_ctx.tphdata_ena = 1; 2245 rx_ctx.tphhead_ena = 1; 2246 rx_ctx.lrxqthresh = 2; 2247 rx_ctx.crcstrip = 1; 2248 rx_ctx.l2tsel = 1; 2249 rx_ctx.showiv = 1; 2250 2251 /* clear the context in the HMC */ 2252 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2253 if (err) { 2254 dev_info(&vsi->back->pdev->dev, 2255 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2256 ring->queue_index, pf_q, err); 2257 return -ENOMEM; 2258 } 2259 2260 /* set the context in the HMC */ 2261 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2262 if (err) { 2263 dev_info(&vsi->back->pdev->dev, 2264 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2265 ring->queue_index, pf_q, err); 2266 return -ENOMEM; 2267 } 2268 2269 /* cache tail for quicker writes, and clear the reg before use */ 2270 ring->tail = hw->hw_addr +
I40E_QRX_TAIL(pf_q); 2271 writel(0, ring->tail); 2272 2273 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 2274 2275 return 0; 2276 } 2277 2278 /** 2279 * i40e_vsi_configure_tx - Configure the VSI for Tx 2280 * @vsi: VSI structure describing this set of rings and resources 2281 * 2282 * Configure the Tx VSI for operation. 2283 **/ 2284 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2285 { 2286 int err = 0; 2287 u16 i; 2288 2289 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2290 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2291 2292 return err; 2293 } 2294 2295 /** 2296 * i40e_vsi_configure_rx - Configure the VSI for Rx 2297 * @vsi: the VSI being configured 2298 * 2299 * Configure the Rx VSI for operation. 2300 **/ 2301 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2302 { 2303 int err = 0; 2304 u16 i; 2305 2306 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2307 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2308 + ETH_FCS_LEN + VLAN_HLEN; 2309 else 2310 vsi->max_frame = I40E_RXBUFFER_2048; 2311 2312 /* figure out correct receive buffer length */ 2313 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2314 I40E_FLAG_RX_PS_ENABLED)) { 2315 case I40E_FLAG_RX_1BUF_ENABLED: 2316 vsi->rx_hdr_len = 0; 2317 vsi->rx_buf_len = vsi->max_frame; 2318 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2319 break; 2320 case I40E_FLAG_RX_PS_ENABLED: 2321 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2322 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2323 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2324 break; 2325 default: 2326 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2327 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2328 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2329 break; 2330 } 2331 2332 /* round up for the chip's needs */ 2333 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2334 (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); 2335 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2336 (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); 2337 2338 /* set up individual rings */ 2339 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2340 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2341 2342 return err; 2343 } 2344 2345 /** 2346 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2347 * @vsi: ptr to the VSI 2348 **/ 2349 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2350 { 2351 u16 qoffset, qcount; 2352 int i, n; 2353 2354 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2355 return; 2356 2357 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2358 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2359 continue; 2360 2361 qoffset = vsi->tc_config.tc_info[n].qoffset; 2362 qcount = vsi->tc_config.tc_info[n].qcount; 2363 for (i = qoffset; i < (qoffset + qcount); i++) { 2364 struct i40e_ring *rx_ring = vsi->rx_rings[i]; 2365 struct i40e_ring *tx_ring = vsi->tx_rings[i]; 2366 rx_ring->dcb_tc = n; 2367 tx_ring->dcb_tc = n; 2368 } 2369 } 2370 } 2371 2372 /** 2373 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 2374 * @vsi: ptr to the VSI 2375 **/ 2376 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 2377 { 2378 if (vsi->netdev) 2379 i40e_set_rx_mode(vsi->netdev); 2380 } 2381 2382 /** 2383 * i40e_vsi_configure - Set up the VSI for action 2384 * @vsi: the VSI being configured 2385 **/ 2386 static int i40e_vsi_configure(struct i40e_vsi *vsi) 2387 { 2388 int err; 2389 2390 i40e_set_vsi_rx_mode(vsi); 2391 i40e_restore_vlan(vsi); 2392 i40e_vsi_config_dcb_rings(vsi); 2393 err = i40e_vsi_configure_tx(vsi); 2394 if (!err) 2395 err = i40e_vsi_configure_rx(vsi); 2396 2397 return err; 2398 } 2399 2400 /** 2401 * i40e_vsi_configure_msix - MSIX mode 
Interrupt Config in the HW 2402 * @vsi: the VSI being configured 2403 **/ 2404 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) 2405 { 2406 struct i40e_pf *pf = vsi->back; 2407 struct i40e_q_vector *q_vector; 2408 struct i40e_hw *hw = &pf->hw; 2409 u16 vector; 2410 int i, q; 2411 u32 val; 2412 u32 qp; 2413 2414 /* The interrupt indexing is offset by 1 in the PFINT_ITRn 2415 * and PFINT_LNKLSTn registers, e.g.: 2416 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) 2417 */ 2418 qp = vsi->base_queue; 2419 vector = vsi->base_vector; 2420 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2421 q_vector = vsi->q_vectors[i]; 2422 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2423 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2424 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 2425 q_vector->rx.itr); 2426 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2427 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2428 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), 2429 q_vector->tx.itr); 2430 2431 /* Linked list for the queuepairs assigned to this vector */ 2432 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); 2433 for (q = 0; q < q_vector->num_ringpairs; q++) { 2434 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2435 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2436 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 2437 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| 2438 (I40E_QUEUE_TYPE_TX 2439 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); 2440 2441 wr32(hw, I40E_QINT_RQCTL(qp), val); 2442 2443 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2444 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2445 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 2446 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| 2447 (I40E_QUEUE_TYPE_RX 2448 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2449 2450 /* Terminate the linked list */ 2451 if (q == (q_vector->num_ringpairs - 1)) 2452 val |= (I40E_QUEUE_END_OF_LIST 2453 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2454 2455 wr32(hw, I40E_QINT_TQCTL(qp), val); 2456 qp++; 2457 } 2458 } 2459 2460 i40e_flush(hw); 2461 } 2462 2463 /** 2464 * i40e_enable_misc_int_causes - enable the non-queue interrupts 2465 * @hw: ptr to the hardware info 2466 **/ 2467 static void i40e_enable_misc_int_causes(struct i40e_hw *hw) 2468 { 2469 u32 val; 2470 2471 /* clear things first */ 2472 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 2473 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 2474 2475 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 2476 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 2477 I40E_PFINT_ICR0_ENA_GRST_MASK | 2478 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2479 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2480 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | 2481 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2482 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2483 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2484 2485 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2486 2487 /* SW_ITR_IDX = 0, but don't change INTENA */ 2488 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK | 2489 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK); 2490 2491 /* OTHER_ITR_IDX = 0 */ 2492 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2493 } 2494 2495 /** 2496 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 2497 * @vsi: the VSI being configured 2498 **/ 2499 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2500 { 2501 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 2502 struct i40e_pf *pf = vsi->back; 2503 struct i40e_hw *hw = &pf->hw; 2504 u32 val; 2505 2506 /* set the ITR configuration */ 2507 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2508 
q_vector->rx.latency_range = I40E_LOW_LATENCY; 2509 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 2510 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2511 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2512 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2513 2514 i40e_enable_misc_int_causes(hw); 2515 2516 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2517 wr32(hw, I40E_PFINT_LNKLST0, 0); 2518 2519 /* Associate the queue pair to the vector and enable the q int */ 2520 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2521 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2522 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2523 2524 wr32(hw, I40E_QINT_RQCTL(0), val); 2525 2526 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2527 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2528 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2529 2530 wr32(hw, I40E_QINT_TQCTL(0), val); 2531 i40e_flush(hw); 2532 } 2533 2534 /** 2535 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 2536 * @pf: board private structure 2537 **/ 2538 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2539 { 2540 struct i40e_hw *hw = &pf->hw; 2541 u32 val; 2542 2543 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 2544 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2545 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 2546 2547 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2548 i40e_flush(hw); 2549 } 2550 2551 /** 2552 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 2553 * @vsi: pointer to a vsi 2554 * @vector: enable a particular Hw Interrupt vector 2555 **/ 2556 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) 2557 { 2558 struct i40e_pf *pf = vsi->back; 2559 struct i40e_hw *hw = &pf->hw; 2560 u32 val; 2561 2562 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2563 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2564 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2565 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2566 /* skip the flush */ 2567 } 2568 2569 /** 2570 * i40e_msix_clean_rings - MSIX mode Interrupt Handler 2571 * @irq: interrupt number 2572 * @data: pointer to a q_vector 2573 **/ 2574 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 2575 { 2576 struct i40e_q_vector *q_vector = data; 2577 2578 if (!q_vector->tx.ring && !q_vector->rx.ring) 2579 return IRQ_HANDLED; 2580 2581 napi_schedule(&q_vector->napi); 2582 2583 return IRQ_HANDLED; 2584 } 2585 2586 /** 2587 * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings 2588 * @irq: interrupt number 2589 * @data: pointer to a q_vector 2590 **/ 2591 static irqreturn_t i40e_fdir_clean_rings(int irq, void *data) 2592 { 2593 struct i40e_q_vector *q_vector = data; 2594 2595 if (!q_vector->tx.ring && !q_vector->rx.ring) 2596 return IRQ_HANDLED; 2597 2598 pr_info("fdir ring cleaning needed\n"); 2599 2600 return IRQ_HANDLED; 2601 } 2602 2603 /** 2604 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts 2605 * @vsi: the VSI being configured 2606 * @basename: name for the vector 2607 * 2608 * Allocates MSI-X vectors and requests interrupts from the kernel. 
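* The vector names built below combine the caller's basename with the
* vector type and index, e.g. a basename of "i40e-eth0" would yield
* "i40e-eth0-TxRx-0", "i40e-eth0-TxRx-1", and so on.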
2609 **/ 2610 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) 2611 { 2612 int q_vectors = vsi->num_q_vectors; 2613 struct i40e_pf *pf = vsi->back; 2614 int base = vsi->base_vector; 2615 int rx_int_idx = 0; 2616 int tx_int_idx = 0; 2617 int vector, err; 2618 2619 for (vector = 0; vector < q_vectors; vector++) { 2620 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; 2621 2622 if (q_vector->tx.ring && q_vector->rx.ring) { 2623 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2624 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2625 tx_int_idx++; 2626 } else if (q_vector->rx.ring) { 2627 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2628 "%s-%s-%d", basename, "rx", rx_int_idx++); 2629 } else if (q_vector->tx.ring) { 2630 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2631 "%s-%s-%d", basename, "tx", tx_int_idx++); 2632 } else { 2633 /* skip this unused q_vector */ 2634 continue; 2635 } 2636 err = request_irq(pf->msix_entries[base + vector].vector, 2637 vsi->irq_handler, 2638 0, 2639 q_vector->name, 2640 q_vector); 2641 if (err) { 2642 dev_info(&pf->pdev->dev, 2643 "%s: request_irq failed, error: %d\n", 2644 __func__, err); 2645 goto free_queue_irqs; 2646 } 2647 /* assign the mask for this irq */ 2648 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 2649 &q_vector->affinity_mask); 2650 } 2651 2652 return 0; 2653 2654 free_queue_irqs: 2655 while (vector) { 2656 vector--; 2657 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 2658 NULL); 2659 free_irq(pf->msix_entries[base + vector].vector, 2660 vsi->q_vectors[vector]); 2661 } 2662 return err; 2663 } 2664 2665 /** 2666 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI 2667 * @vsi: the VSI being un-configured 2668 **/ 2669 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) 2670 { 2671 struct i40e_pf *pf = vsi->back; 2672 struct i40e_hw *hw = &pf->hw; 2673 int base = vsi->base_vector; 2674 int i; 2675 2676 for (i = 0; i < vsi->num_queue_pairs; i++) { 2677 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); 2678 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); 2679 } 2680 2681 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 2682 for (i = vsi->base_vector; 2683 i < (vsi->num_q_vectors + vsi->base_vector); i++) 2684 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); 2685 2686 i40e_flush(hw); 2687 for (i = 0; i < vsi->num_q_vectors; i++) 2688 synchronize_irq(pf->msix_entries[i + base].vector); 2689 } else { 2690 /* Legacy and MSI mode - this stops all interrupt handling */ 2691 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 2692 wr32(hw, I40E_PFINT_DYN_CTL0, 0); 2693 i40e_flush(hw); 2694 synchronize_irq(pf->pdev->irq); 2695 } 2696 } 2697 2698 /** 2699 * i40e_vsi_enable_irq - Enable IRQ for the given VSI 2700 * @vsi: the VSI being configured 2701 **/ 2702 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) 2703 { 2704 struct i40e_pf *pf = vsi->back; 2705 int i; 2706 2707 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 2708 for (i = vsi->base_vector; 2709 i < (vsi->num_q_vectors + vsi->base_vector); i++) 2710 i40e_irq_dynamic_enable(vsi, i); 2711 } else { 2712 i40e_irq_dynamic_enable_icr0(pf); 2713 } 2714 2715 i40e_flush(&pf->hw); 2716 return 0; 2717 } 2718 2719 /** 2720 * i40e_stop_misc_vector - Stop the vector that handles non-queue events 2721 * @pf: board private structure 2722 **/ 2723 static void i40e_stop_misc_vector(struct i40e_pf *pf) 2724 { 2725 /* Disable ICR 0 */ 2726 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 2727 i40e_flush(&pf->hw); 2728 } 2729 2730 /** 2731 *
i40e_intr - MSI/Legacy and non-queue interrupt handler 2732 * @irq: interrupt number 2733 * @data: pointer to the PF structure 2734 * 2735 * This is the handler used for all MSI/Legacy interrupts, and deals 2736 * with both queue and non-queue interrupts. This is also used in 2737 * MSIX mode to handle the non-queue interrupts. 2738 **/ 2739 static irqreturn_t i40e_intr(int irq, void *data) 2740 { 2741 struct i40e_pf *pf = (struct i40e_pf *)data; 2742 struct i40e_hw *hw = &pf->hw; 2743 u32 icr0, icr0_remaining; 2744 u32 val, ena_mask; 2745 2746 icr0 = rd32(hw, I40E_PFINT_ICR0); 2747 2748 val = rd32(hw, I40E_PFINT_DYN_CTL0); 2749 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; 2750 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2751 2752 /* if sharing a legacy IRQ, we might get called w/o an intr pending */ 2753 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 2754 return IRQ_NONE; 2755 2756 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 2757 2758 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 2759 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 2760 2761 /* temporarily disable queue cause for NAPI processing */ 2762 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); 2763 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; 2764 wr32(hw, I40E_QINT_RQCTL(0), qval); 2765 2766 qval = rd32(hw, I40E_QINT_TQCTL(0)); 2767 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 2768 wr32(hw, I40E_QINT_TQCTL(0), qval); 2769 2770 if (!test_bit(__I40E_DOWN, &pf->state)) 2771 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); 2772 } 2773 2774 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 2775 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2776 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 2777 } 2778 2779 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 2780 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 2781 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 2782 } 2783 2784 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 2785 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 2786 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 2787 } 2788 2789 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 2790 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 2791 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 2792 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 2793 val = rd32(hw, I40E_GLGEN_RSTAT); 2794 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 2795 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 2796 if (val & I40E_RESET_CORER) 2797 pf->corer_count++; 2798 else if (val & I40E_RESET_GLOBR) 2799 pf->globr_count++; 2800 else if (val & I40E_RESET_EMPR) 2801 pf->empr_count++; 2802 } 2803 2804 /* If a critical error is pending we have no choice but to reset the 2805 * device. 2806 * Report and mask out any remaining unexpected interrupts.
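* For example, if ICR0 latched both ADMINQ and HMC_ERR but only the
* ADMINQ cause was handled above (clearing its bit in ena_mask), then
* icr0_remaining below is left holding just the HMC_ERR cause.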
2807 */ 2808 icr0_remaining = icr0 & ena_mask; 2809 if (icr0_remaining) { 2810 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 2811 icr0_remaining); 2812 if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) || 2813 (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 2814 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 2815 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || 2816 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) { 2817 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 2818 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 2819 } else { 2820 dev_info(&pf->pdev->dev, "device will be reset\n"); 2821 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2822 i40e_service_event_schedule(pf); 2823 } 2824 } 2825 ena_mask &= ~icr0_remaining; 2826 } 2827 2828 /* re-enable interrupt causes */ 2829 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 2830 if (!test_bit(__I40E_DOWN, &pf->state)) { 2831 i40e_service_event_schedule(pf); 2832 i40e_irq_dynamic_enable_icr0(pf); 2833 } 2834 2835 return IRQ_HANDLED; 2836 } 2837 2838 /** 2839 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 2840 * @vsi: the VSI being configured 2841 * @v_idx: vector index 2842 * @qp_idx: queue pair index 2843 **/ 2844 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 2845 { 2846 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 2847 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 2848 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 2849 2850 tx_ring->q_vector = q_vector; 2851 tx_ring->next = q_vector->tx.ring; 2852 q_vector->tx.ring = tx_ring; 2853 q_vector->tx.count++; 2854 2855 rx_ring->q_vector = q_vector; 2856 rx_ring->next = q_vector->rx.ring; 2857 q_vector->rx.ring = rx_ring; 2858 q_vector->rx.count++; 2859 } 2860 2861 /** 2862 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 2863 * @vsi: the VSI being configured 2864 * 2865 * This function maps descriptor rings to the queue-specific vectors 2866 * we were allotted through the MSI-X enabling code. Ideally, we'd have 2867 * one vector per queue pair, but on a constrained vector budget, we 2868 * group the queue pairs as "efficiently" as possible. 2869 **/ 2870 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) 2871 { 2872 int qp_remaining = vsi->num_queue_pairs; 2873 int q_vectors = vsi->num_q_vectors; 2874 int num_ringpairs; 2875 int v_start = 0; 2876 int qp_idx = 0; 2877 2878 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to 2879 * group them so there are multiple queues per vector. 
2880 */ 2881 for (; v_start < q_vectors && qp_remaining; v_start++) { 2882 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; 2883 2884 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 2885 2886 q_vector->num_ringpairs = num_ringpairs; 2887 2888 q_vector->rx.count = 0; 2889 q_vector->tx.count = 0; 2890 q_vector->rx.ring = NULL; 2891 q_vector->tx.ring = NULL; 2892 2893 while (num_ringpairs--) { 2894 map_vector_to_qp(vsi, v_start, qp_idx); 2895 qp_idx++; 2896 qp_remaining--; 2897 } 2898 } 2899 } 2900 2901 /** 2902 * i40e_vsi_request_irq - Request IRQ from the OS 2903 * @vsi: the VSI being configured 2904 * @basename: name for the vector 2905 **/ 2906 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) 2907 { 2908 struct i40e_pf *pf = vsi->back; 2909 int err; 2910 2911 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 2912 err = i40e_vsi_request_irq_msix(vsi, basename); 2913 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 2914 err = request_irq(pf->pdev->irq, i40e_intr, 0, 2915 pf->misc_int_name, pf); 2916 else 2917 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 2918 pf->misc_int_name, pf); 2919 2920 if (err) 2921 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 2922 2923 return err; 2924 } 2925 2926 #ifdef CONFIG_NET_POLL_CONTROLLER 2927 /** 2928 * i40e_netpoll - A Polling 'interrupt'handler 2929 * @netdev: network interface device structure 2930 * 2931 * This is used by netconsole to send skbs without having to re-enable 2932 * interrupts. It's not called while the normal interrupt routine is executing. 2933 **/ 2934 static void i40e_netpoll(struct net_device *netdev) 2935 { 2936 struct i40e_netdev_priv *np = netdev_priv(netdev); 2937 struct i40e_vsi *vsi = np->vsi; 2938 struct i40e_pf *pf = vsi->back; 2939 int i; 2940 2941 /* if interface is down do nothing */ 2942 if (test_bit(__I40E_DOWN, &vsi->state)) 2943 return; 2944 2945 pf->flags |= I40E_FLAG_IN_NETPOLL; 2946 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 2947 for (i = 0; i < vsi->num_q_vectors; i++) 2948 i40e_msix_clean_rings(0, vsi->q_vectors[i]); 2949 } else { 2950 i40e_intr(pf->pdev->irq, netdev); 2951 } 2952 pf->flags &= ~I40E_FLAG_IN_NETPOLL; 2953 } 2954 #endif 2955 2956 /** 2957 * i40e_vsi_control_tx - Start or stop a VSI's rings 2958 * @vsi: the VSI being configured 2959 * @enable: start or stop the rings 2960 **/ 2961 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 2962 { 2963 struct i40e_pf *pf = vsi->back; 2964 struct i40e_hw *hw = &pf->hw; 2965 int i, j, pf_q; 2966 u32 tx_reg; 2967 2968 pf_q = vsi->base_queue; 2969 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 2970 j = 1000; 2971 do { 2972 usleep_range(1000, 2000); 2973 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 2974 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) 2975 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); 2976 2977 if (enable) { 2978 /* is STAT set ? */ 2979 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { 2980 dev_info(&pf->pdev->dev, 2981 "Tx %d already enabled\n", i); 2982 continue; 2983 } 2984 } else { 2985 /* is !STAT set ? 
*/ 2986 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { 2987 dev_info(&pf->pdev->dev, 2988 "Tx %d already disabled\n", i); 2989 continue; 2990 } 2991 } 2992 2993 /* turn on/off the queue */ 2994 if (enable) 2995 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | 2996 I40E_QTX_ENA_QENA_STAT_MASK; 2997 else 2998 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 2999 3000 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3001 3002 /* wait for the change to finish */ 3003 for (j = 0; j < 10; j++) { 3004 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3005 if (enable) { 3006 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3007 break; 3008 } else { 3009 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3010 break; 3011 } 3012 3013 udelay(10); 3014 } 3015 if (j >= 10) { 3016 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n", 3017 pf_q, (enable ? "en" : "dis")); 3018 return -ETIMEDOUT; 3019 } 3020 } 3021 3022 return 0; 3023 } 3024 3025 /** 3026 * i40e_vsi_control_rx - Start or stop a VSI's rings 3027 * @vsi: the VSI being configured 3028 * @enable: start or stop the rings 3029 **/ 3030 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) 3031 { 3032 struct i40e_pf *pf = vsi->back; 3033 struct i40e_hw *hw = &pf->hw; 3034 int i, j, pf_q; 3035 u32 rx_reg; 3036 3037 pf_q = vsi->base_queue; 3038 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3039 j = 1000; 3040 do { 3041 usleep_range(1000, 2000); 3042 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3043 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) 3044 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); 3045 3046 if (enable) { 3047 /* is STAT set ? */ 3048 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3049 continue; 3050 } else { 3051 /* is !STAT set ? */ 3052 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3053 continue; 3054 } 3055 3056 /* turn on/off the queue */ 3057 if (enable) 3058 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | 3059 I40E_QRX_ENA_QENA_STAT_MASK; 3060 else 3061 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | 3062 I40E_QRX_ENA_QENA_STAT_MASK); 3063 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); 3064 3065 /* wait for the change to finish */ 3066 for (j = 0; j < 10; j++) { 3067 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3068 3069 if (enable) { 3070 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3071 break; 3072 } else { 3073 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3074 break; 3075 } 3076 3077 udelay(10); 3078 } 3079 if (j >= 10) { 3080 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n", 3081 pf_q, (enable ? 
"en" : "dis")); 3082 return -ETIMEDOUT; 3083 } 3084 } 3085 3086 return 0; 3087 } 3088 3089 /** 3090 * i40e_vsi_control_rings - Start or stop a VSI's rings 3091 * @vsi: the VSI being configured 3092 * @enable: start or stop the rings 3093 **/ 3094 static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) 3095 { 3096 int ret; 3097 3098 /* do rx first for enable and last for disable */ 3099 if (request) { 3100 ret = i40e_vsi_control_rx(vsi, request); 3101 if (ret) 3102 return ret; 3103 ret = i40e_vsi_control_tx(vsi, request); 3104 } else { 3105 ret = i40e_vsi_control_tx(vsi, request); 3106 if (ret) 3107 return ret; 3108 ret = i40e_vsi_control_rx(vsi, request); 3109 } 3110 3111 return ret; 3112 } 3113 3114 /** 3115 * i40e_vsi_free_irq - Free the irq association with the OS 3116 * @vsi: the VSI being configured 3117 **/ 3118 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) 3119 { 3120 struct i40e_pf *pf = vsi->back; 3121 struct i40e_hw *hw = &pf->hw; 3122 int base = vsi->base_vector; 3123 u32 val, qp; 3124 int i; 3125 3126 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3127 if (!vsi->q_vectors) 3128 return; 3129 3130 for (i = 0; i < vsi->num_q_vectors; i++) { 3131 u16 vector = i + base; 3132 3133 /* free only the irqs that were actually requested */ 3134 if (vsi->q_vectors[i]->num_ringpairs == 0) 3135 continue; 3136 3137 /* clear the affinity_mask in the IRQ descriptor */ 3138 irq_set_affinity_hint(pf->msix_entries[vector].vector, 3139 NULL); 3140 free_irq(pf->msix_entries[vector].vector, 3141 vsi->q_vectors[i]); 3142 3143 /* Tear down the interrupt queue link list 3144 * 3145 * We know that they come in pairs and always 3146 * the Rx first, then the Tx. To clear the 3147 * link list, stick the EOL value into the 3148 * next_q field of the registers. 
3149 */ 3150 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3151 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3152 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3153 val |= I40E_QUEUE_END_OF_LIST 3154 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3155 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3156 3157 while (qp != I40E_QUEUE_END_OF_LIST) { 3158 u32 next; 3159 3160 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3161 3162 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3163 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3164 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3165 I40E_QINT_RQCTL_INTEVENT_MASK); 3166 3167 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3168 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3169 3170 wr32(hw, I40E_QINT_RQCTL(qp), val); 3171 3172 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3173 3174 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 3175 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 3176 3177 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3178 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3179 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3180 I40E_QINT_TQCTL_INTEVENT_MASK); 3181 3182 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3183 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3184 3185 wr32(hw, I40E_QINT_TQCTL(qp), val); 3186 qp = next; 3187 } 3188 } 3189 } else { 3190 free_irq(pf->pdev->irq, pf); 3191 3192 val = rd32(hw, I40E_PFINT_LNKLST0); 3193 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3194 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3195 val |= I40E_QUEUE_END_OF_LIST 3196 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 3197 wr32(hw, I40E_PFINT_LNKLST0, val); 3198 3199 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3200 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3201 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3202 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3203 I40E_QINT_RQCTL_INTEVENT_MASK); 3204 3205 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3206 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3207 3208 wr32(hw, I40E_QINT_RQCTL(qp), val); 3209 3210 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3211 3212 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3213 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3214 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3215 I40E_QINT_TQCTL_INTEVENT_MASK); 3216 3217 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3218 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3219 3220 wr32(hw, I40E_QINT_TQCTL(qp), val); 3221 } 3222 } 3223 3224 /** 3225 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 3226 * @vsi: the VSI being configured 3227 * @v_idx: Index of vector to be freed 3228 * 3229 * This function frees the memory allocated to the q_vector. In addition if 3230 * NAPI is enabled it will delete any references to the NAPI struct prior 3231 * to freeing the q_vector. 3232 **/ 3233 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 3234 { 3235 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3236 struct i40e_ring *ring; 3237 3238 if (!q_vector) 3239 return; 3240 3241 /* disassociate q_vector from rings */ 3242 i40e_for_each_ring(ring, q_vector->tx) 3243 ring->q_vector = NULL; 3244 3245 i40e_for_each_ring(ring, q_vector->rx) 3246 ring->q_vector = NULL; 3247 3248 /* only VSI w/ an associated netdev is set up w/ NAPI */ 3249 if (vsi->netdev) 3250 netif_napi_del(&q_vector->napi); 3251 3252 vsi->q_vectors[v_idx] = NULL; 3253 3254 kfree_rcu(q_vector, rcu); 3255 } 3256 3257 /** 3258 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3259 * @vsi: the VSI being un-configured 3260 * 3261 * This frees the memory allocated to the q_vectors and 3262 * deletes references to the NAPI struct. 
3263 **/ 3264 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 3265 { 3266 int v_idx; 3267 3268 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 3269 i40e_free_q_vector(vsi, v_idx); 3270 } 3271 3272 /** 3273 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 3274 * @pf: board private structure 3275 **/ 3276 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 3277 { 3278 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 3279 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3280 pci_disable_msix(pf->pdev); 3281 kfree(pf->msix_entries); 3282 pf->msix_entries = NULL; 3283 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 3284 pci_disable_msi(pf->pdev); 3285 } 3286 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 3287 } 3288 3289 /** 3290 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 3291 * @pf: board private structure 3292 * 3293 * We go through and clear interrupt specific resources and reset the structure 3294 * to pre-load conditions 3295 **/ 3296 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 3297 { 3298 int i; 3299 3300 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3301 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 3302 if (pf->vsi[i]) 3303 i40e_vsi_free_q_vectors(pf->vsi[i]); 3304 i40e_reset_interrupt_capability(pf); 3305 } 3306 3307 /** 3308 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 3309 * @vsi: the VSI being configured 3310 **/ 3311 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 3312 { 3313 int q_idx; 3314 3315 if (!vsi->netdev) 3316 return; 3317 3318 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3319 napi_enable(&vsi->q_vectors[q_idx]->napi); 3320 } 3321 3322 /** 3323 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 3324 * @vsi: the VSI being configured 3325 **/ 3326 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 3327 { 3328 int q_idx; 3329 3330 if (!vsi->netdev) 3331 return; 3332 3333 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3334 napi_disable(&vsi->q_vectors[q_idx]->napi); 3335 } 3336 3337 /** 3338 * i40e_quiesce_vsi - Pause a given VSI 3339 * @vsi: the VSI being paused 3340 **/ 3341 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 3342 { 3343 if (test_bit(__I40E_DOWN, &vsi->state)) 3344 return; 3345 3346 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 3347 if (vsi->netdev && netif_running(vsi->netdev)) { 3348 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3349 } else { 3350 set_bit(__I40E_DOWN, &vsi->state); 3351 i40e_down(vsi); 3352 } 3353 } 3354 3355 /** 3356 * i40e_unquiesce_vsi - Resume a given VSI 3357 * @vsi: the VSI being resumed 3358 **/ 3359 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 3360 { 3361 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 3362 return; 3363 3364 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 3365 if (vsi->netdev && netif_running(vsi->netdev)) 3366 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3367 else 3368 i40e_up(vsi); /* this clears the DOWN bit */ 3369 } 3370 3371 /** 3372 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 3373 * @pf: the PF 3374 **/ 3375 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 3376 { 3377 int v; 3378 3379 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3380 if (pf->vsi[v]) 3381 i40e_quiesce_vsi(pf->vsi[v]); 3382 } 3383 } 3384 3385 /** 3386 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 3387 * @pf: the PF 3388 **/ 3389 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 3390 { 3391 int v; 3392 
3393 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3394 if (pf->vsi[v]) 3395 i40e_unquiesce_vsi(pf->vsi[v]); 3396 } 3397 } 3398 3399 /** 3400 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 3401 * @dcbcfg: the corresponding DCBx configuration structure 3402 * 3403 * Return the number of TCs from given DCBx configuration 3404 **/ 3405 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 3406 { 3407 u8 num_tc = 0; 3408 int i; 3409 3410 /* Scan the ETS Config Priority Table to find 3411 * the traffic class enabled for a given priority 3412 * and use the highest traffic class index to get 3413 * the number of traffic classes enabled 3414 */ 3415 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 3416 if (dcbcfg->etscfg.prioritytable[i] > num_tc) 3417 num_tc = dcbcfg->etscfg.prioritytable[i]; 3418 } 3419 3420 /* Traffic class index starts from zero so 3421 * increment to return the actual count 3422 */ 3423 return num_tc + 1; 3424 } 3425 3426 /** 3427 * i40e_dcb_get_enabled_tc - Get enabled traffic classes 3428 * @dcbcfg: the corresponding DCBx configuration structure 3429 * 3430 * Query the current DCB configuration and return the bitmap of 3431 * enabled traffic classes from the given DCBX config 3432 **/ 3433 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) 3434 { 3435 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); 3436 u8 enabled_tc = 1; 3437 u8 i; 3438 3439 for (i = 0; i < num_tc; i++) 3440 enabled_tc |= 1 << i; 3441 3442 return enabled_tc; 3443 } 3444 3445 /** 3446 * i40e_pf_get_num_tc - Get enabled traffic classes for PF 3447 * @pf: PF being queried 3448 * 3449 * Return number of traffic classes enabled for the given PF 3450 **/ 3451 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) 3452 { 3453 struct i40e_hw *hw = &pf->hw; 3454 u8 i, enabled_tc; 3455 u8 num_tc = 0; 3456 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 3457 3458 /* If DCB is not enabled then we are always in a single TC */ 3459 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 3460 return 1; 3461 3462 /* In MFP mode return the count of enabled TCs for this PF */ 3463 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 3464 enabled_tc = pf->hw.func_caps.enabled_tcmap; 3465 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3466 if (enabled_tc & (1 << i)) 3467 num_tc++; 3468 } 3469 return num_tc; 3470 } 3471 3472 /* SFP mode will be enabled for all TCs on port */ 3473 return i40e_dcb_get_num_tc(dcbcfg); 3474 } 3475 3476 /** 3477 * i40e_pf_get_default_tc - Get bitmap for first enabled TC 3478 * @pf: PF being queried 3479 * 3480 * Return a bitmap for first enabled traffic class for this PF. 3481 **/ 3482 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) 3483 { 3484 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; 3485 u8 i = 0; 3486 3487 if (!enabled_tc) 3488 return 0x1; /* TC0 */ 3489 3490 /* Find the first enabled TC */ 3491 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3492 if (enabled_tc & (1 << i)) 3493 break; 3494 } 3495 3496 return 1 << i; 3497 } 3498 3499 /** 3500 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes 3501 * @pf: PF being queried 3502 * 3503 * Return a bitmap for enabled traffic classes for this PF.
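* For example, in SFP mode with three TCs present in the DCBx ETS
* priority table, i40e_dcb_get_enabled_tc() computes num_tc = 3 and
* the returned bitmap is 0x7 (TC0..TC2).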
3504 **/ 3505 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) 3506 { 3507 /* If DCB is not enabled for this PF then just return default TC */ 3508 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 3509 return i40e_pf_get_default_tc(pf); 3510 3511 /* MFP mode will have enabled TCs set by FW */ 3512 if (pf->flags & I40E_FLAG_MFP_ENABLED) 3513 return pf->hw.func_caps.enabled_tcmap; 3514 3515 /* SFP mode we want PF to be enabled for all TCs */ 3516 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); 3517 } 3518 3519 /** 3520 * i40e_vsi_get_bw_info - Query VSI BW Information 3521 * @vsi: the VSI being queried 3522 * 3523 * Returns 0 on success, negative value on failure 3524 **/ 3525 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) 3526 { 3527 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; 3528 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 3529 struct i40e_pf *pf = vsi->back; 3530 struct i40e_hw *hw = &pf->hw; 3531 i40e_status aq_ret; 3532 u32 tc_bw_max; 3533 int i; 3534 3535 /* Get the VSI level BW configuration */ 3536 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 3537 if (aq_ret) { 3538 dev_info(&pf->pdev->dev, 3539 "couldn't get pf vsi bw config, err %d, aq_err %d\n", 3540 aq_ret, pf->hw.aq.asq_last_status); 3541 return -EINVAL; 3542 } 3543 3544 /* Get the VSI level BW configuration per TC */ 3545 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, 3546 NULL); 3547 if (aq_ret) { 3548 dev_info(&pf->pdev->dev, 3549 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", 3550 aq_ret, pf->hw.aq.asq_last_status); 3551 return -EINVAL; 3552 } 3553 3554 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 3555 dev_info(&pf->pdev->dev, 3556 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", 3557 bw_config.tc_valid_bits, 3558 bw_ets_config.tc_valid_bits); 3559 /* Still continuing */ 3560 } 3561 3562 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); 3563 vsi->bw_max_quanta = bw_config.max_bw; 3564 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | 3565 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); 3566 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3567 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; 3568 vsi->bw_ets_limit_credits[i] = 3569 le16_to_cpu(bw_ets_config.credits[i]); 3570 /* 3 bits out of 4 for each TC */ 3571 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 3572 } 3573 3574 return 0; 3575 } 3576 3577 /** 3578 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC 3579 * @vsi: the VSI being configured 3580 * @enabled_tc: TC bitmap 3581 * @bw_share: BW shared credits per TC 3582 * 3583 * Returns 0 on success, negative value on failure 3584 **/ 3585 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, 3586 u8 *bw_share) 3587 { 3588 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 3589 i40e_status aq_ret; 3590 int i; 3591 3592 bw_data.tc_valid_bits = enabled_tc; 3593 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3594 bw_data.tc_bw_credits[i] = bw_share[i]; 3595 3596 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, 3597 NULL); 3598 if (aq_ret) { 3599 dev_info(&vsi->back->pdev->dev, 3600 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3601 __func__, vsi->back->hw.aq.asq_last_status); 3602 return -EINVAL; 3603 } 3604 3605 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3606 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 3607 3608 return 0; 3609 } 3610 3611 /** 3612 *
i40e_vsi_config_netdev_tc - Setup the netdev TC configuration 3613 * @vsi: the VSI being configured 3614 * @enabled_tc: TC map to be enabled 3615 * 3616 **/ 3617 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) 3618 { 3619 struct net_device *netdev = vsi->netdev; 3620 struct i40e_pf *pf = vsi->back; 3621 struct i40e_hw *hw = &pf->hw; 3622 u8 netdev_tc = 0; 3623 int i; 3624 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 3625 3626 if (!netdev) 3627 return; 3628 3629 if (!enabled_tc) { 3630 netdev_reset_tc(netdev); 3631 return; 3632 } 3633 3634 /* Set up actual enabled TCs on the VSI */ 3635 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) 3636 return; 3637 3638 /* set per TC queues for the VSI */ 3639 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3640 /* Only set TC queues for enabled tcs 3641 * 3642 * e.g. For a VSI that has TC0 and TC3 enabled the 3643 * enabled_tc bitmap would be 0x09 (binary 1001); the driver 3644 * will set the numtc for netdev as 2, which will be 3645 * referenced by the netdev layer as TC 0 and 1. 3646 */ 3647 if (vsi->tc_config.enabled_tc & (1 << i)) 3648 netdev_set_tc_queue(netdev, 3649 vsi->tc_config.tc_info[i].netdev_tc, 3650 vsi->tc_config.tc_info[i].qcount, 3651 vsi->tc_config.tc_info[i].qoffset); 3652 } 3653 3654 /* Assign UP2TC map for the VSI */ 3655 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 3656 /* Get the actual TC# for the UP */ 3657 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; 3658 /* Get the mapped netdev TC# for the UP */ 3659 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; 3660 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3661 } 3662 } 3663 3664 /** 3665 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map 3666 * @vsi: the VSI being configured 3667 * @ctxt: the ctxt buffer returned from AQ VSI update param command 3668 **/ 3669 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, 3670 struct i40e_vsi_context *ctxt) 3671 { 3672 /* copy just the sections touched not the entire info 3673 * since not all sections are valid as returned by 3674 * update vsi params 3675 */ 3676 vsi->info.mapping_flags = ctxt->info.mapping_flags; 3677 memcpy(&vsi->info.queue_mapping, 3678 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); 3679 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, 3680 sizeof(vsi->info.tc_mapping)); 3681 } 3682 3683 /** 3684 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map 3685 * @vsi: VSI to be configured 3686 * @enabled_tc: TC bitmap 3687 * 3688 * This configures a particular VSI for TCs that are mapped to the 3689 * given TC bitmap. It uses default bandwidth share for TCs across 3690 * VSIs to configure TC for a particular VSI. 3691 * 3692 * NOTE: 3693 * It is expected that the VSI queues have been quiesced before calling 3694 * this function.
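* A typical calling sequence, as done in i40e_setup_tc() below, is:
*
*	i40e_quiesce_vsi(vsi);
*	ret = i40e_vsi_config_tc(vsi, enabled_tc);
*	i40e_unquiesce_vsi(vsi);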
3695 **/ 3696 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) 3697 { 3698 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; 3699 struct i40e_vsi_context ctxt; 3700 int ret = 0; 3701 int i; 3702 3703 /* Check if enabled_tc is same as existing or new TCs */ 3704 if (vsi->tc_config.enabled_tc == enabled_tc) 3705 return ret; 3706 3707 /* Enable ETS TCs with equal BW Share for now across all VSIs */ 3708 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3709 if (enabled_tc & (1 << i)) 3710 bw_share[i] = 1; 3711 } 3712 3713 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); 3714 if (ret) { 3715 dev_info(&vsi->back->pdev->dev, 3716 "Failed configuring TC map %d for VSI %d\n", 3717 enabled_tc, vsi->seid); 3718 goto out; 3719 } 3720 3721 /* Update Queue Pairs Mapping for currently enabled UPs */ 3722 ctxt.seid = vsi->seid; 3723 ctxt.pf_num = vsi->back->hw.pf_id; 3724 ctxt.vf_num = 0; 3725 ctxt.uplink_seid = vsi->uplink_seid; 3726 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 3727 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 3728 3729 /* Update the VSI after updating the VSI queue-mapping information */ 3730 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 3731 if (ret) { 3732 dev_info(&vsi->back->pdev->dev, 3733 "update vsi failed, aq_err=%d\n", 3734 vsi->back->hw.aq.asq_last_status); 3735 goto out; 3736 } 3737 /* update the local VSI info with updated queue map */ 3738 i40e_vsi_update_queue_map(vsi, &ctxt); 3739 vsi->info.valid_sections = 0; 3740 3741 /* Update current VSI BW information */ 3742 ret = i40e_vsi_get_bw_info(vsi); 3743 if (ret) { 3744 dev_info(&vsi->back->pdev->dev, 3745 "Failed updating vsi bw info, aq_err=%d\n", 3746 vsi->back->hw.aq.asq_last_status); 3747 goto out; 3748 } 3749 3750 /* Update the netdev TC setup */ 3751 i40e_vsi_config_netdev_tc(vsi, enabled_tc); 3752 out: 3753 return ret; 3754 } 3755 3756 /** 3757 * i40e_up_complete - Finish the last steps of bringing up a connection 3758 * @vsi: the VSI being configured 3759 **/ 3760 static int i40e_up_complete(struct i40e_vsi *vsi) 3761 { 3762 struct i40e_pf *pf = vsi->back; 3763 int err; 3764 3765 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 3766 i40e_vsi_configure_msix(vsi); 3767 else 3768 i40e_configure_msi_and_legacy(vsi); 3769 3770 /* start rings */ 3771 err = i40e_vsi_control_rings(vsi, true); 3772 if (err) 3773 return err; 3774 3775 clear_bit(__I40E_DOWN, &vsi->state); 3776 i40e_napi_enable_all(vsi); 3777 i40e_vsi_enable_irq(vsi); 3778 3779 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && 3780 (vsi->netdev)) { 3781 netdev_info(vsi->netdev, "NIC Link is Up\n"); 3782 netif_tx_start_all_queues(vsi->netdev); 3783 netif_carrier_on(vsi->netdev); 3784 } else if (vsi->netdev) { 3785 netdev_info(vsi->netdev, "NIC Link is Down\n"); 3786 } 3787 i40e_service_event_schedule(pf); 3788 3789 return 0; 3790 } 3791 3792 /** 3793 * i40e_vsi_reinit_locked - Reset the VSI 3794 * @vsi: the VSI being configured 3795 * 3796 * Rebuild the ring structs after some configuration 3797 * has changed, e.g. MTU size. 3798 **/ 3799 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) 3800 { 3801 struct i40e_pf *pf = vsi->back; 3802 3803 WARN_ON(in_interrupt()); 3804 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 3805 usleep_range(1000, 2000); 3806 i40e_down(vsi); 3807 3808 /* Give a VF some time to respond to the reset. The 3809 * two second wait is based upon the watchdog cycle in 3810 * the VF driver. 
3811 */ 3812 if (vsi->type == I40E_VSI_SRIOV) 3813 msleep(2000); 3814 i40e_up(vsi); 3815 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 3816 } 3817 3818 /** 3819 * i40e_up - Bring the connection back up after being down 3820 * @vsi: the VSI being configured 3821 **/ 3822 int i40e_up(struct i40e_vsi *vsi) 3823 { 3824 int err; 3825 3826 err = i40e_vsi_configure(vsi); 3827 if (!err) 3828 err = i40e_up_complete(vsi); 3829 3830 return err; 3831 } 3832 3833 /** 3834 * i40e_down - Shutdown the connection processing 3835 * @vsi: the VSI being stopped 3836 **/ 3837 void i40e_down(struct i40e_vsi *vsi) 3838 { 3839 int i; 3840 3841 /* It is assumed that the caller of this function 3842 * sets the vsi->state __I40E_DOWN bit. 3843 */ 3844 if (vsi->netdev) { 3845 netif_carrier_off(vsi->netdev); 3846 netif_tx_disable(vsi->netdev); 3847 } 3848 i40e_vsi_disable_irq(vsi); 3849 i40e_vsi_control_rings(vsi, false); 3850 i40e_napi_disable_all(vsi); 3851 3852 for (i = 0; i < vsi->num_queue_pairs; i++) { 3853 i40e_clean_tx_ring(vsi->tx_rings[i]); 3854 i40e_clean_rx_ring(vsi->rx_rings[i]); 3855 } 3856 } 3857 3858 /** 3859 * i40e_setup_tc - configure multiple traffic classes 3860 * @netdev: net device to configure 3861 * @tc: number of traffic classes to enable 3862 **/ 3863 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 3864 { 3865 struct i40e_netdev_priv *np = netdev_priv(netdev); 3866 struct i40e_vsi *vsi = np->vsi; 3867 struct i40e_pf *pf = vsi->back; 3868 u8 enabled_tc = 0; 3869 int ret = -EINVAL; 3870 int i; 3871 3872 /* Check if DCB enabled to continue */ 3873 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 3874 netdev_info(netdev, "DCB is not enabled for adapter\n"); 3875 goto exit; 3876 } 3877 3878 /* Check if MFP enabled */ 3879 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 3880 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 3881 goto exit; 3882 } 3883 3884 /* Check whether tc count is within enabled limit */ 3885 if (tc > i40e_pf_get_num_tc(pf)) { 3886 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 3887 goto exit; 3888 } 3889 3890 /* Generate TC map for number of tc requested */ 3891 for (i = 0; i < tc; i++) 3892 enabled_tc |= (1 << i); 3893 3894 /* Requesting same TC configuration as already enabled */ 3895 if (enabled_tc == vsi->tc_config.enabled_tc) 3896 return 0; 3897 3898 /* Quiesce VSI queues */ 3899 i40e_quiesce_vsi(vsi); 3900 3901 /* Configure VSI for enabled TCs */ 3902 ret = i40e_vsi_config_tc(vsi, enabled_tc); 3903 if (ret) { 3904 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 3905 vsi->seid); 3906 goto exit; 3907 } 3908 3909 /* Unquiesce VSI */ 3910 i40e_unquiesce_vsi(vsi); 3911 3912 exit: 3913 return ret; 3914 } 3915 3916 /** 3917 * i40e_open - Called when a network interface is made active 3918 * @netdev: network interface device structure 3919 * 3920 * The open entry point is called when a network interface is made 3921 * active by the system (IFF_UP). At this point all resources needed 3922 * for transmit and receive operations are allocated, the interrupt 3923 * handler is registered with the OS, the netdev watchdog subtask is 3924 * enabled, and the stack is notified that the interface is ready. 
3925 * 3926 * Returns 0 on success, negative value on failure 3927 **/ 3928 static int i40e_open(struct net_device *netdev) 3929 { 3930 struct i40e_netdev_priv *np = netdev_priv(netdev); 3931 struct i40e_vsi *vsi = np->vsi; 3932 struct i40e_pf *pf = vsi->back; 3933 char int_name[IFNAMSIZ]; 3934 int err; 3935 3936 /* disallow open during test */ 3937 if (test_bit(__I40E_TESTING, &pf->state)) 3938 return -EBUSY; 3939 3940 netif_carrier_off(netdev); 3941 3942 /* allocate descriptors */ 3943 err = i40e_vsi_setup_tx_resources(vsi); 3944 if (err) 3945 goto err_setup_tx; 3946 err = i40e_vsi_setup_rx_resources(vsi); 3947 if (err) 3948 goto err_setup_rx; 3949 3950 err = i40e_vsi_configure(vsi); 3951 if (err) 3952 goto err_setup_rx; 3953 3954 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 3955 dev_driver_string(&pf->pdev->dev), netdev->name); 3956 err = i40e_vsi_request_irq(vsi, int_name); 3957 if (err) 3958 goto err_setup_rx; 3959 3960 err = i40e_up_complete(vsi); 3961 if (err) 3962 goto err_up_complete; 3963 3964 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) { 3965 err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL); 3966 if (err) 3967 netdev_info(netdev, 3968 "couldn't set broadcast err %d aq_err %d\n", 3969 err, pf->hw.aq.asq_last_status); 3970 } 3971 3972 return 0; 3973 3974 err_up_complete: 3975 i40e_down(vsi); 3976 i40e_vsi_free_irq(vsi); 3977 err_setup_rx: 3978 i40e_vsi_free_rx_resources(vsi); 3979 err_setup_tx: 3980 i40e_vsi_free_tx_resources(vsi); 3981 if (vsi == pf->vsi[pf->lan_vsi]) 3982 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); 3983 3984 return err; 3985 } 3986 3987 /** 3988 * i40e_close - Disables a network interface 3989 * @netdev: network interface device structure 3990 * 3991 * The close entry point is called when an interface is de-activated 3992 * by the OS. The hardware is still under the driver's control, but 3993 * this netdev interface is disabled. 3994 * 3995 * Returns 0, this is not allowed to fail 3996 **/ 3997 static int i40e_close(struct net_device *netdev) 3998 { 3999 struct i40e_netdev_priv *np = netdev_priv(netdev); 4000 struct i40e_vsi *vsi = np->vsi; 4001 4002 if (test_and_set_bit(__I40E_DOWN, &vsi->state)) 4003 return 0; 4004 4005 i40e_down(vsi); 4006 i40e_vsi_free_irq(vsi); 4007 4008 i40e_vsi_free_tx_resources(vsi); 4009 i40e_vsi_free_rx_resources(vsi); 4010 4011 return 0; 4012 } 4013 4014 /** 4015 * i40e_do_reset - Start a PF or Core Reset sequence 4016 * @pf: board private structure 4017 * @reset_flags: which reset is requested 4018 * 4019 * The essential difference in resets is that the PF Reset 4020 * doesn't clear the packet buffers, doesn't reset the PE 4021 * firmware, and doesn't bother the other PFs on the chip. 4022 **/ 4023 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 4024 { 4025 u32 val; 4026 4027 WARN_ON(in_interrupt()); 4028 4029 /* do the biggest reset indicated */ 4030 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { 4031 4032 /* Request a Global Reset 4033 * 4034 * This will start the chip's countdown to the actual full 4035 * chip reset event, and a warning interrupt to be sent 4036 * to all PFs, including the requestor. Our handler 4037 * for the warning interrupt will deal with the shutdown 4038 * and recovery of the switch setup. 
4039 */ 4040 dev_info(&pf->pdev->dev, "GlobalR requested\n"); 4041 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4042 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 4043 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4044 4045 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { 4046 4047 /* Request a Core Reset 4048 * 4049 * Same as Global Reset, except does *not* include the MAC/PHY 4050 */ 4051 dev_info(&pf->pdev->dev, "CoreR requested\n"); 4052 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4053 val |= I40E_GLGEN_RTRIG_CORER_MASK; 4054 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4055 i40e_flush(&pf->hw); 4056 4057 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { 4058 4059 /* Request a PF Reset 4060 * 4061 * Resets only the PF-specific registers 4062 * 4063 * This goes directly to the tear-down and rebuild of 4064 * the switch, since we need to do all the recovery as 4065 * for the Core Reset. 4066 */ 4067 dev_info(&pf->pdev->dev, "PFR requested\n"); 4068 i40e_handle_reset_warning(pf); 4069 4070 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { 4071 int v; 4072 4073 /* Find the VSI(s) that requested a re-init */ 4074 dev_info(&pf->pdev->dev, 4075 "VSI reinit requested\n"); 4076 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4077 struct i40e_vsi *vsi = pf->vsi[v]; 4078 if (vsi != NULL && 4079 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 4080 i40e_vsi_reinit_locked(pf->vsi[v]); 4081 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); 4082 } 4083 } 4084 4085 /* no further action needed, so return now */ 4086 return; 4087 } else { 4088 dev_info(&pf->pdev->dev, 4089 "bad reset request 0x%08x\n", reset_flags); 4090 return; 4091 } 4092 } 4093 4094 /** 4095 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event 4096 * @pf: board private structure 4097 * @e: event info posted on ARQ 4098 * 4099 * Handler for LAN Queue Overflow Event generated by the firmware for PF 4100 * and VF queues 4101 **/ 4102 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, 4103 struct i40e_arq_event_info *e) 4104 { 4105 struct i40e_aqc_lan_overflow *data = 4106 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; 4107 u32 queue = le32_to_cpu(data->prtdcb_rupto); 4108 u32 qtx_ctl = le32_to_cpu(data->otx_ctl); 4109 struct i40e_hw *hw = &pf->hw; 4110 struct i40e_vf *vf; 4111 u16 vf_id; 4112 4113 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", 4114 __func__, queue, qtx_ctl); 4115 4116 /* Queue belongs to VF, find the VF and issue VF reset */ 4117 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) 4118 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { 4119 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) 4120 >> I40E_QTX_CTL_VFVM_INDX_SHIFT); 4121 vf_id -= hw->func_caps.vf_base_id; 4122 vf = &pf->vf[vf_id]; 4123 i40e_vc_notify_vf_reset(vf); 4124 /* Allow VF to process pending reset notification */ 4125 msleep(20); 4126 i40e_reset_vf(vf, false); 4127 } 4128 } 4129 4130 /** 4131 * i40e_service_event_complete - Finish up the service event 4132 * @pf: board private structure 4133 **/ 4134 static void i40e_service_event_complete(struct i40e_pf *pf) 4135 { 4136 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); 4137 4138 /* flush memory to make sure state is correct before next watchog */ 4139 smp_mb__before_clear_bit(); 4140 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 4141 } 4142 4143 /** 4144 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 4145 * @pf: board private structure 4146 **/ 4147 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 4148 { 
4149 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) 4150 return; 4151 4152 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; 4153 4154 /* if interface is down do nothing */ 4155 if (test_bit(__I40E_DOWN, &pf->state)) 4156 return; 4157 } 4158 4159 /** 4160 * i40e_vsi_link_event - notify VSI of a link event 4161 * @vsi: vsi to be notified 4162 * @link_up: link up or down 4163 **/ 4164 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 4165 { 4166 if (!vsi) 4167 return; 4168 4169 switch (vsi->type) { 4170 case I40E_VSI_MAIN: 4171 if (!vsi->netdev || !vsi->netdev_registered) 4172 break; 4173 4174 if (link_up) { 4175 netif_carrier_on(vsi->netdev); 4176 netif_tx_wake_all_queues(vsi->netdev); 4177 } else { 4178 netif_carrier_off(vsi->netdev); 4179 netif_tx_stop_all_queues(vsi->netdev); 4180 } 4181 break; 4182 4183 case I40E_VSI_SRIOV: 4184 break; 4185 4186 case I40E_VSI_VMDQ2: 4187 case I40E_VSI_CTRL: 4188 case I40E_VSI_MIRROR: 4189 default: 4190 /* there is no notification for other VSIs */ 4191 break; 4192 } 4193 } 4194 4195 /** 4196 * i40e_veb_link_event - notify elements on the veb of a link event 4197 * @veb: veb to be notified 4198 * @link_up: link up or down 4199 **/ 4200 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 4201 { 4202 struct i40e_pf *pf; 4203 int i; 4204 4205 if (!veb || !veb->pf) 4206 return; 4207 pf = veb->pf; 4208 4209 /* depth first... */ 4210 for (i = 0; i < I40E_MAX_VEB; i++) 4211 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 4212 i40e_veb_link_event(pf->veb[i], link_up); 4213 4214 /* ... now the local VSIs */ 4215 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4216 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 4217 i40e_vsi_link_event(pf->vsi[i], link_up); 4218 } 4219 4220 /** 4221 * i40e_link_event - Update netif_carrier status 4222 * @pf: board private structure 4223 **/ 4224 static void i40e_link_event(struct i40e_pf *pf) 4225 { 4226 bool new_link, old_link; 4227 4228 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); 4229 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 4230 4231 if (new_link == old_link) 4232 return; 4233 4234 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) 4235 netdev_info(pf->vsi[pf->lan_vsi]->netdev, 4236 "NIC Link is %s\n", (new_link ? "Up" : "Down")); 4237 4238 /* Notify the base of the switch tree connected to 4239 * the link. Floating VEBs are not notified. 4240 */ 4241 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 4242 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 4243 else 4244 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); 4245 4246 if (pf->vf) 4247 i40e_vc_notify_link_state(pf); 4248 } 4249 4250 /** 4251 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts 4252 * @pf: board private structure 4253 * 4254 * Set the per-queue flags to request a check for stuck queues in the irq 4255 * clean functions, then force interrupts to be sure the irq clean is called. 
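 *
 * In MSI/legacy mode a single software interrupt is triggered through
 * I40E_PFINT_DYN_CTL0; with MSI-X, one is triggered per q_vector of
 * the VSI.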
4256 **/ 4257 static void i40e_check_hang_subtask(struct i40e_pf *pf) 4258 { 4259 int i, v; 4260 4261 /* If we're down or resetting, just bail */ 4262 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4263 return; 4264 4265 /* for each VSI/netdev 4266 * for each Tx queue 4267 * set the check flag 4268 * for each q_vector 4269 * force an interrupt 4270 */ 4271 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4272 struct i40e_vsi *vsi = pf->vsi[v]; 4273 int armed = 0; 4274 4275 if (!pf->vsi[v] || 4276 test_bit(__I40E_DOWN, &vsi->state) || 4277 (vsi->netdev && !netif_carrier_ok(vsi->netdev))) 4278 continue; 4279 4280 for (i = 0; i < vsi->num_queue_pairs; i++) { 4281 set_check_for_tx_hang(vsi->tx_rings[i]); 4282 if (test_bit(__I40E_HANG_CHECK_ARMED, 4283 &vsi->tx_rings[i]->state)) 4284 armed++; 4285 } 4286 4287 if (armed) { 4288 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 4289 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, 4290 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 4291 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); 4292 } else { 4293 u16 vec = vsi->base_vector - 1; 4294 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 4295 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); 4296 for (i = 0; i < vsi->num_q_vectors; i++, vec++) 4297 wr32(&vsi->back->hw, 4298 I40E_PFINT_DYN_CTLN(vec), val); 4299 } 4300 i40e_flush(&vsi->back->hw); 4301 } 4302 } 4303 } 4304 4305 /** 4306 * i40e_watchdog_subtask - Check and bring link up 4307 * @pf: board private structure 4308 **/ 4309 static void i40e_watchdog_subtask(struct i40e_pf *pf) 4310 { 4311 int i; 4312 4313 /* if interface is down do nothing */ 4314 if (test_bit(__I40E_DOWN, &pf->state) || 4315 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4316 return; 4317 4318 /* Update the stats for active netdevs so the network stack 4319 * can look at updated numbers whenever it cares to 4320 */ 4321 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4322 if (pf->vsi[i] && pf->vsi[i]->netdev) 4323 i40e_update_stats(pf->vsi[i]); 4324 4325 /* Update the stats for the active switching components */ 4326 for (i = 0; i < I40E_MAX_VEB; i++) 4327 if (pf->veb[i]) 4328 i40e_update_veb_stats(pf->veb[i]); 4329 } 4330 4331 /** 4332 * i40e_reset_subtask - Set up for resetting the device and driver 4333 * @pf: board private structure 4334 **/ 4335 static void i40e_reset_subtask(struct i40e_pf *pf) 4336 { 4337 u32 reset_flags = 0; 4338 4339 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 4340 reset_flags |= (1 << __I40E_REINIT_REQUESTED); 4341 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 4342 } 4343 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 4344 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); 4345 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 4346 } 4347 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 4348 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); 4349 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 4350 } 4351 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 4352 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); 4353 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 4354 } 4355 4356 /* If there's a recovery already waiting, it takes 4357 * precedence before starting a new reset sequence. 
4358 */ 4359 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 4360 i40e_handle_reset_warning(pf); 4361 return; 4362 } 4363 4364 /* If we're already down or resetting, just bail */ 4365 if (reset_flags && 4366 !test_bit(__I40E_DOWN, &pf->state) && 4367 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4368 i40e_do_reset(pf, reset_flags); 4369 } 4370 4371 /** 4372 * i40e_handle_link_event - Handle link event 4373 * @pf: board private structure 4374 * @e: event info posted on ARQ 4375 **/ 4376 static void i40e_handle_link_event(struct i40e_pf *pf, 4377 struct i40e_arq_event_info *e) 4378 { 4379 struct i40e_hw *hw = &pf->hw; 4380 struct i40e_aqc_get_link_status *status = 4381 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 4382 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 4383 4384 /* save off old link status information */ 4385 memcpy(&pf->hw.phy.link_info_old, hw_link_info, 4386 sizeof(pf->hw.phy.link_info_old)); 4387 4388 /* update link status */ 4389 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; 4390 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; 4391 hw_link_info->link_info = status->link_info; 4392 hw_link_info->an_info = status->an_info; 4393 hw_link_info->ext_info = status->ext_info; 4394 hw_link_info->lse_enable = 4395 le16_to_cpu(status->command_flags) & 4396 I40E_AQ_LSE_ENABLE; 4397 4398 /* process the event */ 4399 i40e_link_event(pf); 4400 4401 /* Do a new status request to re-enable LSE reporting 4402 * and load new status information into the hw struct, 4403 * then see if the status changed while processing the 4404 * initial event. 4405 */ 4406 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); 4407 i40e_link_event(pf); 4408 } 4409 4410 /** 4411 * i40e_clean_adminq_subtask - Clean the AdminQ rings 4412 * @pf: board private structure 4413 **/ 4414 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 4415 { 4416 struct i40e_arq_event_info event; 4417 struct i40e_hw *hw = &pf->hw; 4418 u16 pending, i = 0; 4419 i40e_status ret; 4420 u16 opcode; 4421 u32 val; 4422 4423 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)) 4424 return; 4425 4426 event.msg_size = I40E_MAX_AQ_BUF_SIZE; 4427 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 4428 if (!event.msg_buf) 4429 return; 4430 4431 do { 4432 ret = i40e_clean_arq_element(hw, &event, &pending); 4433 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) { 4434 dev_info(&pf->pdev->dev, "No ARQ event found\n"); 4435 break; 4436 } else if (ret) { 4437 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 4438 break; 4439 } 4440 4441 opcode = le16_to_cpu(event.desc.opcode); 4442 switch (opcode) { 4443 4444 case i40e_aqc_opc_get_link_status: 4445 i40e_handle_link_event(pf, &event); 4446 break; 4447 case i40e_aqc_opc_send_msg_to_pf: 4448 ret = i40e_vc_process_vf_msg(pf, 4449 le16_to_cpu(event.desc.retval), 4450 le32_to_cpu(event.desc.cookie_high), 4451 le32_to_cpu(event.desc.cookie_low), 4452 event.msg_buf, 4453 event.msg_size); 4454 break; 4455 case i40e_aqc_opc_lldp_update_mib: 4456 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 4457 break; 4458 case i40e_aqc_opc_event_lan_overflow: 4459 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 4460 i40e_handle_lan_overflow_event(pf, &event); 4461 break; 4462 default: 4463 dev_info(&pf->pdev->dev, 4464 "ARQ Error: Unknown event %d received\n", 4465 event.desc.opcode); 4466 break; 4467 } 4468 } while (pending && (i++ < pf->adminq_work_limit)); 4469 4470 clear_bit(__I40E_ADMINQ_EVENT_PENDING, 
&pf->state); 4471 /* re-enable Admin queue interrupt cause */ 4472 val = rd32(hw, I40E_PFINT_ICR0_ENA); 4473 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 4474 wr32(hw, I40E_PFINT_ICR0_ENA, val); 4475 i40e_flush(hw); 4476 4477 kfree(event.msg_buf); 4478 } 4479 4480 /** 4481 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 4482 * @veb: pointer to the VEB instance 4483 * 4484 * This is a recursive function that first builds the attached VSIs then 4485 * recurses in to build the next layer of VEB. We track the connections 4486 * through our own index numbers because the seid's from the HW could 4487 * change across the reset. 4488 **/ 4489 static int i40e_reconstitute_veb(struct i40e_veb *veb) 4490 { 4491 struct i40e_vsi *ctl_vsi = NULL; 4492 struct i40e_pf *pf = veb->pf; 4493 int v, veb_idx; 4494 int ret; 4495 4496 /* build VSI that owns this VEB, temporarily attached to base VEB */ 4497 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) { 4498 if (pf->vsi[v] && 4499 pf->vsi[v]->veb_idx == veb->idx && 4500 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 4501 ctl_vsi = pf->vsi[v]; 4502 break; 4503 } 4504 } 4505 if (!ctl_vsi) { 4506 dev_info(&pf->pdev->dev, 4507 "missing owner VSI for veb_idx %d\n", veb->idx); 4508 ret = -ENOENT; 4509 goto end_reconstitute; 4510 } 4511 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 4512 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 4513 ret = i40e_add_vsi(ctl_vsi); 4514 if (ret) { 4515 dev_info(&pf->pdev->dev, 4516 "rebuild of owner VSI failed: %d\n", ret); 4517 goto end_reconstitute; 4518 } 4519 i40e_vsi_reset_stats(ctl_vsi); 4520 4521 /* create the VEB in the switch and move the VSI onto the VEB */ 4522 ret = i40e_add_veb(veb, ctl_vsi); 4523 if (ret) 4524 goto end_reconstitute; 4525 4526 /* create the remaining VSIs attached to this VEB */ 4527 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4528 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 4529 continue; 4530 4531 if (pf->vsi[v]->veb_idx == veb->idx) { 4532 struct i40e_vsi *vsi = pf->vsi[v]; 4533 vsi->uplink_seid = veb->seid; 4534 ret = i40e_add_vsi(vsi); 4535 if (ret) { 4536 dev_info(&pf->pdev->dev, 4537 "rebuild of vsi_idx %d failed: %d\n", 4538 v, ret); 4539 goto end_reconstitute; 4540 } 4541 i40e_vsi_reset_stats(vsi); 4542 } 4543 } 4544 4545 /* create any VEBs attached to this VEB - RECURSION */ 4546 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 4547 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 4548 pf->veb[veb_idx]->uplink_seid = veb->seid; 4549 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 4550 if (ret) 4551 break; 4552 } 4553 } 4554 4555 end_reconstitute: 4556 return ret; 4557 } 4558 4559 /** 4560 * i40e_get_capabilities - get info about the HW 4561 * @pf: the PF struct 4562 **/ 4563 static int i40e_get_capabilities(struct i40e_pf *pf) 4564 { 4565 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 4566 u16 data_size; 4567 int buf_len; 4568 int err; 4569 4570 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 4571 do { 4572 cap_buf = kzalloc(buf_len, GFP_KERNEL); 4573 if (!cap_buf) 4574 return -ENOMEM; 4575 4576 /* this loads the data into the hw struct for us */ 4577 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 4578 &data_size, 4579 i40e_aqc_opc_list_func_capabilities, 4580 NULL); 4581 /* data loaded, buffer no longer needed */ 4582 kfree(cap_buf); 4583 4584 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 4585 /* retry with a larger buffer */ 4586 buf_len = data_size; 4587 } else if 
(pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 4588 dev_info(&pf->pdev->dev, 4589 "capability discovery failed: aq=%d\n", 4590 pf->hw.aq.asq_last_status); 4591 return -ENODEV; 4592 } 4593 } while (err); 4594 4595 if (pf->hw.debug_mask & I40E_DEBUG_USER) 4596 dev_info(&pf->pdev->dev, 4597 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 4598 pf->hw.pf_id, pf->hw.func_caps.num_vfs, 4599 pf->hw.func_caps.num_msix_vectors, 4600 pf->hw.func_caps.num_msix_vectors_vf, 4601 pf->hw.func_caps.fd_filters_guaranteed, 4602 pf->hw.func_caps.fd_filters_best_effort, 4603 pf->hw.func_caps.num_tx_qp, 4604 pf->hw.func_caps.num_vsis); 4605 4606 return 0; 4607 } 4608 4609 /** 4610 * i40e_fdir_setup - initialize the Flow Director resources 4611 * @pf: board private structure 4612 **/ 4613 static void i40e_fdir_setup(struct i40e_pf *pf) 4614 { 4615 struct i40e_vsi *vsi; 4616 bool new_vsi = false; 4617 int err, i; 4618 4619 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED | 4620 I40E_FLAG_FDIR_ATR_ENABLED))) 4621 return; 4622 4623 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 4624 4625 /* find existing or make new FDIR VSI */ 4626 vsi = NULL; 4627 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4628 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) 4629 vsi = pf->vsi[i]; 4630 if (!vsi) { 4631 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0); 4632 if (!vsi) { 4633 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 4634 pf->flags &= ~I40E_FLAG_FDIR_ENABLED; 4635 return; 4636 } 4637 new_vsi = true; 4638 } 4639 WARN_ON(vsi->base_queue != I40E_FDIR_RING); 4640 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings); 4641 4642 err = i40e_vsi_setup_tx_resources(vsi); 4643 if (!err) 4644 err = i40e_vsi_setup_rx_resources(vsi); 4645 if (!err) 4646 err = i40e_vsi_configure(vsi); 4647 if (!err && new_vsi) { 4648 char int_name[IFNAMSIZ + 9]; 4649 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", 4650 dev_driver_string(&pf->pdev->dev)); 4651 err = i40e_vsi_request_irq(vsi, int_name); 4652 } 4653 if (!err) 4654 err = i40e_up_complete(vsi); 4655 4656 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 4657 } 4658 4659 /** 4660 * i40e_fdir_teardown - release the Flow Director resources 4661 * @pf: board private structure 4662 **/ 4663 static void i40e_fdir_teardown(struct i40e_pf *pf) 4664 { 4665 int i; 4666 4667 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 4668 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 4669 i40e_vsi_release(pf->vsi[i]); 4670 break; 4671 } 4672 } 4673 } 4674 4675 /** 4676 * i40e_handle_reset_warning - prep for the core to reset 4677 * @pf: board private structure 4678 * 4679 * Close up the VFs and other things in prep for a Core Reset, 4680 * then get ready to rebuild the world. 4681 **/ 4682 static void i40e_handle_reset_warning(struct i40e_pf *pf) 4683 { 4684 struct i40e_driver_version dv; 4685 struct i40e_hw *hw = &pf->hw; 4686 i40e_status ret; 4687 u32 v; 4688 4689 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 4690 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 4691 return; 4692 4693 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 4694 4695 i40e_vc_notify_reset(pf); 4696 4697 /* quiesce the VSIs and their queues that are not already DOWN */ 4698 i40e_pf_quiesce_all_vsi(pf); 4699 4700 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4701 if (pf->vsi[v]) 4702 pf->vsi[v]->seid = 0; 4703 } 4704 4705 i40e_shutdown_adminq(&pf->hw); 4706 4707 /* Now we wait for GRST to settle out. 
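	 * (The actual wait happens inside i40e_pf_reset(), which polls the
	 * hardware's reset status before proceeding.)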
* We don't have to delete the VEBs or VSIs from the hw switch
4709 	 * because the reset will make them disappear.
4710 	 */
4711 	ret = i40e_pf_reset(hw);
4712 	if (ret)
4713 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
4714 	pf->pfr_count++;
4715 
4716 	if (test_bit(__I40E_DOWN, &pf->state))
4717 		goto end_core_reset;
4718 	dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
4719 
4720 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
4721 	ret = i40e_init_adminq(&pf->hw);
4722 	if (ret) {
4723 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
4724 		goto end_core_reset;
4725 	}
4726 
4727 	ret = i40e_get_capabilities(pf);
4728 	if (ret) {
4729 		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
4730 			 ret);
4731 		goto end_core_reset;
4732 	}
4733 
4734 	/* call shutdown HMC */
4735 	ret = i40e_shutdown_lan_hmc(hw);
4736 	if (ret) {
4737 		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
4738 		goto end_core_reset;
4739 	}
4740 
4741 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4742 				hw->func_caps.num_rx_qp,
4743 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
4744 	if (ret) {
4745 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
4746 		goto end_core_reset;
4747 	}
4748 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4749 	if (ret) {
4750 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
4751 		goto end_core_reset;
4752 	}
4753 
4754 	/* do basic switch setup */
4755 	ret = i40e_setup_pf_switch(pf);
4756 	if (ret)
4757 		goto end_core_reset;
4758 
4759 	/* Rebuild the VSIs and VEBs that existed before reset.
4760 	 * They are still in our local switch element arrays, so only
4761 	 * need to rebuild the switch model in the HW.
4762 	 *
4763 	 * If there were VEBs but the reconstitution failed, we'll try
4764 	 * to recover minimal use by getting the basic PF VSI working.
4765 	 */
4766 	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
4767 		dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
4768 		/* find the one VEB connected to the MAC, and find orphans */
4769 		for (v = 0; v < I40E_MAX_VEB; v++) {
4770 			if (!pf->veb[v])
4771 				continue;
4772 
4773 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
4774 			    pf->veb[v]->uplink_seid == 0) {
4775 				ret = i40e_reconstitute_veb(pf->veb[v]);
4776 
4777 				if (!ret)
4778 					continue;
4779 
4780 				/* If Main VEB failed, we're in deep doodoo,
4781 				 * so give up rebuilding the switch and set up
4782 				 * for minimal rebuild of PF VSI.
4783 				 * If orphan failed, we'll report the error
4784 				 * but try to keep going.
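				 * Orphan-VEB failures do not stop this scan,
				 * so every orphaned VEB gets at least one
				 * rebuild attempt per reset.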
4785 */ 4786 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 4787 dev_info(&pf->pdev->dev, 4788 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 4789 ret); 4790 pf->vsi[pf->lan_vsi]->uplink_seid 4791 = pf->mac_seid; 4792 break; 4793 } else if (pf->veb[v]->uplink_seid == 0) { 4794 dev_info(&pf->pdev->dev, 4795 "rebuild of orphan VEB failed: %d\n", 4796 ret); 4797 } 4798 } 4799 } 4800 } 4801 4802 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 4803 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 4804 /* no VEB, so rebuild only the Main VSI */ 4805 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 4806 if (ret) { 4807 dev_info(&pf->pdev->dev, 4808 "rebuild of Main VSI failed: %d\n", ret); 4809 goto end_core_reset; 4810 } 4811 } 4812 4813 /* reinit the misc interrupt */ 4814 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 4815 ret = i40e_setup_misc_vector(pf); 4816 4817 /* restart the VSIs that were rebuilt and running before the reset */ 4818 i40e_pf_unquiesce_all_vsi(pf); 4819 4820 /* tell the firmware that we're starting */ 4821 dv.major_version = DRV_VERSION_MAJOR; 4822 dv.minor_version = DRV_VERSION_MINOR; 4823 dv.build_version = DRV_VERSION_BUILD; 4824 dv.subbuild_version = 0; 4825 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 4826 4827 dev_info(&pf->pdev->dev, "PF reset done\n"); 4828 4829 end_core_reset: 4830 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 4831 } 4832 4833 /** 4834 * i40e_handle_mdd_event 4835 * @pf: pointer to the pf structure 4836 * 4837 * Called from the MDD irq handler to identify possibly malicious vfs 4838 **/ 4839 static void i40e_handle_mdd_event(struct i40e_pf *pf) 4840 { 4841 struct i40e_hw *hw = &pf->hw; 4842 bool mdd_detected = false; 4843 struct i40e_vf *vf; 4844 u32 reg; 4845 int i; 4846 4847 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 4848 return; 4849 4850 /* find what triggered the MDD event */ 4851 reg = rd32(hw, I40E_GL_MDET_TX); 4852 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 4853 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) 4854 >> I40E_GL_MDET_TX_FUNCTION_SHIFT; 4855 u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) 4856 >> I40E_GL_MDET_TX_EVENT_SHIFT; 4857 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) 4858 >> I40E_GL_MDET_TX_QUEUE_SHIFT; 4859 dev_info(&pf->pdev->dev, 4860 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", 4861 event, queue, func); 4862 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 4863 mdd_detected = true; 4864 } 4865 reg = rd32(hw, I40E_GL_MDET_RX); 4866 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 4867 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) 4868 >> I40E_GL_MDET_RX_FUNCTION_SHIFT; 4869 u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) 4870 >> I40E_GL_MDET_RX_EVENT_SHIFT; 4871 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) 4872 >> I40E_GL_MDET_RX_QUEUE_SHIFT; 4873 dev_info(&pf->pdev->dev, 4874 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", 4875 event, queue, func); 4876 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 4877 mdd_detected = true; 4878 } 4879 4880 /* see if one of the VFs needs its hand slapped */ 4881 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 4882 vf = &(pf->vf[i]); 4883 reg = rd32(hw, I40E_VP_MDET_TX(i)); 4884 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 4885 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 4886 vf->num_mdd_events++; 4887 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i); 4888 } 4889 4890 reg = rd32(hw, I40E_VP_MDET_RX(i)); 4891 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 4892 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 4893 
vf->num_mdd_events++; 4894 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i); 4895 } 4896 4897 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 4898 dev_info(&pf->pdev->dev, 4899 "Too many MDD events on VF %d, disabled\n", i); 4900 dev_info(&pf->pdev->dev, 4901 "Use PF Control I/F to re-enable the VF\n"); 4902 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 4903 } 4904 } 4905 4906 /* re-enable mdd interrupt cause */ 4907 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 4908 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 4909 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 4910 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 4911 i40e_flush(hw); 4912 } 4913 4914 /** 4915 * i40e_service_task - Run the driver's async subtasks 4916 * @work: pointer to work_struct containing our data 4917 **/ 4918 static void i40e_service_task(struct work_struct *work) 4919 { 4920 struct i40e_pf *pf = container_of(work, 4921 struct i40e_pf, 4922 service_task); 4923 unsigned long start_time = jiffies; 4924 4925 i40e_reset_subtask(pf); 4926 i40e_handle_mdd_event(pf); 4927 i40e_vc_process_vflr_event(pf); 4928 i40e_watchdog_subtask(pf); 4929 i40e_fdir_reinit_subtask(pf); 4930 i40e_check_hang_subtask(pf); 4931 i40e_sync_filters_subtask(pf); 4932 i40e_clean_adminq_subtask(pf); 4933 4934 i40e_service_event_complete(pf); 4935 4936 /* If the tasks have taken longer than one timer cycle or there 4937 * is more work to be done, reschedule the service task now 4938 * rather than wait for the timer to tick again. 4939 */ 4940 if (time_after(jiffies, (start_time + pf->service_timer_period)) || 4941 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || 4942 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || 4943 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) 4944 i40e_service_event_schedule(pf); 4945 } 4946 4947 /** 4948 * i40e_service_timer - timer callback 4949 * @data: pointer to PF struct 4950 **/ 4951 static void i40e_service_timer(unsigned long data) 4952 { 4953 struct i40e_pf *pf = (struct i40e_pf *)data; 4954 4955 mod_timer(&pf->service_timer, 4956 round_jiffies(jiffies + pf->service_timer_period)); 4957 i40e_service_event_schedule(pf); 4958 } 4959 4960 /** 4961 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI 4962 * @vsi: the VSI being configured 4963 **/ 4964 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) 4965 { 4966 struct i40e_pf *pf = vsi->back; 4967 4968 switch (vsi->type) { 4969 case I40E_VSI_MAIN: 4970 vsi->alloc_queue_pairs = pf->num_lan_qps; 4971 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 4972 I40E_REQ_DESCRIPTOR_MULTIPLE); 4973 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 4974 vsi->num_q_vectors = pf->num_lan_msix; 4975 else 4976 vsi->num_q_vectors = 1; 4977 4978 break; 4979 4980 case I40E_VSI_FDIR: 4981 vsi->alloc_queue_pairs = 1; 4982 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, 4983 I40E_REQ_DESCRIPTOR_MULTIPLE); 4984 vsi->num_q_vectors = 1; 4985 break; 4986 4987 case I40E_VSI_VMDQ2: 4988 vsi->alloc_queue_pairs = pf->num_vmdq_qps; 4989 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 4990 I40E_REQ_DESCRIPTOR_MULTIPLE); 4991 vsi->num_q_vectors = pf->num_vmdq_msix; 4992 break; 4993 4994 case I40E_VSI_SRIOV: 4995 vsi->alloc_queue_pairs = pf->num_vf_qps; 4996 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 4997 I40E_REQ_DESCRIPTOR_MULTIPLE); 4998 break; 4999 5000 default: 5001 WARN_ON(1); 5002 return -ENODATA; 5003 } 5004 5005 return 0; 5006 } 5007 5008 /** 5009 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF 5010 * @pf: board private structure 5011 
* @type: type of VSI
5012  *
5013  * On error: returns error code (negative)
5014  * On success: returns vsi index in PF (positive)
5015  **/
5016 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5017 {
5018 	int ret = -ENODEV;
5019 	struct i40e_vsi *vsi;
5020 	int sz_vectors;
5021 	int sz_rings;
5022 	int vsi_idx;
5023 	int i;
5024 
5025 	/* Need to protect the allocation of the VSIs at the PF level */
5026 	mutex_lock(&pf->switch_mutex);
5027 
5028 	/* VSI list may be fragmented if VSI creation/destruction has
5029 	 * been happening.  We can afford to do a quick scan to look
5030 	 * for any free VSIs in the list.
5031 	 *
5032 	 * find next empty vsi slot, looping back around if necessary
5033 	 */
5034 	i = pf->next_vsi;
5035 	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5036 		i++;
5037 	if (i >= pf->hw.func_caps.num_vsis) {
5038 		i = 0;
5039 		while (i < pf->next_vsi && pf->vsi[i])
5040 			i++;
5041 	}
5042 
5043 	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5044 		vsi_idx = i;             /* Found one! */
5045 	} else {
5046 		ret = -ENODEV;
5047 		goto unlock_pf;  /* out of VSI slots! */
5048 	}
5049 	pf->next_vsi = ++i;
5050 
5051 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5052 	if (!vsi) {
5053 		ret = -ENOMEM;
5054 		goto unlock_pf;
5055 	}
5056 	vsi->type = type;
5057 	vsi->back = pf;
5058 	set_bit(__I40E_DOWN, &vsi->state);
5059 	vsi->flags = 0;
5060 	vsi->idx = vsi_idx;
5061 	vsi->rx_itr_setting = pf->rx_itr_default;
5062 	vsi->tx_itr_setting = pf->tx_itr_default;
5063 	vsi->netdev_registered = false;
5064 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5065 	INIT_LIST_HEAD(&vsi->mac_filter_list);
5066 
5067 	ret = i40e_set_num_rings_in_vsi(vsi);
5068 	if (ret)
5069 		goto err_rings;
5070 
5071 	/* allocate memory for ring pointers */
5072 	sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5073 	vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
5074 	if (!vsi->tx_rings) {
5075 		ret = -ENOMEM;
5076 		goto err_rings;
5077 	}
5078 	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5079 
5080 	/* allocate memory for q_vector pointers */
5081 	sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
5082 	vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
5083 	if (!vsi->q_vectors) {
5084 		ret = -ENOMEM;
5085 		goto err_vectors;
5086 	}
5087 
5088 	/* Setup default MSIX irq handler for VSI */
5089 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5090 
5091 	pf->vsi[vsi_idx] = vsi;
5092 	ret = vsi_idx;
5093 	goto unlock_pf;
5094 
5095 err_vectors:
5096 	kfree(vsi->tx_rings);
5097 err_rings:
5098 	pf->next_vsi = i - 1;
5099 	kfree(vsi);
5100 unlock_pf:
5101 	mutex_unlock(&pf->switch_mutex);
5102 	return ret;
5103 }
5104 
5105 /**
5106  * i40e_vsi_clear - Deallocate the VSI provided
5107  * @vsi: the VSI being un-configured
5108  **/
5109 static int i40e_vsi_clear(struct i40e_vsi *vsi)
5110 {
5111 	struct i40e_pf *pf;
5112 
5113 	if (!vsi)
5114 		return 0;
5115 
5116 	if (!vsi->back)
5117 		goto free_vsi;
5118 	pf = vsi->back;
5119 
5120 	mutex_lock(&pf->switch_mutex);
5121 	if (!pf->vsi[vsi->idx]) {
5122 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5123 			vsi->idx, vsi->idx, vsi, vsi->type);
5124 		goto unlock_vsi;
5125 	}
5126 
5127 	if (pf->vsi[vsi->idx] != vsi) {
5128 		dev_err(&pf->pdev->dev,
5129 			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5130 			pf->vsi[vsi->idx]->idx,
5131 			pf->vsi[vsi->idx],
5132 			pf->vsi[vsi->idx]->type,
5133 			vsi->idx, vsi, vsi->type);
5134 		goto unlock_vsi;
5135 	}
5136 
5137 	/* updates the pf for this cleared vsi */
5138 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5139
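	/* likewise return this VSI's interrupt vectors to the PF's irq pile */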
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 5140 5141 /* free the ring and vector containers */ 5142 kfree(vsi->q_vectors); 5143 kfree(vsi->tx_rings); 5144 5145 pf->vsi[vsi->idx] = NULL; 5146 if (vsi->idx < pf->next_vsi) 5147 pf->next_vsi = vsi->idx; 5148 5149 unlock_vsi: 5150 mutex_unlock(&pf->switch_mutex); 5151 free_vsi: 5152 kfree(vsi); 5153 5154 return 0; 5155 } 5156 5157 /** 5158 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI 5159 * @vsi: the VSI being cleaned 5160 **/ 5161 static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi) 5162 { 5163 int i; 5164 5165 if (vsi->tx_rings[0]) 5166 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 5167 kfree_rcu(vsi->tx_rings[i], rcu); 5168 vsi->tx_rings[i] = NULL; 5169 vsi->rx_rings[i] = NULL; 5170 } 5171 5172 return 0; 5173 } 5174 5175 /** 5176 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 5177 * @vsi: the VSI being configured 5178 **/ 5179 static int i40e_alloc_rings(struct i40e_vsi *vsi) 5180 { 5181 struct i40e_pf *pf = vsi->back; 5182 int i; 5183 5184 /* Set basic values in the rings to be used later during open() */ 5185 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 5186 struct i40e_ring *tx_ring; 5187 struct i40e_ring *rx_ring; 5188 5189 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 5190 if (!tx_ring) 5191 goto err_out; 5192 5193 tx_ring->queue_index = i; 5194 tx_ring->reg_idx = vsi->base_queue + i; 5195 tx_ring->ring_active = false; 5196 tx_ring->vsi = vsi; 5197 tx_ring->netdev = vsi->netdev; 5198 tx_ring->dev = &pf->pdev->dev; 5199 tx_ring->count = vsi->num_desc; 5200 tx_ring->size = 0; 5201 tx_ring->dcb_tc = 0; 5202 vsi->tx_rings[i] = tx_ring; 5203 5204 rx_ring = &tx_ring[1]; 5205 rx_ring->queue_index = i; 5206 rx_ring->reg_idx = vsi->base_queue + i; 5207 rx_ring->ring_active = false; 5208 rx_ring->vsi = vsi; 5209 rx_ring->netdev = vsi->netdev; 5210 rx_ring->dev = &pf->pdev->dev; 5211 rx_ring->count = vsi->num_desc; 5212 rx_ring->size = 0; 5213 rx_ring->dcb_tc = 0; 5214 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) 5215 set_ring_16byte_desc_enabled(rx_ring); 5216 else 5217 clear_ring_16byte_desc_enabled(rx_ring); 5218 vsi->rx_rings[i] = rx_ring; 5219 } 5220 5221 return 0; 5222 5223 err_out: 5224 i40e_vsi_clear_rings(vsi); 5225 return -ENOMEM; 5226 } 5227 5228 /** 5229 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 5230 * @pf: board private structure 5231 * @vectors: the number of MSI-X vectors to request 5232 * 5233 * Returns the number of vectors reserved, or error 5234 **/ 5235 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 5236 { 5237 int err = 0; 5238 5239 pf->num_msix_entries = 0; 5240 while (vectors >= I40E_MIN_MSIX) { 5241 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors); 5242 if (err == 0) { 5243 /* good to go */ 5244 pf->num_msix_entries = vectors; 5245 break; 5246 } else if (err < 0) { 5247 /* total failure */ 5248 dev_info(&pf->pdev->dev, 5249 "MSI-X vector reservation failed: %d\n", err); 5250 vectors = 0; 5251 break; 5252 } else { 5253 /* err > 0 is the hint for retry */ 5254 dev_info(&pf->pdev->dev, 5255 "MSI-X vectors wanted %d, retrying with %d\n", 5256 vectors, err); 5257 vectors = err; 5258 } 5259 } 5260 5261 if (vectors > 0 && vectors < I40E_MIN_MSIX) { 5262 dev_info(&pf->pdev->dev, 5263 "Couldn't get enough vectors, only %d available\n", 5264 vectors); 5265 vectors = 0; 5266 } 5267 5268 return vectors; 5269 } 5270 5271 /** 5272 * i40e_init_msix - Setup the MSIX capability 5273 * @pf: 
board private structure 5274 * 5275 * Work with the OS to set up the MSIX vectors needed. 5276 * 5277 * Returns 0 on success, negative on failure 5278 **/ 5279 static int i40e_init_msix(struct i40e_pf *pf) 5280 { 5281 i40e_status err = 0; 5282 struct i40e_hw *hw = &pf->hw; 5283 int v_budget, i; 5284 int vec; 5285 5286 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 5287 return -ENODEV; 5288 5289 /* The number of vectors we'll request will be comprised of: 5290 * - Add 1 for "other" cause for Admin Queue events, etc. 5291 * - The number of LAN queue pairs 5292 * already adjusted for the NUMA node 5293 * assumes symmetric Tx/Rx pairing 5294 * - The number of VMDq pairs 5295 * Once we count this up, try the request. 5296 * 5297 * If we can't get what we want, we'll simplify to nearly nothing 5298 * and try again. If that still fails, we punt. 5299 */ 5300 pf->num_lan_msix = pf->num_lan_qps; 5301 pf->num_vmdq_msix = pf->num_vmdq_qps; 5302 v_budget = 1 + pf->num_lan_msix; 5303 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); 5304 if (pf->flags & I40E_FLAG_FDIR_ENABLED) 5305 v_budget++; 5306 5307 /* Scale down if necessary, and the rings will share vectors */ 5308 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); 5309 5310 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 5311 GFP_KERNEL); 5312 if (!pf->msix_entries) 5313 return -ENOMEM; 5314 5315 for (i = 0; i < v_budget; i++) 5316 pf->msix_entries[i].entry = i; 5317 vec = i40e_reserve_msix_vectors(pf, v_budget); 5318 if (vec < I40E_MIN_MSIX) { 5319 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 5320 kfree(pf->msix_entries); 5321 pf->msix_entries = NULL; 5322 return -ENODEV; 5323 5324 } else if (vec == I40E_MIN_MSIX) { 5325 /* Adjust for minimal MSIX use */ 5326 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); 5327 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 5328 pf->num_vmdq_vsis = 0; 5329 pf->num_vmdq_qps = 0; 5330 pf->num_vmdq_msix = 0; 5331 pf->num_lan_qps = 1; 5332 pf->num_lan_msix = 1; 5333 5334 } else if (vec != v_budget) { 5335 /* Scale vector usage down */ 5336 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 5337 vec--; /* reserve the misc vector */ 5338 5339 /* partition out the remaining vectors */ 5340 switch (vec) { 5341 case 2: 5342 pf->num_vmdq_vsis = 1; 5343 pf->num_lan_msix = 1; 5344 break; 5345 case 3: 5346 pf->num_vmdq_vsis = 1; 5347 pf->num_lan_msix = 2; 5348 break; 5349 default: 5350 pf->num_lan_msix = min_t(int, (vec / 2), 5351 pf->num_lan_qps); 5352 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), 5353 I40E_DEFAULT_NUM_VMDQ_VSI); 5354 break; 5355 } 5356 } 5357 5358 return err; 5359 } 5360 5361 /** 5362 * i40e_alloc_q_vector - Allocate memory for a single interrupt vector 5363 * @vsi: the VSI being configured 5364 * @v_idx: index of the vector in the vsi struct 5365 * 5366 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
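 *
 * The q_vector is tied into the VSI here but is not yet mapped to a
 * hardware interrupt; that association is made later when the VSI's
 * interrupts are configured.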
5367 **/ 5368 static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 5369 { 5370 struct i40e_q_vector *q_vector; 5371 5372 /* allocate q_vector */ 5373 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 5374 if (!q_vector) 5375 return -ENOMEM; 5376 5377 q_vector->vsi = vsi; 5378 q_vector->v_idx = v_idx; 5379 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 5380 if (vsi->netdev) 5381 netif_napi_add(vsi->netdev, &q_vector->napi, 5382 i40e_napi_poll, vsi->work_limit); 5383 5384 q_vector->rx.latency_range = I40E_LOW_LATENCY; 5385 q_vector->tx.latency_range = I40E_LOW_LATENCY; 5386 5387 /* tie q_vector and vsi together */ 5388 vsi->q_vectors[v_idx] = q_vector; 5389 5390 return 0; 5391 } 5392 5393 /** 5394 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors 5395 * @vsi: the VSI being configured 5396 * 5397 * We allocate one q_vector per queue interrupt. If allocation fails we 5398 * return -ENOMEM. 5399 **/ 5400 static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) 5401 { 5402 struct i40e_pf *pf = vsi->back; 5403 int v_idx, num_q_vectors; 5404 int err; 5405 5406 /* if not MSIX, give the one vector only to the LAN VSI */ 5407 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 5408 num_q_vectors = vsi->num_q_vectors; 5409 else if (vsi == pf->vsi[pf->lan_vsi]) 5410 num_q_vectors = 1; 5411 else 5412 return -EINVAL; 5413 5414 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 5415 err = i40e_alloc_q_vector(vsi, v_idx); 5416 if (err) 5417 goto err_out; 5418 } 5419 5420 return 0; 5421 5422 err_out: 5423 while (v_idx--) 5424 i40e_free_q_vector(vsi, v_idx); 5425 5426 return err; 5427 } 5428 5429 /** 5430 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 5431 * @pf: board private structure to initialize 5432 **/ 5433 static void i40e_init_interrupt_scheme(struct i40e_pf *pf) 5434 { 5435 int err = 0; 5436 5437 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 5438 err = i40e_init_msix(pf); 5439 if (err) { 5440 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 5441 I40E_FLAG_RSS_ENABLED | 5442 I40E_FLAG_MQ_ENABLED | 5443 I40E_FLAG_DCB_ENABLED | 5444 I40E_FLAG_SRIOV_ENABLED | 5445 I40E_FLAG_FDIR_ENABLED | 5446 I40E_FLAG_FDIR_ATR_ENABLED | 5447 I40E_FLAG_VMDQ_ENABLED); 5448 5449 /* rework the queue expectations without MSIX */ 5450 i40e_determine_queue_usage(pf); 5451 } 5452 } 5453 5454 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 5455 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 5456 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); 5457 err = pci_enable_msi(pf->pdev); 5458 if (err) { 5459 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); 5460 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 5461 } 5462 } 5463 5464 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 5465 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); 5466 5467 /* track first vector for misc interrupts */ 5468 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 5469 } 5470 5471 /** 5472 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events 5473 * @pf: board private structure 5474 * 5475 * This sets up the handler for MSIX 0, which is used to manage the 5476 * non-queue interrupts, e.g. AdminQ and errors. This is not used 5477 * when in MSI or Legacy interrupt mode. 
5478 **/ 5479 static int i40e_setup_misc_vector(struct i40e_pf *pf) 5480 { 5481 struct i40e_hw *hw = &pf->hw; 5482 int err = 0; 5483 5484 /* Only request the irq if this is the first time through, and 5485 * not when we're rebuilding after a Reset 5486 */ 5487 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 5488 err = request_irq(pf->msix_entries[0].vector, 5489 i40e_intr, 0, pf->misc_int_name, pf); 5490 if (err) { 5491 dev_info(&pf->pdev->dev, 5492 "request_irq for msix_misc failed: %d\n", err); 5493 return -EFAULT; 5494 } 5495 } 5496 5497 i40e_enable_misc_int_causes(hw); 5498 5499 /* associate no queues to the misc vector */ 5500 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 5501 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); 5502 5503 i40e_flush(hw); 5504 5505 i40e_irq_dynamic_enable_icr0(pf); 5506 5507 return err; 5508 } 5509 5510 /** 5511 * i40e_config_rss - Prepare for RSS if used 5512 * @pf: board private structure 5513 **/ 5514 static int i40e_config_rss(struct i40e_pf *pf) 5515 { 5516 struct i40e_hw *hw = &pf->hw; 5517 u32 lut = 0; 5518 int i, j; 5519 u64 hena; 5520 /* Set of random keys generated using kernel random number generator */ 5521 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, 5522 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 5523 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 5524 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; 5525 5526 /* Fill out hash function seed */ 5527 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 5528 wr32(hw, I40E_PFQF_HKEY(i), seed[i]); 5529 5530 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ 5531 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | 5532 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); 5533 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | 5534 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | 5535 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | 5536 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | 5537 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | 5538 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | 5539 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | 5540 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | 5541 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)| 5542 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); 5543 wr32(hw, I40E_PFQF_HENA(0), (u32)hena); 5544 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 5545 5546 /* Populate the LUT with max no. of queues in round robin fashion */ 5547 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) { 5548 5549 /* The assumption is that lan qp count will be the highest 5550 * qp count for any PF VSI that needs RSS. 5551 * If multiple VSIs need RSS support, all the qp counts 5552 * for those VSIs should be a power of 2 for RSS to work. 5553 * If LAN VSI is the only consumer for RSS then this requirement 5554 * is not necessary. 5555 */ 5556 if (j == pf->rss_size) 5557 j = 0; 5558 /* lut = 4-byte sliding window of 4 lut entries */ 5559 lut = (lut << 8) | (j & 5560 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); 5561 /* On i = 3, we have 4 entries in lut; write to the register */ 5562 if ((i & 3) == 3) 5563 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); 5564 } 5565 i40e_flush(hw); 5566 5567 return 0; 5568 } 5569 5570 /** 5571 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 5572 * @pf: board private structure to initialize 5573 * 5574 * i40e_sw_init initializes the Adapter private data structure. 
5575 * Fields are initialized based on PCI device information and 5576 * OS network device settings (MTU size). 5577 **/ 5578 static int i40e_sw_init(struct i40e_pf *pf) 5579 { 5580 int err = 0; 5581 int size; 5582 5583 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 5584 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 5585 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 5586 if (I40E_DEBUG_USER & debug) 5587 pf->hw.debug_mask = debug; 5588 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 5589 I40E_DEFAULT_MSG_ENABLE); 5590 } 5591 5592 /* Set default capability flags */ 5593 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 5594 I40E_FLAG_MSI_ENABLED | 5595 I40E_FLAG_MSIX_ENABLED | 5596 I40E_FLAG_RX_PS_ENABLED | 5597 I40E_FLAG_MQ_ENABLED | 5598 I40E_FLAG_RX_1BUF_ENABLED; 5599 5600 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; 5601 if (pf->hw.func_caps.rss) { 5602 pf->flags |= I40E_FLAG_RSS_ENABLED; 5603 pf->rss_size = min_t(int, pf->rss_size_max, 5604 nr_cpus_node(numa_node_id())); 5605 } else { 5606 pf->rss_size = 1; 5607 } 5608 5609 if (pf->hw.func_caps.dcb) 5610 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC; 5611 else 5612 pf->num_tc_qps = 0; 5613 5614 if (pf->hw.func_caps.fd) { 5615 /* FW/NVM is not yet fixed in this regard */ 5616 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 5617 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 5618 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED; 5619 dev_info(&pf->pdev->dev, 5620 "Flow Director ATR mode Enabled\n"); 5621 pf->flags |= I40E_FLAG_FDIR_ENABLED; 5622 dev_info(&pf->pdev->dev, 5623 "Flow Director Side Band mode Enabled\n"); 5624 pf->fdir_pf_filter_count = 5625 pf->hw.func_caps.fd_filters_guaranteed; 5626 } 5627 } else { 5628 pf->fdir_pf_filter_count = 0; 5629 } 5630 5631 if (pf->hw.func_caps.vmdq) { 5632 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 5633 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 5634 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; 5635 } 5636 5637 /* MFP mode enabled */ 5638 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { 5639 pf->flags |= I40E_FLAG_MFP_ENABLED; 5640 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 5641 } 5642 5643 #ifdef CONFIG_PCI_IOV 5644 if (pf->hw.func_caps.num_vfs) { 5645 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 5646 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 5647 pf->num_req_vfs = min_t(int, 5648 pf->hw.func_caps.num_vfs, 5649 I40E_MAX_VF_COUNT); 5650 } 5651 #endif /* CONFIG_PCI_IOV */ 5652 pf->eeprom_version = 0xDEAD; 5653 pf->lan_veb = I40E_NO_VEB; 5654 pf->lan_vsi = I40E_NO_VSI; 5655 5656 /* set up queue assignment tracking */ 5657 size = sizeof(struct i40e_lump_tracking) 5658 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 5659 pf->qp_pile = kzalloc(size, GFP_KERNEL); 5660 if (!pf->qp_pile) { 5661 err = -ENOMEM; 5662 goto sw_init_done; 5663 } 5664 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 5665 pf->qp_pile->search_hint = 0; 5666 5667 /* set up vector assignment tracking */ 5668 size = sizeof(struct i40e_lump_tracking) 5669 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); 5670 pf->irq_pile = kzalloc(size, GFP_KERNEL); 5671 if (!pf->irq_pile) { 5672 kfree(pf->qp_pile); 5673 err = -ENOMEM; 5674 goto sw_init_done; 5675 } 5676 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; 5677 pf->irq_pile->search_hint = 0; 5678 5679 mutex_init(&pf->switch_mutex); 5680 5681 sw_init_done: 5682 return err; 5683 } 5684 5685 /** 5686 * i40e_set_features - set the netdev feature flags 5687 * @netdev: ptr to the netdev being adjusted 5688 * 
@features: the feature set that the stack is suggesting 5689 **/ 5690 static int i40e_set_features(struct net_device *netdev, 5691 netdev_features_t features) 5692 { 5693 struct i40e_netdev_priv *np = netdev_priv(netdev); 5694 struct i40e_vsi *vsi = np->vsi; 5695 5696 if (features & NETIF_F_HW_VLAN_CTAG_RX) 5697 i40e_vlan_stripping_enable(vsi); 5698 else 5699 i40e_vlan_stripping_disable(vsi); 5700 5701 return 0; 5702 } 5703 5704 static const struct net_device_ops i40e_netdev_ops = { 5705 .ndo_open = i40e_open, 5706 .ndo_stop = i40e_close, 5707 .ndo_start_xmit = i40e_lan_xmit_frame, 5708 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 5709 .ndo_set_rx_mode = i40e_set_rx_mode, 5710 .ndo_validate_addr = eth_validate_addr, 5711 .ndo_set_mac_address = i40e_set_mac, 5712 .ndo_change_mtu = i40e_change_mtu, 5713 .ndo_tx_timeout = i40e_tx_timeout, 5714 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 5715 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 5716 #ifdef CONFIG_NET_POLL_CONTROLLER 5717 .ndo_poll_controller = i40e_netpoll, 5718 #endif 5719 .ndo_setup_tc = i40e_setup_tc, 5720 .ndo_set_features = i40e_set_features, 5721 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 5722 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 5723 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, 5724 .ndo_get_vf_config = i40e_ndo_get_vf_config, 5725 }; 5726 5727 /** 5728 * i40e_config_netdev - Setup the netdev flags 5729 * @vsi: the VSI being configured 5730 * 5731 * Returns 0 on success, negative value on failure 5732 **/ 5733 static int i40e_config_netdev(struct i40e_vsi *vsi) 5734 { 5735 struct i40e_pf *pf = vsi->back; 5736 struct i40e_hw *hw = &pf->hw; 5737 struct i40e_netdev_priv *np; 5738 struct net_device *netdev; 5739 u8 mac_addr[ETH_ALEN]; 5740 int etherdev_size; 5741 5742 etherdev_size = sizeof(struct i40e_netdev_priv); 5743 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 5744 if (!netdev) 5745 return -ENOMEM; 5746 5747 vsi->netdev = netdev; 5748 np = netdev_priv(netdev); 5749 np->vsi = vsi; 5750 5751 netdev->hw_enc_features = NETIF_F_IP_CSUM | 5752 NETIF_F_GSO_UDP_TUNNEL | 5753 NETIF_F_TSO | 5754 NETIF_F_SG; 5755 5756 netdev->features = NETIF_F_SG | 5757 NETIF_F_IP_CSUM | 5758 NETIF_F_SCTP_CSUM | 5759 NETIF_F_HIGHDMA | 5760 NETIF_F_GSO_UDP_TUNNEL | 5761 NETIF_F_HW_VLAN_CTAG_TX | 5762 NETIF_F_HW_VLAN_CTAG_RX | 5763 NETIF_F_HW_VLAN_CTAG_FILTER | 5764 NETIF_F_IPV6_CSUM | 5765 NETIF_F_TSO | 5766 NETIF_F_TSO6 | 5767 NETIF_F_RXCSUM | 5768 NETIF_F_RXHASH | 5769 0; 5770 5771 /* copy netdev features into list of user selectable features */ 5772 netdev->hw_features |= netdev->features; 5773 5774 if (vsi->type == I40E_VSI_MAIN) { 5775 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 5776 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); 5777 } else { 5778 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 5779 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 5780 pf->vsi[pf->lan_vsi]->netdev->name); 5781 random_ether_addr(mac_addr); 5782 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); 5783 } 5784 5785 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); 5786 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN); 5787 /* vlan gets same features (except vlan offload) 5788 * after any tweaks for specific VSI types 5789 */ 5790 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | 5791 NETIF_F_HW_VLAN_CTAG_RX | 5792 NETIF_F_HW_VLAN_CTAG_FILTER); 5793 netdev->priv_flags |= IFF_UNICAST_FLT; 5794 netdev->priv_flags |= IFF_SUPP_NOFCS; 5795 /* Setup netdev TC information */ 5796 i40e_vsi_config_netdev_tc(vsi, 

/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	netdev->hw_enc_features = NETIF_F_IP_CSUM	 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_TSO		 |
				  NETIF_F_SG;

	netdev->features = NETIF_F_SG		       |
			   NETIF_F_IP_CSUM	       |
			   NETIF_F_SCTP_CSUM	       |
			   NETIF_F_HIGHDMA	       |
			   NETIF_F_GSO_UDP_TUNNEL      |
			   NETIF_F_HW_VLAN_CTAG_TX     |
			   NETIF_F_HW_VLAN_CTAG_RX     |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM	       |
			   NETIF_F_TSO		       |
			   NETIF_F_TSO6		       |
			   NETIF_F_RXCSUM	       |
			   NETIF_F_RXHASH	       |
			   0;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
	}

	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* removal of the default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	/* there is no HW VSI for FDIR */
	if (vsi->type == I40E_VSI_FDIR)
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
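
/* Editor's note: i40e_add_vsi() below builds an i40e_vsi_context and hands
 * it to the add-VSI admin queue command. A minimal sketch of the common
 * shape for a switching-element VSI (VMDq shown; values vary per VSI type):
 *
 *	struct i40e_vsi_context ctxt;
 *	int ret;
 *
 *	memset(&ctxt, 0, sizeof(ctxt));
 *	ctxt.pf_num = hw->pf_id;
 *	ctxt.uplink_seid = vsi->uplink_seid;
 *	ctxt.connection_type = 0x1;		// regular data port
 *	ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
 *	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
 *	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
 */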

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get pf vsi config, err %d, aq_err %d\n",
				 ret, pf->hw.aq.asq_last_status);
			return -ENOENT;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, aq_err=%d\n",
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0;
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
					 enabled_tc, ret,
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		/* no queue mapping or actual HW VSI needed */
		vsi->info.valid_sections = 0;
		vsi->seid = 0;
		vsi->id = 0;
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		return 0;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;	/* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		ctxt.info.switch_id = 0;
		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;	/* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, aq_err=%d\n",
				 vsi->back->hw.aq.asq_last_status);
			ret = -ENOENT;
			goto err;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;
	}
	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}

/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
				free_netdev(vsi->netdev);
				vsi->netdev = NULL;
			}
		} else {
			if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
				i40e_down(vsi);
			i40e_vsi_free_irq(vsi);
			i40e_vsi_free_tx_resources(vsi);
			i40e_vsi_free_rx_resources(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;	/* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;	/* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
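
/* Editor's note: the cleanup above decides whether the uplink VEB is now
 * orphaned by counting its remaining users. The same check, pulled out as a
 * hypothetical helper for clarity (not present in the driver):
 *
 *	static int i40e_count_uplink_users(struct i40e_pf *pf, u16 uplink_seid)
 *	{
 *		int i, n = 0;
 *
 *		for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
 *			if (pf->vsi[i] &&
 *			    pf->vsi[i]->uplink_seid == uplink_seid &&
 *			    !(pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER))
 *				n++;
 *		for (i = 0; i < I40E_MAX_VEB; i++)
 *			if (pf->veb[i] && pf->veb[i]->uplink_seid == uplink_seid)
 *				n++;
 *		return n;
 *	}
 */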

/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev,
			 "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get q tracking for VSI %d, err=%d\n",
			 vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
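
/* Editor's note: i40e_get_lump() above hands out a contiguous run of entries
 * from one of the "pile" trackers allocated in i40e_sw_init(). A sketch of
 * the consuming pattern (mirroring the code above; a negative return means
 * no contiguous run of the requested size was available):
 *
 *	int base;
 *
 *	base = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx);
 *	if (base < 0)
 *		return -ENOENT;
 *	vsi->base_vector = base;	// first MSI-X vector of the run
 */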

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous case
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);

		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
			 vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
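
/* Editor's note: a typical use of i40e_vsi_setup(), as seen elsewhere in
 * this file for the main LAN VSI, and sketched here for a hypothetical
 * VMDq child VSI hanging off the same uplink:
 *
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vsi)
 *		dev_info(&pf->pdev->dev, "VMDq VSI setup failed\n");
 *
 * Passing the seid of a VSI that does not yet own a VEB makes the function
 * create the intermediate VEB automatically, per the comment block above.
 */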

/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}

/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (non-negative)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;	/* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
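
/* Editor's note: in i40e_veb_get_bw_info() above, the two 16-bit tc_bw_max
 * words are fused into one u32 and then carved into 4-bit fields, one per
 * traffic class. A worked example of the decode (values illustrative):
 *
 *	u32 tc_bw_max = 0x00325476;
 *	int tc = 2;
 *	u8 quanta = (tc_bw_max >> (tc * 4)) & 0x7;
 *	// bits 11:8 hold 0x4; masking with 0x7 keeps the low 3 bits -> 4
 */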

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}

/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	bool is_default = (vsi->idx == vsi->back->lan_vsi);
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default, &veb->seid, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't add VEB, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB bw info, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seids are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
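
/* Editor's note: a representative call, matching how i40e_vsi_setup() uses
 * this function when it needs to interpose a VEB under the MAC uplink:
 *
 *	struct i40e_veb *veb;
 *
 *	veb = i40e_veb_setup(pf, 0, pf->mac_seid,
 *			     pf->vsi[pf->lan_vsi]->seid,
 *			     pf->vsi[pf->lan_vsi]->tc_config.enabled_tc);
 *	if (!veb)
 *		dev_info(&pf->pdev->dev, "VEB setup failed\n");
 *
 * Passing zero for both uplink_seid and vsi_seid instead requests a
 * floating VEB, per the kernel-doc above.
 */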

/**
 * i40e_setup_pf_switch_element - set pf vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: number of elements returned in this response
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}

/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed %d aq_err=%x\n",
				 ret, pf->hw.aq.asq_last_status);
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		if (num_reported) {
			int sz = sizeof(*sw_config) * num_reported;

			kfree(pf->sw_config);
			pf->sw_config = kzalloc(sz, GFP_KERNEL);
			if (pf->sw_config)
				memcpy(pf->sw_config, sw_config, sz);
		}

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
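
/* Editor's note: i40e_aq_get_switch_config() is paged; each call returns one
 * chunk and updates next_seid, which doubles as the resume cookie. The loop
 * above is the canonical consumption pattern:
 *
 *	u16 next_seid = 0;
 *
 *	do {
 *		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
 *						I40E_AQ_LARGE_BUF,
 *						&next_seid, NULL);
 *		if (ret)
 *			break;
 *		// ... consume sw_config->element[0..num_reported-1] ...
 *	} while (next_seid != 0);	// zero means no more chunks
 */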

/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf)
{
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* fdir VSI must happen first to be sure it gets queue 0, but only
	 * if there is enough room for the fdir VSI
	 */
	if (pf->num_lan_qps > 1)
		i40e_fdir_setup(pf);

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;

		vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
		/* accommodate kcompat by copying the main VSI queue count
		 * into the pf, since this newer code pushes the pf queue
		 * info down a level into a VSI
		 */
		pf->num_rx_queues = vsi->alloc_queue_pairs;
		pf->num_tx_queues = vsi->alloc_queue_pairs;
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);
	pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
	if (pf->hw.phy.link_info.an_info &
	    (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
		pf->hw.fc.current_mode = I40E_FC_FULL;
	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
	else
		pf->hw.fc.current_mode = I40E_FC_DEFAULT;

	return ret;
}

/**
 * i40e_set_rss_size - helper to set rss_size
 * @pf: board private structure
 * @queues_left: how many queues
 **/
static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
{
	int num_tc0;

	num_tc0 = min_t(int, queues_left, pf->rss_size_max);
	num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
	num_tc0 = rounddown_pow_of_two(num_tc0);

	return num_tc0;
}

/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int accum_tc_size;
	int queues_left;

	pf->num_lan_qps = 0;
	pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
	accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	      (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
	    !(pf->flags & (I40E_FLAG_RSS_ENABLED |
			   I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
	    (queues_left == 1)) {

		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_MQ_ENABLED |
			       I40E_FLAG_FDIR_ENABLED |
			       I40E_FLAG_FDIR_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {

		pf->rss_size = i40e_set_rss_size(pf, queues_left);

		queues_left -= pf->rss_size;
		pf->num_lan_qps = pf->rss_size;

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   (pf->flags & I40E_FLAG_DCB_ENABLED)) {

		/* save num_tc_qps queues for TCs 1 thru 7 and the rest
		 * are set up for RSS in TC0
		 */
		queues_left -= accum_tc_size;

		pf->rss_size = i40e_set_rss_size(pf, queues_left);

		queues_left -= pf->rss_size;
		if (queues_left < 0) {
			dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
			return;
		}

		pf->num_lan_qps = pf->rss_size + accum_tc_size;

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {

		queues_left -= 1; /* save 1 queue for FD */

		pf->rss_size = i40e_set_rss_size(pf, queues_left);

		queues_left -= pf->rss_size;
		if (queues_left < 0) {
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
			return;
		}

		pf->num_lan_qps = pf->rss_size;

	} else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
		   (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
		   (pf->flags & I40E_FLAG_DCB_ENABLED)) {

		/* save queues for TCs 1 thru 7,
		 * 1 queue for flow director,
		 * and the rest are set up for RSS in TC0
		 */
		queues_left -= 1;
		queues_left -= accum_tc_size;

		pf->rss_size = i40e_set_rss_size(pf, queues_left);
		queues_left -= pf->rss_size;
		if (queues_left < 0) {
			dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
			return;
		}

		pf->num_lan_qps = pf->rss_size + accum_tc_size;

	} else {
		dev_info(&pf->pdev->dev,
			 "Invalid configuration, flags=0x%08llx\n", pf->flags);
		return;
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
							       pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}
}
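
/* Editor's note: a worked example of the DCB + Flow Director branch above,
 * with illustrative numbers. Assume num_tx_qp = 128, num_tc_qps = 8 (so
 * accum_tc_size = 7 * 8 = 56), 16 CPUs on the node and rss_size_max >= 16:
 *
 *	queues_left = 128 - 1 - 56;	// 71 after FD and TC1..7 reservations
 *	pf->rss_size = 16;		// min(71, max, cpus), rounded to pow2
 *	queues_left -= 16;		// 55 remain for VFs and VMDq
 *	pf->num_lan_qps = 16 + 56;	// RSS in TC0 plus the TC1..7 queues
 */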

/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a pf's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a pf identified by a pci_dev structure.
 * The OS initialization, configuring of the pf private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_driver_version dv;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int err = 0;
	u32 len;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* coherent mask for the same size will always succeed if
		 * dma_set_mask does
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else {
		dev_err(&pdev->dev, "DMA configuration failed\n");
		err = -EIO;
		goto err_dma;
	}

	/* set up pci connections */
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
				IORESOURCE_MEM), i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, &pf->state);

	hw = &pf->hw;
	hw->back = pf;
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 (unsigned int)pci_resource_len(pdev, 0), err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);

	/* Reset here to make sure all is clean and to define PF 'n' */
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
	snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
		 "%s-pf%d:misc",
		 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
		goto err_pf_reset;
	}

	err = i40e_init_adminq(hw);
	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
	if (err) {
		dev_info(&pdev->dev,
			 "init_adminq failed: %d expecting API %02x.%02x\n",
			 err,
			 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
		goto err_pf_reset;
	}

	err = i40e_get_capabilities(pf);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
dev_info(&pdev->dev, "sw_init failed: %d\n", err); 7162 goto err_sw_init; 7163 } 7164 7165 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 7166 hw->func_caps.num_rx_qp, 7167 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 7168 if (err) { 7169 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 7170 goto err_init_lan_hmc; 7171 } 7172 7173 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 7174 if (err) { 7175 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 7176 err = -ENOENT; 7177 goto err_configure_lan_hmc; 7178 } 7179 7180 i40e_get_mac_addr(hw, hw->mac.addr); 7181 if (i40e_validate_mac_addr(hw->mac.addr)) { 7182 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 7183 err = -EIO; 7184 goto err_mac_addr; 7185 } 7186 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 7187 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); 7188 7189 pci_set_drvdata(pdev, pf); 7190 pci_save_state(pdev); 7191 7192 /* set up periodic task facility */ 7193 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 7194 pf->service_timer_period = HZ; 7195 7196 INIT_WORK(&pf->service_task, i40e_service_task); 7197 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 7198 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 7199 pf->link_check_timeout = jiffies; 7200 7201 /* set up the main switch operations */ 7202 i40e_determine_queue_usage(pf); 7203 i40e_init_interrupt_scheme(pf); 7204 7205 /* Set up the *vsi struct based on the number of VSIs in the HW, 7206 * and set up our local tracking of the MAIN PF vsi. 7207 */ 7208 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis; 7209 pf->vsi = kzalloc(len, GFP_KERNEL); 7210 if (!pf->vsi) { 7211 err = -ENOMEM; 7212 goto err_switch_setup; 7213 } 7214 7215 err = i40e_setup_pf_switch(pf); 7216 if (err) { 7217 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 7218 goto err_vsis; 7219 } 7220 7221 /* The main driver is (mostly) up and happy. We need to set this state 7222 * before setting up the misc vector or we get a race and the vector 7223 * ends up disabled forever. 7224 */ 7225 clear_bit(__I40E_DOWN, &pf->state); 7226 7227 /* In case of MSIX we are going to setup the misc vector right here 7228 * to handle admin queue events etc. In case of legacy and MSI 7229 * the misc functionality and queue processing is combined in 7230 * the same vector and that gets setup at open. 
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		u32 val;

		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
err_switch_setup:
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
err_sw_init:
err_adminq_setup:
	(void)i40e_shutdown_adminq(hw);
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	i40e_status ret_code;
	u32 reg;
	int i;

	i40e_dbg_pf_exit(pf);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* shutdown and destroy the HMC */
	ret_code = i40e_shutdown_lan_hmc(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the HMC resources: %d\n", ret_code);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(&pf->hw, true);
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
	kfree(pf->sw_config);
	kfree(pf->vsi);

	/* force a PF reset to clean anything leftover */
	reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
	wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	i40e_flush(&pf->hw);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the PCI channel state at the time of the error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shutdown all operations */
	i40e_pf_quiesce_all_vsi(pf);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_info(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s\n", __func__);
	i40e_handle_reset_warning(pf);
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);
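
/* Editor's note: once built, the driver loads like any other PCI module; the
 * "debug" parameter declared near the top of this file controls the initial
 * message level. Illustrative invocation (module name from this file, value
 * arbitrary):
 *
 *	modprobe i40e debug=3
 *
 * With debug left at -1 the driver falls back to I40E_DEFAULT_MSG_ENABLE,
 * as handled in i40e_sw_init().
 */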