/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 21
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
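/* Usage sketch for the lump allocator below (illustrative only, not a
 * call site in this file; pf->qp_pile and vsi->idx are assumed from the
 * rest of the driver): a VSI needing four contiguous queue-pair slots
 * would do something like
 *
 *	base = i40e_get_lump(pf, pf->qp_pile, 4, vsi->idx);
 *	if (base < 0)
 *		return -ENOMEM;
 *	...
 *	i40e_put_lump(pf->qp_pile, base, vsi->idx);
 */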
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}
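/* Tx hang recovery below escalates through progressively bigger hammers
 * on each successive timeout: reinit the VSI's queues, then request PF,
 * core, and global resets, and finally give up and request the port be
 * taken down.  A quiet period of more than 20 seconds since the last
 * recovery attempt drops the level back to 1.
 */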
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
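/* The Tx and Rx rings of a queue pair are allocated back to back in one
 * block, which is why the stats code below can reach the Rx ring as
 * &tx_ring[1] once it has the Tx ring pointer.
 */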
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}
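/* Worked example for the 48-bit rollover handling below (illustrative
 * numbers): with a saved offset of 0xFFFFFFFFF000 and a raw reading that
 * has wrapped around to 0x100, new_data < *offset, so the reported stat
 * becomes (0x100 + (1ULL << 48)) - 0xFFFFFFFFF000 = 0x1100.
 */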
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
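/* An XOFF (PAUSE) frame received on the link stalls the Tx queues, so a
 * queue that looks stuck may simply be paused.  The two helpers below
 * therefore disarm the Tx hang check on the affected rings whenever new
 * XOFF frames have arrived, to avoid triggering a spurious reset.
 */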
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];

		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
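/* The per-ring packet and byte counts gathered below are read under the
 * rings' u64_stats seqcount retry loops, so 64-bit totals stay
 * consistent even on 32-bit hosts where the counters cannot be read
 * atomically.
 */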
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the pf statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_discards,
			   &nsd->eth.tx_discards);

	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}
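/* MAC filters are tracked per (MAC address, VLAN) pair, with a vlan of
 * I40E_VLAN_ANY (-1) marking a filter that matches regardless of VLAN
 * tag.  The helper below replicates one MAC across every VLAN already
 * present in the list so a VSI in vlan mode filters consistently.
 */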
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status aq_ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (aq_ret)
		dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
}
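/* Filters are reference counted: f->counter tracks how many consumers
 * (VF, netdev, or other) have requested the same MAC/VLAN entry, and
 * f->changed flags the sync task to push additions (counter > 0) or
 * removals (counter == 0) down to the firmware.
 */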
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	f = i40e_find_mac(vsi, addr->sa_data, false, true);
	if (!f) {
		/* In order to be sure to not drop any packets, add the
		 * new address first then delete the old one.
		 */
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (!f)
			return -ENOMEM;

		i40e_sync_vsi_filters(vsi);
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
		i40e_sync_vsi_filters(vsi);
	}

	f->is_laa = true;
	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
		ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}
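/* Worked example of the qmap encoding built below (illustrative
 * numbers): a TC given 8 queue pairs starting at queue offset 16 needs
 * pow = 3 (since 1 << 3 == 8), so
 * qmap = (16 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *        (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).
 */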
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs/numtc;
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the power-of-2 of the number of queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret &&
				    pf->hw.aq.asq_last_status !=
							       I40E_AQ_RC_ENOENT)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
						     del_list, num_del, NULL);
			num_del = 0;

			if (aq_ret &&
			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}
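/* Frame size arithmetic used below: max_frame = MTU + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4), so e.g. a standard 1500-byte MTU
 * needs a 1522-byte receive buffer.
 */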
I40E_MAX_RXBUFFER)) 1888 return -EINVAL; 1889 1890 netdev_info(netdev, "changing MTU from %d to %d\n", 1891 netdev->mtu, new_mtu); 1892 netdev->mtu = new_mtu; 1893 if (netif_running(netdev)) 1894 i40e_vsi_reinit_locked(vsi); 1895 1896 return 0; 1897 } 1898 1899 /** 1900 * i40e_ioctl - Access the hwtstamp interface 1901 * @netdev: network interface device structure 1902 * @ifr: interface request data 1903 * @cmd: ioctl command 1904 **/ 1905 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1906 { 1907 struct i40e_netdev_priv *np = netdev_priv(netdev); 1908 struct i40e_pf *pf = np->vsi->back; 1909 1910 switch (cmd) { 1911 case SIOCGHWTSTAMP: 1912 return i40e_ptp_get_ts_config(pf, ifr); 1913 case SIOCSHWTSTAMP: 1914 return i40e_ptp_set_ts_config(pf, ifr); 1915 default: 1916 return -EOPNOTSUPP; 1917 } 1918 } 1919 1920 /** 1921 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 1922 * @vsi: the vsi being adjusted 1923 **/ 1924 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 1925 { 1926 struct i40e_vsi_context ctxt; 1927 i40e_status ret; 1928 1929 if ((vsi->info.valid_sections & 1930 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 1931 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 1932 return; /* already enabled */ 1933 1934 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1935 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 1936 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 1937 1938 ctxt.seid = vsi->seid; 1939 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1940 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1941 if (ret) { 1942 dev_info(&vsi->back->pdev->dev, 1943 "%s: update vsi failed, aq_err=%d\n", 1944 __func__, vsi->back->hw.aq.asq_last_status); 1945 } 1946 } 1947 1948 /** 1949 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI 1950 * @vsi: the vsi being adjusted 1951 **/ 1952 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) 1953 { 1954 struct i40e_vsi_context ctxt; 1955 i40e_status ret; 1956 1957 if ((vsi->info.valid_sections & 1958 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 1959 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == 1960 I40E_AQ_VSI_PVLAN_EMOD_MASK)) 1961 return; /* already disabled */ 1962 1963 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1964 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 1965 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 1966 1967 ctxt.seid = vsi->seid; 1968 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1969 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1970 if (ret) { 1971 dev_info(&vsi->back->pdev->dev, 1972 "%s: update vsi failed, aq_err=%d\n", 1973 __func__, vsi->back->hw.aq.asq_last_status); 1974 } 1975 } 1976 1977 /** 1978 * i40e_vlan_rx_register - Setup or shutdown vlan offload 1979 * @netdev: network interface to be adjusted 1980 * @features: netdev features to test if VLAN offload is enabled or not 1981 **/ 1982 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) 1983 { 1984 struct i40e_netdev_priv *np = netdev_priv(netdev); 1985 struct i40e_vsi *vsi = np->vsi; 1986 1987 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1988 i40e_vlan_stripping_enable(vsi); 1989 else 1990 i40e_vlan_stripping_disable(vsi); 1991 } 1992 1993 /** 1994 * i40e_vsi_add_vlan - Add vsi membership for given vlan 1995 * @vsi: the vsi being configured 1996 * @vid: vlan id to be added (0 = untagged only , -1 = any) 1997 **/ 1998 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) 1999 { 2000 
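/* A sketch of the resulting filter set (illustrative values): adding
 * vid 100 on a VSI that already holds two MAC filters creates roughly
 * one (MAC, 100) entry per MAC on the list, plus one for the netdev's
 * own address, before the untagged fixup further below runs.
 */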
struct i40e_mac_filter *f, *add_f; 2001 bool is_netdev, is_vf; 2002
2003 is_vf = (vsi->type == I40E_VSI_SRIOV); 2004 is_netdev = !!(vsi->netdev); 2005
2006 if (is_netdev) { 2007 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, 2008 is_vf, is_netdev);
2009 if (!add_f) { 2010 dev_info(&vsi->back->pdev->dev, 2011 "Could not add vlan filter %d for %pM\n",
2012 vid, vsi->netdev->dev_addr); 2013 return -ENOMEM; 2014 } 2015 } 2016
2017 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2018 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2019 if (!add_f) { 2020 dev_info(&vsi->back->pdev->dev, 2021 "Could not add vlan filter %d for %pM\n",
2022 vid, f->macaddr); 2023 return -ENOMEM; 2024 } 2025 } 2026
2027 /* Now if we add a vlan tag, make sure to check if it is the first
2028 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
2029 * with 0, so we now accept untagged and specified tagged traffic
2030 * (and not any tagged and untagged) 2031 */
2032 if (vid > 0) { 2033 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, 2034 I40E_VLAN_ANY, 2035 is_vf, is_netdev)) {
2036 i40e_del_filter(vsi, vsi->netdev->dev_addr, 2037 I40E_VLAN_ANY, is_vf, is_netdev);
2038 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0, 2039 is_vf, is_netdev);
2040 if (!add_f) { 2041 dev_info(&vsi->back->pdev->dev, 2042 "Could not add filter 0 for %pM\n",
2043 vsi->netdev->dev_addr); 2044 return -ENOMEM; 2045 } 2046 } 2047 } 2048
2049 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2050 if (vid > 0 && !vsi->info.pvid) { 2051 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2052 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2053 is_vf, is_netdev)) {
2054 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2055 is_vf, is_netdev);
2056 add_f = i40e_add_filter(vsi, f->macaddr, 2057 0, is_vf, is_netdev);
2058 if (!add_f) { 2059 dev_info(&vsi->back->pdev->dev, 2060 "Could not add filter 0 for %pM\n",
2061 f->macaddr); 2062 return -ENOMEM; 2063 } 2064 } 2065 } 2066 } 2067
2068 if (test_bit(__I40E_DOWN, &vsi->back->state) || 2069 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 2070 return 0; 2071
2072 return i40e_sync_vsi_filters(vsi); 2073 } 2074
2075 /** 2076 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2077 * @vsi: the vsi being configured
2078 * @vid: vlan id to be removed (0 = untagged only, -1 = any) 2079 *
2080 * Return: 0 on success or negative otherwise 2081 **/
2082 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 2083 {
2084 struct net_device *netdev = vsi->netdev; 2085 struct i40e_mac_filter *f, *add_f;
2086 bool is_vf, is_netdev; 2087 int filter_count = 0; 2088
2089 is_vf = (vsi->type == I40E_VSI_SRIOV); 2090 is_netdev = !!(netdev); 2091
2092 if (is_netdev) 2093 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); 2094
2095 list_for_each_entry(f, &vsi->mac_filter_list, list)
2096 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); 2097
2098 /* go through all the filters for this VSI and if there is only
2099 * vid == 0 it means there are no other filters, so vid 0 must
2100 * be replaced with -1. 
This signifies that we should from now 2101 * on accept any traffic (with any tag present, or untagged) 2102 */ 2103 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2104 if (is_netdev) { 2105 if (f->vlan && 2106 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2107 filter_count++; 2108 } 2109 2110 if (f->vlan) 2111 filter_count++; 2112 } 2113 2114 if (!filter_count && is_netdev) { 2115 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 2116 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 2117 is_vf, is_netdev); 2118 if (!f) { 2119 dev_info(&vsi->back->pdev->dev, 2120 "Could not add filter %d for %pM\n", 2121 I40E_VLAN_ANY, netdev->dev_addr); 2122 return -ENOMEM; 2123 } 2124 } 2125 2126 if (!filter_count) { 2127 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2128 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 2129 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2130 is_vf, is_netdev); 2131 if (!add_f) { 2132 dev_info(&vsi->back->pdev->dev, 2133 "Could not add filter %d for %pM\n", 2134 I40E_VLAN_ANY, f->macaddr); 2135 return -ENOMEM; 2136 } 2137 } 2138 } 2139 2140 if (test_bit(__I40E_DOWN, &vsi->back->state) || 2141 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 2142 return 0; 2143 2144 return i40e_sync_vsi_filters(vsi); 2145 } 2146 2147 /** 2148 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2149 * @netdev: network interface to be adjusted 2150 * @vid: vlan id to be added 2151 * 2152 * net_device_ops implementation for adding vlan ids 2153 **/ 2154 #ifdef I40E_FCOE 2155 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2156 __always_unused __be16 proto, u16 vid) 2157 #else 2158 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2159 __always_unused __be16 proto, u16 vid) 2160 #endif 2161 { 2162 struct i40e_netdev_priv *np = netdev_priv(netdev); 2163 struct i40e_vsi *vsi = np->vsi; 2164 int ret = 0; 2165 2166 if (vid > 4095) 2167 return -EINVAL; 2168 2169 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 2170 2171 /* If the network stack called us with vid = 0 then 2172 * it is asking to receive priority tagged packets with 2173 * vlan id 0. Our HW receives them by default when configured 2174 * to receive untagged packets so there is no need to add an 2175 * extra filter for vlan 0 tagged packets. 
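 * Put differently, a priority-tagged frame (VLAN ID 0 with only the
 * PCP bits set) takes the same receive path as untagged traffic,
 * which is why only a nonzero vid results in a filter add here.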
2176 */ 2177 if (vid) 2178 ret = i40e_vsi_add_vlan(vsi, vid); 2179
2180 if (!ret && (vid < VLAN_N_VID)) 2181 set_bit(vid, vsi->active_vlans); 2182 2183 return ret; 2184 } 2185
2186 /** 2187 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2188 * @netdev: network interface to be adjusted 2189 * @vid: vlan id to be removed 2190 *
2191 * net_device_ops implementation for removing vlan ids 2192 **/
2193 #ifdef I40E_FCOE 2194 int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2195 __always_unused __be16 proto, u16 vid)
2196 #else 2197 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2198 __always_unused __be16 proto, u16 vid)
2199 #endif 2200 { 2201 struct i40e_netdev_priv *np = netdev_priv(netdev); 2202 struct i40e_vsi *vsi = np->vsi; 2203
2204 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); 2205
2206 /* return code is ignored as there is nothing a user
2207 * can do about failure to remove and a log message was
2208 * already printed from the other function 2209 */
2210 i40e_vsi_kill_vlan(vsi, vid); 2211 2212 clear_bit(vid, vsi->active_vlans); 2213 2214 return 0; 2215 } 2216
2217 /** 2218 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2219 * @vsi: the vsi being brought back up 2220 **/
2221 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2222 { 2223 u16 vid; 2224
2225 if (!vsi->netdev) 2226 return; 2227
2228 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2229
2230 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2231 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2232 vid); 2233 } 2234
2235 /** 2236 * i40e_vsi_add_pvid - Add pvid for the VSI
2237 * @vsi: the vsi being adjusted 2238 * @vid: the vlan id to set as a PVID 2239 **/
2240 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2241 {
2242 struct i40e_vsi_context ctxt; 2243 i40e_status aq_ret; 2244
2245 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2246 vsi->info.pvid = cpu_to_le16(vid);
2247 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2248 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2249 I40E_AQ_VSI_PVLAN_EMOD_STR; 2250
2251 ctxt.seid = vsi->seid; 2252 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2253 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2254 if (aq_ret) { 2255 dev_info(&vsi->back->pdev->dev, 2256 "%s: update vsi failed, aq_err=%d\n",
2257 __func__, vsi->back->hw.aq.asq_last_status); 2258 return -ENOENT; 2259 } 2260 2261 return 0; 2262 } 2263
2264 /** 2265 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2266 * @vsi: the vsi being adjusted 2267 *
2268 * Just use i40e_vlan_stripping_disable() to put it back to normal 2269 **/
2270 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2271 {
2272 i40e_vlan_stripping_disable(vsi); 2273 2274 vsi->info.pvid = 0; 2275 } 2276
2277 /** 2278 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2279 * @vsi: ptr to the VSI 2280 *
2281 * If this function returns with an error, then it's possible one or
2282 * more of the rings is populated (while the rest are not). It is the
2283 * caller's duty to clean those orphaned rings. 
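 * A typical unwind (not mandated here) is i40e_vsi_free_tx_resources(),
 * which safely skips any ring that never got a descriptor area.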
2284 * 2285 * Return 0 on success, negative on failure 2286 **/
2287 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) 2288 { 2289 int i, err = 0; 2290
2291 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2292 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); 2293 2294 return err; 2295 } 2296
2297 /** 2298 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2299 * @vsi: ptr to the VSI 2300 * 2301 * Free VSI's transmit software resources 2302 **/
2303 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) 2304 { 2305 int i; 2306
2307 if (!vsi->tx_rings) 2308 return; 2309
2310 for (i = 0; i < vsi->num_queue_pairs; i++)
2311 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2312 i40e_free_tx_resources(vsi->tx_rings[i]); 2313 } 2314
2315 /** 2316 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2317 * @vsi: ptr to the VSI 2318 *
2319 * If this function returns with an error, then it's possible one or
2320 * more of the rings is populated (while the rest are not). It is the
2321 * caller's duty to clean those orphaned rings. 2322 *
2323 * Return 0 on success, negative on failure 2324 **/
2325 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) 2326 { 2327 int i, err = 0; 2328
2329 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2330 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2331 #ifdef I40E_FCOE 2332 i40e_fcoe_setup_ddp_resources(vsi); 2333 #endif 2334 return err; 2335 } 2336
2337 /** 2338 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2339 * @vsi: ptr to the VSI 2340 * 2341 * Free all receive software resources 2342 **/
2343 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) 2344 { 2345 int i; 2346
2347 if (!vsi->rx_rings) 2348 return; 2349
2350 for (i = 0; i < vsi->num_queue_pairs; i++)
2351 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2352 i40e_free_rx_resources(vsi->rx_rings[i]);
2353 #ifdef I40E_FCOE 2354 i40e_fcoe_free_ddp_resources(vsi); 2355 #endif 2356 } 2357
2358 /** 2359 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of its resources
2360 * @ring: The Tx ring to configure 2361 *
2362 * Configure the Tx descriptor ring in the HMC context. 
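 * As a rough sketch of the context math below: tx_ctx.base is the
 * ring's DMA address expressed in 128-byte units (ring->dma / 128),
 * and the head writeback area is placed just past the descriptors, at
 * ring->dma + ring->count * sizeof(struct i40e_tx_desc).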
2363 **/ 2364 static int i40e_configure_tx_ring(struct i40e_ring *ring) 2365 {
2366 struct i40e_vsi *vsi = ring->vsi; 2367 u16 pf_q = vsi->base_queue + ring->queue_index;
2368 struct i40e_hw *hw = &vsi->back->hw; 2369 struct i40e_hmc_obj_txq tx_ctx;
2370 i40e_status err = 0; 2371 u32 qtx_ctl = 0; 2372
2373 /* some ATR related tx ring init */
2374 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2375 ring->atr_sample_rate = vsi->back->atr_sample_rate; 2376 ring->atr_count = 0;
2377 } else { 2378 ring->atr_sample_rate = 0; 2379 } 2380
2381 /* initialize XPS */
2382 if (ring->q_vector && ring->netdev && 2383 vsi->tc_config.numtc <= 1 &&
2384 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2385 netif_set_xps_queue(ring->netdev, 2386 &ring->q_vector->affinity_mask, 2387 ring->queue_index); 2388
2389 /* clear the context structure first */ 2390 memset(&tx_ctx, 0, sizeof(tx_ctx)); 2391
2392 tx_ctx.new_context = 1; 2393 tx_ctx.base = (ring->dma / 128); 2394 tx_ctx.qlen = ring->count;
2395 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 2396 I40E_FLAG_FD_ATR_ENABLED));
2397 #ifdef I40E_FCOE 2398 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2399 #endif
2400 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2401 /* FDIR VSI tx ring can still use RS bit and writebacks */
2402 if (vsi->type != I40E_VSI_FDIR) 2403 tx_ctx.head_wb_ena = 1;
2404 tx_ctx.head_wb_addr = ring->dma + 2405 (ring->count * sizeof(struct i40e_tx_desc)); 2406
2407 /* As part of VSI creation/update, FW allocates certain
2408 * Tx arbitration queue sets for each TC enabled for
2409 * the VSI. The FW returns the handles to these queue
2410 * sets as part of the response buffer to Add VSI,
2411 * Update VSI, etc. AQ commands. It is expected that
2412 * these queue set handles be associated with the Tx
2413 * queues by the driver as part of the TX queue context
2414 * initialization. This has to be done regardless of
2415 * DCB as by default everything is mapped to TC0. 2416 */
2417 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); 2418 tx_ctx.rdylist_act = 0; 2419
2420 /* clear the context in the HMC */ 2421 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2422 if (err) { 2423 dev_info(&vsi->back->pdev->dev,
2424 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2425 ring->queue_index, pf_q, err); 2426 return -ENOMEM; 2427 } 2428
2429 /* set the context in the HMC */ 2430 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2431 if (err) { 2432 dev_info(&vsi->back->pdev->dev,
2433 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2434 ring->queue_index, pf_q, err); 2435 return -ENOMEM; 2436 } 2437
2438 /* Now associate this queue with this PCI function */
2439 if (vsi->type == I40E_VSI_VMDQ2) 2440 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2441 else 2442 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2443 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & 2444 I40E_QTX_CTL_PF_INDX_MASK);
2445 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); 2446 i40e_flush(hw); 2447
2448 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); 2449
2450 /* cache tail offset for easier writes later */
2451 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); 2452 2453 return 0; 2454 } 2455
2456 /** 2457 * i40e_configure_rx_ring - Configure a receive ring context
2458 * @ring: The Rx ring to configure 2459 *
2460 * Configure the Rx descriptor ring in the HMC context. 
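 * A hedged example of the rxmax math below: with a hypothetical chain
 * length of 5 and 2048-byte buffers, the maximum frame programmed into
 * the context is min(vsi->max_frame, 5 * 2048); the real chain length
 * comes from hw.func_caps, not from this driver.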
2461 **/ 2462 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2463 { 2464 struct i40e_vsi *vsi = ring->vsi; 2465 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2466 u16 pf_q = vsi->base_queue + ring->queue_index; 2467 struct i40e_hw *hw = &vsi->back->hw; 2468 struct i40e_hmc_obj_rxq rx_ctx; 2469 i40e_status err = 0; 2470 2471 ring->state = 0; 2472 2473 /* clear the context structure first */ 2474 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2475 2476 ring->rx_buf_len = vsi->rx_buf_len; 2477 ring->rx_hdr_len = vsi->rx_hdr_len; 2478 2479 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2480 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2481 2482 rx_ctx.base = (ring->dma / 128); 2483 rx_ctx.qlen = ring->count; 2484 2485 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2486 set_ring_16byte_desc_enabled(ring); 2487 rx_ctx.dsize = 0; 2488 } else { 2489 rx_ctx.dsize = 1; 2490 } 2491 2492 rx_ctx.dtype = vsi->dtype; 2493 if (vsi->dtype) { 2494 set_ring_ps_enabled(ring); 2495 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2496 I40E_RX_SPLIT_IP | 2497 I40E_RX_SPLIT_TCP_UDP | 2498 I40E_RX_SPLIT_SCTP; 2499 } else { 2500 rx_ctx.hsplit_0 = 0; 2501 } 2502 2503 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2504 (chain_len * ring->rx_buf_len)); 2505 if (hw->revision_id == 0) 2506 rx_ctx.lrxqthresh = 0; 2507 else 2508 rx_ctx.lrxqthresh = 2; 2509 rx_ctx.crcstrip = 1; 2510 rx_ctx.l2tsel = 1; 2511 rx_ctx.showiv = 1; 2512 #ifdef I40E_FCOE 2513 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2514 #endif 2515 /* set the prefena field to 1 because the manual says to */ 2516 rx_ctx.prefena = 1; 2517 2518 /* clear the context in the HMC */ 2519 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2520 if (err) { 2521 dev_info(&vsi->back->pdev->dev, 2522 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2523 ring->queue_index, pf_q, err); 2524 return -ENOMEM; 2525 } 2526 2527 /* set the context in the HMC */ 2528 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2529 if (err) { 2530 dev_info(&vsi->back->pdev->dev, 2531 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2532 ring->queue_index, pf_q, err); 2533 return -ENOMEM; 2534 } 2535 2536 /* cache tail for quicker writes, and clear the reg before use */ 2537 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2538 writel(0, ring->tail); 2539 2540 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 2541 2542 return 0; 2543 } 2544 2545 /** 2546 * i40e_vsi_configure_tx - Configure the VSI for Tx 2547 * @vsi: VSI structure describing this set of rings and resources 2548 * 2549 * Configure the Tx VSI for operation. 2550 **/ 2551 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2552 { 2553 int err = 0; 2554 u16 i; 2555 2556 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2557 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2558 2559 return err; 2560 } 2561 2562 /** 2563 * i40e_vsi_configure_rx - Configure the VSI for Rx 2564 * @vsi: the VSI being configured 2565 * 2566 * Configure the Rx VSI for operation. 
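 * The buffer strategy chosen below is a three-way switch: 1BUF mode
 * receives the whole frame into a single buffer, PS (packet split)
 * mode moves headers into a separate smaller buffer, and the default
 * mode splits every packet.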
2567 **/ 2568 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2569 { 2570 int err = 0; 2571 u16 i; 2572 2573 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2574 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2575 + ETH_FCS_LEN + VLAN_HLEN; 2576 else 2577 vsi->max_frame = I40E_RXBUFFER_2048; 2578 2579 /* figure out correct receive buffer length */ 2580 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2581 I40E_FLAG_RX_PS_ENABLED)) { 2582 case I40E_FLAG_RX_1BUF_ENABLED: 2583 vsi->rx_hdr_len = 0; 2584 vsi->rx_buf_len = vsi->max_frame; 2585 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2586 break; 2587 case I40E_FLAG_RX_PS_ENABLED: 2588 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2589 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2590 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2591 break; 2592 default: 2593 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2594 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2595 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2596 break; 2597 } 2598 2599 #ifdef I40E_FCOE 2600 /* setup rx buffer for FCoE */ 2601 if ((vsi->type == I40E_VSI_FCOE) && 2602 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 2603 vsi->rx_hdr_len = 0; 2604 vsi->rx_buf_len = I40E_RXBUFFER_3072; 2605 vsi->max_frame = I40E_RXBUFFER_3072; 2606 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2607 } 2608 2609 #endif /* I40E_FCOE */ 2610 /* round up for the chip's needs */ 2611 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2612 (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); 2613 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2614 (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); 2615 2616 /* set up individual rings */ 2617 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2618 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2619 2620 return err; 2621 } 2622 2623 /** 2624 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2625 * @vsi: ptr to the VSI 2626 **/ 2627 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2628 { 2629 struct i40e_ring *tx_ring, *rx_ring; 2630 u16 qoffset, qcount; 2631 int i, n; 2632 2633 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2634 return; 2635 2636 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2637 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2638 continue; 2639 2640 qoffset = vsi->tc_config.tc_info[n].qoffset; 2641 qcount = vsi->tc_config.tc_info[n].qcount; 2642 for (i = qoffset; i < (qoffset + qcount); i++) { 2643 rx_ring = vsi->rx_rings[i]; 2644 tx_ring = vsi->tx_rings[i]; 2645 rx_ring->dcb_tc = n; 2646 tx_ring->dcb_tc = n; 2647 } 2648 } 2649 } 2650 2651 /** 2652 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 2653 * @vsi: ptr to the VSI 2654 **/ 2655 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 2656 { 2657 if (vsi->netdev) 2658 i40e_set_rx_mode(vsi->netdev); 2659 } 2660 2661 /** 2662 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 2663 * @vsi: Pointer to the targeted VSI 2664 * 2665 * This function replays the hlist on the hw where all the SB Flow Director 2666 * filters were saved. 
2667 **/ 2668 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) 2669 { 2670 struct i40e_fdir_filter *filter; 2671 struct i40e_pf *pf = vsi->back; 2672 struct hlist_node *node; 2673 2674 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 2675 return; 2676 2677 hlist_for_each_entry_safe(filter, node, 2678 &pf->fdir_filter_list, fdir_node) { 2679 i40e_add_del_fdir(vsi, filter, true); 2680 } 2681 } 2682 2683 /** 2684 * i40e_vsi_configure - Set up the VSI for action 2685 * @vsi: the VSI being configured 2686 **/ 2687 static int i40e_vsi_configure(struct i40e_vsi *vsi) 2688 { 2689 int err; 2690 2691 i40e_set_vsi_rx_mode(vsi); 2692 i40e_restore_vlan(vsi); 2693 i40e_vsi_config_dcb_rings(vsi); 2694 err = i40e_vsi_configure_tx(vsi); 2695 if (!err) 2696 err = i40e_vsi_configure_rx(vsi); 2697 2698 return err; 2699 } 2700 2701 /** 2702 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW 2703 * @vsi: the VSI being configured 2704 **/ 2705 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) 2706 { 2707 struct i40e_pf *pf = vsi->back; 2708 struct i40e_q_vector *q_vector; 2709 struct i40e_hw *hw = &pf->hw; 2710 u16 vector; 2711 int i, q; 2712 u32 val; 2713 u32 qp; 2714 2715 /* The interrupt indexing is offset by 1 in the PFINT_ITRn 2716 * and PFINT_LNKLSTn registers, e.g.: 2717 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) 2718 */ 2719 qp = vsi->base_queue; 2720 vector = vsi->base_vector; 2721 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2722 q_vector = vsi->q_vectors[i]; 2723 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2724 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2725 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 2726 q_vector->rx.itr); 2727 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2728 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2729 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), 2730 q_vector->tx.itr); 2731 2732 /* Linked list for the queuepairs assigned to this vector */ 2733 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); 2734 for (q = 0; q < q_vector->num_ringpairs; q++) { 2735 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2736 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2737 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 2738 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| 2739 (I40E_QUEUE_TYPE_TX 2740 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); 2741 2742 wr32(hw, I40E_QINT_RQCTL(qp), val); 2743 2744 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2745 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2746 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 2747 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| 2748 (I40E_QUEUE_TYPE_RX 2749 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2750 2751 /* Terminate the linked list */ 2752 if (q == (q_vector->num_ringpairs - 1)) 2753 val |= (I40E_QUEUE_END_OF_LIST 2754 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2755 2756 wr32(hw, I40E_QINT_TQCTL(qp), val); 2757 qp++; 2758 } 2759 } 2760 2761 i40e_flush(hw); 2762 } 2763 2764 /** 2765 * i40e_enable_misc_int_causes - enable the non-queue interrupts 2766 * @hw: ptr to the hardware info 2767 **/ 2768 static void i40e_enable_misc_int_causes(struct i40e_hw *hw) 2769 { 2770 u32 val; 2771 2772 /* clear things first */ 2773 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 2774 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 2775 2776 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 2777 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 2778 I40E_PFINT_ICR0_ENA_GRST_MASK | 2779 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2780 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2781 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | 2782 
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2783 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2784 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2785
2786 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2787
2788 /* SW_ITR_IDX = 0, but don't change INTENA */
2789 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 2790 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 2791
2792 /* OTHER_ITR_IDX = 0 */ 2793 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2794 } 2795
2796 /** 2797 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2798 * @vsi: the VSI being configured 2799 **/
2800 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2801 {
2802 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 2803 struct i40e_pf *pf = vsi->back;
2804 struct i40e_hw *hw = &pf->hw; 2805 u32 val; 2806
2807 /* set the ITR configuration */
2808 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2809 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2810 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2811 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2812 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2813 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2814
2815 i40e_enable_misc_int_causes(hw); 2816
2817 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2818 wr32(hw, I40E_PFINT_LNKLST0, 0); 2819
2820 /* Associate the queue pair to the vector and enable the queue int */
2821 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2822 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2823 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2824
2825 wr32(hw, I40E_QINT_RQCTL(0), val); 2826
2827 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2828 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2829 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2830
2831 wr32(hw, I40E_QINT_TQCTL(0), val); 2832 i40e_flush(hw); 2833 } 2834
2835 /** 2836 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2837 * @pf: board private structure 2838 **/
2839 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 2840 { 2841 struct i40e_hw *hw = &pf->hw; 2842
2843 wr32(hw, I40E_PFINT_DYN_CTL0, 2844 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2845 i40e_flush(hw); 2846 } 2847
2848 /** 2849 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2850 * @pf: board private structure 2851 **/
2852 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2853 {
2854 struct i40e_hw *hw = &pf->hw; 2855 u32 val; 2856
2857 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 2858 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2859 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 2860
2861 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2862 i40e_flush(hw); 2863 } 2864
2865 /** 2866 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2867 * @vsi: pointer to a vsi 2868 * @vector: enable a particular Hw Interrupt vector 2869 **/
2870 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) 2871 {
2872 struct i40e_pf *pf = vsi->back; 2873 struct i40e_hw *hw = &pf->hw; 2874 u32 val; 2875
2876 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2877 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2878 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2879 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2880 /* skip the flush */ 2881 } 2882
2883 /** 2884 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2885 * @vsi: pointer to a vsi 2886 * @vector: disable a particular Hw Interrupt vector 2887 **/
2888 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) 2889 { 2890 struct i40e_pf *pf = 
vsi->back; 2891 struct i40e_hw *hw = &pf->hw; 2892 u32 val; 2893
2894 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2895 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2896 i40e_flush(hw); 2897 } 2898
2899 /** 2900 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2901 * @irq: interrupt number 2902 * @data: pointer to a q_vector 2903 **/
2904 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 2905 {
2906 struct i40e_q_vector *q_vector = data; 2907
2908 if (!q_vector->tx.ring && !q_vector->rx.ring) 2909 return IRQ_HANDLED; 2910
2911 napi_schedule(&q_vector->napi); 2912 2913 return IRQ_HANDLED; 2914 } 2915
2916 /** 2917 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2918 * @vsi: the VSI being configured 2919 * @basename: name for the vector 2920 *
2921 * Allocates MSI-X vectors and requests interrupts from the kernel. 2922 **/
2923 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) 2924 {
2925 int q_vectors = vsi->num_q_vectors; 2926 struct i40e_pf *pf = vsi->back;
2927 int base = vsi->base_vector; 2928 int rx_int_idx = 0; 2929 int tx_int_idx = 0; 2930 int vector, err; 2931
2932 for (vector = 0; vector < q_vectors; vector++) {
2933 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; 2934
2935 if (q_vector->tx.ring && q_vector->rx.ring) {
2936 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2937 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2938 tx_int_idx++;
2939 } else if (q_vector->rx.ring) {
2940 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2941 "%s-%s-%d", basename, "rx", rx_int_idx++);
2942 } else if (q_vector->tx.ring) {
2943 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2944 "%s-%s-%d", basename, "tx", tx_int_idx++);
2945 } else { 2946 /* skip this unused q_vector */ 2947 continue; 2948 }
2949 err = request_irq(pf->msix_entries[base + vector].vector, 2950 vsi->irq_handler, 2951 0, 2952 q_vector->name, 2953 q_vector);
2954 if (err) { 2955 dev_info(&pf->pdev->dev, 2956 "%s: request_irq failed, error: %d\n",
2957 __func__, err); 2958 goto free_queue_irqs; 2959 }
2960 /* assign the mask for this irq */
2961 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 2962 &q_vector->affinity_mask); 2963 } 2964
2965 vsi->irqs_ready = true; 2966 return 0; 2967
2968 free_queue_irqs: 2969 while (vector) { 2970 vector--;
2971 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 2972 NULL);
2973 free_irq(pf->msix_entries[base + vector].vector, 2974 vsi->q_vectors[vector]); 2975 } 2976 return err; 2977 } 2978
2979 /** 2980 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2981 * @vsi: the VSI being un-configured 2982 **/
2983 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) 2984 {
2985 struct i40e_pf *pf = vsi->back; 2986 struct i40e_hw *hw = &pf->hw;
2987 int base = vsi->base_vector; 2988 int i; 2989
2990 for (i = 0; i < vsi->num_queue_pairs; i++) {
2991 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2992 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); 2993 } 2994
2995 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2996 for (i = vsi->base_vector; 2997 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2998 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); 2999
3000 i40e_flush(hw);
3001 for (i = 0; i < vsi->num_q_vectors; i++)
3002 synchronize_irq(pf->msix_entries[i + base].vector);
3003 } else { 3004 /* Legacy and MSI mode - this stops all interrupt handling */
3005 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 3006 wr32(hw, I40E_PFINT_DYN_CTL0, 0); 3007 i40e_flush(hw); 3008 
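/* with the causes masked and the writes flushed, wait out any
 * handler that may still be running
 */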
synchronize_irq(pf->pdev->irq); 3009 } 3010 } 3011
3012 /** 3013 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3014 * @vsi: the VSI being configured 3015 **/
3016 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) 3017 {
3018 struct i40e_pf *pf = vsi->back; 3019 int i; 3020
3021 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3022 for (i = vsi->base_vector; 3023 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3024 i40e_irq_dynamic_enable(vsi, i);
3025 } else { 3026 i40e_irq_dynamic_enable_icr0(pf); 3027 } 3028
3029 i40e_flush(&pf->hw); 3030 return 0; 3031 } 3032
3033 /** 3034 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3035 * @pf: board private structure 3036 **/
3037 static void i40e_stop_misc_vector(struct i40e_pf *pf) 3038 {
3039 /* Disable ICR 0 */ 3040 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 3041 i40e_flush(&pf->hw); 3042 } 3043
3044 /** 3045 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3046 * @irq: interrupt number 3047 * @data: pointer to the PF structure 3048 *
3049 * This is the handler used for all MSI/Legacy interrupts, and deals
3050 * with both queue and non-queue interrupts. This is also used in
3051 * MSIX mode to handle the non-queue interrupts. 3052 **/
3053 static irqreturn_t i40e_intr(int irq, void *data) 3054 {
3055 struct i40e_pf *pf = (struct i40e_pf *)data; 3056 struct i40e_hw *hw = &pf->hw;
3057 irqreturn_t ret = IRQ_NONE; 3058 u32 icr0, icr0_remaining; 3059 u32 val, ena_mask; 3060
3061 icr0 = rd32(hw, I40E_PFINT_ICR0); 3062 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 3063
3064 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3065 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 3066 goto enable_intr; 3067
3068 /* if interrupt but no bits showing, must be SWINT */
3069 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || 3070 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3071 pf->sw_int_count++; 3072
3073 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3074 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 3075
3076 /* temporarily disable queue cause for NAPI processing */
3077 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3078 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; 3079 wr32(hw, I40E_QINT_RQCTL(0), qval); 3080
3081 qval = rd32(hw, I40E_QINT_TQCTL(0));
3082 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 3083 wr32(hw, I40E_QINT_TQCTL(0), qval); 3084
3085 if (!test_bit(__I40E_DOWN, &pf->state))
3086 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); 3087 } 3088
3089 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3090 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3091 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 3092 } 3093
3094 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3095 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3096 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 3097 } 3098
3099 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3100 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3101 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 3102 } 3103
3104 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3105 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3106 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3107 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3108 val = rd32(hw, I40E_GLGEN_RSTAT);
3109 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3110 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3111 if (val == I40E_RESET_CORER) { 3112 pf->corer_count++;
3113 } else if (val == I40E_RESET_GLOBR) { 3114 pf->globr_count++;
3115 } else if (val == I40E_RESET_EMPR) { 3116 pf->empr_count++; 3117 
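/* an EMP reset is recovered by explicitly requesting one;
 * the service task picks this flag up later
 */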
set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); 3118 } 3119 } 3120 3121 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3122 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3123 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3124 } 3125 3126 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3127 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 3128 3129 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 3130 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3131 i40e_ptp_tx_hwtstamp(pf); 3132 } 3133 } 3134 3135 /* If a critical error is pending we have no choice but to reset the 3136 * device. 3137 * Report and mask out any remaining unexpected interrupts. 3138 */ 3139 icr0_remaining = icr0 & ena_mask; 3140 if (icr0_remaining) { 3141 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 3142 icr0_remaining); 3143 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 3144 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 3145 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 3146 dev_info(&pf->pdev->dev, "device will be reset\n"); 3147 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 3148 i40e_service_event_schedule(pf); 3149 } 3150 ena_mask &= ~icr0_remaining; 3151 } 3152 ret = IRQ_HANDLED; 3153 3154 enable_intr: 3155 /* re-enable interrupt causes */ 3156 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3157 if (!test_bit(__I40E_DOWN, &pf->state)) { 3158 i40e_service_event_schedule(pf); 3159 i40e_irq_dynamic_enable_icr0(pf); 3160 } 3161 3162 return ret; 3163 } 3164 3165 /** 3166 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 3167 * @tx_ring: tx ring to clean 3168 * @budget: how many cleans we're allowed 3169 * 3170 * Returns true if there's any budget left (e.g. the clean is finished) 3171 **/ 3172 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3173 { 3174 struct i40e_vsi *vsi = tx_ring->vsi; 3175 u16 i = tx_ring->next_to_clean; 3176 struct i40e_tx_buffer *tx_buf; 3177 struct i40e_tx_desc *tx_desc; 3178 3179 tx_buf = &tx_ring->tx_bi[i]; 3180 tx_desc = I40E_TX_DESC(tx_ring, i); 3181 i -= tx_ring->count; 3182 3183 do { 3184 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3185 3186 /* if next_to_watch is not set then there is no work pending */ 3187 if (!eop_desc) 3188 break; 3189 3190 /* prevent any other reads prior to eop_desc */ 3191 read_barrier_depends(); 3192 3193 /* if the descriptor isn't done, no work yet to do */ 3194 if (!(eop_desc->cmd_type_offset_bsz & 3195 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3196 break; 3197 3198 /* clear next_to_watch to prevent false hangs */ 3199 tx_buf->next_to_watch = NULL; 3200 3201 tx_desc->buffer_addr = 0; 3202 tx_desc->cmd_type_offset_bsz = 0; 3203 /* move past filter desc */ 3204 tx_buf++; 3205 tx_desc++; 3206 i++; 3207 if (unlikely(!i)) { 3208 i -= tx_ring->count; 3209 tx_buf = tx_ring->tx_bi; 3210 tx_desc = I40E_TX_DESC(tx_ring, 0); 3211 } 3212 /* unmap skb header data */ 3213 dma_unmap_single(tx_ring->dev, 3214 dma_unmap_addr(tx_buf, dma), 3215 dma_unmap_len(tx_buf, len), 3216 DMA_TO_DEVICE); 3217 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3218 kfree(tx_buf->raw_buf); 3219 3220 tx_buf->raw_buf = NULL; 3221 tx_buf->tx_flags = 0; 3222 tx_buf->next_to_watch = NULL; 3223 dma_unmap_len_set(tx_buf, len, 0); 3224 tx_desc->buffer_addr = 0; 3225 tx_desc->cmd_type_offset_bsz = 0; 3226 3227 /* move us past the eop_desc for start of next FD desc */ 3228 tx_buf++; 3229 tx_desc++; 3230 i++; 3231 if (unlikely(!i)) { 3232 i -= tx_ring->count; 3233 tx_buf = tx_ring->tx_bi; 3234 tx_desc = 
I40E_TX_DESC(tx_ring, 0); 3235 } 3236 3237 /* update budget accounting */ 3238 budget--; 3239 } while (likely(budget)); 3240 3241 i += tx_ring->count; 3242 tx_ring->next_to_clean = i; 3243 3244 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 3245 i40e_irq_dynamic_enable(vsi, 3246 tx_ring->q_vector->v_idx + vsi->base_vector); 3247 } 3248 return budget > 0; 3249 } 3250 3251 /** 3252 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3253 * @irq: interrupt number 3254 * @data: pointer to a q_vector 3255 **/ 3256 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3257 { 3258 struct i40e_q_vector *q_vector = data; 3259 struct i40e_vsi *vsi; 3260 3261 if (!q_vector->tx.ring) 3262 return IRQ_HANDLED; 3263 3264 vsi = q_vector->tx.ring->vsi; 3265 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3266 3267 return IRQ_HANDLED; 3268 } 3269 3270 /** 3271 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3272 * @vsi: the VSI being configured 3273 * @v_idx: vector index 3274 * @qp_idx: queue pair index 3275 **/ 3276 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3277 { 3278 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3279 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3280 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3281 3282 tx_ring->q_vector = q_vector; 3283 tx_ring->next = q_vector->tx.ring; 3284 q_vector->tx.ring = tx_ring; 3285 q_vector->tx.count++; 3286 3287 rx_ring->q_vector = q_vector; 3288 rx_ring->next = q_vector->rx.ring; 3289 q_vector->rx.ring = rx_ring; 3290 q_vector->rx.count++; 3291 } 3292 3293 /** 3294 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3295 * @vsi: the VSI being configured 3296 * 3297 * This function maps descriptor rings to the queue-specific vectors 3298 * we were allotted through the MSI-X enabling code. Ideally, we'd have 3299 * one vector per queue pair, but on a constrained vector budget, we 3300 * group the queue pairs as "efficiently" as possible. 3301 **/ 3302 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) 3303 { 3304 int qp_remaining = vsi->num_queue_pairs; 3305 int q_vectors = vsi->num_q_vectors; 3306 int num_ringpairs; 3307 int v_start = 0; 3308 int qp_idx = 0; 3309 3310 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to 3311 * group them so there are multiple queues per vector. 3312 * It is also important to go through all the vectors available to be 3313 * sure that if we don't use all the vectors, that the remaining vectors 3314 * are cleared. This is especially important when decreasing the 3315 * number of queues in use. 
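 * For example (illustrative numbers), 10 queue pairs spread over 4
 * vectors get DIV_ROUND_UP(10, 4) = 3 pairs on the first vector, then
 * 3, 2 and 2 on the remaining ones.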
3316 */ 3317 for (; v_start < q_vectors; v_start++) {
3318 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; 3319
3320 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 3321
3322 q_vector->num_ringpairs = num_ringpairs; 3323
3324 q_vector->rx.count = 0; 3325 q_vector->tx.count = 0;
3326 q_vector->rx.ring = NULL; 3327 q_vector->tx.ring = NULL; 3328
3329 while (num_ringpairs--) { 3330 map_vector_to_qp(vsi, v_start, qp_idx);
3331 qp_idx++; 3332 qp_remaining--; 3333 } 3334 } 3335 } 3336
3337 /** 3338 * i40e_vsi_request_irq - Request IRQ from the OS
3339 * @vsi: the VSI being configured 3340 * @basename: name for the vector 3341 **/
3342 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) 3343 {
3344 struct i40e_pf *pf = vsi->back; 3345 int err; 3346
3347 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3348 err = i40e_vsi_request_irq_msix(vsi, basename);
3349 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3350 err = request_irq(pf->pdev->irq, i40e_intr, 0, 3351 pf->misc_int_name, pf);
3352 else 3353 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 3354 pf->misc_int_name, pf); 3355
3356 if (err) 3357 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 3358 3359 return err; 3360 } 3361
3362 #ifdef CONFIG_NET_POLL_CONTROLLER
3363 /** 3364 * i40e_netpoll - A Polling 'interrupt' handler
3365 * @netdev: network interface device structure 3366 *
3367 * This is used by netconsole to send skbs without having to re-enable
3368 * interrupts. It's not called while the normal interrupt routine is executing. 3369 **/
3370 #ifdef I40E_FCOE 3371 void i40e_netpoll(struct net_device *netdev)
3372 #else 3373 static void i40e_netpoll(struct net_device *netdev) 3374 #endif 3375 {
3376 struct i40e_netdev_priv *np = netdev_priv(netdev); 3377 struct i40e_vsi *vsi = np->vsi;
3378 struct i40e_pf *pf = vsi->back; 3379 int i; 3380
3381 /* if interface is down do nothing */
3382 if (test_bit(__I40E_DOWN, &vsi->state)) 3383 return; 3384
3385 pf->flags |= I40E_FLAG_IN_NETPOLL;
3386 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3387 for (i = 0; i < vsi->num_q_vectors; i++)
3388 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3389 } else { 3390 i40e_intr(pf->pdev->irq, pf); /* i40e_intr expects the PF as its cookie */ 3391 } 3392 pf->flags &= ~I40E_FLAG_IN_NETPOLL; 3393 } 3394 #endif 3395
3396 /** 3397 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3398 * @pf: the PF being configured 3399 * @pf_q: the PF queue
3400 * @enable: enable or disable state of the queue 3401 *
3402 * This routine will wait for the given Tx queue of the PF to reach the
3403 * enabled or disabled state.
3404 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3405 * multiple retries; else will return 0 in case of success. 
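 * The wait is bounded: each retry reads QTX_ENA and then delays 10
 * usec, so the worst case is roughly I40E_QUEUE_WAIT_RETRY_LIMIT * 10
 * usec of busy-waiting.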
3406 **/ 3407 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3408 { 3409 int i; 3410 u32 tx_reg; 3411 3412 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3413 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3414 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3415 break; 3416 3417 udelay(10); 3418 } 3419 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3420 return -ETIMEDOUT; 3421 3422 return 0; 3423 } 3424 3425 /** 3426 * i40e_vsi_control_tx - Start or stop a VSI's rings 3427 * @vsi: the VSI being configured 3428 * @enable: start or stop the rings 3429 **/ 3430 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3431 { 3432 struct i40e_pf *pf = vsi->back; 3433 struct i40e_hw *hw = &pf->hw; 3434 int i, j, pf_q, ret = 0; 3435 u32 tx_reg; 3436 3437 pf_q = vsi->base_queue; 3438 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3439 3440 /* warn the TX unit of coming changes */ 3441 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 3442 if (!enable) 3443 udelay(10); 3444 3445 for (j = 0; j < 50; j++) { 3446 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3447 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3448 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3449 break; 3450 usleep_range(1000, 2000); 3451 } 3452 /* Skip if the queue is already in the requested state */ 3453 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3454 continue; 3455 3456 /* turn on/off the queue */ 3457 if (enable) { 3458 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3459 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3460 } else { 3461 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3462 } 3463 3464 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3465 3466 /* wait for the change to finish */ 3467 ret = i40e_pf_txq_wait(pf, pf_q, enable); 3468 if (ret) { 3469 dev_info(&pf->pdev->dev, 3470 "%s: VSI seid %d Tx ring %d %sable timeout\n", 3471 __func__, vsi->seid, pf_q, 3472 (enable ? "en" : "dis")); 3473 break; 3474 } 3475 } 3476 3477 if (hw->revision_id == 0) 3478 mdelay(50); 3479 return ret; 3480 } 3481 3482 /** 3483 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 3484 * @pf: the PF being configured 3485 * @pf_q: the PF queue 3486 * @enable: enable or disable state of the queue 3487 * 3488 * This routine will wait for the given Rx queue of the PF to reach the 3489 * enabled or disabled state. 3490 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3491 * multiple retries; else will return 0 in case of success. 
3492 **/ 3493 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3494 {
3495 int i; 3496 u32 rx_reg; 3497
3498 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3499 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3500 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3501 break; 3502
3503 udelay(10); 3504 }
3505 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3506 return -ETIMEDOUT; 3507 3508 return 0; 3509 } 3510
3511 /** 3512 * i40e_vsi_control_rx - Start or stop a VSI's rings
3513 * @vsi: the VSI being configured 3514 * @enable: start or stop the rings 3515 **/
3516 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) 3517 {
3518 struct i40e_pf *pf = vsi->back; 3519 struct i40e_hw *hw = &pf->hw;
3520 int i, j, pf_q, ret = 0; 3521 u32 rx_reg; 3522
3523 pf_q = vsi->base_queue;
3524 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3525 for (j = 0; j < 50; j++) { 3526 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3527 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == 3528 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) 3529 break;
3530 usleep_range(1000, 2000); 3531 } 3532
3533 /* Skip if the queue is already in the requested state */
3534 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3535 continue; 3536
3537 /* turn on/off the queue */
3538 if (enable) 3539 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3540 else 3541 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3542 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); 3543
3544 /* wait for the change to finish */
3545 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3546 if (ret) { 3547 dev_info(&pf->pdev->dev,
3548 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3549 __func__, vsi->seid, pf_q, 3550 (enable ? "en" : "dis"));
3551 break; 3552 } 3553 } 3554 3555 return ret; 3556 } 3557
3558 /** 3559 * i40e_vsi_control_rings - Start or stop a VSI's rings
3560 * @vsi: the VSI being configured 3561 * @request: start or stop the rings 3562 **/
3563 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) 3564 { 3565 int ret = 0; 3566
3567 /* do rx first for enable and last for disable */
3568 if (request) { 3569 ret = i40e_vsi_control_rx(vsi, request);
3570 if (ret) 3571 return ret;
3572 ret = i40e_vsi_control_tx(vsi, request);
3573 } else { 3574 /* Ignore return value, we need to shutdown whatever we can */
3575 i40e_vsi_control_tx(vsi, request); 3576 i40e_vsi_control_rx(vsi, request); 3577 } 3578 3579 return ret; 3580 } 3581
3582 /** 3583 * i40e_vsi_free_irq - Free the irq association with the OS
3584 * @vsi: the VSI being configured 3585 **/
3586 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) 3587 {
3588 struct i40e_pf *pf = vsi->back; 3589 struct i40e_hw *hw = &pf->hw;
3590 int base = vsi->base_vector; 3591 u32 val, qp; 3592 int i; 3593
3594 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3595 if (!vsi->q_vectors) 3596 return; 3597
3598 if (!vsi->irqs_ready) 3599 return; 3600
3601 vsi->irqs_ready = false;
3602 for (i = 0; i < vsi->num_q_vectors; i++) { 3603 u16 vector = i + base; 3604
3605 /* free only the irqs that were actually requested */
3606 if (!vsi->q_vectors[i] || 3607 !vsi->q_vectors[i]->num_ringpairs) 3608 continue; 3609
3610 /* clear the affinity_mask in the IRQ descriptor */
3611 irq_set_affinity_hint(pf->msix_entries[vector].vector, 3612 NULL);
3613 free_irq(pf->msix_entries[vector].vector, 3614 vsi->q_vectors[i]); 3615
3616 /* Tear down the interrupt queue link list 3617 *
3618 * We know that they come in pairs and always
3619 * the Rx first, then the Tx. 
To clear the 3620 * link list, stick the EOL value into the 3621 * next_q field of the registers. 3622 */ 3623 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3624 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3625 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3626 val |= I40E_QUEUE_END_OF_LIST 3627 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3628 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3629 3630 while (qp != I40E_QUEUE_END_OF_LIST) { 3631 u32 next; 3632 3633 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3634 3635 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3636 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3637 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3638 I40E_QINT_RQCTL_INTEVENT_MASK); 3639 3640 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3641 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3642 3643 wr32(hw, I40E_QINT_RQCTL(qp), val); 3644 3645 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3646 3647 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 3648 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 3649 3650 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3651 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3652 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3653 I40E_QINT_TQCTL_INTEVENT_MASK); 3654 3655 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3656 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3657 3658 wr32(hw, I40E_QINT_TQCTL(qp), val); 3659 qp = next; 3660 } 3661 } 3662 } else { 3663 free_irq(pf->pdev->irq, pf); 3664 3665 val = rd32(hw, I40E_PFINT_LNKLST0); 3666 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3667 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3668 val |= I40E_QUEUE_END_OF_LIST 3669 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 3670 wr32(hw, I40E_PFINT_LNKLST0, val); 3671 3672 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3673 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3674 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3675 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3676 I40E_QINT_RQCTL_INTEVENT_MASK); 3677 3678 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3679 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3680 3681 wr32(hw, I40E_QINT_RQCTL(qp), val); 3682 3683 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3684 3685 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3686 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3687 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3688 I40E_QINT_TQCTL_INTEVENT_MASK); 3689 3690 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3691 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3692 3693 wr32(hw, I40E_QINT_TQCTL(qp), val); 3694 } 3695 } 3696 3697 /** 3698 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 3699 * @vsi: the VSI being configured 3700 * @v_idx: Index of vector to be freed 3701 * 3702 * This function frees the memory allocated to the q_vector. In addition if 3703 * NAPI is enabled it will delete any references to the NAPI struct prior 3704 * to freeing the q_vector. 3705 **/ 3706 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 3707 { 3708 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3709 struct i40e_ring *ring; 3710 3711 if (!q_vector) 3712 return; 3713 3714 /* disassociate q_vector from rings */ 3715 i40e_for_each_ring(ring, q_vector->tx) 3716 ring->q_vector = NULL; 3717 3718 i40e_for_each_ring(ring, q_vector->rx) 3719 ring->q_vector = NULL; 3720 3721 /* only VSI w/ an associated netdev is set up w/ NAPI */ 3722 if (vsi->netdev) 3723 netif_napi_del(&q_vector->napi); 3724 3725 vsi->q_vectors[v_idx] = NULL; 3726 3727 kfree_rcu(q_vector, rcu); 3728 } 3729 3730 /** 3731 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3732 * @vsi: the VSI being un-configured 3733 * 3734 * This frees the memory allocated to the q_vectors and 3735 * deletes references to the NAPI struct. 
3736 **/ 3737 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 3738 { 3739 int v_idx; 3740 3741 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 3742 i40e_free_q_vector(vsi, v_idx); 3743 } 3744 3745 /** 3746 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 3747 * @pf: board private structure 3748 **/ 3749 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 3750 { 3751 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 3752 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3753 pci_disable_msix(pf->pdev); 3754 kfree(pf->msix_entries); 3755 pf->msix_entries = NULL; 3756 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 3757 pci_disable_msi(pf->pdev); 3758 } 3759 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 3760 } 3761 3762 /** 3763 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 3764 * @pf: board private structure 3765 * 3766 * We go through and clear interrupt specific resources and reset the structure 3767 * to pre-load conditions 3768 **/ 3769 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 3770 { 3771 int i; 3772 3773 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3774 for (i = 0; i < pf->num_alloc_vsi; i++) 3775 if (pf->vsi[i]) 3776 i40e_vsi_free_q_vectors(pf->vsi[i]); 3777 i40e_reset_interrupt_capability(pf); 3778 } 3779 3780 /** 3781 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 3782 * @vsi: the VSI being configured 3783 **/ 3784 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 3785 { 3786 int q_idx; 3787 3788 if (!vsi->netdev) 3789 return; 3790 3791 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3792 napi_enable(&vsi->q_vectors[q_idx]->napi); 3793 } 3794 3795 /** 3796 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 3797 * @vsi: the VSI being configured 3798 **/ 3799 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 3800 { 3801 int q_idx; 3802 3803 if (!vsi->netdev) 3804 return; 3805 3806 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3807 napi_disable(&vsi->q_vectors[q_idx]->napi); 3808 } 3809 3810 /** 3811 * i40e_vsi_close - Shut down a VSI 3812 * @vsi: the vsi to be quelled 3813 **/ 3814 static void i40e_vsi_close(struct i40e_vsi *vsi) 3815 { 3816 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 3817 i40e_down(vsi); 3818 i40e_vsi_free_irq(vsi); 3819 i40e_vsi_free_tx_resources(vsi); 3820 i40e_vsi_free_rx_resources(vsi); 3821 } 3822 3823 /** 3824 * i40e_quiesce_vsi - Pause a given VSI 3825 * @vsi: the VSI being paused 3826 **/ 3827 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 3828 { 3829 if (test_bit(__I40E_DOWN, &vsi->state)) 3830 return; 3831 3832 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 3833 if (vsi->netdev && netif_running(vsi->netdev)) { 3834 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3835 } else { 3836 i40e_vsi_close(vsi); 3837 } 3838 } 3839 3840 /** 3841 * i40e_unquiesce_vsi - Resume a given VSI 3842 * @vsi: the VSI being resumed 3843 **/ 3844 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 3845 { 3846 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 3847 return; 3848 3849 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 3850 if (vsi->netdev && netif_running(vsi->netdev)) 3851 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3852 else 3853 i40e_vsi_open(vsi); /* this clears the DOWN bit */ 3854 } 3855 3856 /** 3857 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 3858 * @pf: the PF 3859 **/ 3860 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 3861 { 3862 int v; 3863 3864 
/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = 0;
	int i;

	/* Scan the ETS Config Priority Table to find the
	 * traffic class enabled for each priority, and use
	 * the highest traffic class index to get the
	 * number of traffic classes enabled
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
			num_tc = dcbcfg->etscfg.prioritytable[i];
	}

	/* Traffic class index starts from zero so
	 * increment to return the actual count
	 */
	return num_tc + 1;
}

/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return a bitmap of the
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= 1 << i;

	return enabled_tc;
}

/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always use a single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* In MFP mode return the count of TCs enabled for this PF */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		enabled_tc = pf->hw.func_caps.enabled_tcmap;
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i))
				num_tc++;
		}
		return num_tc;
	}

	/* SFP mode will be enabled for all TCs on port */
	return i40e_dcb_get_num_tc(dcbcfg);
}

/**
 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
 * @pf: PF being queried
 *
 * Return a bitmap for the first enabled traffic class for this PF.
 **/
static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
{
	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
	u8 i = 0;

	if (!enabled_tc)
		return 0x1; /* TC0 */

	/* Find the first enabled TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			break;
	}

	return 1 << i;
}
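/* Worked example (illustrative values): with a DCBx ETS priority table of
 * {0, 0, 1, 2, 0, 0, 0, 0} the highest TC index referenced is 2, so
 * i40e_dcb_get_num_tc() returns 3 and i40e_dcb_get_enabled_tc() returns
 * 0x07 (TC0|TC1|TC2).  Note that the map built from the DCBx config is
 * always a contiguous run of low bits; a sparse map such as 0x05 can only
 * come from func_caps.enabled_tcmap on the MFP path above.
 */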
/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return i40e_pf_get_default_tc(pf);

	/* In MFP mode the enabled TCs are set by the FW */
	if (pf->flags & I40E_FLAG_MFP_ENABLED)
		return pf->hw.func_caps.enabled_tcmap;

	/* In SFP mode we want the PF to be enabled for all TCs */
	return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
						  NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status aq_ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
					  NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
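/* Unpacking example for the "3 bits out of 4" scheme above (illustrative):
 * the two little-endian 16-bit words of tc_bw_max are merged into one
 * 32-bit value holding a 4-bit nibble per TC, of which only the low 3 bits
 * are meaningful.  With tc_bw_max = 0x00003210, TC0's quanta is
 * (0x3210 >> 0) & 0x7 = 0, TC1's is (0x3210 >> 4) & 0x7 = 1, TC2's is
 * (0x3210 >> 8) & 0x7 = 2, and TC3's is (0x3210 >> 12) & 0x7 = 3.
 */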
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up the actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled TCs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
		 * will set numtc for the netdev as 2, which will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & (1 << i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched, not the entire info,
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
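/* Mapping example for the UP2TC loop above (illustrative values): with
 * enabled_tc = 0x9 the driver exposes two netdev TCs, where hardware TC0
 * maps to netdev TC 0 and hardware TC3 maps to netdev TC 1.  A priority
 * table entry of prioritytable[5] = 3 then results in
 *
 *	netdev_set_prio_tc_map(netdev, 5, 1);
 *
 * i.e. user priority 5 lands on netdev TC 1, which the stack uses to pick
 * a queue from the qoffset/qcount range registered for that TC.
 */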
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with the updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "veb bw config failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return ret;
}
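/* Example of the "equal BW share" policy used by both functions above
 * (illustrative): for enabled_tc = 0x3 the credits array becomes
 * {1, 1, 0, 0, 0, 0, 0, 0}.  Because absolute_credits is left unset, the
 * values act as relative weights, so equal non-zero entries request an
 * even bandwidth split between TC0 and TC1; a bare "1" per enabled TC is
 * therefore sufficient here.
 */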
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller will have quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components as possible */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components as possible */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	if (pf->hw.func_caps.npar_enable)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX is in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
		}
	} else {
		dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
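/* The helper below prints a one-line summary on carrier changes.  Sample
 * output (illustrative) for a 40G link with full flow control:
 *
 *	i40e 0000:01:00.0 eth0: NIC Link is Up 40 Gbps Full Duplex, Flow Control: RX/TX
 *
 * SPEED_SIZE and FC_SIZE just bound the scratch strings; strlcpy()
 * guarantees NUL-termination even if a longer string is ever passed in.
 */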
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
 **/
static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	char speed[SPEED_SIZE] = "Unknown";
	char fc[FC_SIZE] = "RX/TX";

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		strlcpy(speed, "40 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_10GB:
		strlcpy(speed, "10 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_1GB:
		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
		break;
	default:
		break;
	}

	switch (vsi->back->hw.fc.current_mode) {
	case I40E_FC_FULL:
		strlcpy(fc, "RX/TX", FC_SIZE);
		break;
	case I40E_FC_TX_PAUSE:
		strlcpy(fc, "TX", FC_SIZE);
		break;
	case I40E_FC_RX_PAUSE:
		strlcpy(fc, "RX", FC_SIZE);
		break;
	default:
		strlcpy(fc, "None", FC_SIZE);
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
		    speed, fc);
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 set_fc_aq_fail = 0;
	int err;

	/* force flow control off */
	i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR)
		i40e_fdir_filter_restore(vsi);
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset.  The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}
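/* The test_and_set_bit() loop in i40e_vsi_reinit_locked() is the driver's
 * lightweight mutual-exclusion pattern for slow-path reconfiguration: the
 * first caller atomically wins __I40E_CONFIG_BUSY, later callers sleep in
 * 1-2 ms slices until the bit clears.  Generic shape of the pattern
 * (sketch, assuming a bit in some unsigned long state word):
 *
 *	while (test_and_set_bit(MY_BUSY_BIT, &state))
 *		usleep_range(1000, 2000);
 *	// ... exclusive section, may sleep ...
 *	clear_bit(MY_BUSY_BIT, &state);
 *
 * Unlike a mutex there is no fairness or lockdep coverage, which is
 * acceptable here because contention is rare and brief.
 */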
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
#ifdef I40E_FCOE
int i40e_setup_tc(struct net_device *netdev, u8 tc)
#else
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB is enabled before continuing */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP is enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether the TC count is within the enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate a TC map for the number of TCs requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= (1 << i);

	/* Requesting the same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}
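/* i40e_setup_tc() is reached through the ndo_setup_tc netdev op, typically
 * from the mqprio qdisc.  Illustrative userspace trigger (interface name
 * and values are an example only):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 1 2 3 0 0 0 0 hw 1
 *
 * which asks the driver for 4 traffic classes with the given priority map;
 * the "hw 1" flag is what hands the TC layout to hardware via this hook.
 */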
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev)
#else
static int i40e_open(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
#endif

	return 0;
}

/**
 * i40e_vsi_open - bring up a VSI
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[IFNAMSIZ];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
			 dev_driver_string(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return err;
}
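/* The error labels in i40e_vsi_open() unwind in strict reverse order of
 * setup: a failure after the IRQ request frees the IRQ, then Rx resources,
 * then Tx resources, so each label only undoes work that is known to have
 * completed.  Jumping to a label implicitly skips the teardown of anything
 * not yet allocated, which is the standard kernel goto-unwind idiom and
 * why err_setup_rx is reused for several early failures.
 */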
/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: pointer to the PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	pf->fdir_pf_active_filters = 0;
}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
#else
static int i40e_close(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}

/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());

	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	/* do the biggest reset indicated */
	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except it does *not* include the
		 * MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {

		/* Request a Firmware Reset
		 *
		 * Same as Global reset, plus restarting the
		 * embedded firmware engine.
		 */
		/* enable EMP Reset */
		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);

		/* force the reset */
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);
	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that need to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
		return;
	}
}
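/* Requesting a reset from elsewhere in the driver is just a bitmap passed
 * to i40e_do_reset() (or, from contexts that do not yet hold the rtnl
 * lock, to i40e_do_reset_safe() defined below).  For example, the error
 * unwind in i40e_vsi_open() above issues:
 *
 *	i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
 *
 * Deferred requests instead set the corresponding __I40E_*_REQUESTED bit
 * in pf->state and let i40e_reset_subtask() collect them later.
 */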
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed, reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	return need_reconfig;
}

/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event is for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Convert/store the DCBX data from the LLDPDU temporarily */
	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
	ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
	if (ret) {
		/* Error in LLDPDU parsing, so return */
		dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);

	/* Overwrite the old configuration with the new one */
	*dcbx_cfg = tmp_dcbx_cfg;

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	/* Reconfiguration is needed, so quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Apply the configuration changes to the VEBs/VSIs */
	i40e_dcb_reconfigure(pf);

	i40e_pf_unquiesce_all_vsi(pf);
exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	int val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get the count of total FD filters programmed
 * @pf: board private structure
 **/
int i40e_get_current_fd_count(struct i40e_pf *pf)
{
	int val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}
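/* Register decode note for the two counters above (illustrative): the
 * PFQF_FDSTAT register packs both filter counts, the guaranteed count in
 * the low field and the best-effort count in a higher field.  With a raw
 * value of, say, 0x00200010, the guaranteed count would be 0x10 and the
 * best-effort count 0x20 once shifted down, for 0x30 programmed filters
 * in total.  The exact field widths come from the _MASK/_SHIFT macros.
 */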
/**
 * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if it was disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	u32 fcnt_prog, fcnt_avail;

	/* Check if FD SB or ATR was auto disabled and if there is enough
	 * room to re-enable
	 */
	if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
	    (pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;
	fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}
}

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
		return;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state))
		return;
	i40e_fdir_check_and_reenable(pf);

	if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
	    (pf->flags & I40E_FLAG_FD_SB_ENABLED))
		pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
#endif
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
		break;

	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}
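/* Propagation sketch for i40e_veb_link_event() (illustrative topology):
 *
 *	MAC -- VEB A -- VSI 0 (LAN netdev)
 *	          \--- VEB B -- VSI 1, VSI 2
 *
 * A link event on VEB A first recurses into VEB B (depth first), notifying
 * VSI 1 and VSI 2, and only then notifies VEB A's own VSI 0.  Matching is
 * done purely by comparing each element's uplink_seid against the VEB's
 * seid, so floating elements with no uplink are never visited.
 */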
5262 */ 5263 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 5264 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 5265 else 5266 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); 5267 5268 if (pf->vf) 5269 i40e_vc_notify_link_state(pf); 5270 5271 if (pf->flags & I40E_FLAG_PTP) 5272 i40e_ptp_set_increment(pf); 5273 } 5274 5275 /** 5276 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts 5277 * @pf: board private structure 5278 * 5279 * Set the per-queue flags to request a check for stuck queues in the irq 5280 * clean functions, then force interrupts to be sure the irq clean is called. 5281 **/ 5282 static void i40e_check_hang_subtask(struct i40e_pf *pf) 5283 { 5284 int i, v; 5285 5286 /* If we're down or resetting, just bail */ 5287 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5288 return; 5289 5290 /* for each VSI/netdev 5291 * for each Tx queue 5292 * set the check flag 5293 * for each q_vector 5294 * force an interrupt 5295 */ 5296 for (v = 0; v < pf->num_alloc_vsi; v++) { 5297 struct i40e_vsi *vsi = pf->vsi[v]; 5298 int armed = 0; 5299 5300 if (!pf->vsi[v] || 5301 test_bit(__I40E_DOWN, &vsi->state) || 5302 (vsi->netdev && !netif_carrier_ok(vsi->netdev))) 5303 continue; 5304 5305 for (i = 0; i < vsi->num_queue_pairs; i++) { 5306 set_check_for_tx_hang(vsi->tx_rings[i]); 5307 if (test_bit(__I40E_HANG_CHECK_ARMED, 5308 &vsi->tx_rings[i]->state)) 5309 armed++; 5310 } 5311 5312 if (armed) { 5313 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 5314 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, 5315 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 5316 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); 5317 } else { 5318 u16 vec = vsi->base_vector - 1; 5319 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 5320 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); 5321 for (i = 0; i < vsi->num_q_vectors; i++, vec++) 5322 wr32(&vsi->back->hw, 5323 I40E_PFINT_DYN_CTLN(vec), val); 5324 } 5325 i40e_flush(&vsi->back->hw); 5326 } 5327 } 5328 } 5329 5330 /** 5331 * i40e_watchdog_subtask - Check and bring link up 5332 * @pf: board private structure 5333 **/ 5334 static void i40e_watchdog_subtask(struct i40e_pf *pf) 5335 { 5336 int i; 5337 5338 /* if interface is down do nothing */ 5339 if (test_bit(__I40E_DOWN, &pf->state) || 5340 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5341 return; 5342 5343 /* Update the stats for active netdevs so the network stack 5344 * can look at updated numbers whenever it cares to 5345 */ 5346 for (i = 0; i < pf->num_alloc_vsi; i++) 5347 if (pf->vsi[i] && pf->vsi[i]->netdev) 5348 i40e_update_stats(pf->vsi[i]); 5349 5350 /* Update the stats for the active switching components */ 5351 for (i = 0; i < I40E_MAX_VEB; i++) 5352 if (pf->veb[i]) 5353 i40e_update_veb_stats(pf->veb[i]); 5354 5355 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 5356 } 5357 5358 /** 5359 * i40e_reset_subtask - Set up for resetting the device and driver 5360 * @pf: board private structure 5361 **/ 5362 static void i40e_reset_subtask(struct i40e_pf *pf) 5363 { 5364 u32 reset_flags = 0; 5365 5366 rtnl_lock(); 5367 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 5368 reset_flags |= (1 << __I40E_REINIT_REQUESTED); 5369 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 5370 } 5371 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 5372 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); 5373 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 5374 } 5375 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 5376 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); 5377 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 5378 } 
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		goto unlock;
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);

unlock:
	rtnl_unlock();
}
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;

	/* save off old link status information */
	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
	       sizeof(pf->hw.phy.link_info_old));

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
	hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
	hw_link_info->link_info = status->link_info;
	hw_link_info->an_info = status->an_info;
	hw_link_info->ext_info = status->ext_info;
	hw_link_info->lse_enable =
		le16_to_cpu(status->command_flags) &
			    I40E_AQ_LSE_ENABLE;

	/* process the event */
	i40e_link_event(pf);

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct,
	 * then see if the status changed while processing the
	 * initial event.
	 */
	i40e_update_link_info(&pf->hw, true);
	i40e_link_event(pf);
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			break;
		} else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_size);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, &pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, &pf->state);
	}
}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
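/* Rebuild-order note for i40e_reconstitute_veb() (sketch of the recursion):
 * for each VEB the owner VSI must exist in the switch before the VEB can
 * be created, and the VEB must exist before its member VSIs and child
 * VEBs can be attached, hence the fixed sequence
 *
 *	add owner VSI -> add VEB -> add member VSIs -> recurse into sub-VEBs
 *
 * Driver-local idx values are used for matching because hardware seid
 * numbers are reassigned by the firmware across a reset.
 */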
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
	    (pf->hw.aq.fw_maj_ver < 2)) {
		pf->hw.func_caps.num_msix_vectors++;
		pf->hw.func_caps.num_msix_vectors_vf++;
	}

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		     + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 &&
	    (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}

static int i40e_vsi_clear(struct i40e_vsi *vsi);
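/* The do/while loop in i40e_get_capabilities() is the usual grow-and-retry
 * shape for variable-length AQ responses: start with room for 40 elements,
 * and if the firmware answers I40E_AQ_RC_ENOMEM it also reports the size
 * it actually needs in data_size, so the next iteration simply reallocates
 * with buf_len = data_size.  The loop converges because the second pass
 * uses the firmware-provided size; any other AQ error bails out with
 * -ENODEV instead of retrying.
 */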
i40e_vsi_release(pf->vsi[i]);
5788 			break;
5789 		}
5790 	}
5791 }
5792
5793 /**
5794  * i40e_prep_for_reset - prep for the core to reset
5795  * @pf: board private structure
5796  *
5797  * Close up the VFs and other things in prep for PF reset.
5798  **/
5799 static void i40e_prep_for_reset(struct i40e_pf *pf)
5800 {
5801 	struct i40e_hw *hw = &pf->hw;
5802 	i40e_status ret = 0;
5803 	u32 v;
5804
5805 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
5806 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
5807 		return;
5808
5809 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
5810
5811 	/* quiesce the VSIs and their queues that are not already DOWN */
5812 	i40e_pf_quiesce_all_vsi(pf);
5813
5814 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5815 		if (pf->vsi[v])
5816 			pf->vsi[v]->seid = 0;
5817 	}
5818
5819 	i40e_shutdown_adminq(&pf->hw);
5820
5821 	/* call shutdown HMC */
5822 	if (hw->hmc.hmc_obj) {
5823 		ret = i40e_shutdown_lan_hmc(hw);
5824 		if (ret)
5825 			dev_warn(&pf->pdev->dev,
5826 				 "shutdown_lan_hmc failed: %d\n", ret);
5827 	}
5828 }
5829
5830 /**
5831  * i40e_send_version - update firmware with driver version
5832  * @pf: PF struct
5833  **/
5834 static void i40e_send_version(struct i40e_pf *pf)
5835 {
5836 	struct i40e_driver_version dv;
5837
5838 	dv.major_version = DRV_VERSION_MAJOR;
5839 	dv.minor_version = DRV_VERSION_MINOR;
5840 	dv.build_version = DRV_VERSION_BUILD;
5841 	dv.subbuild_version = 0;
5842 	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
5843 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5844 }
5845
5846 /**
5847  * i40e_reset_and_rebuild - reset and rebuild using a saved config
5848  * @pf: board private structure
5849  * @reinit: if the Main VSI needs to be re-initialized.
5850  **/
5851 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5852 {
5853 	struct i40e_hw *hw = &pf->hw;
5854 	i40e_status ret;
5855 	u32 v;
5856
5857 	/* Now we wait for GRST to settle out.
5858 	 * We don't have to delete the VEBs or VSIs from the hw switch
5859 	 * because the reset will make them disappear.
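	 * The SW view kept in pf->vsi[] and pf->veb[] survives, and is
	 * what the rebuild code below reconstitutes from.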
5860 	 */
5861 	ret = i40e_pf_reset(hw);
5862 	if (ret) {
5863 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
5864 		goto end_core_reset;
5865 	}
5866 	pf->pfr_count++;
5867
5868 	if (test_bit(__I40E_DOWN, &pf->state))
5869 		goto end_core_reset;
5870 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
5871
5872 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5873 	ret = i40e_init_adminq(&pf->hw);
5874 	if (ret) {
5875 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
5876 		goto end_core_reset;
5877 	}
5878
5879 	/* re-verify the eeprom if we just had an EMP reset */
5880 	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
5881 		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
5882 		i40e_verify_eeprom(pf);
5883 	}
5884
5885 	i40e_clear_pxe_mode(hw);
5886 	ret = i40e_get_capabilities(pf);
5887 	if (ret) {
5888 		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
5889 			 ret);
5890 		goto end_core_reset;
5891 	}
5892
5893 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
5894 				hw->func_caps.num_rx_qp,
5895 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
5896 	if (ret) {
5897 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
5898 		goto end_core_reset;
5899 	}
5900 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
5901 	if (ret) {
5902 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
5903 		goto end_core_reset;
5904 	}
5905
5906 #ifdef CONFIG_I40E_DCB
5907 	ret = i40e_init_pf_dcb(pf);
5908 	if (ret) {
5909 		dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
5910 		goto end_core_reset;
5911 	}
5912 #endif /* CONFIG_I40E_DCB */
5913 #ifdef I40E_FCOE
5914 	ret = i40e_init_pf_fcoe(pf);
5915 	if (ret)
5916 		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
5917
5918 #endif
5919 	/* do basic switch setup */
5920 	ret = i40e_setup_pf_switch(pf, reinit);
5921 	if (ret)
5922 		goto end_core_reset;
5923
5924 	/* Rebuild the VSIs and VEBs that existed before reset.
5925 	 * They are still in our local switch element arrays, so only
5926 	 * need to rebuild the switch model in the HW.
5927 	 *
5928 	 * If there were VEBs but the reconstitution failed, we'll try
5929 	 * to recover minimal use by getting the basic PF VSI working.
5930 	 */
5931 	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
5932 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
5933 		/* find the one VEB connected to the MAC, and find orphans */
5934 		for (v = 0; v < I40E_MAX_VEB; v++) {
5935 			if (!pf->veb[v])
5936 				continue;
5937
5938 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
5939 			    pf->veb[v]->uplink_seid == 0) {
5940 				ret = i40e_reconstitute_veb(pf->veb[v]);
5941
5942 				if (!ret)
5943 					continue;
5944
5945 				/* If Main VEB failed, we're in deep doodoo,
5946 				 * so give up rebuilding the switch and set up
5947 				 * for minimal rebuild of PF VSI.
5948 				 * If orphan failed, we'll report the error
5949 				 * but try to keep going.
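				 * (An orphan VEB is one with uplink_seid of
				 * 0; losing it only costs that branch of
				 * the switch tree.)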
5950 				 */
5951 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
5952 					dev_info(&pf->pdev->dev,
5953 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
5954 						 ret);
5955 					pf->vsi[pf->lan_vsi]->uplink_seid
5956 								= pf->mac_seid;
5957 					break;
5958 				} else if (pf->veb[v]->uplink_seid == 0) {
5959 					dev_info(&pf->pdev->dev,
5960 						 "rebuild of orphan VEB failed: %d\n",
5961 						 ret);
5962 				}
5963 			}
5964 		}
5965 	}
5966
5967 	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
5968 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
5969 		/* no VEB, so rebuild only the Main VSI */
5970 		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
5971 		if (ret) {
5972 			dev_info(&pf->pdev->dev,
5973 				 "rebuild of Main VSI failed: %d\n", ret);
5974 			goto end_core_reset;
5975 		}
5976 	}
5977
5978 	/* reinit the misc interrupt */
5979 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5980 		ret = i40e_setup_misc_vector(pf);
5981
5982 	/* restart the VSIs that were rebuilt and running before the reset */
5983 	i40e_pf_unquiesce_all_vsi(pf);
5984
5985 	if (pf->num_alloc_vfs) {
5986 		for (v = 0; v < pf->num_alloc_vfs; v++)
5987 			i40e_reset_vf(&pf->vf[v], true);
5988 	}
5989
5990 	/* tell the firmware that we're starting */
5991 	i40e_send_version(pf);
5992
5993 end_core_reset:
5994 	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5995 }
5996
5997 /**
5998  * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
5999  * @pf: board private structure
6000  *
6001  * Close up the VFs and other things in prep for a Core Reset,
6002  * then get ready to rebuild the world.
6003  **/
6004 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6005 {
6006 	i40e_prep_for_reset(pf);
6007 	i40e_reset_and_rebuild(pf, false);
6008 }
6009
6010 /**
6011  * i40e_handle_mdd_event - handle a Malicious Driver Detection interrupt
6012  * @pf: pointer to the pf structure
6013  *
6014  * Called from the MDD irq handler to identify possibly malicious vfs
6015  **/
6016 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6017 {
6018 	struct i40e_hw *hw = &pf->hw;
6019 	bool mdd_detected = false;
6020 	bool pf_mdd_detected = false;
6021 	struct i40e_vf *vf;
6022 	u32 reg;
6023 	int i;
6024
6025 	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6026 		return;
6027
6028 	/* find what triggered the MDD event */
6029 	reg = rd32(hw, I40E_GL_MDET_TX);
6030 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6031 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6032 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
6033 		u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6034 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
6035 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6036 				I40E_GL_MDET_TX_EVENT_SHIFT;
6037 		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6038 				I40E_GL_MDET_TX_QUEUE_SHIFT;
6039 		dev_info(&pf->pdev->dev,
6040 			 "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
6041 			 event, queue, pf_num, vf_num);
6042 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6043 		mdd_detected = true;
6044 	}
6045 	reg = rd32(hw, I40E_GL_MDET_RX);
6046 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6047 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6048 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
6049 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6050 				I40E_GL_MDET_RX_EVENT_SHIFT;
6051 		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6052 				I40E_GL_MDET_RX_QUEUE_SHIFT;
6053 		dev_info(&pf->pdev->dev,
6054 			 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6055 			 event, queue, func);
6056 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6057 		mdd_detected = true;
6058 	}
6059
6060 	if (mdd_detected) {
6061 		reg = rd32(hw,
			   I40E_PF_MDET_TX);
6062 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6063 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6064 			dev_info(&pf->pdev->dev,
6065 				 "MDD TX event is for this function 0x%08x, requesting PF reset.\n",
6066 				 reg);
6067 			pf_mdd_detected = true;
6068 		}
6069 		reg = rd32(hw, I40E_PF_MDET_RX);
6070 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6071 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6072 			dev_info(&pf->pdev->dev,
6073 				 "MDD RX event is for this function 0x%08x, requesting PF reset.\n",
6074 				 reg);
6075 			pf_mdd_detected = true;
6076 		}
6077 		/* Queue belongs to the PF, initiate a reset */
6078 		if (pf_mdd_detected) {
6079 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6080 			i40e_service_event_schedule(pf);
6081 		}
6082 	}
6083
6084 	/* see if one of the VFs needs its hand slapped */
6085 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6086 		vf = &(pf->vf[i]);
6087 		reg = rd32(hw, I40E_VP_MDET_TX(i));
6088 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6089 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6090 			vf->num_mdd_events++;
6091 			dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
6092 		}
6093
6094 		reg = rd32(hw, I40E_VP_MDET_RX(i));
6095 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6096 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6097 			vf->num_mdd_events++;
6098 			dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
6099 		}
6100
6101 		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6102 			dev_info(&pf->pdev->dev,
6103 				 "Too many MDD events on VF %d, disabled\n", i);
6104 			dev_info(&pf->pdev->dev,
6105 				 "Use PF Control I/F to re-enable the VF\n");
6106 			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6107 		}
6108 	}
6109
6110 	/* re-enable mdd interrupt cause */
6111 	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6112 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6113 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6114 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6115 	i40e_flush(hw);
6116 }
6117
6118 #ifdef CONFIG_I40E_VXLAN
6119 /**
6120  * i40e_sync_vxlan_filters_subtask - Sync pending VXLAN ports with HW
6121  * @pf: board private structure
6122  **/
6123 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6124 {
6125 	struct i40e_hw *hw = &pf->hw;
6126 	i40e_status ret;
6127 	u8 filter_index;
6128 	__be16 port;
6129 	int i;
6130
6131 	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6132 		return;
6133
6134 	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6135
6136 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6137 		if (pf->pending_vxlan_bitmap & (1 << i)) {
6138 			pf->pending_vxlan_bitmap &= ~(1 << i);
6139 			port = pf->vxlan_ports[i];
6140 			ret = port ?
6141 			      i40e_aq_add_udp_tunnel(hw, ntohs(port),
6142 						     I40E_AQC_TUNNEL_TYPE_VXLAN,
6143 						     &filter_index, NULL)
6144 			      : i40e_aq_del_udp_tunnel(hw, i, NULL);
6145
6146 			if (ret) {
6147 				dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
6148 					 port ? "adding" : "deleting",
6149 					 ntohs(port), i);
6150
6151 				pf->vxlan_ports[i] = 0;
6152 			} else {
6153 				dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
6154 					 port ? "Added" : "Deleted",
6155 					 ntohs(port), port ? i : filter_index);
6156 			}
6157 		}
6158 	}
6159 }
6160
6161 #endif
6162 /**
6163  * i40e_service_task - Run the driver's async subtasks
6164  * @work: pointer to work_struct containing our data
6165  **/
6166 static void i40e_service_task(struct work_struct *work)
6167 {
6168 	struct i40e_pf *pf = container_of(work,
6169 					  struct i40e_pf,
6170 					  service_task);
6171 	unsigned long start_time = jiffies;
6172
6173 	/* don't bother with service tasks if a reset is in progress */
6174 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6175 		i40e_service_event_complete(pf);
6176 		return;
6177 	}
6178
6179 	i40e_reset_subtask(pf);
6180 	i40e_handle_mdd_event(pf);
6181 	i40e_vc_process_vflr_event(pf);
6182 	i40e_watchdog_subtask(pf);
6183 	i40e_fdir_reinit_subtask(pf);
6184 	i40e_check_hang_subtask(pf);
6185 	i40e_sync_filters_subtask(pf);
6186 #ifdef CONFIG_I40E_VXLAN
6187 	i40e_sync_vxlan_filters_subtask(pf);
6188 #endif
6189 	i40e_clean_adminq_subtask(pf);
6190
6191 	i40e_service_event_complete(pf);
6192
6193 	/* If the tasks have taken longer than one timer cycle or there
6194 	 * is more work to be done, reschedule the service task now
6195 	 * rather than wait for the timer to tick again.
6196 	 */
6197 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6198 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
6199 	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
6200 	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6201 		i40e_service_event_schedule(pf);
6202 }
6203
6204 /**
6205  * i40e_service_timer - timer callback
6206  * @data: pointer to PF struct
6207  **/
6208 static void i40e_service_timer(unsigned long data)
6209 {
6210 	struct i40e_pf *pf = (struct i40e_pf *)data;
6211
6212 	mod_timer(&pf->service_timer,
6213 		  round_jiffies(jiffies + pf->service_timer_period));
6214 	i40e_service_event_schedule(pf);
6215 }
6216
6217 /**
6218  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6219  * @vsi: the VSI being configured
6220  **/
6221 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6222 {
6223 	struct i40e_pf *pf = vsi->back;
6224
6225 	switch (vsi->type) {
6226 	case I40E_VSI_MAIN:
6227 		vsi->alloc_queue_pairs = pf->num_lan_qps;
6228 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6229 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6230 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6231 			vsi->num_q_vectors = pf->num_lan_msix;
6232 		else
6233 			vsi->num_q_vectors = 1;
6234
6235 		break;
6236
6237 	case I40E_VSI_FDIR:
6238 		vsi->alloc_queue_pairs = 1;
6239 		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6240 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6241 		vsi->num_q_vectors = 1;
6242 		break;
6243
6244 	case I40E_VSI_VMDQ2:
6245 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6246 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6247 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6248 		vsi->num_q_vectors = pf->num_vmdq_msix;
6249 		break;
6250
6251 	case I40E_VSI_SRIOV:
6252 		vsi->alloc_queue_pairs = pf->num_vf_qps;
6253 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6254 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6255 		break;
6256
6257 #ifdef I40E_FCOE
6258 	case I40E_VSI_FCOE:
6259 		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6260 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6261 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6262 		vsi->num_q_vectors = pf->num_fcoe_msix;
6263 		break;
6264
6265 #endif /* I40E_FCOE */
6266 	default:
6267 		WARN_ON(1);
6268 		return -ENODATA;
6269 	}
6270
6271 	return 0;
6272 }
6273
6274 /**
6275  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
6276  * @vsi: pointer to the VSI
6277  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6278  *
6279  * On error: returns error code (negative)
6280  * On success: returns 0
6281  **/
6282 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6283 {
6284 	int size;
6285 	int ret = 0;
6286
6287 	/* allocate memory for both Tx and Rx ring pointers */
6288 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6289 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6290 	if (!vsi->tx_rings)
6291 		return -ENOMEM;
6292 	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6293
6294 	if (alloc_qvectors) {
6295 		/* allocate memory for q_vector pointers */
6296 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6297 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6298 		if (!vsi->q_vectors) {
6299 			ret = -ENOMEM;
6300 			goto err_vectors;
6301 		}
6302 	}
6303 	return ret;
6304
6305 err_vectors:
6306 	kfree(vsi->tx_rings);
6307 	return ret;
6308 }
6309
6310 /**
6311  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6312  * @pf: board private structure
6313  * @type: type of VSI
6314  *
6315  * On error: returns error code (negative)
6316  * On success: returns vsi index in PF (positive)
6317  **/
6318 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6319 {
6320 	int ret = -ENODEV;
6321 	struct i40e_vsi *vsi;
6322 	int vsi_idx;
6323 	int i;
6324
6325 	/* Need to protect the allocation of the VSIs at the PF level */
6326 	mutex_lock(&pf->switch_mutex);
6327
6328 	/* VSI list may be fragmented if VSI creation/destruction has
6329 	 * been happening.  We can afford to do a quick scan to look
6330 	 * for any free VSIs in the list.
6331 	 *
6332 	 * find next empty vsi slot, looping back around if necessary
6333 	 */
6334 	i = pf->next_vsi;
6335 	while (i < pf->num_alloc_vsi && pf->vsi[i])
6336 		i++;
6337 	if (i >= pf->num_alloc_vsi) {
6338 		i = 0;
6339 		while (i < pf->next_vsi && pf->vsi[i])
6340 			i++;
6341 	}
6342
6343 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
6344 		vsi_idx = i;             /* Found one! */
6345 	} else {
6346 		ret = -ENODEV;
6347 		goto unlock_pf;  /* out of VSI slots! */
6348 	}
6349 	pf->next_vsi = ++i;
6350
6351 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6352 	if (!vsi) {
6353 		ret = -ENOMEM;
6354 		goto unlock_pf;
6355 	}
6356 	vsi->type = type;
6357 	vsi->back = pf;
6358 	set_bit(__I40E_DOWN, &vsi->state);
6359 	vsi->flags = 0;
6360 	vsi->idx = vsi_idx;
6361 	vsi->rx_itr_setting = pf->rx_itr_default;
6362 	vsi->tx_itr_setting = pf->tx_itr_default;
6363 	vsi->netdev_registered = false;
6364 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6365 	INIT_LIST_HEAD(&vsi->mac_filter_list);
6366 	vsi->irqs_ready = false;
6367
6368 	ret = i40e_set_num_rings_in_vsi(vsi);
6369 	if (ret)
6370 		goto err_rings;
6371
6372 	ret = i40e_vsi_alloc_arrays(vsi, true);
6373 	if (ret)
6374 		goto err_rings;
6375
6376 	/* Setup default MSIX irq handler for VSI */
6377 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
6378
6379 	pf->vsi[vsi_idx] = vsi;
6380 	ret = vsi_idx;
6381 	goto unlock_pf;
6382
6383 err_rings:
6384 	pf->next_vsi = i - 1;
6385 	kfree(vsi);
6386 unlock_pf:
6387 	mutex_unlock(&pf->switch_mutex);
6388 	return ret;
6389 }
6390
6391 /**
6392  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6393  * @vsi: pointer to the VSI
6394  * @free_qvectors: a bool to specify if q_vectors need to be freed.
6395  *
6396  * This only releases the pointer arrays allocated by
6397  * i40e_vsi_alloc_arrays(); the rings themselves are freed elsewhere.
6398  **/
6399 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6400 {
6401 	/* free the ring and vector containers */
6402 	if (free_qvectors) {
6403 		kfree(vsi->q_vectors);
6404 		vsi->q_vectors = NULL;
6405 	}
6406 	kfree(vsi->tx_rings);
6407 	vsi->tx_rings = NULL;
6408 	vsi->rx_rings = NULL;
6409 }
6410
6411 /**
6412  * i40e_vsi_clear - Deallocate the VSI provided
6413  * @vsi: the VSI being un-configured
6414  **/
6415 static int i40e_vsi_clear(struct i40e_vsi *vsi)
6416 {
6417 	struct i40e_pf *pf;
6418
6419 	if (!vsi)
6420 		return 0;
6421
6422 	if (!vsi->back)
6423 		goto free_vsi;
6424 	pf = vsi->back;
6425
6426 	mutex_lock(&pf->switch_mutex);
6427 	if (!pf->vsi[vsi->idx]) {
6428 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
6429 			vsi->idx, vsi->idx, vsi, vsi->type);
6430 		goto unlock_vsi;
6431 	}
6432
6433 	if (pf->vsi[vsi->idx] != vsi) {
6434 		dev_err(&pf->pdev->dev,
6435 			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
6436 			pf->vsi[vsi->idx]->idx,
6437 			pf->vsi[vsi->idx],
6438 			pf->vsi[vsi->idx]->type,
6439 			vsi->idx, vsi, vsi->type);
6440 		goto unlock_vsi;
6441 	}
6442
6443 	/* updates the pf for this cleared vsi */
6444 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
6445 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
6446
6447 	i40e_vsi_free_arrays(vsi, true);
6448
6449 	pf->vsi[vsi->idx] = NULL;
6450 	if (vsi->idx < pf->next_vsi)
6451 		pf->next_vsi = vsi->idx;
6452
6453 unlock_vsi:
6454 	mutex_unlock(&pf->switch_mutex);
6455 free_vsi:
6456 	kfree(vsi);
6457
6458 	return 0;
6459 }
6460
6461 /**
6462  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
6463  * @vsi: the VSI being cleaned
6464  **/
6465 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
6466 {
6467 	int i;
6468
6469 	if (vsi->tx_rings && vsi->tx_rings[0]) {
6470 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6471 			kfree_rcu(vsi->tx_rings[i], rcu);
6472 			vsi->tx_rings[i] = NULL;
6473 			vsi->rx_rings[i] = NULL;
6474 		}
6475 	}
6476 }
6477
6478 /**
6479  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
6480  * @vsi: the VSI being configured
6481  **/
6482 static int i40e_alloc_rings(struct i40e_vsi *vsi)
6483 {
6484 	struct i40e_ring *tx_ring, *rx_ring;
6485 	struct i40e_pf *pf = vsi->back;
6486 	int i;
6487
6488 	/* Set basic values in the rings to be used later during open() */
6489 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6490 		/* allocate space for both Tx and Rx in one shot */
6491 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6492 		if (!tx_ring)
6493 			goto err_out;
6494
6495 		tx_ring->queue_index = i;
6496 		tx_ring->reg_idx = vsi->base_queue + i;
6497 		tx_ring->ring_active = false;
6498 		tx_ring->vsi = vsi;
6499 		tx_ring->netdev = vsi->netdev;
6500 		tx_ring->dev = &pf->pdev->dev;
6501 		tx_ring->count = vsi->num_desc;
6502 		tx_ring->size = 0;
6503 		tx_ring->dcb_tc = 0;
6504 		vsi->tx_rings[i] = tx_ring;
6505
6506 		rx_ring = &tx_ring[1];
6507 		rx_ring->queue_index = i;
6508 		rx_ring->reg_idx = vsi->base_queue + i;
6509 		rx_ring->ring_active = false;
6510 		rx_ring->vsi = vsi;
6511 		rx_ring->netdev = vsi->netdev;
6512 		rx_ring->dev = &pf->pdev->dev;
6513 		rx_ring->count = vsi->num_desc;
6514 		rx_ring->size = 0;
6515 		rx_ring->dcb_tc = 0;
6516 		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
6517 			set_ring_16byte_desc_enabled(rx_ring);
6518 		else
6519 			clear_ring_16byte_desc_enabled(rx_ring);
6520 		vsi->rx_rings[i] = rx_ring;
6521 	}
6522
6523 	return 0;
6524 6525 err_out: 6526 i40e_vsi_clear_rings(vsi); 6527 return -ENOMEM; 6528 } 6529 6530 /** 6531 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 6532 * @pf: board private structure 6533 * @vectors: the number of MSI-X vectors to request 6534 * 6535 * Returns the number of vectors reserved, or error 6536 **/ 6537 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 6538 { 6539 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 6540 I40E_MIN_MSIX, vectors); 6541 if (vectors < 0) { 6542 dev_info(&pf->pdev->dev, 6543 "MSI-X vector reservation failed: %d\n", vectors); 6544 vectors = 0; 6545 } 6546 6547 return vectors; 6548 } 6549 6550 /** 6551 * i40e_init_msix - Setup the MSIX capability 6552 * @pf: board private structure 6553 * 6554 * Work with the OS to set up the MSIX vectors needed. 6555 * 6556 * Returns 0 on success, negative on failure 6557 **/ 6558 static int i40e_init_msix(struct i40e_pf *pf) 6559 { 6560 i40e_status err = 0; 6561 struct i40e_hw *hw = &pf->hw; 6562 int v_budget, i; 6563 int vec; 6564 6565 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 6566 return -ENODEV; 6567 6568 /* The number of vectors we'll request will be comprised of: 6569 * - Add 1 for "other" cause for Admin Queue events, etc. 6570 * - The number of LAN queue pairs 6571 * - Queues being used for RSS. 6572 * We don't need as many as max_rss_size vectors. 6573 * use rss_size instead in the calculation since that 6574 * is governed by number of cpus in the system. 6575 * - assumes symmetric Tx/Rx pairing 6576 * - The number of VMDq pairs 6577 #ifdef I40E_FCOE 6578 * - The number of FCOE qps. 6579 #endif 6580 * Once we count this up, try the request. 6581 * 6582 * If we can't get what we want, we'll simplify to nearly nothing 6583 * and try again. If that still fails, we punt. 6584 */ 6585 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); 6586 pf->num_vmdq_msix = pf->num_vmdq_qps; 6587 v_budget = 1 + pf->num_lan_msix; 6588 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); 6589 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 6590 v_budget++; 6591 6592 #ifdef I40E_FCOE 6593 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 6594 pf->num_fcoe_msix = pf->num_fcoe_qps; 6595 v_budget += pf->num_fcoe_msix; 6596 } 6597 6598 #endif 6599 /* Scale down if necessary, and the rings will share vectors */ 6600 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); 6601 6602 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 6603 GFP_KERNEL); 6604 if (!pf->msix_entries) 6605 return -ENOMEM; 6606 6607 for (i = 0; i < v_budget; i++) 6608 pf->msix_entries[i].entry = i; 6609 vec = i40e_reserve_msix_vectors(pf, v_budget); 6610 6611 if (vec != v_budget) { 6612 /* If we have limited resources, we will start with no vectors 6613 * for the special features and then allocate vectors to some 6614 * of these features based on the policy and at the end disable 6615 * the features that did not get any vectors. 
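		 * For example (hypothetical numbers): if v_budget was
		 * 1 misc + 8 LAN + 8 VMDq = 17 but only 9 vectors are
		 * granted, we keep 1 for misc, force VMDq VSIs to one
		 * vector each, give LAN min(8 / 2, num_lan_qps) = 4,
		 * and leave the remaining 4 to VMDq VSIs.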
6616 */ 6617 #ifdef I40E_FCOE 6618 pf->num_fcoe_qps = 0; 6619 pf->num_fcoe_msix = 0; 6620 #endif 6621 pf->num_vmdq_msix = 0; 6622 } 6623 6624 if (vec < I40E_MIN_MSIX) { 6625 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 6626 kfree(pf->msix_entries); 6627 pf->msix_entries = NULL; 6628 return -ENODEV; 6629 6630 } else if (vec == I40E_MIN_MSIX) { 6631 /* Adjust for minimal MSIX use */ 6632 pf->num_vmdq_vsis = 0; 6633 pf->num_vmdq_qps = 0; 6634 pf->num_lan_qps = 1; 6635 pf->num_lan_msix = 1; 6636 6637 } else if (vec != v_budget) { 6638 /* reserve the misc vector */ 6639 vec--; 6640 6641 /* Scale vector usage down */ 6642 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 6643 pf->num_vmdq_vsis = 1; 6644 6645 /* partition out the remaining vectors */ 6646 switch (vec) { 6647 case 2: 6648 pf->num_lan_msix = 1; 6649 break; 6650 case 3: 6651 #ifdef I40E_FCOE 6652 /* give one vector to FCoE */ 6653 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 6654 pf->num_lan_msix = 1; 6655 pf->num_fcoe_msix = 1; 6656 } 6657 #else 6658 pf->num_lan_msix = 2; 6659 #endif 6660 break; 6661 default: 6662 #ifdef I40E_FCOE 6663 /* give one vector to FCoE */ 6664 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 6665 pf->num_fcoe_msix = 1; 6666 vec--; 6667 } 6668 #endif 6669 pf->num_lan_msix = min_t(int, (vec / 2), 6670 pf->num_lan_qps); 6671 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), 6672 I40E_DEFAULT_NUM_VMDQ_VSI); 6673 break; 6674 } 6675 } 6676 6677 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 6678 (pf->num_vmdq_msix == 0)) { 6679 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 6680 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 6681 } 6682 #ifdef I40E_FCOE 6683 6684 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 6685 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 6686 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 6687 } 6688 #endif 6689 return err; 6690 } 6691 6692 /** 6693 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 6694 * @vsi: the VSI being configured 6695 * @v_idx: index of the vector in the vsi struct 6696 * 6697 * We allocate one q_vector. If allocation fails we return -ENOMEM. 6698 **/ 6699 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 6700 { 6701 struct i40e_q_vector *q_vector; 6702 6703 /* allocate q_vector */ 6704 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 6705 if (!q_vector) 6706 return -ENOMEM; 6707 6708 q_vector->vsi = vsi; 6709 q_vector->v_idx = v_idx; 6710 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 6711 if (vsi->netdev) 6712 netif_napi_add(vsi->netdev, &q_vector->napi, 6713 i40e_napi_poll, NAPI_POLL_WEIGHT); 6714 6715 q_vector->rx.latency_range = I40E_LOW_LATENCY; 6716 q_vector->tx.latency_range = I40E_LOW_LATENCY; 6717 6718 /* tie q_vector and vsi together */ 6719 vsi->q_vectors[v_idx] = q_vector; 6720 6721 return 0; 6722 } 6723 6724 /** 6725 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 6726 * @vsi: the VSI being configured 6727 * 6728 * We allocate one q_vector per queue interrupt. If allocation fails we 6729 * return -ENOMEM. 
6730 **/ 6731 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 6732 { 6733 struct i40e_pf *pf = vsi->back; 6734 int v_idx, num_q_vectors; 6735 int err; 6736 6737 /* if not MSIX, give the one vector only to the LAN VSI */ 6738 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6739 num_q_vectors = vsi->num_q_vectors; 6740 else if (vsi == pf->vsi[pf->lan_vsi]) 6741 num_q_vectors = 1; 6742 else 6743 return -EINVAL; 6744 6745 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 6746 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 6747 if (err) 6748 goto err_out; 6749 } 6750 6751 return 0; 6752 6753 err_out: 6754 while (v_idx--) 6755 i40e_free_q_vector(vsi, v_idx); 6756 6757 return err; 6758 } 6759 6760 /** 6761 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 6762 * @pf: board private structure to initialize 6763 **/ 6764 static void i40e_init_interrupt_scheme(struct i40e_pf *pf) 6765 { 6766 int err = 0; 6767 6768 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 6769 err = i40e_init_msix(pf); 6770 if (err) { 6771 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 6772 #ifdef I40E_FCOE 6773 I40E_FLAG_FCOE_ENABLED | 6774 #endif 6775 I40E_FLAG_RSS_ENABLED | 6776 I40E_FLAG_DCB_CAPABLE | 6777 I40E_FLAG_SRIOV_ENABLED | 6778 I40E_FLAG_FD_SB_ENABLED | 6779 I40E_FLAG_FD_ATR_ENABLED | 6780 I40E_FLAG_VMDQ_ENABLED); 6781 6782 /* rework the queue expectations without MSIX */ 6783 i40e_determine_queue_usage(pf); 6784 } 6785 } 6786 6787 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 6788 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 6789 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 6790 err = pci_enable_msi(pf->pdev); 6791 if (err) { 6792 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); 6793 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 6794 } 6795 } 6796 6797 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 6798 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 6799 6800 /* track first vector for misc interrupts */ 6801 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 6802 } 6803 6804 /** 6805 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events 6806 * @pf: board private structure 6807 * 6808 * This sets up the handler for MSIX 0, which is used to manage the 6809 * non-queue interrupts, e.g. AdminQ and errors. This is not used 6810 * when in MSI or Legacy interrupt mode. 
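 * (In MSI or legacy mode the single interrupt is shared by the queues
 * and the admin queue causes, and is handled by the same i40e_intr
 * routine.)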
6811 **/ 6812 static int i40e_setup_misc_vector(struct i40e_pf *pf) 6813 { 6814 struct i40e_hw *hw = &pf->hw; 6815 int err = 0; 6816 6817 /* Only request the irq if this is the first time through, and 6818 * not when we're rebuilding after a Reset 6819 */ 6820 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 6821 err = request_irq(pf->msix_entries[0].vector, 6822 i40e_intr, 0, pf->misc_int_name, pf); 6823 if (err) { 6824 dev_info(&pf->pdev->dev, 6825 "request_irq for %s failed: %d\n", 6826 pf->misc_int_name, err); 6827 return -EFAULT; 6828 } 6829 } 6830 6831 i40e_enable_misc_int_causes(hw); 6832 6833 /* associate no queues to the misc vector */ 6834 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 6835 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); 6836 6837 i40e_flush(hw); 6838 6839 i40e_irq_dynamic_enable_icr0(pf); 6840 6841 return err; 6842 } 6843 6844 /** 6845 * i40e_config_rss - Prepare for RSS if used 6846 * @pf: board private structure 6847 **/ 6848 static int i40e_config_rss(struct i40e_pf *pf) 6849 { 6850 /* Set of random keys generated using kernel random number generator */ 6851 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, 6852 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 6853 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 6854 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; 6855 struct i40e_hw *hw = &pf->hw; 6856 u32 lut = 0; 6857 int i, j; 6858 u64 hena; 6859 u32 reg_val; 6860 6861 /* Fill out hash function seed */ 6862 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 6863 wr32(hw, I40E_PFQF_HKEY(i), seed[i]); 6864 6865 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ 6866 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | 6867 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); 6868 hena |= I40E_DEFAULT_RSS_HENA; 6869 wr32(hw, I40E_PFQF_HENA(0), (u32)hena); 6870 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 6871 6872 /* Check capability and Set table size and register per hw expectation*/ 6873 reg_val = rd32(hw, I40E_PFQF_CTL_0); 6874 if (hw->func_caps.rss_table_size == 512) { 6875 reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; 6876 pf->rss_table_size = 512; 6877 } else { 6878 pf->rss_table_size = 128; 6879 reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; 6880 } 6881 wr32(hw, I40E_PFQF_CTL_0, reg_val); 6882 6883 /* Populate the LUT with max no. of queues in round robin fashion */ 6884 for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) { 6885 6886 /* The assumption is that lan qp count will be the highest 6887 * qp count for any PF VSI that needs RSS. 6888 * If multiple VSIs need RSS support, all the qp counts 6889 * for those VSIs should be a power of 2 for RSS to work. 6890 * If LAN VSI is the only consumer for RSS then this requirement 6891 * is not necessary. 6892 */ 6893 if (j == pf->rss_size) 6894 j = 0; 6895 /* lut = 4-byte sliding window of 4 lut entries */ 6896 lut = (lut << 8) | (j & 6897 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); 6898 /* On i = 3, we have 4 entries in lut; write to the register */ 6899 if ((i & 3) == 3) 6900 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); 6901 } 6902 i40e_flush(hw); 6903 6904 return 0; 6905 } 6906 6907 /** 6908 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 6909 * @pf: board private structure 6910 * @queue_count: the requested queue count for rss. 6911 * 6912 * returns 0 if rss is not enabled, if enabled returns the final rss queue 6913 * count which may be different from the requested queue count. 
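 * For example (hypothetical numbers), a request for 10 queues on a
 * device whose rss_size_max is 8 is trimmed to 8 before the rebuild,
 * and 8 is returned.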
6914 **/ 6915 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 6916 { 6917 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 6918 return 0; 6919 6920 queue_count = min_t(int, queue_count, pf->rss_size_max); 6921 6922 if (queue_count != pf->rss_size) { 6923 i40e_prep_for_reset(pf); 6924 6925 pf->rss_size = queue_count; 6926 6927 i40e_reset_and_rebuild(pf, true); 6928 i40e_config_rss(pf); 6929 } 6930 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); 6931 return pf->rss_size; 6932 } 6933 6934 /** 6935 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 6936 * @pf: board private structure to initialize 6937 * 6938 * i40e_sw_init initializes the Adapter private data structure. 6939 * Fields are initialized based on PCI device information and 6940 * OS network device settings (MTU size). 6941 **/ 6942 static int i40e_sw_init(struct i40e_pf *pf) 6943 { 6944 int err = 0; 6945 int size; 6946 6947 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 6948 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 6949 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; 6950 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 6951 if (I40E_DEBUG_USER & debug) 6952 pf->hw.debug_mask = debug; 6953 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 6954 I40E_DEFAULT_MSG_ENABLE); 6955 } 6956 6957 /* Set default capability flags */ 6958 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 6959 I40E_FLAG_MSI_ENABLED | 6960 I40E_FLAG_MSIX_ENABLED | 6961 I40E_FLAG_RX_1BUF_ENABLED; 6962 6963 /* Set default ITR */ 6964 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; 6965 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; 6966 6967 /* Depending on PF configurations, it is possible that the RSS 6968 * maximum might end up larger than the available queues 6969 */ 6970 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; 6971 pf->rss_size = 1; 6972 pf->rss_size_max = min_t(int, pf->rss_size_max, 6973 pf->hw.func_caps.num_tx_qp); 6974 if (pf->hw.func_caps.rss) { 6975 pf->flags |= I40E_FLAG_RSS_ENABLED; 6976 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); 6977 } 6978 6979 /* MFP mode enabled */ 6980 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { 6981 pf->flags |= I40E_FLAG_MFP_ENABLED; 6982 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 6983 } 6984 6985 /* FW/NVM is not yet fixed in this regard */ 6986 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 6987 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 6988 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 6989 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 6990 /* Setup a counter for fd_atr per pf */ 6991 pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); 6992 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { 6993 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 6994 /* Setup a counter for fd_sb per pf */ 6995 pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); 6996 } else { 6997 dev_info(&pf->pdev->dev, 6998 "Flow Director Sideband mode Disabled in MFP mode\n"); 6999 } 7000 pf->fdir_pf_filter_count = 7001 pf->hw.func_caps.fd_filters_guaranteed; 7002 pf->hw.fdir_shared_filter_count = 7003 pf->hw.func_caps.fd_filters_best_effort; 7004 } 7005 7006 if (pf->hw.func_caps.vmdq) { 7007 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 7008 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 7009 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; 7010 } 7011 7012 #ifdef I40E_FCOE 7013 err = i40e_init_pf_fcoe(pf); 7014 if (err) 7015 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err); 7016 7017 
#endif /* I40E_FCOE */ 7018 #ifdef CONFIG_PCI_IOV 7019 if (pf->hw.func_caps.num_vfs) { 7020 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 7021 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 7022 pf->num_req_vfs = min_t(int, 7023 pf->hw.func_caps.num_vfs, 7024 I40E_MAX_VF_COUNT); 7025 } 7026 #endif /* CONFIG_PCI_IOV */ 7027 pf->eeprom_version = 0xDEAD; 7028 pf->lan_veb = I40E_NO_VEB; 7029 pf->lan_vsi = I40E_NO_VSI; 7030 7031 /* set up queue assignment tracking */ 7032 size = sizeof(struct i40e_lump_tracking) 7033 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 7034 pf->qp_pile = kzalloc(size, GFP_KERNEL); 7035 if (!pf->qp_pile) { 7036 err = -ENOMEM; 7037 goto sw_init_done; 7038 } 7039 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 7040 pf->qp_pile->search_hint = 0; 7041 7042 /* set up vector assignment tracking */ 7043 size = sizeof(struct i40e_lump_tracking) 7044 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); 7045 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7046 if (!pf->irq_pile) { 7047 kfree(pf->qp_pile); 7048 err = -ENOMEM; 7049 goto sw_init_done; 7050 } 7051 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; 7052 pf->irq_pile->search_hint = 0; 7053 7054 pf->tx_timeout_recovery_level = 1; 7055 7056 mutex_init(&pf->switch_mutex); 7057 7058 sw_init_done: 7059 return err; 7060 } 7061 7062 /** 7063 * i40e_set_ntuple - set the ntuple feature flag and take action 7064 * @pf: board private structure to initialize 7065 * @features: the feature set that the stack is suggesting 7066 * 7067 * returns a bool to indicate if reset needs to happen 7068 **/ 7069 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 7070 { 7071 bool need_reset = false; 7072 7073 /* Check if Flow Director n-tuple support was enabled or disabled. If 7074 * the state changed, we need to reset. 7075 */ 7076 if (features & NETIF_F_NTUPLE) { 7077 /* Enable filters and mark for reset */ 7078 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 7079 need_reset = true; 7080 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 7081 } else { 7082 /* turn off filters, mark for reset and clear SW filter list */ 7083 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7084 need_reset = true; 7085 i40e_fdir_filter_exit(pf); 7086 } 7087 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7088 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 7089 /* if ATR was auto disabled it can be re-enabled. 
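		 * auto_disable_flags tracks features the driver shut off on
		 * its own (e.g. when the flow director table filled up) as
		 * opposed to features disabled by the user.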
*/ 7090 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 7091 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 7092 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 7093 } 7094 return need_reset; 7095 } 7096 7097 /** 7098 * i40e_set_features - set the netdev feature flags 7099 * @netdev: ptr to the netdev being adjusted 7100 * @features: the feature set that the stack is suggesting 7101 **/ 7102 static int i40e_set_features(struct net_device *netdev, 7103 netdev_features_t features) 7104 { 7105 struct i40e_netdev_priv *np = netdev_priv(netdev); 7106 struct i40e_vsi *vsi = np->vsi; 7107 struct i40e_pf *pf = vsi->back; 7108 bool need_reset; 7109 7110 if (features & NETIF_F_HW_VLAN_CTAG_RX) 7111 i40e_vlan_stripping_enable(vsi); 7112 else 7113 i40e_vlan_stripping_disable(vsi); 7114 7115 need_reset = i40e_set_ntuple(pf, features); 7116 7117 if (need_reset) 7118 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); 7119 7120 return 0; 7121 } 7122 7123 #ifdef CONFIG_I40E_VXLAN 7124 /** 7125 * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port 7126 * @pf: board private structure 7127 * @port: The UDP port to look up 7128 * 7129 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found 7130 **/ 7131 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) 7132 { 7133 u8 i; 7134 7135 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 7136 if (pf->vxlan_ports[i] == port) 7137 return i; 7138 } 7139 7140 return i; 7141 } 7142 7143 /** 7144 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 7145 * @netdev: This physical port's netdev 7146 * @sa_family: Socket Family that VXLAN is notifying us about 7147 * @port: New UDP port number that VXLAN started listening to 7148 **/ 7149 static void i40e_add_vxlan_port(struct net_device *netdev, 7150 sa_family_t sa_family, __be16 port) 7151 { 7152 struct i40e_netdev_priv *np = netdev_priv(netdev); 7153 struct i40e_vsi *vsi = np->vsi; 7154 struct i40e_pf *pf = vsi->back; 7155 u8 next_idx; 7156 u8 idx; 7157 7158 if (sa_family == AF_INET6) 7159 return; 7160 7161 idx = i40e_get_vxlan_port_idx(pf, port); 7162 7163 /* Check if port already exists */ 7164 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 7165 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); 7166 return; 7167 } 7168 7169 /* Now check if there is space to add the new port */ 7170 next_idx = i40e_get_vxlan_port_idx(pf, 0); 7171 7172 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 7173 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", 7174 ntohs(port)); 7175 return; 7176 } 7177 7178 /* New port: add it and mark its index in the bitmap */ 7179 pf->vxlan_ports[next_idx] = port; 7180 pf->pending_vxlan_bitmap |= (1 << next_idx); 7181 7182 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 7183 } 7184 7185 /** 7186 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away 7187 * @netdev: This physical port's netdev 7188 * @sa_family: Socket Family that VXLAN is notifying us about 7189 * @port: UDP port number that VXLAN stopped listening to 7190 **/ 7191 static void i40e_del_vxlan_port(struct net_device *netdev, 7192 sa_family_t sa_family, __be16 port) 7193 { 7194 struct i40e_netdev_priv *np = netdev_priv(netdev); 7195 struct i40e_vsi *vsi = np->vsi; 7196 struct i40e_pf *pf = vsi->back; 7197 u8 idx; 7198 7199 if (sa_family == AF_INET6) 7200 return; 7201 7202 idx = i40e_get_vxlan_port_idx(pf, port); 7203 7204 /* Check if port already exists */ 7205 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 
7206 /* if port exists, set it to 0 (mark for deletion) 7207 * and make it pending 7208 */ 7209 pf->vxlan_ports[idx] = 0; 7210 7211 pf->pending_vxlan_bitmap |= (1 << idx); 7212 7213 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 7214 } else { 7215 netdev_warn(netdev, "Port %d was not found, not deleting\n", 7216 ntohs(port)); 7217 } 7218 } 7219 7220 #endif 7221 static int i40e_get_phys_port_id(struct net_device *netdev, 7222 struct netdev_phys_port_id *ppid) 7223 { 7224 struct i40e_netdev_priv *np = netdev_priv(netdev); 7225 struct i40e_pf *pf = np->vsi->back; 7226 struct i40e_hw *hw = &pf->hw; 7227 7228 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) 7229 return -EOPNOTSUPP; 7230 7231 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); 7232 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); 7233 7234 return 0; 7235 } 7236 7237 #ifdef HAVE_FDB_OPS 7238 #ifdef USE_CONST_DEV_UC_CHAR 7239 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 7240 struct net_device *dev, 7241 const unsigned char *addr, 7242 u16 flags) 7243 #else 7244 static int i40e_ndo_fdb_add(struct ndmsg *ndm, 7245 struct net_device *dev, 7246 unsigned char *addr, 7247 u16 flags) 7248 #endif 7249 { 7250 struct i40e_netdev_priv *np = netdev_priv(dev); 7251 struct i40e_pf *pf = np->vsi->back; 7252 int err = 0; 7253 7254 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) 7255 return -EOPNOTSUPP; 7256 7257 /* Hardware does not support aging addresses so if a 7258 * ndm_state is given only allow permanent addresses 7259 */ 7260 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 7261 netdev_info(dev, "FDB only supports static addresses\n"); 7262 return -EINVAL; 7263 } 7264 7265 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 7266 err = dev_uc_add_excl(dev, addr); 7267 else if (is_multicast_ether_addr(addr)) 7268 err = dev_mc_add_excl(dev, addr); 7269 else 7270 err = -EINVAL; 7271 7272 /* Only return duplicate errors if NLM_F_EXCL is set */ 7273 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 7274 err = 0; 7275 7276 return err; 7277 } 7278 7279 #ifndef USE_DEFAULT_FDB_DEL_DUMP 7280 #ifdef USE_CONST_DEV_UC_CHAR 7281 static int i40e_ndo_fdb_del(struct ndmsg *ndm, 7282 struct net_device *dev, 7283 const unsigned char *addr) 7284 #else 7285 static int i40e_ndo_fdb_del(struct ndmsg *ndm, 7286 struct net_device *dev, 7287 unsigned char *addr) 7288 #endif 7289 { 7290 struct i40e_netdev_priv *np = netdev_priv(dev); 7291 struct i40e_pf *pf = np->vsi->back; 7292 int err = -EOPNOTSUPP; 7293 7294 if (ndm->ndm_state & NUD_PERMANENT) { 7295 netdev_info(dev, "FDB only supports static addresses\n"); 7296 return -EINVAL; 7297 } 7298 7299 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 7300 if (is_unicast_ether_addr(addr)) 7301 err = dev_uc_del(dev, addr); 7302 else if (is_multicast_ether_addr(addr)) 7303 err = dev_mc_del(dev, addr); 7304 else 7305 err = -EINVAL; 7306 } 7307 7308 return err; 7309 } 7310 7311 static int i40e_ndo_fdb_dump(struct sk_buff *skb, 7312 struct netlink_callback *cb, 7313 struct net_device *dev, 7314 struct net_device *filter_dev, 7315 int idx) 7316 { 7317 struct i40e_netdev_priv *np = netdev_priv(dev); 7318 struct i40e_pf *pf = np->vsi->back; 7319 7320 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) 7321 idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx); 7322 7323 return idx; 7324 } 7325 7326 #endif /* USE_DEFAULT_FDB_DEL_DUMP */ 7327 #endif /* HAVE_FDB_OPS */ 7328 static const struct net_device_ops i40e_netdev_ops = { 7329 .ndo_open = i40e_open, 7330 .ndo_stop = i40e_close, 7331 
.ndo_start_xmit = i40e_lan_xmit_frame, 7332 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 7333 .ndo_set_rx_mode = i40e_set_rx_mode, 7334 .ndo_validate_addr = eth_validate_addr, 7335 .ndo_set_mac_address = i40e_set_mac, 7336 .ndo_change_mtu = i40e_change_mtu, 7337 .ndo_do_ioctl = i40e_ioctl, 7338 .ndo_tx_timeout = i40e_tx_timeout, 7339 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 7340 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 7341 #ifdef CONFIG_NET_POLL_CONTROLLER 7342 .ndo_poll_controller = i40e_netpoll, 7343 #endif 7344 .ndo_setup_tc = i40e_setup_tc, 7345 #ifdef I40E_FCOE 7346 .ndo_fcoe_enable = i40e_fcoe_enable, 7347 .ndo_fcoe_disable = i40e_fcoe_disable, 7348 #endif 7349 .ndo_set_features = i40e_set_features, 7350 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 7351 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 7352 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 7353 .ndo_get_vf_config = i40e_ndo_get_vf_config, 7354 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 7355 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck, 7356 #ifdef CONFIG_I40E_VXLAN 7357 .ndo_add_vxlan_port = i40e_add_vxlan_port, 7358 .ndo_del_vxlan_port = i40e_del_vxlan_port, 7359 #endif 7360 .ndo_get_phys_port_id = i40e_get_phys_port_id, 7361 #ifdef HAVE_FDB_OPS 7362 .ndo_fdb_add = i40e_ndo_fdb_add, 7363 #ifndef USE_DEFAULT_FDB_DEL_DUMP 7364 .ndo_fdb_del = i40e_ndo_fdb_del, 7365 .ndo_fdb_dump = i40e_ndo_fdb_dump, 7366 #endif 7367 #endif 7368 }; 7369 7370 /** 7371 * i40e_config_netdev - Setup the netdev flags 7372 * @vsi: the VSI being configured 7373 * 7374 * Returns 0 on success, negative value on failure 7375 **/ 7376 static int i40e_config_netdev(struct i40e_vsi *vsi) 7377 { 7378 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 7379 struct i40e_pf *pf = vsi->back; 7380 struct i40e_hw *hw = &pf->hw; 7381 struct i40e_netdev_priv *np; 7382 struct net_device *netdev; 7383 u8 mac_addr[ETH_ALEN]; 7384 int etherdev_size; 7385 7386 etherdev_size = sizeof(struct i40e_netdev_priv); 7387 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 7388 if (!netdev) 7389 return -ENOMEM; 7390 7391 vsi->netdev = netdev; 7392 np = netdev_priv(netdev); 7393 np->vsi = vsi; 7394 7395 netdev->hw_enc_features |= NETIF_F_IP_CSUM | 7396 NETIF_F_GSO_UDP_TUNNEL | 7397 NETIF_F_TSO; 7398 7399 netdev->features = NETIF_F_SG | 7400 NETIF_F_IP_CSUM | 7401 NETIF_F_SCTP_CSUM | 7402 NETIF_F_HIGHDMA | 7403 NETIF_F_GSO_UDP_TUNNEL | 7404 NETIF_F_HW_VLAN_CTAG_TX | 7405 NETIF_F_HW_VLAN_CTAG_RX | 7406 NETIF_F_HW_VLAN_CTAG_FILTER | 7407 NETIF_F_IPV6_CSUM | 7408 NETIF_F_TSO | 7409 NETIF_F_TSO_ECN | 7410 NETIF_F_TSO6 | 7411 NETIF_F_RXCSUM | 7412 NETIF_F_RXHASH | 7413 0; 7414 7415 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 7416 netdev->features |= NETIF_F_NTUPLE; 7417 7418 /* copy netdev features into list of user selectable features */ 7419 netdev->hw_features |= netdev->features; 7420 7421 if (vsi->type == I40E_VSI_MAIN) { 7422 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 7423 ether_addr_copy(mac_addr, hw->mac.perm_addr); 7424 /* The following two steps are necessary to prevent reception 7425 * of tagged packets - by default the NVM loads a MAC-VLAN 7426 * filter that will accept any tagged packet. This is to 7427 * prevent that during normal operations until a specific 7428 * VLAN tag filter has been set. 
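		 * The default filter is removed and replaced just below
		 * with a driver-owned filter for the same MAC.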
7429 */ 7430 i40e_rm_default_mac_filter(vsi, mac_addr); 7431 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); 7432 } else { 7433 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 7434 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 7435 pf->vsi[pf->lan_vsi]->netdev->name); 7436 random_ether_addr(mac_addr); 7437 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); 7438 } 7439 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); 7440 7441 ether_addr_copy(netdev->dev_addr, mac_addr); 7442 ether_addr_copy(netdev->perm_addr, mac_addr); 7443 /* vlan gets same features (except vlan offload) 7444 * after any tweaks for specific VSI types 7445 */ 7446 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | 7447 NETIF_F_HW_VLAN_CTAG_RX | 7448 NETIF_F_HW_VLAN_CTAG_FILTER); 7449 netdev->priv_flags |= IFF_UNICAST_FLT; 7450 netdev->priv_flags |= IFF_SUPP_NOFCS; 7451 /* Setup netdev TC information */ 7452 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); 7453 7454 netdev->netdev_ops = &i40e_netdev_ops; 7455 netdev->watchdog_timeo = 5 * HZ; 7456 i40e_set_ethtool_ops(netdev); 7457 #ifdef I40E_FCOE 7458 i40e_fcoe_config_netdev(netdev, vsi); 7459 #endif 7460 7461 return 0; 7462 } 7463 7464 /** 7465 * i40e_vsi_delete - Delete a VSI from the switch 7466 * @vsi: the VSI being removed 7467 * 7468 * Returns 0 on success, negative value on failure 7469 **/ 7470 static void i40e_vsi_delete(struct i40e_vsi *vsi) 7471 { 7472 /* remove default VSI is not allowed */ 7473 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) 7474 return; 7475 7476 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); 7477 } 7478 7479 /** 7480 * i40e_add_vsi - Add a VSI to the switch 7481 * @vsi: the VSI being configured 7482 * 7483 * This initializes a VSI context depending on the VSI type to be added and 7484 * passes it down to the add_vsi aq command. 7485 **/ 7486 static int i40e_add_vsi(struct i40e_vsi *vsi) 7487 { 7488 int ret = -ENODEV; 7489 struct i40e_mac_filter *f, *ftmp; 7490 struct i40e_pf *pf = vsi->back; 7491 struct i40e_hw *hw = &pf->hw; 7492 struct i40e_vsi_context ctxt; 7493 u8 enabled_tc = 0x1; /* TC0 enabled */ 7494 int f_count = 0; 7495 7496 memset(&ctxt, 0, sizeof(ctxt)); 7497 switch (vsi->type) { 7498 case I40E_VSI_MAIN: 7499 /* The PF's main VSI is already setup as part of the 7500 * device initialization, so we'll not bother with 7501 * the add_vsi call, but we will retrieve the current 7502 * VSI context. 
7503 */ 7504 ctxt.seid = pf->main_vsi_seid; 7505 ctxt.pf_num = pf->hw.pf_id; 7506 ctxt.vf_num = 0; 7507 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 7508 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 7509 if (ret) { 7510 dev_info(&pf->pdev->dev, 7511 "couldn't get pf vsi config, err %d, aq_err %d\n", 7512 ret, pf->hw.aq.asq_last_status); 7513 return -ENOENT; 7514 } 7515 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 7516 vsi->info.valid_sections = 0; 7517 7518 vsi->seid = ctxt.seid; 7519 vsi->id = ctxt.vsi_number; 7520 7521 enabled_tc = i40e_pf_get_tc_map(pf); 7522 7523 /* MFP mode setup queue map and update VSI */ 7524 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 7525 memset(&ctxt, 0, sizeof(ctxt)); 7526 ctxt.seid = pf->main_vsi_seid; 7527 ctxt.pf_num = pf->hw.pf_id; 7528 ctxt.vf_num = 0; 7529 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 7530 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 7531 if (ret) { 7532 dev_info(&pf->pdev->dev, 7533 "update vsi failed, aq_err=%d\n", 7534 pf->hw.aq.asq_last_status); 7535 ret = -ENOENT; 7536 goto err; 7537 } 7538 /* update the local VSI info queue map */ 7539 i40e_vsi_update_queue_map(vsi, &ctxt); 7540 vsi->info.valid_sections = 0; 7541 } else { 7542 /* Default/Main VSI is only enabled for TC0 7543 * reconfigure it to enable all TCs that are 7544 * available on the port in SFP mode. 7545 */ 7546 ret = i40e_vsi_config_tc(vsi, enabled_tc); 7547 if (ret) { 7548 dev_info(&pf->pdev->dev, 7549 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", 7550 enabled_tc, ret, 7551 pf->hw.aq.asq_last_status); 7552 ret = -ENOENT; 7553 } 7554 } 7555 break; 7556 7557 case I40E_VSI_FDIR: 7558 ctxt.pf_num = hw->pf_id; 7559 ctxt.vf_num = 0; 7560 ctxt.uplink_seid = vsi->uplink_seid; 7561 ctxt.connection_type = 0x1; /* regular data port */ 7562 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 7563 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7564 break; 7565 7566 case I40E_VSI_VMDQ2: 7567 ctxt.pf_num = hw->pf_id; 7568 ctxt.vf_num = 0; 7569 ctxt.uplink_seid = vsi->uplink_seid; 7570 ctxt.connection_type = 0x1; /* regular data port */ 7571 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 7572 7573 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 7574 7575 /* This VSI is connected to VEB so the switch_id 7576 * should be set to zero by default. 7577 */ 7578 ctxt.info.switch_id = 0; 7579 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 7580 7581 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7582 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7583 break; 7584 7585 case I40E_VSI_SRIOV: 7586 ctxt.pf_num = hw->pf_id; 7587 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 7588 ctxt.uplink_seid = vsi->uplink_seid; 7589 ctxt.connection_type = 0x1; /* regular data port */ 7590 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 7591 7592 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 7593 7594 /* This VSI is connected to VEB so the switch_id 7595 * should be set to zero by default. 
7596 */ 7597 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 7598 7599 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 7600 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 7601 if (pf->vf[vsi->vf_id].spoofchk) { 7602 ctxt.info.valid_sections |= 7603 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 7604 ctxt.info.sec_flags |= 7605 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 7606 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 7607 } 7608 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7609 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7610 break; 7611 7612 #ifdef I40E_FCOE 7613 case I40E_VSI_FCOE: 7614 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 7615 if (ret) { 7616 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 7617 return ret; 7618 } 7619 break; 7620 7621 #endif /* I40E_FCOE */ 7622 default: 7623 return -ENODEV; 7624 } 7625 7626 if (vsi->type != I40E_VSI_MAIN) { 7627 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 7628 if (ret) { 7629 dev_info(&vsi->back->pdev->dev, 7630 "add vsi failed, aq_err=%d\n", 7631 vsi->back->hw.aq.asq_last_status); 7632 ret = -ENOENT; 7633 goto err; 7634 } 7635 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 7636 vsi->info.valid_sections = 0; 7637 vsi->seid = ctxt.seid; 7638 vsi->id = ctxt.vsi_number; 7639 } 7640 7641 /* If macvlan filters already exist, force them to get loaded */ 7642 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 7643 f->changed = true; 7644 f_count++; 7645 7646 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 7647 i40e_aq_mac_address_write(&vsi->back->hw, 7648 I40E_AQC_WRITE_TYPE_LAA_WOL, 7649 f->macaddr, NULL); 7650 } 7651 } 7652 if (f_count) { 7653 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 7654 pf->flags |= I40E_FLAG_FILTER_SYNC; 7655 } 7656 7657 /* Update VSI BW information */ 7658 ret = i40e_vsi_get_bw_info(vsi); 7659 if (ret) { 7660 dev_info(&pf->pdev->dev, 7661 "couldn't get vsi bw info, err %d, aq_err %d\n", 7662 ret, pf->hw.aq.asq_last_status); 7663 /* VSI is already added so not tearing that up */ 7664 ret = 0; 7665 } 7666 7667 err: 7668 return ret; 7669 } 7670 7671 /** 7672 * i40e_vsi_release - Delete a VSI and free its resources 7673 * @vsi: the VSI being removed 7674 * 7675 * Returns 0 on success or < 0 on error 7676 **/ 7677 int i40e_vsi_release(struct i40e_vsi *vsi) 7678 { 7679 struct i40e_mac_filter *f, *ftmp; 7680 struct i40e_veb *veb = NULL; 7681 struct i40e_pf *pf; 7682 u16 uplink_seid; 7683 int i, n; 7684 7685 pf = vsi->back; 7686 7687 /* release of a VEB-owner or last VSI is not allowed */ 7688 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 7689 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 7690 vsi->seid, vsi->uplink_seid); 7691 return -ENODEV; 7692 } 7693 if (vsi == pf->vsi[pf->lan_vsi] && 7694 !test_bit(__I40E_DOWN, &pf->state)) { 7695 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 7696 return -ENODEV; 7697 } 7698 7699 uplink_seid = vsi->uplink_seid; 7700 if (vsi->type != I40E_VSI_SRIOV) { 7701 if (vsi->netdev_registered) { 7702 vsi->netdev_registered = false; 7703 if (vsi->netdev) { 7704 /* results in a call to i40e_close() */ 7705 unregister_netdev(vsi->netdev); 7706 } 7707 } else { 7708 i40e_vsi_close(vsi); 7709 } 7710 i40e_vsi_disable_irq(vsi); 7711 } 7712 7713 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 7714 i40e_del_filter(vsi, f->macaddr, f->vlan, 7715 f->is_vf, f->is_netdev); 7716 i40e_sync_vsi_filters(vsi); 7717 7718 i40e_vsi_delete(vsi); 7719 i40e_vsi_free_q_vectors(vsi); 7720 if (vsi->netdev) { 7721 
free_netdev(vsi->netdev); 7722 vsi->netdev = NULL; 7723 } 7724 i40e_vsi_clear_rings(vsi); 7725 i40e_vsi_clear(vsi); 7726 7727 /* If this was the last thing on the VEB, except for the 7728 * controlling VSI, remove the VEB, which puts the controlling 7729 * VSI onto the next level down in the switch. 7730 * 7731 * Well, okay, there's one more exception here: don't remove 7732 * the orphan VEBs yet. We'll wait for an explicit remove request 7733 * from up the network stack. 7734 */ 7735 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 7736 if (pf->vsi[i] && 7737 pf->vsi[i]->uplink_seid == uplink_seid && 7738 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 7739 n++; /* count the VSIs */ 7740 } 7741 } 7742 for (i = 0; i < I40E_MAX_VEB; i++) { 7743 if (!pf->veb[i]) 7744 continue; 7745 if (pf->veb[i]->uplink_seid == uplink_seid) 7746 n++; /* count the VEBs */ 7747 if (pf->veb[i]->seid == uplink_seid) 7748 veb = pf->veb[i]; 7749 } 7750 if (n == 0 && veb && veb->uplink_seid != 0) 7751 i40e_veb_release(veb); 7752 7753 return 0; 7754 } 7755 7756 /** 7757 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 7758 * @vsi: ptr to the VSI 7759 * 7760 * This should only be called after i40e_vsi_mem_alloc() which allocates the 7761 * corresponding SW VSI structure and initializes num_queue_pairs for the 7762 * newly allocated VSI. 7763 * 7764 * Returns 0 on success or negative on failure 7765 **/ 7766 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 7767 { 7768 int ret = -ENOENT; 7769 struct i40e_pf *pf = vsi->back; 7770 7771 if (vsi->q_vectors[0]) { 7772 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 7773 vsi->seid); 7774 return -EEXIST; 7775 } 7776 7777 if (vsi->base_vector) { 7778 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 7779 vsi->seid, vsi->base_vector); 7780 return -EEXIST; 7781 } 7782 7783 ret = i40e_vsi_alloc_q_vectors(vsi); 7784 if (ret) { 7785 dev_info(&pf->pdev->dev, 7786 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 7787 vsi->num_q_vectors, vsi->seid, ret); 7788 vsi->num_q_vectors = 0; 7789 goto vector_setup_out; 7790 } 7791 7792 if (vsi->num_q_vectors) 7793 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 7794 vsi->num_q_vectors, vsi->idx); 7795 if (vsi->base_vector < 0) { 7796 dev_info(&pf->pdev->dev, 7797 "failed to get queue tracking for VSI %d, err=%d\n", 7798 vsi->seid, vsi->base_vector); 7799 i40e_vsi_free_q_vectors(vsi); 7800 ret = -ENOENT; 7801 goto vector_setup_out; 7802 } 7803 7804 vector_setup_out: 7805 return ret; 7806 } 7807 7808 /** 7809 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 7810 * @vsi: pointer to the vsi. 7811 * 7812 * This re-allocates a vsi's queue resources. 7813 * 7814 * Returns pointer to the successfully allocated and configured VSI sw struct 7815 * on success, otherwise returns NULL on failure. 
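 *
 * A minimal usage sketch, mirroring how the reinit path later in this
 * file invokes it for the LAN VSI:
 *
 *	vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
 *	if (!vsi)
 *		return -EAGAIN;
 *
 * On failure the old queue resources have already been torn down, so
 * the caller must treat the VSI as gone.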
7816 **/ 7817 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 7818 { 7819 struct i40e_pf *pf = vsi->back; 7820 u8 enabled_tc; 7821 int ret; 7822 7823 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 7824 i40e_vsi_clear_rings(vsi); 7825 7826 i40e_vsi_free_arrays(vsi, false); 7827 i40e_set_num_rings_in_vsi(vsi); 7828 ret = i40e_vsi_alloc_arrays(vsi, false); 7829 if (ret) 7830 goto err_vsi; 7831 7832 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); 7833 if (ret < 0) { 7834 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", 7835 vsi->seid, ret); 7836 goto err_vsi; 7837 } 7838 vsi->base_queue = ret; 7839 7840 /* Update the FW view of the VSI. Force a reset of TC and queue 7841 * layout configurations. 7842 */ 7843 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 7844 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 7845 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 7846 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 7847 7848 /* assign it some queues */ 7849 ret = i40e_alloc_rings(vsi); 7850 if (ret) 7851 goto err_rings; 7852 7853 /* map all of the rings to the q_vectors */ 7854 i40e_vsi_map_rings_to_vectors(vsi); 7855 return vsi; 7856 7857 err_rings: 7858 i40e_vsi_free_q_vectors(vsi); 7859 if (vsi->netdev_registered) { 7860 vsi->netdev_registered = false; 7861 unregister_netdev(vsi->netdev); 7862 free_netdev(vsi->netdev); 7863 vsi->netdev = NULL; 7864 } 7865 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 7866 err_vsi: 7867 i40e_vsi_clear(vsi); 7868 return NULL; 7869 } 7870 7871 /** 7872 * i40e_vsi_setup - Set up a VSI by a given type 7873 * @pf: board private structure 7874 * @type: VSI type 7875 * @uplink_seid: the switch element to link to 7876 * @param1: usage depends upon VSI type. For VF types, indicates VF id 7877 * 7878 * This allocates the sw VSI structure and its queue resources, then adds a VSI 7879 * to the identified VEB. 7880 * 7881 * Returns pointer to the successfully allocated and configured VSI sw struct on 7882 * success, otherwise returns NULL on failure.
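 *
 * For example, the switch setup code later in this file creates the
 * MAIN VSI with:
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
 *
 * where param1 is unused; for I40E_VSI_SRIOV the same argument carries
 * the VF id instead.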
7883 **/ 7884 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, 7885 u16 uplink_seid, u32 param1) 7886 { 7887 struct i40e_vsi *vsi = NULL; 7888 struct i40e_veb *veb = NULL; 7889 int ret, i; 7890 int v_idx; 7891 7892 /* The requested uplink_seid must be either 7893 * - the PF's port seid 7894 * no VEB is needed because this is the PF 7895 * or this is a Flow Director special case VSI 7896 * - seid of an existing VEB 7897 * - seid of a VSI that owns an existing VEB 7898 * - seid of a VSI that doesn't own a VEB 7899 * a new VEB is created and the VSI becomes the owner 7900 * - seid of the PF VSI, which is what creates the first VEB 7901 * this is a special case of the previous 7902 * 7903 * Find which uplink_seid we were given and create a new VEB if needed 7904 */ 7905 for (i = 0; i < I40E_MAX_VEB; i++) { 7906 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { 7907 veb = pf->veb[i]; 7908 break; 7909 } 7910 } 7911 7912 if (!veb && uplink_seid != pf->mac_seid) { 7913 7914 for (i = 0; i < pf->num_alloc_vsi; i++) { 7915 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 7916 vsi = pf->vsi[i]; 7917 break; 7918 } 7919 } 7920 if (!vsi) { 7921 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", 7922 uplink_seid); 7923 return NULL; 7924 } 7925 7926 if (vsi->uplink_seid == pf->mac_seid) 7927 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, 7928 vsi->tc_config.enabled_tc); 7929 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) 7930 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 7931 vsi->tc_config.enabled_tc); 7932 7933 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 7934 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 7935 veb = pf->veb[i]; 7936 } 7937 if (!veb) { 7938 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); 7939 return NULL; 7940 } 7941 7942 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 7943 uplink_seid = veb->seid; 7944 } 7945 7946 /* get vsi sw struct */ 7947 v_idx = i40e_vsi_mem_alloc(pf, type); 7948 if (v_idx < 0) 7949 goto err_alloc; 7950 vsi = pf->vsi[v_idx]; 7951 if (!vsi) 7952 goto err_alloc; 7953 vsi->type = type; 7954 vsi->veb_idx = (veb ? 
veb->idx : I40E_NO_VEB); 7955 7956 if (type == I40E_VSI_MAIN) 7957 pf->lan_vsi = v_idx; 7958 else if (type == I40E_VSI_SRIOV) 7959 vsi->vf_id = param1; 7960 /* assign it some queues */ 7961 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, 7962 vsi->idx); 7963 if (ret < 0) { 7964 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", 7965 vsi->seid, ret); 7966 goto err_vsi; 7967 } 7968 vsi->base_queue = ret; 7969 7970 /* get a VSI from the hardware */ 7971 vsi->uplink_seid = uplink_seid; 7972 ret = i40e_add_vsi(vsi); 7973 if (ret) 7974 goto err_vsi; 7975 7976 switch (vsi->type) { 7977 /* setup the netdev if needed */ 7978 case I40E_VSI_MAIN: 7979 case I40E_VSI_VMDQ2: 7980 case I40E_VSI_FCOE: 7981 ret = i40e_config_netdev(vsi); 7982 if (ret) 7983 goto err_netdev; 7984 ret = register_netdev(vsi->netdev); 7985 if (ret) 7986 goto err_netdev; 7987 vsi->netdev_registered = true; 7988 netif_carrier_off(vsi->netdev); 7989 #ifdef CONFIG_I40E_DCB 7990 /* Setup DCB netlink interface */ 7991 i40e_dcbnl_setup(vsi); 7992 #endif /* CONFIG_I40E_DCB */ 7993 /* fall through */ 7994 7995 case I40E_VSI_FDIR: 7996 /* set up vectors and rings if needed */ 7997 ret = i40e_vsi_setup_vectors(vsi); 7998 if (ret) 7999 goto err_msix; 8000 8001 ret = i40e_alloc_rings(vsi); 8002 if (ret) 8003 goto err_rings; 8004 8005 /* map all of the rings to the q_vectors */ 8006 i40e_vsi_map_rings_to_vectors(vsi); 8007 8008 i40e_vsi_reset_stats(vsi); 8009 break; 8010 8011 default: 8012 /* no netdev or rings for the other VSI types */ 8013 break; 8014 } 8015 8016 return vsi; 8017 8018 err_rings: 8019 i40e_vsi_free_q_vectors(vsi); 8020 err_msix: 8021 if (vsi->netdev_registered) { 8022 vsi->netdev_registered = false; 8023 unregister_netdev(vsi->netdev); 8024 free_netdev(vsi->netdev); 8025 vsi->netdev = NULL; 8026 } 8027 err_netdev: 8028 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 8029 err_vsi: 8030 i40e_vsi_clear(vsi); 8031 err_alloc: 8032 return NULL; 8033 } 8034 8035 /** 8036 * i40e_veb_get_bw_info - Query VEB BW information 8037 * @veb: the veb to query 8038 * 8039 * Query the Tx scheduler BW configuration data for given VEB 8040 **/ 8041 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 8042 { 8043 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 8044 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; 8045 struct i40e_pf *pf = veb->pf; 8046 struct i40e_hw *hw = &pf->hw; 8047 u32 tc_bw_max; 8048 int ret = 0; 8049 int i; 8050 8051 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 8052 &bw_data, NULL); 8053 if (ret) { 8054 dev_info(&pf->pdev->dev, 8055 "query veb bw config failed, aq_err=%d\n", 8056 hw->aq.asq_last_status); 8057 goto out; 8058 } 8059 8060 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 8061 &ets_data, NULL); 8062 if (ret) { 8063 dev_info(&pf->pdev->dev, 8064 "query veb bw ets config failed, aq_err=%d\n", 8065 hw->aq.asq_last_status); 8066 goto out; 8067 } 8068 8069 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 8070 veb->bw_max_quanta = ets_data.tc_bw_max; 8071 veb->is_abs_credits = bw_data.absolute_credits_enable; 8072 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 8073 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 8074 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 8075 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; 8076 veb->bw_tc_limit_credits[i] = 8077 le16_to_cpu(bw_data.tc_bw_limits[i]); 8078 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); 8079 } 8080 8081 out: 8082 return ret; 8083 } 8084 8085 /** 8086 * 
i40e_veb_mem_alloc - Allocates the next available struct veb in the PF 8087 * @pf: board private structure 8088 * 8089 * On error: returns error code (negative) 8090 * On success: returns veb index in PF (positive) 8091 **/ 8092 static int i40e_veb_mem_alloc(struct i40e_pf *pf) 8093 { 8094 int ret = -ENOENT; 8095 struct i40e_veb *veb; 8096 int i; 8097 8098 /* Need to protect the allocation of switch elements at the PF level */ 8099 mutex_lock(&pf->switch_mutex); 8100 8101 /* VEB list may be fragmented if VEB creation/destruction has 8102 * been happening. We can afford to do a quick scan to look 8103 * for any free slots in the list. 8104 * 8105 * find the next empty veb slot; the scan starts at slot 0, so no 8106 * wraparound is needed 8107 */ 8107 i = 0; 8108 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) 8109 i++; 8110 if (i >= I40E_MAX_VEB) { 8111 ret = -ENOMEM; 8112 goto err_alloc_veb; /* out of VEB slots! */ 8113 } 8114 8115 veb = kzalloc(sizeof(*veb), GFP_KERNEL); 8116 if (!veb) { 8117 ret = -ENOMEM; 8118 goto err_alloc_veb; 8119 } 8120 veb->pf = pf; 8121 veb->idx = i; 8122 veb->enabled_tc = 1; 8123 8124 pf->veb[i] = veb; 8125 ret = i; 8126 err_alloc_veb: 8127 mutex_unlock(&pf->switch_mutex); 8128 return ret; 8129 } 8130 8131 /** 8132 * i40e_switch_branch_release - Delete a branch of the switch tree 8133 * @branch: where to start deleting 8134 * 8135 * This uses recursion to find the tips of the branch to be 8136 * removed, deleting until we get back to and can delete this VEB. 8137 **/ 8138 static void i40e_switch_branch_release(struct i40e_veb *branch) 8139 { 8140 struct i40e_pf *pf = branch->pf; 8141 u16 branch_seid = branch->seid; 8142 u16 veb_idx = branch->idx; 8143 int i; 8144 8145 /* release any VEBs on this VEB - RECURSION */ 8146 for (i = 0; i < I40E_MAX_VEB; i++) { 8147 if (!pf->veb[i]) 8148 continue; 8149 if (pf->veb[i]->uplink_seid == branch->seid) 8150 i40e_switch_branch_release(pf->veb[i]); 8151 } 8152 8153 /* Release the VSIs on this VEB, but not the owner VSI. 8154 * 8155 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 8156 * the VEB itself, so don't use (*branch) after this loop. 8157 */ 8158 for (i = 0; i < pf->num_alloc_vsi; i++) { 8159 if (!pf->vsi[i]) 8160 continue; 8161 if (pf->vsi[i]->uplink_seid == branch_seid && 8162 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 8163 i40e_vsi_release(pf->vsi[i]); 8164 } 8165 } 8166 8167 /* There's one corner case where the VEB might not have been 8168 * removed, so double check it here and remove it if needed. 8169 * This case happens if the veb was created from the debugfs 8170 * commands and no VSIs were added to it.
8171 */ 8172 if (pf->veb[veb_idx]) 8173 i40e_veb_release(pf->veb[veb_idx]); 8174 } 8175 8176 /** 8177 * i40e_veb_clear - remove veb struct 8178 * @veb: the veb to remove 8179 **/ 8180 static void i40e_veb_clear(struct i40e_veb *veb) 8181 { 8182 if (!veb) 8183 return; 8184 8185 if (veb->pf) { 8186 struct i40e_pf *pf = veb->pf; 8187 8188 mutex_lock(&pf->switch_mutex); 8189 if (pf->veb[veb->idx] == veb) 8190 pf->veb[veb->idx] = NULL; 8191 mutex_unlock(&pf->switch_mutex); 8192 } 8193 8194 kfree(veb); 8195 } 8196 8197 /** 8198 * i40e_veb_release - Delete a VEB and free its resources 8199 * @veb: the VEB being removed 8200 **/ 8201 void i40e_veb_release(struct i40e_veb *veb) 8202 { 8203 struct i40e_vsi *vsi = NULL; 8204 struct i40e_pf *pf; 8205 int i, n = 0; 8206 8207 pf = veb->pf; 8208 8209 /* find the remaining VSI and check for extras */ 8210 for (i = 0; i < pf->num_alloc_vsi; i++) { 8211 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 8212 n++; 8213 vsi = pf->vsi[i]; 8214 } 8215 } 8216 if (n != 1) { 8217 dev_info(&pf->pdev->dev, 8218 "can't remove VEB %d with %d VSIs left\n", 8219 veb->seid, n); 8220 return; 8221 } 8222 8223 /* move the remaining VSI to uplink veb */ 8224 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 8225 if (veb->uplink_seid) { 8226 vsi->uplink_seid = veb->uplink_seid; 8227 if (veb->uplink_seid == pf->mac_seid) 8228 vsi->veb_idx = I40E_NO_VEB; 8229 else 8230 vsi->veb_idx = veb->veb_idx; 8231 } else { 8232 /* floating VEB */ 8233 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 8234 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 8235 } 8236 8237 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 8238 i40e_veb_clear(veb); 8239 } 8240 8241 /** 8242 * i40e_add_veb - create the VEB in the switch 8243 * @veb: the VEB to be instantiated 8244 * @vsi: the controlling VSI 8245 **/ 8246 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 8247 { 8248 bool is_default = false; 8249 bool is_cloud = false; 8250 int ret; 8251 8252 /* get a VEB from the hardware */ 8253 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, 8254 veb->enabled_tc, is_default, 8255 is_cloud, &veb->seid, NULL); 8256 if (ret) { 8257 dev_info(&veb->pf->pdev->dev, 8258 "couldn't add VEB, err %d, aq_err %d\n", 8259 ret, veb->pf->hw.aq.asq_last_status); 8260 return -EPERM; 8261 } 8262 8263 /* get statistics counter */ 8264 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, 8265 &veb->stats_idx, NULL, NULL, NULL); 8266 if (ret) { 8267 dev_info(&veb->pf->pdev->dev, 8268 "couldn't get VEB statistics idx, err %d, aq_err %d\n", 8269 ret, veb->pf->hw.aq.asq_last_status); 8270 return -EPERM; 8271 } 8272 ret = i40e_veb_get_bw_info(veb); 8273 if (ret) { 8274 dev_info(&veb->pf->pdev->dev, 8275 "couldn't get VEB bw info, err %d, aq_err %d\n", 8276 ret, veb->pf->hw.aq.asq_last_status); 8277 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); 8278 return -ENOENT; 8279 } 8280 8281 vsi->uplink_seid = veb->seid; 8282 vsi->veb_idx = veb->idx; 8283 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 8284 8285 return 0; 8286 } 8287 8288 /** 8289 * i40e_veb_setup - Set up a VEB 8290 * @pf: board private structure 8291 * @flags: VEB setup flags 8292 * @uplink_seid: the switch element to link to 8293 * @vsi_seid: the initial VSI seid 8294 * @enabled_tc: Enabled TC bit-map 8295 * 8296 * This allocates the sw VEB structure and links it into the switch 8297 * It is possible and legal for this to be a duplicate of an already 8298 * existing VEB. 
It is also possible for both uplink and vsi seids 8299 * to be zero, in order to create a floating VEB. 8300 * 8301 * Returns pointer to the successfully allocated VEB sw struct on 8302 * success, otherwise returns NULL on failure. 8303 **/ 8304 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 8305 u16 uplink_seid, u16 vsi_seid, 8306 u8 enabled_tc) 8307 { 8308 struct i40e_veb *veb, *uplink_veb = NULL; 8309 int vsi_idx, veb_idx; 8310 int ret; 8311 8312 /* if one seid is 0, the other must be 0 to create a floating relay */ 8313 if ((uplink_seid == 0 || vsi_seid == 0) && 8314 (uplink_seid + vsi_seid != 0)) { 8315 dev_info(&pf->pdev->dev, 8316 "one, not both seid's are 0: uplink=%d vsi=%d\n", 8317 uplink_seid, vsi_seid); 8318 return NULL; 8319 } 8320 8321 /* make sure there is such a vsi and uplink */ 8322 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) 8323 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 8324 break; 8325 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { 8326 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 8327 vsi_seid); 8328 return NULL; 8329 } 8330 8331 if (uplink_seid && uplink_seid != pf->mac_seid) { 8332 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 8333 if (pf->veb[veb_idx] && 8334 pf->veb[veb_idx]->seid == uplink_seid) { 8335 uplink_veb = pf->veb[veb_idx]; 8336 break; 8337 } 8338 } 8339 if (!uplink_veb) { 8340 dev_info(&pf->pdev->dev, 8341 "uplink seid %d not found\n", uplink_seid); 8342 return NULL; 8343 } 8344 } 8345 8346 /* get veb sw struct */ 8347 veb_idx = i40e_veb_mem_alloc(pf); 8348 if (veb_idx < 0) 8349 goto err_alloc; 8350 veb = pf->veb[veb_idx]; 8351 veb->flags = flags; 8352 veb->uplink_seid = uplink_seid; 8353 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 8354 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 8355 8356 /* create the VEB in the switch */ 8357 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 8358 if (ret) 8359 goto err_veb; 8360 if (vsi_idx == pf->lan_vsi) 8361 pf->lan_veb = veb->idx; 8362 8363 return veb; 8364 8365 err_veb: 8366 i40e_veb_clear(veb); 8367 err_alloc: 8368 return NULL; 8369 } 8370 8371 /** 8372 * i40e_setup_pf_switch_element - set pf vars based on switch type 8373 * @pf: board private structure 8374 * @ele: element we are building info from 8375 * @num_reported: total number of elements 8376 * @printconfig: should we print the contents 8377 * 8378 * helper function to assist in extracting a few useful SEID values. 8379 **/ 8380 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 8381 struct i40e_aqc_switch_config_element_resp *ele, 8382 u16 num_reported, bool printconfig) 8383 { 8384 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 8385 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 8386 u8 element_type = ele->element_type; 8387 u16 seid = le16_to_cpu(ele->seid); 8388 8389 if (printconfig) 8390 dev_info(&pf->pdev->dev, 8391 "type=%d seid=%d uplink=%d downlink=%d\n", 8392 element_type, seid, uplink_seid, downlink_seid); 8393 8394 switch (element_type) { 8395 case I40E_SWITCH_ELEMENT_TYPE_MAC: 8396 pf->mac_seid = seid; 8397 break; 8398 case I40E_SWITCH_ELEMENT_TYPE_VEB: 8399 /* Main VEB? 
*/ 8400 if (uplink_seid != pf->mac_seid) 8401 break; 8402 if (pf->lan_veb == I40E_NO_VEB) { 8403 int v; 8404 8405 /* find existing or else empty VEB */ 8406 for (v = 0; v < I40E_MAX_VEB; v++) { 8407 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 8408 pf->lan_veb = v; 8409 break; 8410 } 8411 } 8412 if (pf->lan_veb == I40E_NO_VEB) { 8413 v = i40e_veb_mem_alloc(pf); 8414 if (v < 0) 8415 break; 8416 pf->lan_veb = v; 8417 } 8418 } 8419 8420 pf->veb[pf->lan_veb]->seid = seid; 8421 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 8422 pf->veb[pf->lan_veb]->pf = pf; 8423 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 8424 break; 8425 case I40E_SWITCH_ELEMENT_TYPE_VSI: 8426 if (num_reported != 1) 8427 break; 8428 /* This is immediately after a reset so we can assume this is 8429 * the PF's VSI 8430 */ 8431 pf->mac_seid = uplink_seid; 8432 pf->pf_seid = downlink_seid; 8433 pf->main_vsi_seid = seid; 8434 if (printconfig) 8435 dev_info(&pf->pdev->dev, 8436 "pf_seid=%d main_vsi_seid=%d\n", 8437 pf->pf_seid, pf->main_vsi_seid); 8438 break; 8439 case I40E_SWITCH_ELEMENT_TYPE_PF: 8440 case I40E_SWITCH_ELEMENT_TYPE_VF: 8441 case I40E_SWITCH_ELEMENT_TYPE_EMP: 8442 case I40E_SWITCH_ELEMENT_TYPE_BMC: 8443 case I40E_SWITCH_ELEMENT_TYPE_PE: 8444 case I40E_SWITCH_ELEMENT_TYPE_PA: 8445 /* ignore these for now */ 8446 break; 8447 default: 8448 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 8449 element_type, seid); 8450 break; 8451 } 8452 } 8453 8454 /** 8455 * i40e_fetch_switch_configuration - Get switch config from firmware 8456 * @pf: board private structure 8457 * @printconfig: should we print the contents 8458 * 8459 * Get the current switch configuration from the device and 8460 * extract a few useful SEID values. 8461 **/ 8462 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) 8463 { 8464 struct i40e_aqc_get_switch_config_resp *sw_config; 8465 u16 next_seid = 0; 8466 int ret = 0; 8467 u8 *aq_buf; 8468 int i; 8469 8470 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); 8471 if (!aq_buf) 8472 return -ENOMEM; 8473 8474 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 8475 do { 8476 u16 num_reported, num_total; 8477 8478 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, 8479 I40E_AQ_LARGE_BUF, 8480 &next_seid, NULL); 8481 if (ret) { 8482 dev_info(&pf->pdev->dev, 8483 "get switch config failed %d aq_err=%x\n", 8484 ret, pf->hw.aq.asq_last_status); 8485 kfree(aq_buf); 8486 return -ENOENT; 8487 } 8488 8489 num_reported = le16_to_cpu(sw_config->header.num_reported); 8490 num_total = le16_to_cpu(sw_config->header.num_total); 8491 8492 if (printconfig) 8493 dev_info(&pf->pdev->dev, 8494 "header: %d reported %d total\n", 8495 num_reported, num_total); 8496 8497 for (i = 0; i < num_reported; i++) { 8498 struct i40e_aqc_switch_config_element_resp *ele = 8499 &sw_config->element[i]; 8500 8501 i40e_setup_pf_switch_element(pf, ele, num_reported, 8502 printconfig); 8503 } 8504 } while (next_seid != 0); 8505 8506 kfree(aq_buf); 8507 return ret; 8508 } 8509 8510 /** 8511 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset 8512 * @pf: board private structure 8513 * @reinit: if the Main VSI needs to be re-initialized.
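 *
 * Broadly this runs in five steps: fetch the current switch config
 * from firmware, create (or, on @reinit, rebuild) the MAIN VSI, set
 * up the Flow Director sideband VSI, program the static filter
 * control settings, and finally configure RSS, link reporting and
 * PTP.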
8514 * 8515 * Returns 0 on success, negative value on failure 8516 **/ 8517 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) 8518 { 8519 int ret; 8520 8521 /* find out what's out there already */ 8522 ret = i40e_fetch_switch_configuration(pf, false); 8523 if (ret) { 8524 dev_info(&pf->pdev->dev, 8525 "couldn't fetch switch config, err %d, aq_err %d\n", 8526 ret, pf->hw.aq.asq_last_status); 8527 return ret; 8528 } 8529 i40e_pf_reset_stats(pf); 8530 8531 /* first time setup */ 8532 if (pf->lan_vsi == I40E_NO_VSI || reinit) { 8533 struct i40e_vsi *vsi = NULL; 8534 u16 uplink_seid; 8535 8536 /* Set up the PF VSI associated with the PF's main VSI 8537 * that is already in the HW switch 8538 */ 8539 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 8540 uplink_seid = pf->veb[pf->lan_veb]->seid; 8541 else 8542 uplink_seid = pf->mac_seid; 8543 if (pf->lan_vsi == I40E_NO_VSI) 8544 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); 8545 else if (reinit) 8546 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); 8547 if (!vsi) { 8548 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); 8549 i40e_fdir_teardown(pf); 8550 return -EAGAIN; 8551 } 8552 } else { 8553 /* force a reset of TC and queue layout configurations */ 8554 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 8555 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 8556 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 8557 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 8558 } 8559 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); 8560 8561 i40e_fdir_sb_setup(pf); 8562 8563 /* Setup static PF queue filter control settings */ 8564 ret = i40e_setup_pf_filter_control(pf); 8565 if (ret) { 8566 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", 8567 ret); 8568 /* Failure here should not stop continuing other steps */ 8569 } 8570 8571 /* enable RSS in the HW, even for only one queue, as the stack can use 8572 * the hash 8573 */ 8574 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) 8575 i40e_config_rss(pf); 8576 8577 /* fill in link information and enable LSE reporting */ 8578 i40e_update_link_info(&pf->hw, true); 8579 i40e_link_event(pf); 8580 8581 /* Initialize user-specific link properties */ 8582 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & 8583 I40E_AQ_AN_COMPLETED) ? true : false); 8584 8585 i40e_ptp_init(pf); 8586 8587 return ret; 8588 } 8589 8590 /** 8591 * i40e_determine_queue_usage - Work out queue distribution 8592 * @pf: board private structure 8593 **/ 8594 static void i40e_determine_queue_usage(struct i40e_pf *pf) 8595 { 8596 int queues_left; 8597 8598 pf->num_lan_qps = 0; 8599 #ifdef I40E_FCOE 8600 pf->num_fcoe_qps = 0; 8601 #endif 8602 8603 /* Find the max queues to be put into basic use. We'll always be 8604 * using TC0, whether or not DCB is running, and TC0 will get the 8605 * big RSS set. 
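 *
 * A worked example with illustrative numbers (not taken from any
 * particular part): with 128 Tx queue pairs, rss_size_max = 64,
 * 8 requested VFs at 4 qps each, and VMDq disabled, the code below
 * works out to:
 *
 *	LAN/RSS:  64   (queues_left 128 -> 64)
 *	FD SB:     1   (queues_left  64 -> 63)
 *	VFs:      32   (queues_left  63 -> 31)
 *
 * leaving pf->queues_left = 31.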
8606 */ 8607 queues_left = pf->hw.func_caps.num_tx_qp; 8608 8609 if ((queues_left == 1) || 8610 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 8611 /* one qp for PF, no queues for anything else */ 8612 queues_left = 0; 8613 pf->rss_size = pf->num_lan_qps = 1; 8614 8615 /* make sure all the fancies are disabled */ 8616 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8617 #ifdef I40E_FCOE 8618 I40E_FLAG_FCOE_ENABLED | 8619 #endif 8620 I40E_FLAG_FD_SB_ENABLED | 8621 I40E_FLAG_FD_ATR_ENABLED | 8622 I40E_FLAG_DCB_CAPABLE | 8623 I40E_FLAG_SRIOV_ENABLED | 8624 I40E_FLAG_VMDQ_ENABLED); 8625 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 8626 I40E_FLAG_FD_SB_ENABLED | 8627 I40E_FLAG_FD_ATR_ENABLED | 8628 I40E_FLAG_DCB_CAPABLE))) { 8629 /* one qp for PF */ 8630 pf->rss_size = pf->num_lan_qps = 1; 8631 queues_left -= pf->num_lan_qps; 8632 8633 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8634 #ifdef I40E_FCOE 8635 I40E_FLAG_FCOE_ENABLED | 8636 #endif 8637 I40E_FLAG_FD_SB_ENABLED | 8638 I40E_FLAG_FD_ATR_ENABLED | 8639 I40E_FLAG_DCB_ENABLED | 8640 I40E_FLAG_VMDQ_ENABLED); 8641 } else { 8642 /* Not enough queues for all TCs */ 8643 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 8644 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 8645 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 8646 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 8647 } 8648 pf->num_lan_qps = pf->rss_size_max; 8649 queues_left -= pf->num_lan_qps; 8650 } 8651 8652 #ifdef I40E_FCOE 8653 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 8654 if (I40E_DEFAULT_FCOE <= queues_left) { 8655 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; 8656 } else if (I40E_MINIMUM_FCOE <= queues_left) { 8657 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; 8658 } else { 8659 pf->num_fcoe_qps = 0; 8660 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 8661 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); 8662 } 8663 8664 queues_left -= pf->num_fcoe_qps; 8665 } 8666 8667 #endif 8668 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8669 if (queues_left > 1) { 8670 queues_left -= 1; /* save 1 queue for FD */ 8671 } else { 8672 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 8673 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); 8674 } 8675 } 8676 8677 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 8678 pf->num_vf_qps && pf->num_req_vfs && queues_left) { 8679 pf->num_req_vfs = min_t(int, pf->num_req_vfs, 8680 (queues_left / pf->num_vf_qps)); 8681 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); 8682 } 8683 8684 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 8685 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { 8686 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, 8687 (queues_left / pf->num_vmdq_qps)); 8688 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); 8689 } 8690 8691 pf->queues_left = queues_left; 8692 #ifdef I40E_FCOE 8693 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); 8694 #endif 8695 } 8696 8697 /** 8698 * i40e_setup_pf_filter_control - Setup PF static filter control 8699 * @pf: PF to be setup 8700 * 8701 * i40e_setup_pf_filter_control sets up a pf's initial filter control 8702 * settings. If PE/FCoE are enabled then it will also set the per PF 8703 * based filter sizes required for them. It also enables Flow director, 8704 * ethertype and macvlan type filter settings for the pf. 
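 * Note that the single enable_fdir switch below covers both the
 * sideband and ATR flavours of Flow Director, which is why the check
 * ORs the two feature flags together.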
8705 * 8706 * Returns 0 on success, negative on failure 8707 **/ 8708 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) 8709 { 8710 struct i40e_filter_control_settings *settings = &pf->filter_settings; 8711 8712 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; 8713 8714 /* Flow Director is enabled */ 8715 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) 8716 settings->enable_fdir = true; 8717 8718 /* Ethtype and MACVLAN filters enabled for PF */ 8719 settings->enable_ethtype = true; 8720 settings->enable_macvlan = true; 8721 8722 if (i40e_set_filter_control(&pf->hw, settings)) 8723 return -ENOENT; 8724 8725 return 0; 8726 } 8727 8728 #define INFO_STRING_LEN 255 8729 static void i40e_print_features(struct i40e_pf *pf) 8730 { 8731 struct i40e_hw *hw = &pf->hw; 8732 char *buf, *string; 8733 8734 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); 8735 if (!string) { 8736 dev_err(&pf->pdev->dev, "Features string allocation failed\n"); 8737 return; 8738 } 8739 8740 buf = string; 8741 8742 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); 8743 #ifdef CONFIG_PCI_IOV 8744 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); 8745 #endif 8746 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, 8747 pf->vsi[pf->lan_vsi]->num_queue_pairs); 8748 8749 if (pf->flags & I40E_FLAG_RSS_ENABLED) 8750 buf += sprintf(buf, "RSS "); 8751 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 8752 buf += sprintf(buf, "FD_ATR "); 8753 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8754 buf += sprintf(buf, "FD_SB "); 8755 buf += sprintf(buf, "NTUPLE "); 8756 } 8757 if (pf->flags & I40E_FLAG_DCB_CAPABLE) 8758 buf += sprintf(buf, "DCB "); 8759 if (pf->flags & I40E_FLAG_PTP) 8760 buf += sprintf(buf, "PTP "); 8761 #ifdef I40E_FCOE 8762 if (pf->flags & I40E_FLAG_FCOE_ENABLED) 8763 buf += sprintf(buf, "FCOE "); 8764 #endif 8765 8766 BUG_ON(buf > (string + INFO_STRING_LEN)); 8767 dev_info(&pf->pdev->dev, "%s\n", string); 8768 kfree(string); 8769 } 8770 8771 /** 8772 * i40e_probe - Device initialization routine 8773 * @pdev: PCI device information struct 8774 * @ent: entry in i40e_pci_tbl 8775 * 8776 * i40e_probe initializes a pf identified by a pci_dev structure. 8777 * The OS initialization, configuring of the pf private structure, 8778 * and a hardware reset occur. 8779 * 8780 * Returns 0 on success, negative on failure 8781 **/ 8782 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 8783 { 8784 struct i40e_pf *pf; 8785 struct i40e_hw *hw; 8786 static u16 pfs_found; 8787 u16 link_status; 8788 int err = 0; 8789 u32 len; 8790 u32 i; 8791 8792 err = pci_enable_device_mem(pdev); 8793 if (err) 8794 return err; 8795 8796 /* set up for high or low dma */ 8797 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 8798 if (err) { 8799 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 8800 if (err) { 8801 dev_err(&pdev->dev, 8802 "DMA configuration failed: 0x%x\n", err); 8803 goto err_dma; 8804 } 8805 } 8806 8807 /* set up pci connections */ 8808 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 8809 IORESOURCE_MEM), i40e_driver_name); 8810 if (err) { 8811 dev_info(&pdev->dev, 8812 "pci_request_selected_regions failed %d\n", err); 8813 goto err_pci_reg; 8814 } 8815 8816 pci_enable_pcie_error_reporting(pdev); 8817 pci_set_master(pdev); 8818 8819 /* Now that we have a PCI connection, we need to do the 8820 * low level device setup. 
This is primarily setting up 8821 * the Admin Queue structures and then querying for the 8822 * device's current profile information. 8823 */ 8824 pf = kzalloc(sizeof(*pf), GFP_KERNEL); 8825 if (!pf) { 8826 err = -ENOMEM; 8827 goto err_pf_alloc; 8828 } 8829 pf->next_vsi = 0; 8830 pf->pdev = pdev; 8831 set_bit(__I40E_DOWN, &pf->state); 8832 8833 hw = &pf->hw; 8834 hw->back = pf; 8835 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 8836 pci_resource_len(pdev, 0)); 8837 if (!hw->hw_addr) { 8838 err = -EIO; 8839 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", 8840 (unsigned int)pci_resource_start(pdev, 0), 8841 (unsigned int)pci_resource_len(pdev, 0), err); 8842 goto err_ioremap; 8843 } 8844 hw->vendor_id = pdev->vendor; 8845 hw->device_id = pdev->device; 8846 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 8847 hw->subsystem_vendor_id = pdev->subsystem_vendor; 8848 hw->subsystem_device_id = pdev->subsystem_device; 8849 hw->bus.device = PCI_SLOT(pdev->devfn); 8850 hw->bus.func = PCI_FUNC(pdev->devfn); 8851 pf->instance = pfs_found; 8852 8853 /* do a special CORER for clearing PXE mode once at init */ 8854 if (hw->revision_id == 0 && 8855 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { 8856 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 8857 i40e_flush(hw); 8858 msleep(200); 8859 pf->corer_count++; 8860 8861 i40e_clear_pxe_mode(hw); 8862 } 8863 8864 /* Reset here to make sure all is clean and to define PF 'n' */ 8865 i40e_clear_hw(hw); 8866 err = i40e_pf_reset(hw); 8867 if (err) { 8868 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 8869 goto err_pf_reset; 8870 } 8871 pf->pfr_count++; 8872 8873 hw->aq.num_arq_entries = I40E_AQ_LEN; 8874 hw->aq.num_asq_entries = I40E_AQ_LEN; 8875 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; 8876 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 8877 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 8878 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, 8879 "%s-pf%d:misc", 8880 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id); 8881 8882 err = i40e_init_shared_code(hw); 8883 if (err) { 8884 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); 8885 goto err_pf_reset; 8886 } 8887 8888 /* set up a default setting for link flow control */ 8889 pf->hw.fc.requested_mode = I40E_FC_NONE; 8890 8891 err = i40e_init_adminq(hw); 8892 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); 8893 if (err) { 8894 dev_info(&pdev->dev, 8895 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); 8896 goto err_pf_reset; 8897 } 8898 8899 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 8900 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) 8901 dev_info(&pdev->dev, 8902 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 8903 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 8904 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) 8905 dev_info(&pdev->dev, 8906 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 8907 8908 8909 i40e_verify_eeprom(pf); 8910 8911 /* Rev 0 hardware was never productized */ 8912 if (hw->revision_id < 1) 8913 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. 
If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 8914 8915 i40e_clear_pxe_mode(hw); 8916 err = i40e_get_capabilities(pf); 8917 if (err) 8918 goto err_adminq_setup; 8919 8920 err = i40e_sw_init(pf); 8921 if (err) { 8922 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 8923 goto err_sw_init; 8924 } 8925 8926 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 8927 hw->func_caps.num_rx_qp, 8928 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 8929 if (err) { 8930 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 8931 goto err_init_lan_hmc; 8932 } 8933 8934 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 8935 if (err) { 8936 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 8937 err = -ENOENT; 8938 goto err_configure_lan_hmc; 8939 } 8940 8941 i40e_get_mac_addr(hw, hw->mac.addr); 8942 if (!is_valid_ether_addr(hw->mac.addr)) { 8943 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 8944 err = -EIO; 8945 goto err_mac_addr; 8946 } 8947 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 8948 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); 8949 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 8950 if (is_valid_ether_addr(hw->mac.port_addr)) 8951 pf->flags |= I40E_FLAG_PORT_ID_VALID; 8952 #ifdef I40E_FCOE 8953 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); 8954 if (err) 8955 dev_info(&pdev->dev, 8956 "(non-fatal) SAN MAC retrieval failed: %d\n", err); 8957 if (!is_valid_ether_addr(hw->mac.san_addr)) { 8958 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", 8959 hw->mac.san_addr); 8960 ether_addr_copy(hw->mac.san_addr, hw->mac.addr); 8961 } 8962 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); 8963 #endif /* I40E_FCOE */ 8964 8965 pci_set_drvdata(pdev, pf); 8966 pci_save_state(pdev); 8967 #ifdef CONFIG_I40E_DCB 8968 err = i40e_init_pf_dcb(pf); 8969 if (err) { 8970 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); 8971 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 8972 /* Continue without DCB enabled */ 8973 } 8974 #endif /* CONFIG_I40E_DCB */ 8975 8976 /* set up periodic task facility */ 8977 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 8978 pf->service_timer_period = HZ; 8979 8980 INIT_WORK(&pf->service_task, i40e_service_task); 8981 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 8982 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 8983 pf->link_check_timeout = jiffies; 8984 8985 /* WoL defaults to disabled */ 8986 pf->wol_en = false; 8987 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 8988 8989 /* set up the main switch operations */ 8990 i40e_determine_queue_usage(pf); 8991 i40e_init_interrupt_scheme(pf); 8992 8993 /* The number of VSIs reported by the FW is the minimum guaranteed 8994 * to us; HW supports far more and we share the remaining pool with 8995 * the other PFs. We allocate space for more than the guarantee with 8996 * the understanding that we might not get them all later. 8997 */ 8998 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) 8999 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; 9000 else 9001 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; 9002 9003 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
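 * The table is a flat array of VSI pointers sized by num_alloc_vsi;
 * slots are handed out by i40e_vsi_mem_alloc() and indexed by
 * vsi->idx.
 */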
*/ 9004 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; 9005 pf->vsi = kzalloc(len, GFP_KERNEL); 9006 if (!pf->vsi) { 9007 err = -ENOMEM; 9008 goto err_switch_setup; 9009 } 9010 9011 err = i40e_setup_pf_switch(pf, false); 9012 if (err) { 9013 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 9014 goto err_vsis; 9015 } 9016 /* if FDIR VSI was set up, start it now */ 9017 for (i = 0; i < pf->num_alloc_vsi; i++) { 9018 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 9019 i40e_vsi_open(pf->vsi[i]); 9020 break; 9021 } 9022 } 9023 9024 /* The main driver is (mostly) up and happy. We need to set this state 9025 * before setting up the misc vector or we get a race and the vector 9026 * ends up disabled forever. 9027 */ 9028 clear_bit(__I40E_DOWN, &pf->state); 9029 9030 /* In case of MSIX we are going to setup the misc vector right here 9031 * to handle admin queue events etc. In case of legacy and MSI 9032 * the misc functionality and queue processing is combined in 9033 * the same vector and that gets setup at open. 9034 */ 9035 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 9036 err = i40e_setup_misc_vector(pf); 9037 if (err) { 9038 dev_info(&pdev->dev, 9039 "setup of misc vector failed: %d\n", err); 9040 goto err_vsis; 9041 } 9042 } 9043 9044 #ifdef CONFIG_PCI_IOV 9045 /* prep for VF support */ 9046 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 9047 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 9048 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 9049 u32 val; 9050 9051 /* disable link interrupts for VFs */ 9052 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); 9053 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 9054 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 9055 i40e_flush(hw); 9056 9057 if (pci_num_vf(pdev)) { 9058 dev_info(&pdev->dev, 9059 "Active VFs found, allocating resources.\n"); 9060 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); 9061 if (err) 9062 dev_info(&pdev->dev, 9063 "Error %d allocating resources for existing VFs\n", 9064 err); 9065 } 9066 } 9067 #endif /* CONFIG_PCI_IOV */ 9068 9069 pfs_found++; 9070 9071 i40e_dbg_pf_init(pf); 9072 9073 /* tell the firmware that we're starting */ 9074 i40e_send_version(pf); 9075 9076 /* since everything's happy, start the service_task timer */ 9077 mod_timer(&pf->service_timer, 9078 round_jiffies(jiffies + pf->service_timer_period)); 9079 9080 #ifdef I40E_FCOE 9081 /* create FCoE interface */ 9082 i40e_fcoe_vsi_setup(pf); 9083 9084 #endif 9085 /* Get the negotiated link width and speed from PCI config space */ 9086 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); 9087 9088 i40e_set_pci_config_data(hw, link_status); 9089 9090 dev_info(&pdev->dev, "PCI-Express: %s %s\n", 9091 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : 9092 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 9093 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : 9094 "Unknown"), 9095 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : 9096 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : 9097 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : 9098 hw->bus.width == i40e_bus_width_pcie_x1 ? 
"Width x1" : 9099 "Unknown")); 9100 9101 if (hw->bus.width < i40e_bus_width_pcie_x8 || 9102 hw->bus.speed < i40e_bus_speed_8000) { 9103 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 9104 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 9105 } 9106 9107 /* print a string summarizing features */ 9108 i40e_print_features(pf); 9109 9110 return 0; 9111 9112 /* Unwind what we've done if something failed in the setup */ 9113 err_vsis: 9114 set_bit(__I40E_DOWN, &pf->state); 9115 i40e_clear_interrupt_scheme(pf); 9116 kfree(pf->vsi); 9117 err_switch_setup: 9118 i40e_reset_interrupt_capability(pf); 9119 del_timer_sync(&pf->service_timer); 9120 err_mac_addr: 9121 err_configure_lan_hmc: 9122 (void)i40e_shutdown_lan_hmc(hw); 9123 err_init_lan_hmc: 9124 kfree(pf->qp_pile); 9125 kfree(pf->irq_pile); 9126 err_sw_init: 9127 err_adminq_setup: 9128 (void)i40e_shutdown_adminq(hw); 9129 err_pf_reset: 9130 iounmap(hw->hw_addr); 9131 err_ioremap: 9132 kfree(pf); 9133 err_pf_alloc: 9134 pci_disable_pcie_error_reporting(pdev); 9135 pci_release_selected_regions(pdev, 9136 pci_select_bars(pdev, IORESOURCE_MEM)); 9137 err_pci_reg: 9138 err_dma: 9139 pci_disable_device(pdev); 9140 return err; 9141 } 9142 9143 /** 9144 * i40e_remove - Device removal routine 9145 * @pdev: PCI device information struct 9146 * 9147 * i40e_remove is called by the PCI subsystem to alert the driver 9148 * that is should release a PCI device. This could be caused by a 9149 * Hot-Plug event, or because the driver is going to be removed from 9150 * memory. 9151 **/ 9152 static void i40e_remove(struct pci_dev *pdev) 9153 { 9154 struct i40e_pf *pf = pci_get_drvdata(pdev); 9155 i40e_status ret_code; 9156 int i; 9157 9158 i40e_dbg_pf_exit(pf); 9159 9160 i40e_ptp_stop(pf); 9161 9162 /* no more scheduling of any task */ 9163 set_bit(__I40E_DOWN, &pf->state); 9164 del_timer_sync(&pf->service_timer); 9165 cancel_work_sync(&pf->service_task); 9166 9167 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 9168 i40e_free_vfs(pf); 9169 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; 9170 } 9171 9172 i40e_fdir_teardown(pf); 9173 9174 /* If there is a switch structure or any orphans, remove them. 9175 * This will leave only the PF's VSI remaining. 9176 */ 9177 for (i = 0; i < I40E_MAX_VEB; i++) { 9178 if (!pf->veb[i]) 9179 continue; 9180 9181 if (pf->veb[i]->uplink_seid == pf->mac_seid || 9182 pf->veb[i]->uplink_seid == 0) 9183 i40e_switch_branch_release(pf->veb[i]); 9184 } 9185 9186 /* Now we can shutdown the PF's VSI, just before we kill 9187 * adminq and hmc. 
9188 */ 9189 if (pf->vsi[pf->lan_vsi]) 9190 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 9191 9192 i40e_stop_misc_vector(pf); 9193 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 9194 synchronize_irq(pf->msix_entries[0].vector); 9195 free_irq(pf->msix_entries[0].vector, pf); 9196 } 9197 9198 /* shutdown and destroy the HMC */ 9199 if (pf->hw.hmc.hmc_obj) { 9200 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 9201 if (ret_code) 9202 dev_warn(&pdev->dev, 9203 "Failed to destroy the HMC resources: %d\n", 9204 ret_code); 9205 } 9206 9207 /* shutdown the adminq */ 9208 ret_code = i40e_shutdown_adminq(&pf->hw); 9209 if (ret_code) 9210 dev_warn(&pdev->dev, 9211 "Failed to destroy the Admin Queue resources: %d\n", 9212 ret_code); 9213 9214 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 9215 i40e_clear_interrupt_scheme(pf); 9216 for (i = 0; i < pf->num_alloc_vsi; i++) { 9217 if (pf->vsi[i]) { 9218 i40e_vsi_clear_rings(pf->vsi[i]); 9219 i40e_vsi_clear(pf->vsi[i]); 9220 pf->vsi[i] = NULL; 9221 } 9222 } 9223 9224 for (i = 0; i < I40E_MAX_VEB; i++) { 9225 kfree(pf->veb[i]); 9226 pf->veb[i] = NULL; 9227 } 9228 9229 kfree(pf->qp_pile); 9230 kfree(pf->irq_pile); 9231 kfree(pf->vsi); 9232 9233 iounmap(pf->hw.hw_addr); 9234 kfree(pf); 9235 pci_release_selected_regions(pdev, 9236 pci_select_bars(pdev, IORESOURCE_MEM)); 9237 9238 pci_disable_pcie_error_reporting(pdev); 9239 pci_disable_device(pdev); 9240 } 9241 9242 /** 9243 * i40e_pci_error_detected - warning that something funky happened in PCI land 9244 * @pdev: PCI device information struct 9245 * 9246 * Called to warn that something happened and the error handling steps 9247 * are in progress. Allows the driver to quiesce things, be ready for 9248 * remediation. 9249 **/ 9250 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, 9251 enum pci_channel_state error) 9252 { 9253 struct i40e_pf *pf = pci_get_drvdata(pdev); 9254 9255 dev_info(&pdev->dev, "%s: error %d\n", __func__, error); 9256 9257 /* shutdown all operations */ 9258 if (!test_bit(__I40E_SUSPENDED, &pf->state)) { 9259 rtnl_lock(); 9260 i40e_prep_for_reset(pf); 9261 rtnl_unlock(); 9262 } 9263 9264 /* Request a slot reset */ 9265 return PCI_ERS_RESULT_NEED_RESET; 9266 } 9267 9268 /** 9269 * i40e_pci_error_slot_reset - a PCI slot reset just happened 9270 * @pdev: PCI device information struct 9271 * 9272 * Called to find if the driver can work with the device now that 9273 * the pci slot has been reset. If a basic connection seems good 9274 * (registers are readable and have sane content) then return a 9275 * happy little PCI_ERS_RESULT_xxx. 
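 *
 * The "sane content" check here is deliberately simple: read the
 * I40E_GLGEN_RTRIG reset-trigger register and treat any nonzero
 * value (a reset still pending) as grounds for returning
 * PCI_ERS_RESULT_DISCONNECT.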
9276 **/ 9277 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) 9278 { 9279 struct i40e_pf *pf = pci_get_drvdata(pdev); 9280 pci_ers_result_t result; 9281 int err; 9282 u32 reg; 9283 9284 dev_info(&pdev->dev, "%s\n", __func__); 9285 if (pci_enable_device_mem(pdev)) { 9286 dev_info(&pdev->dev, 9287 "Cannot re-enable PCI device after reset.\n"); 9288 result = PCI_ERS_RESULT_DISCONNECT; 9289 } else { 9290 pci_set_master(pdev); 9291 pci_restore_state(pdev); 9292 pci_save_state(pdev); 9293 pci_wake_from_d3(pdev, false); 9294 9295 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); 9296 if (reg == 0) 9297 result = PCI_ERS_RESULT_RECOVERED; 9298 else 9299 result = PCI_ERS_RESULT_DISCONNECT; 9300 } 9301 9302 err = pci_cleanup_aer_uncorrect_error_status(pdev); 9303 if (err) { 9304 dev_info(&pdev->dev, 9305 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", 9306 err); 9307 /* non-fatal, continue */ 9308 } 9309 9310 return result; 9311 } 9312 9313 /** 9314 * i40e_pci_error_resume - restart operations after PCI error recovery 9315 * @pdev: PCI device information struct 9316 * 9317 * Called to allow the driver to bring things back up after PCI error 9318 * and/or reset recovery has finished. 9319 **/ 9320 static void i40e_pci_error_resume(struct pci_dev *pdev) 9321 { 9322 struct i40e_pf *pf = pci_get_drvdata(pdev); 9323 9324 dev_info(&pdev->dev, "%s\n", __func__); 9325 if (test_bit(__I40E_SUSPENDED, &pf->state)) 9326 return; 9327 9328 rtnl_lock(); 9329 i40e_handle_reset_warning(pf); 9330 rtnl_unlock(); 9331 } 9332 9333 /** 9334 * i40e_shutdown - PCI callback for shutting down 9335 * @pdev: PCI device information struct 9336 **/ 9337 static void i40e_shutdown(struct pci_dev *pdev) 9338 { 9339 struct i40e_pf *pf = pci_get_drvdata(pdev); 9340 struct i40e_hw *hw = &pf->hw; 9341 9342 set_bit(__I40E_SUSPENDED, &pf->state); 9343 set_bit(__I40E_DOWN, &pf->state); 9344 rtnl_lock(); 9345 i40e_prep_for_reset(pf); 9346 rtnl_unlock(); 9347 9348 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 9349 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 9350 9351 if (system_state == SYSTEM_POWER_OFF) { 9352 pci_wake_from_d3(pdev, pf->wol_en); 9353 pci_set_power_state(pdev, PCI_D3hot); 9354 } 9355 } 9356 9357 #ifdef CONFIG_PM 9358 /** 9359 * i40e_suspend - PCI callback for moving to D3 9360 * @pdev: PCI device information struct 9361 **/ 9362 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) 9363 { 9364 struct i40e_pf *pf = pci_get_drvdata(pdev); 9365 struct i40e_hw *hw = &pf->hw; 9366 9367 set_bit(__I40E_SUSPENDED, &pf->state); 9368 set_bit(__I40E_DOWN, &pf->state); 9369 rtnl_lock(); 9370 i40e_prep_for_reset(pf); 9371 rtnl_unlock(); 9372 9373 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 9374 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 9375 9376 pci_wake_from_d3(pdev, pf->wol_en); 9377 pci_set_power_state(pdev, PCI_D3hot); 9378 9379 return 0; 9380 } 9381 9382 /** 9383 * i40e_resume - PCI callback for waking up from D3 9384 * @pdev: PCI device information struct 9385 **/ 9386 static int i40e_resume(struct pci_dev *pdev) 9387 { 9388 struct i40e_pf *pf = pci_get_drvdata(pdev); 9389 u32 err; 9390 9391 pci_set_power_state(pdev, PCI_D0); 9392 pci_restore_state(pdev); 9393 /* pci_restore_state() clears dev->state_saved, so 9394 * call pci_save_state() again to restore it.
9395 */ 9396 pci_save_state(pdev); 9397 9398 err = pci_enable_device_mem(pdev); 9399 if (err) { 9400 dev_err(&pdev->dev, 9401 "%s: Cannot enable PCI device from suspend\n", 9402 __func__); 9403 return err; 9404 } 9405 pci_set_master(pdev); 9406 9407 /* no wakeup events while running */ 9408 pci_wake_from_d3(pdev, false); 9409 9410 /* handling the reset will rebuild the device state */ 9411 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { 9412 clear_bit(__I40E_DOWN, &pf->state); 9413 rtnl_lock(); 9414 i40e_reset_and_rebuild(pf, false); 9415 rtnl_unlock(); 9416 } 9417 9418 return 0; 9419 } 9420 9421 #endif 9422 static const struct pci_error_handlers i40e_err_handler = { 9423 .error_detected = i40e_pci_error_detected, 9424 .slot_reset = i40e_pci_error_slot_reset, 9425 .resume = i40e_pci_error_resume, 9426 }; 9427 9428 static struct pci_driver i40e_driver = { 9429 .name = i40e_driver_name, 9430 .id_table = i40e_pci_tbl, 9431 .probe = i40e_probe, 9432 .remove = i40e_remove, 9433 #ifdef CONFIG_PM 9434 .suspend = i40e_suspend, 9435 .resume = i40e_resume, 9436 #endif 9437 .shutdown = i40e_shutdown, 9438 .err_handler = &i40e_err_handler, 9439 .sriov_configure = i40e_pci_sriov_configure, 9440 }; 9441 9442 /** 9443 * i40e_init_module - Driver registration routine 9444 * 9445 * i40e_init_module is the first routine called when the driver is 9446 * loaded. All it does is register with the PCI subsystem. 9447 **/ 9448 static int __init i40e_init_module(void) 9449 { 9450 pr_info("%s: %s - version %s\n", i40e_driver_name, 9451 i40e_driver_string, i40e_driver_version_str); 9452 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); 9453 i40e_dbg_init(); 9454 return pci_register_driver(&i40e_driver); 9455 } 9456 module_init(i40e_init_module); 9457 9458 /** 9459 * i40e_exit_module - Driver exit cleanup routine 9460 * 9461 * i40e_exit_module is called just before the driver is removed 9462 * from memory. 9463 **/ 9464 static void __exit i40e_exit_module(void) 9465 { 9466 pci_unregister_driver(&i40e_driver); 9467 i40e_dbg_exit(); 9468 } 9469 module_exit(i40e_exit_module); 9470
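/* Module lifecycle, for orientation: insmod runs i40e_init_module(),
 * which registers i40e_driver with the PCI core; the core then calls
 * i40e_probe() once for each device that matches i40e_pci_tbl. rmmod
 * reverses this via i40e_exit_module(), which unregisters the driver
 * and thereby triggers i40e_remove() for every bound device.
 */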