/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 37
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
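/* A hedged usage sketch (assuming the module is loaded by hand rather than
 * autoloaded): the debug level above can be given at module load time, e.g.
 *
 *	modprobe i40e debug=16
 *
 * The default of -1 leaves the driver's default verbosity in place.
 */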
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
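/* The "lump" helpers below hand out runs of generic resources (queues,
 * MSI-X vectors, and the like) from a flat tracking array.  Allocation is
 * a simple first-fit linear scan that starts at search_hint and tags each
 * granted entry with the owner id plus I40E_PILE_VALID_BIT; freeing clears
 * the entries and moves the hint back so the space can be found again.
 * For example, asking for 4 entries returns the base index of the first
 * free run of 4 consecutive entries.
 */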
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}
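/* Note on the Tx hang recovery below: each successive timeout inside a
 * 20 second window escalates the response, from a VSI reinit to a PF
 * reset, a core reset and finally a global reset; if the level counter
 * runs past the known levels the device is simply taken down.  Once
 * things have been quiet for 20 seconds the counter drops back to 1.
 */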
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
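/* The per-ring byte/packet counters read below are updated from the hot
 * path and fetched here via the u64_stats seqcount: the reader loops on
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() until it gets a
 * consistent snapshot on 32-bit machines.  Note also that each Rx ring is
 * allocated in the same block directly behind its Tx ring, which is what
 * makes the "rx_ring = &tx_ring[1]" step below valid.
 */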
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: statistics structure to fill out
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}
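/* A quick worked example of the rollover handling in i40e_stat_update48()
 * below: the hardware counter is only 48 bits wide, so if the saved offset
 * is 0xFFFF00000000 and the raw register now reads 0x000000000100, the
 * counter has wrapped, and the reported value becomes
 * (0x000000000100 + 2^48) - 0xFFFF00000000, masked back down to 48 bits.
 */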
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs; /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
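/* Note on the two XOFF helpers below: a burst of received PAUSE frames
 * legitimately stalls the Tx queues, which would otherwise look exactly
 * like a Tx hang.  Whenever new XOFF frames have been counted, the
 * __I40E_HANG_CHECK_ARMED bit is cleared on the affected Tx rings so the
 * hang detection logic does not fire a false positive.
 */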
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];

		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns; /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
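/* i40e_update_pf_stats() below reads the per-port GLPRT_* registers
 * (indexed by hw->port), as opposed to the per-VSI GLV_* registers used
 * in i40e_update_eth_stats(); both go through the same offset/rollover
 * helpers above.
 */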
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only a vlan of -1 on every filter means the VSI is not in
	 * vlan mode, so we have to walk the whole list to be sure.
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}
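/* The MAC/VLAN filter list used below is reference counted per consumer:
 * a filter can be held by the netdev, by a VF, or by the driver itself,
 * and the is_vf/is_netdev flags plus f->counter record who still needs
 * it.  A filter is only pushed to (or removed from) the firmware by
 * i40e_sync_vsi_filters() once "changed" is set, so adds and deletes
 * here are cheap list operations.
 */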
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status aq_ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (aq_ret)
		return -ENOENT;

	return 0;
}
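/* A minimal usage sketch for the filter helpers below (hypothetical
 * values; a vlan of 0 means "untagged only", I40E_VLAN_ANY matches any
 * tag).  Adds and deletes only mark the list; the change is pushed to
 * firmware by a later i40e_sync_vsi_filters() call:
 *
 *	struct i40e_mac_filter *f;
 *
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	i40e_del_filter(vsi, mac, I40E_VLAN_ANY, false, true);
 *	i40e_sync_vsi_filters(vsi);
 */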
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
	}

	i40e_sync_vsi_filters(vsi);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}
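/* The qmap entries built below pack two fields per traffic class: the
 * first queue of the TC and the queue count encoded as a power of two.
 * For example, a TC with 3 queue pairs at offset 8 rounds the count up
 * to the next power of two (pow = 2, i.e. 4 queues) and stores
 * (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).
 */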
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSIx
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
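/* i40e_sync_vsi_filters() below works in two passes over the VSI filter
 * list: first every filter whose reference count dropped to zero is
 * batched into a delete array, then every still-referenced filter marked
 * "changed" is batched into an add array.  Each array is flushed to the
 * admin queue whenever it fills the ASQ buffer.  If the hardware runs out
 * of filter space (ENOSPC) the VSI is forced into promiscuous mode rather
 * than silently dropping addresses.
 */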
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret &&
				    pf->hw.aq.asq_last_status !=
							      I40E_AQ_RC_ENOENT)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			num_del = 0;

			if (aq_ret &&
			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && aq_ret &&
		    pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}
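/* For the MTU check below remember that max_frame counts the whole wire
 * frame: for example, an MTU of 9000 gives 9000 + 14 (Ethernet header)
 * + 4 (FCS) + 4 (VLAN tag) = 9022 bytes, which must still fit within
 * I40E_MAX_RXBUFFER.
 */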
return -EINVAL; 1921 1922 netdev_info(netdev, "changing MTU from %d to %d\n", 1923 netdev->mtu, new_mtu); 1924 netdev->mtu = new_mtu; 1925 if (netif_running(netdev)) 1926 i40e_vsi_reinit_locked(vsi); 1927 1928 return 0; 1929 } 1930 1931 /** 1932 * i40e_ioctl - Access the hwtstamp interface 1933 * @netdev: network interface device structure 1934 * @ifr: interface request data 1935 * @cmd: ioctl command 1936 **/ 1937 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1938 { 1939 struct i40e_netdev_priv *np = netdev_priv(netdev); 1940 struct i40e_pf *pf = np->vsi->back; 1941 1942 switch (cmd) { 1943 case SIOCGHWTSTAMP: 1944 return i40e_ptp_get_ts_config(pf, ifr); 1945 case SIOCSHWTSTAMP: 1946 return i40e_ptp_set_ts_config(pf, ifr); 1947 default: 1948 return -EOPNOTSUPP; 1949 } 1950 } 1951 1952 /** 1953 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 1954 * @vsi: the vsi being adjusted 1955 **/ 1956 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 1957 { 1958 struct i40e_vsi_context ctxt; 1959 i40e_status ret; 1960 1961 if ((vsi->info.valid_sections & 1962 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 1963 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 1964 return; /* already enabled */ 1965 1966 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1967 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 1968 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 1969 1970 ctxt.seid = vsi->seid; 1971 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1972 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1973 if (ret) { 1974 dev_info(&vsi->back->pdev->dev, 1975 "%s: update vsi failed, aq_err=%d\n", 1976 __func__, vsi->back->hw.aq.asq_last_status); 1977 } 1978 } 1979 1980 /** 1981 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI 1982 * @vsi: the vsi being adjusted 1983 **/ 1984 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) 1985 { 1986 struct i40e_vsi_context ctxt; 1987 i40e_status ret; 1988 1989 if ((vsi->info.valid_sections & 1990 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 1991 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == 1992 I40E_AQ_VSI_PVLAN_EMOD_MASK)) 1993 return; /* already disabled */ 1994 1995 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1996 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 1997 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 1998 1999 ctxt.seid = vsi->seid; 2000 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 2001 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2002 if (ret) { 2003 dev_info(&vsi->back->pdev->dev, 2004 "%s: update vsi failed, aq_err=%d\n", 2005 __func__, vsi->back->hw.aq.asq_last_status); 2006 } 2007 } 2008 2009 /** 2010 * i40e_vlan_rx_register - Setup or shutdown vlan offload 2011 * @netdev: network interface to be adjusted 2012 * @features: netdev features to test if VLAN offload is enabled or not 2013 **/ 2014 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) 2015 { 2016 struct i40e_netdev_priv *np = netdev_priv(netdev); 2017 struct i40e_vsi *vsi = np->vsi; 2018 2019 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2020 i40e_vlan_stripping_enable(vsi); 2021 else 2022 i40e_vlan_stripping_disable(vsi); 2023 } 2024 2025 /** 2026 * i40e_vsi_add_vlan - Add vsi membership for given vlan 2027 * @vsi: the vsi being configured 2028 * @vid: vlan id to be added (0 = untagged only , -1 = any) 2029 **/ 2030 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) 2031 { 2032 struct i40e_mac_filter *f, 
*add_f;
2033 bool is_netdev, is_vf;
2034
2035 is_vf = (vsi->type == I40E_VSI_SRIOV);
2036 is_netdev = !!(vsi->netdev);
2037
2038 if (is_netdev) {
2039 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2040 is_vf, is_netdev);
2041 if (!add_f) {
2042 dev_info(&vsi->back->pdev->dev,
2043 "Could not add vlan filter %d for %pM\n",
2044 vid, vsi->netdev->dev_addr);
2045 return -ENOMEM;
2046 }
2047 }
2048
2049 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2050 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2051 if (!add_f) {
2052 dev_info(&vsi->back->pdev->dev,
2053 "Could not add vlan filter %d for %pM\n",
2054 vid, f->macaddr);
2055 return -ENOMEM;
2056 }
2057 }
2058
2059 /* When adding a VLAN tag, check whether it is the first real tag
2060 * (i.e. an I40E_VLAN_ANY (-1) filter still exists) and if so replace
2061 * the -1 filter with a VLAN 0 one, so that we now accept untagged and
2062 * the specified tagged traffic (and no longer any tagged and untagged)
2063 */
2064 if (vid > 0) {
2065 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2066 I40E_VLAN_ANY,
2067 is_vf, is_netdev)) {
2068 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2069 I40E_VLAN_ANY, is_vf, is_netdev);
2070 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2071 is_vf, is_netdev);
2072 if (!add_f) {
2073 dev_info(&vsi->back->pdev->dev,
2074 "Could not add filter 0 for %pM\n",
2075 vsi->netdev->dev_addr);
2076 return -ENOMEM;
2077 }
2078 }
2079 }
2080
2081 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2082 if (vid > 0 && !vsi->info.pvid) {
2083 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2084 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2085 is_vf, is_netdev)) {
2086 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2087 is_vf, is_netdev);
2088 add_f = i40e_add_filter(vsi, f->macaddr,
2089 0, is_vf, is_netdev);
2090 if (!add_f) {
2091 dev_info(&vsi->back->pdev->dev,
2092 "Could not add filter 0 for %pM\n",
2093 f->macaddr);
2094 return -ENOMEM;
2095 }
2096 }
2097 }
2098 }
2099
2100 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2101 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2102 return 0;
2103
2104 return i40e_sync_vsi_filters(vsi);
2105 }
2106
2107 /**
2108 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2109 * @vsi: the vsi being configured
2110 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2111 *
2112 * Return: 0 on success or negative otherwise
2113 **/
2114 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2115 {
2116 struct net_device *netdev = vsi->netdev;
2117 struct i40e_mac_filter *f, *add_f;
2118 bool is_vf, is_netdev;
2119 int filter_count = 0;
2120
2121 is_vf = (vsi->type == I40E_VSI_SRIOV);
2122 is_netdev = !!(netdev);
2123
2124 if (is_netdev)
2125 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2126
2127 list_for_each_entry(f, &vsi->mac_filter_list, list)
2128 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2129
2130 /* go through all the filters for this VSI and if there is only
2131 * vid == 0 it means there are no other filters, so vid 0 must
2132 * be replaced with -1.
This signifies that we should from now 2133 * on accept any traffic (with any tag present, or untagged) 2134 */ 2135 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2136 if (is_netdev) { 2137 if (f->vlan && 2138 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2139 filter_count++; 2140 } 2141 2142 if (f->vlan) 2143 filter_count++; 2144 } 2145 2146 if (!filter_count && is_netdev) { 2147 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 2148 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 2149 is_vf, is_netdev); 2150 if (!f) { 2151 dev_info(&vsi->back->pdev->dev, 2152 "Could not add filter %d for %pM\n", 2153 I40E_VLAN_ANY, netdev->dev_addr); 2154 return -ENOMEM; 2155 } 2156 } 2157 2158 if (!filter_count) { 2159 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2160 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 2161 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2162 is_vf, is_netdev); 2163 if (!add_f) { 2164 dev_info(&vsi->back->pdev->dev, 2165 "Could not add filter %d for %pM\n", 2166 I40E_VLAN_ANY, f->macaddr); 2167 return -ENOMEM; 2168 } 2169 } 2170 } 2171 2172 if (test_bit(__I40E_DOWN, &vsi->back->state) || 2173 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 2174 return 0; 2175 2176 return i40e_sync_vsi_filters(vsi); 2177 } 2178 2179 /** 2180 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2181 * @netdev: network interface to be adjusted 2182 * @vid: vlan id to be added 2183 * 2184 * net_device_ops implementation for adding vlan ids 2185 **/ 2186 #ifdef I40E_FCOE 2187 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2188 __always_unused __be16 proto, u16 vid) 2189 #else 2190 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2191 __always_unused __be16 proto, u16 vid) 2192 #endif 2193 { 2194 struct i40e_netdev_priv *np = netdev_priv(netdev); 2195 struct i40e_vsi *vsi = np->vsi; 2196 int ret = 0; 2197 2198 if (vid > 4095) 2199 return -EINVAL; 2200 2201 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 2202 2203 /* If the network stack called us with vid = 0 then 2204 * it is asking to receive priority tagged packets with 2205 * vlan id 0. Our HW receives them by default when configured 2206 * to receive untagged packets so there is no need to add an 2207 * extra filter for vlan 0 tagged packets. 
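*
* Illustrative call flow (a sketch, not additional driver logic): when
* the stack creates a VLAN device, e.g. via
*	ip link add link eth0 name eth0.100 type vlan id 100
* this entry point runs roughly as
*	i40e_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), 100)
*	    -> i40e_vsi_add_vlan(vsi, 100)
*	    -> set_bit(100, vsi->active_vlans)
* and i40e_restore_vlan() below replays the bit after an interface reset.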
2208 */ 2209 if (vid) 2210 ret = i40e_vsi_add_vlan(vsi, vid); 2211 2212 if (!ret && (vid < VLAN_N_VID)) 2213 set_bit(vid, vsi->active_vlans); 2214 2215 return ret; 2216 } 2217 2218 /** 2219 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 2220 * @netdev: network interface to be adjusted 2221 * @vid: vlan id to be removed 2222 * 2223 * net_device_ops implementation for removing vlan ids 2224 **/ 2225 #ifdef I40E_FCOE 2226 int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2227 __always_unused __be16 proto, u16 vid) 2228 #else 2229 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2230 __always_unused __be16 proto, u16 vid) 2231 #endif 2232 { 2233 struct i40e_netdev_priv *np = netdev_priv(netdev); 2234 struct i40e_vsi *vsi = np->vsi; 2235 2236 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); 2237 2238 /* return code is ignored as there is nothing a user 2239 * can do about failure to remove and a log message was 2240 * already printed from the other function 2241 */ 2242 i40e_vsi_kill_vlan(vsi, vid); 2243 2244 clear_bit(vid, vsi->active_vlans); 2245 2246 return 0; 2247 } 2248 2249 /** 2250 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 2251 * @vsi: the vsi being brought back up 2252 **/ 2253 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2254 { 2255 u16 vid; 2256 2257 if (!vsi->netdev) 2258 return; 2259 2260 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2261 2262 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 2263 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2264 vid); 2265 } 2266 2267 /** 2268 * i40e_vsi_add_pvid - Add pvid for the VSI 2269 * @vsi: the vsi being adjusted 2270 * @vid: the vlan id to set as a PVID 2271 **/ 2272 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2273 { 2274 struct i40e_vsi_context ctxt; 2275 i40e_status aq_ret; 2276 2277 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2278 vsi->info.pvid = cpu_to_le16(vid); 2279 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2280 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2281 I40E_AQ_VSI_PVLAN_EMOD_STR; 2282 2283 ctxt.seid = vsi->seid; 2284 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 2285 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2286 if (aq_ret) { 2287 dev_info(&vsi->back->pdev->dev, 2288 "%s: update vsi failed, aq_err=%d\n", 2289 __func__, vsi->back->hw.aq.asq_last_status); 2290 return -ENOENT; 2291 } 2292 2293 return 0; 2294 } 2295 2296 /** 2297 * i40e_vsi_remove_pvid - Remove the pvid from the VSI 2298 * @vsi: the vsi being adjusted 2299 * 2300 * Just use the vlan_rx_register() service to put it back to normal 2301 **/ 2302 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2303 { 2304 i40e_vlan_stripping_disable(vsi); 2305 2306 vsi->info.pvid = 0; 2307 } 2308 2309 /** 2310 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources 2311 * @vsi: ptr to the VSI 2312 * 2313 * If this function returns with an error, then it's possible one or 2314 * more of the rings is populated (while the rest are not). It is the 2315 * callers duty to clean those orphaned rings. 
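*
* Sketch of the expected caller pattern (the unwind itself lives in the
* caller, e.g. i40e_vsi_open()):
*
*	err = i40e_vsi_setup_tx_resources(vsi);
*	if (!err)
*		err = i40e_vsi_setup_rx_resources(vsi);
*	if (err)
*		i40e_vsi_free_tx_resources(vsi);
*
* The free helper below only touches rings that have a desc area, so it
* is safe to call on a partially populated VSI.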
2316 *
2317 * Return 0 on success, negative on failure
2318 **/
2319 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2320 {
2321 int i, err = 0;
2322
2323 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2324 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2325
2326 return err;
2327 }
2328
2329 /**
2330 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2331 * @vsi: ptr to the VSI
2332 *
2333 * Free VSI's transmit software resources
2334 **/
2335 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2336 {
2337 int i;
2338
2339 if (!vsi->tx_rings)
2340 return;
2341
2342 for (i = 0; i < vsi->num_queue_pairs; i++)
2343 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2344 i40e_free_tx_resources(vsi->tx_rings[i]);
2345 }
2346
2347 /**
2348 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2349 * @vsi: ptr to the VSI
2350 *
2351 * If this function returns with an error, then it's possible one or
2352 * more of the rings is populated (while the rest are not). It is the
2353 * caller's duty to clean those orphaned rings.
2354 *
2355 * Return 0 on success, negative on failure
2356 **/
2357 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2358 {
2359 int i, err = 0;
2360
2361 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2362 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2363 #ifdef I40E_FCOE
2364 i40e_fcoe_setup_ddp_resources(vsi);
2365 #endif
2366 return err;
2367 }
2368
2369 /**
2370 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2371 * @vsi: ptr to the VSI
2372 *
2373 * Free all receive software resources
2374 **/
2375 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2376 {
2377 int i;
2378
2379 if (!vsi->rx_rings)
2380 return;
2381
2382 for (i = 0; i < vsi->num_queue_pairs; i++)
2383 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2384 i40e_free_rx_resources(vsi->rx_rings[i]);
2385 #ifdef I40E_FCOE
2386 i40e_fcoe_free_ddp_resources(vsi);
2387 #endif
2388 }
2389
2390 /**
2391 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2392 * @ring: The Tx ring to configure
2393 *
2394 * This enables/disables XPS for a given Tx descriptor ring
2395 * based on the TCs enabled for the VSI that ring belongs to.
2396 **/
2397 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2398 {
2399 struct i40e_vsi *vsi = ring->vsi;
2400 cpumask_var_t mask;
2401
2402 if (!ring->q_vector || !ring->netdev)
2403 return;
2404
2405 /* In single TC mode, enable XPS */
2406 if (vsi->tc_config.numtc <= 1) {
2407 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2408 netif_set_xps_queue(ring->netdev,
2409 &ring->q_vector->affinity_mask,
2410 ring->queue_index);
2411 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2412 /* Disable XPS to allow selection based on TC */
2413 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2414 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2415 free_cpumask_var(mask);
2416 }
2417 }
2418
2419 /**
2420 * i40e_configure_tx_ring - Configure a transmit ring context and related registers
2421 * @ring: The Tx ring to configure
2422 *
2423 * Configure the Tx descriptor ring in the HMC context.
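*
* Derivation sketch for two of the context fields set below: the
* hardware takes the ring base address in 128 byte units, and the head
* write-back slot is placed immediately after the descriptor area:
*
*	tx_ctx.base	    = ring->dma / 128;
*	tx_ctx.head_wb_addr = ring->dma +
*			      ring->count * sizeof(struct i40e_tx_desc);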
2424 **/
2425 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2426 {
2427 struct i40e_vsi *vsi = ring->vsi;
2428 u16 pf_q = vsi->base_queue + ring->queue_index;
2429 struct i40e_hw *hw = &vsi->back->hw;
2430 struct i40e_hmc_obj_txq tx_ctx;
2431 i40e_status err = 0;
2432 u32 qtx_ctl = 0;
2433
2434 /* some ATR related tx ring init */
2435 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2436 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2437 ring->atr_count = 0;
2438 } else {
2439 ring->atr_sample_rate = 0;
2440 }
2441
2442 /* configure XPS */
2443 i40e_config_xps_tx_ring(ring);
2444
2445 /* clear the context structure first */
2446 memset(&tx_ctx, 0, sizeof(tx_ctx));
2447
2448 tx_ctx.new_context = 1;
2449 tx_ctx.base = (ring->dma / 128);
2450 tx_ctx.qlen = ring->count;
2451 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2452 I40E_FLAG_FD_ATR_ENABLED));
2453 #ifdef I40E_FCOE
2454 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2455 #endif
2456 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2457 /* FDIR VSI tx ring can still use RS bit and writebacks */
2458 if (vsi->type != I40E_VSI_FDIR)
2459 tx_ctx.head_wb_ena = 1;
2460 tx_ctx.head_wb_addr = ring->dma +
2461 (ring->count * sizeof(struct i40e_tx_desc));
2462
2463 /* As part of VSI creation/update, FW allocates certain
2464 * Tx arbitration queue sets for each TC enabled for
2465 * the VSI. The FW returns the handles to these queue
2466 * sets as part of the response buffer to Add VSI,
2467 * Update VSI, etc. AQ commands. It is expected that
2468 * these queue set handles be associated with the Tx
2469 * queues by the driver as part of the TX queue context
2470 * initialization. This has to be done regardless of
2471 * DCB as by default everything is mapped to TC0.
2472 */
2473 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2474 tx_ctx.rdylist_act = 0;
2475
2476 /* clear the context in the HMC */
2477 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2478 if (err) {
2479 dev_info(&vsi->back->pdev->dev,
2480 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2481 ring->queue_index, pf_q, err);
2482 return -ENOMEM;
2483 }
2484
2485 /* set the context in the HMC */
2486 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2487 if (err) {
2488 dev_info(&vsi->back->pdev->dev,
2489 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2490 ring->queue_index, pf_q, err);
2491 return -ENOMEM;
2492 }
2493
2494 /* Now associate this queue with this PCI function */
2495 if (vsi->type == I40E_VSI_VMDQ2) {
2496 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2497 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2498 I40E_QTX_CTL_VFVM_INDX_MASK;
2499 } else {
2500 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2501 }
2502
2503 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2504 I40E_QTX_CTL_PF_INDX_MASK);
2505 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2506 i40e_flush(hw);
2507
2508 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2509
2510 /* cache the tail offset for easier writes later */
2511 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2512
2513 return 0;
2514 }
2515
2516 /**
2517 * i40e_configure_rx_ring - Configure a receive ring context
2518 * @ring: The Rx ring to configure
2519 *
2520 * Configure the Rx descriptor ring in the HMC context.
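*
* Worked example for the rxmax clamp below (assumed numbers): with a
* 9000 byte MTU, max_frame is 9000 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN
* = 9022; with 2048 byte buffers and a hardware chain length of, say, 5:
*
*	rxmax = min(9022, 5 * 2048) = 9022
*
* i.e. one frame may chain across several Rx buffers, up to chain_len.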
2521 **/ 2522 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2523 { 2524 struct i40e_vsi *vsi = ring->vsi; 2525 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2526 u16 pf_q = vsi->base_queue + ring->queue_index; 2527 struct i40e_hw *hw = &vsi->back->hw; 2528 struct i40e_hmc_obj_rxq rx_ctx; 2529 i40e_status err = 0; 2530 2531 ring->state = 0; 2532 2533 /* clear the context structure first */ 2534 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2535 2536 ring->rx_buf_len = vsi->rx_buf_len; 2537 ring->rx_hdr_len = vsi->rx_hdr_len; 2538 2539 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2540 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2541 2542 rx_ctx.base = (ring->dma / 128); 2543 rx_ctx.qlen = ring->count; 2544 2545 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2546 set_ring_16byte_desc_enabled(ring); 2547 rx_ctx.dsize = 0; 2548 } else { 2549 rx_ctx.dsize = 1; 2550 } 2551 2552 rx_ctx.dtype = vsi->dtype; 2553 if (vsi->dtype) { 2554 set_ring_ps_enabled(ring); 2555 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2556 I40E_RX_SPLIT_IP | 2557 I40E_RX_SPLIT_TCP_UDP | 2558 I40E_RX_SPLIT_SCTP; 2559 } else { 2560 rx_ctx.hsplit_0 = 0; 2561 } 2562 2563 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2564 (chain_len * ring->rx_buf_len)); 2565 if (hw->revision_id == 0) 2566 rx_ctx.lrxqthresh = 0; 2567 else 2568 rx_ctx.lrxqthresh = 2; 2569 rx_ctx.crcstrip = 1; 2570 rx_ctx.l2tsel = 1; 2571 rx_ctx.showiv = 1; 2572 #ifdef I40E_FCOE 2573 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2574 #endif 2575 /* set the prefena field to 1 because the manual says to */ 2576 rx_ctx.prefena = 1; 2577 2578 /* clear the context in the HMC */ 2579 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2580 if (err) { 2581 dev_info(&vsi->back->pdev->dev, 2582 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2583 ring->queue_index, pf_q, err); 2584 return -ENOMEM; 2585 } 2586 2587 /* set the context in the HMC */ 2588 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2589 if (err) { 2590 dev_info(&vsi->back->pdev->dev, 2591 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2592 ring->queue_index, pf_q, err); 2593 return -ENOMEM; 2594 } 2595 2596 /* cache tail for quicker writes, and clear the reg before use */ 2597 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2598 writel(0, ring->tail); 2599 2600 if (ring_is_ps_enabled(ring)) { 2601 i40e_alloc_rx_headers(ring); 2602 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); 2603 } else { 2604 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); 2605 } 2606 2607 return 0; 2608 } 2609 2610 /** 2611 * i40e_vsi_configure_tx - Configure the VSI for Tx 2612 * @vsi: VSI structure describing this set of rings and resources 2613 * 2614 * Configure the Tx VSI for operation. 2615 **/ 2616 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2617 { 2618 int err = 0; 2619 u16 i; 2620 2621 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2622 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2623 2624 return err; 2625 } 2626 2627 /** 2628 * i40e_vsi_configure_rx - Configure the VSI for Rx 2629 * @vsi: the VSI being configured 2630 * 2631 * Configure the Rx VSI for operation. 
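*
* The ALIGN() rounding below matches the Rx queue context format, which
* stores the header size in 64 byte and the data size in 128 byte units
* (the HBUFF/DBUFF shifts). Worked example for a 1504 byte MTU in
* single-buffer mode:
*
*	rx_buf_len = ALIGN(1504 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 128)
*		   = ALIGN(1526, 128) = 1536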
2632 **/ 2633 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2634 { 2635 int err = 0; 2636 u16 i; 2637 2638 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2639 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2640 + ETH_FCS_LEN + VLAN_HLEN; 2641 else 2642 vsi->max_frame = I40E_RXBUFFER_2048; 2643 2644 /* figure out correct receive buffer length */ 2645 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2646 I40E_FLAG_RX_PS_ENABLED)) { 2647 case I40E_FLAG_RX_1BUF_ENABLED: 2648 vsi->rx_hdr_len = 0; 2649 vsi->rx_buf_len = vsi->max_frame; 2650 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2651 break; 2652 case I40E_FLAG_RX_PS_ENABLED: 2653 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2654 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2655 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2656 break; 2657 default: 2658 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2659 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2660 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2661 break; 2662 } 2663 2664 #ifdef I40E_FCOE 2665 /* setup rx buffer for FCoE */ 2666 if ((vsi->type == I40E_VSI_FCOE) && 2667 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 2668 vsi->rx_hdr_len = 0; 2669 vsi->rx_buf_len = I40E_RXBUFFER_3072; 2670 vsi->max_frame = I40E_RXBUFFER_3072; 2671 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2672 } 2673 2674 #endif /* I40E_FCOE */ 2675 /* round up for the chip's needs */ 2676 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2677 (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); 2678 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2679 (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); 2680 2681 /* set up individual rings */ 2682 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2683 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2684 2685 return err; 2686 } 2687 2688 /** 2689 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2690 * @vsi: ptr to the VSI 2691 **/ 2692 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2693 { 2694 struct i40e_ring *tx_ring, *rx_ring; 2695 u16 qoffset, qcount; 2696 int i, n; 2697 2698 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 2699 /* Reset the TC information */ 2700 for (i = 0; i < vsi->num_queue_pairs; i++) { 2701 rx_ring = vsi->rx_rings[i]; 2702 tx_ring = vsi->tx_rings[i]; 2703 rx_ring->dcb_tc = 0; 2704 tx_ring->dcb_tc = 0; 2705 } 2706 } 2707 2708 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2709 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2710 continue; 2711 2712 qoffset = vsi->tc_config.tc_info[n].qoffset; 2713 qcount = vsi->tc_config.tc_info[n].qcount; 2714 for (i = qoffset; i < (qoffset + qcount); i++) { 2715 rx_ring = vsi->rx_rings[i]; 2716 tx_ring = vsi->tx_rings[i]; 2717 rx_ring->dcb_tc = n; 2718 tx_ring->dcb_tc = n; 2719 } 2720 } 2721 } 2722 2723 /** 2724 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 2725 * @vsi: ptr to the VSI 2726 **/ 2727 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 2728 { 2729 if (vsi->netdev) 2730 i40e_set_rx_mode(vsi->netdev); 2731 } 2732 2733 /** 2734 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 2735 * @vsi: Pointer to the targeted VSI 2736 * 2737 * This function replays the hlist on the hw where all the SB Flow Director 2738 * filters were saved. 
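*
* Illustrative rule (a sketch with abridged fields): a filter added with
*	ethtool -N eth0 flow-type tcp4 dst-port 80 action 4
* is saved as an i40e_fdir_filter with dst_port 80 steering to queue 4,
* and comes back here as i40e_add_del_fdir(vsi, filter, true).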
2739 **/
2740 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2741 {
2742 struct i40e_fdir_filter *filter;
2743 struct i40e_pf *pf = vsi->back;
2744 struct hlist_node *node;
2745
2746 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2747 return;
2748
2749 hlist_for_each_entry_safe(filter, node,
2750 &pf->fdir_filter_list, fdir_node) {
2751 i40e_add_del_fdir(vsi, filter, true);
2752 }
2753 }
2754
2755 /**
2756 * i40e_vsi_configure - Set up the VSI for action
2757 * @vsi: the VSI being configured
2758 **/
2759 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2760 {
2761 int err;
2762
2763 i40e_set_vsi_rx_mode(vsi);
2764 i40e_restore_vlan(vsi);
2765 i40e_vsi_config_dcb_rings(vsi);
2766 err = i40e_vsi_configure_tx(vsi);
2767 if (!err)
2768 err = i40e_vsi_configure_rx(vsi);
2769
2770 return err;
2771 }
2772
2773 /**
2774 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2775 * @vsi: the VSI being configured
2776 **/
2777 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2778 {
2779 struct i40e_pf *pf = vsi->back;
2780 struct i40e_q_vector *q_vector;
2781 struct i40e_hw *hw = &pf->hw;
2782 u16 vector;
2783 int i, q;
2784 u32 val;
2785 u32 qp;
2786
2787 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2788 * and PFINT_LNKLSTn registers, e.g.:
2789 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2790 */
2791 qp = vsi->base_queue;
2792 vector = vsi->base_vector;
2793 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2794 q_vector = vsi->q_vectors[i];
2795 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2796 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2797 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2798 q_vector->rx.itr);
2799 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2800 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2801 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2802 q_vector->tx.itr);
2803
2804 /* Linked list for the queue pairs assigned to this vector */
2805 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2806 for (q = 0; q < q_vector->num_ringpairs; q++) {
2807 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2808 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2809 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2810 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2811 (I40E_QUEUE_TYPE_TX
2812 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2813
2814 wr32(hw, I40E_QINT_RQCTL(qp), val);
2815
2816 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2817 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2818 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2819 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2820 (I40E_QUEUE_TYPE_RX
2821 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2822
2823 /* Terminate the linked list */
2824 if (q == (q_vector->num_ringpairs - 1))
2825 val |= (I40E_QUEUE_END_OF_LIST
2826 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2827
2828 wr32(hw, I40E_QINT_TQCTL(qp), val);
2829 qp++;
2830 }
2831 }
2832
2833 i40e_flush(hw);
2834 }
2835
2836 /**
2837 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2838 * @pf: board private structure
2839 **/
2840 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2841 {
2842 struct i40e_hw *hw = &pf->hw;
2843 u32 val;
2844
2845 /* clear things first */
2846 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2847 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2848
2849 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2850 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2851 I40E_PFINT_ICR0_ENA_GRST_MASK |
2852 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2853 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2854
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2855 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2856 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2857 2858 if (pf->flags & I40E_FLAG_PTP) 2859 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2860 2861 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2862 2863 /* SW_ITR_IDX = 0, but don't change INTENA */ 2864 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 2865 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 2866 2867 /* OTHER_ITR_IDX = 0 */ 2868 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2869 } 2870 2871 /** 2872 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 2873 * @vsi: the VSI being configured 2874 **/ 2875 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2876 { 2877 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 2878 struct i40e_pf *pf = vsi->back; 2879 struct i40e_hw *hw = &pf->hw; 2880 u32 val; 2881 2882 /* set the ITR configuration */ 2883 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2884 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2885 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 2886 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2887 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2888 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2889 2890 i40e_enable_misc_int_causes(pf); 2891 2892 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2893 wr32(hw, I40E_PFINT_LNKLST0, 0); 2894 2895 /* Associate the queue pair to the vector and enable the queue int */ 2896 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2897 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2898 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2899 2900 wr32(hw, I40E_QINT_RQCTL(0), val); 2901 2902 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2903 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2904 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2905 2906 wr32(hw, I40E_QINT_TQCTL(0), val); 2907 i40e_flush(hw); 2908 } 2909 2910 /** 2911 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 2912 * @pf: board private structure 2913 **/ 2914 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 2915 { 2916 struct i40e_hw *hw = &pf->hw; 2917 2918 wr32(hw, I40E_PFINT_DYN_CTL0, 2919 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2920 i40e_flush(hw); 2921 } 2922 2923 /** 2924 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 2925 * @pf: board private structure 2926 **/ 2927 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2928 { 2929 struct i40e_hw *hw = &pf->hw; 2930 u32 val; 2931 2932 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 2933 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2934 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 2935 2936 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2937 i40e_flush(hw); 2938 } 2939 2940 /** 2941 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 2942 * @vsi: pointer to a vsi 2943 * @vector: enable a particular Hw Interrupt vector 2944 **/ 2945 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) 2946 { 2947 struct i40e_pf *pf = vsi->back; 2948 struct i40e_hw *hw = &pf->hw; 2949 u32 val; 2950 2951 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2952 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2953 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2954 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2955 /* skip the flush */ 2956 } 2957 2958 /** 2959 * i40e_irq_dynamic_disable - Disable default interrupt generation settings 2960 * @vsi: pointer to a vsi 2961 * @vector: disable a particular Hw Interrupt vector 2962 **/ 2963 void 
i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) 2964 { 2965 struct i40e_pf *pf = vsi->back; 2966 struct i40e_hw *hw = &pf->hw; 2967 u32 val; 2968 2969 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 2970 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2971 i40e_flush(hw); 2972 } 2973 2974 /** 2975 * i40e_msix_clean_rings - MSIX mode Interrupt Handler 2976 * @irq: interrupt number 2977 * @data: pointer to a q_vector 2978 **/ 2979 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 2980 { 2981 struct i40e_q_vector *q_vector = data; 2982 2983 if (!q_vector->tx.ring && !q_vector->rx.ring) 2984 return IRQ_HANDLED; 2985 2986 napi_schedule(&q_vector->napi); 2987 2988 return IRQ_HANDLED; 2989 } 2990 2991 /** 2992 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts 2993 * @vsi: the VSI being configured 2994 * @basename: name for the vector 2995 * 2996 * Allocates MSI-X vectors and requests interrupts from the kernel. 2997 **/ 2998 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) 2999 { 3000 int q_vectors = vsi->num_q_vectors; 3001 struct i40e_pf *pf = vsi->back; 3002 int base = vsi->base_vector; 3003 int rx_int_idx = 0; 3004 int tx_int_idx = 0; 3005 int vector, err; 3006 3007 for (vector = 0; vector < q_vectors; vector++) { 3008 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; 3009 3010 if (q_vector->tx.ring && q_vector->rx.ring) { 3011 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3012 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 3013 tx_int_idx++; 3014 } else if (q_vector->rx.ring) { 3015 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3016 "%s-%s-%d", basename, "rx", rx_int_idx++); 3017 } else if (q_vector->tx.ring) { 3018 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3019 "%s-%s-%d", basename, "tx", tx_int_idx++); 3020 } else { 3021 /* skip this unused q_vector */ 3022 continue; 3023 } 3024 err = request_irq(pf->msix_entries[base + vector].vector, 3025 vsi->irq_handler, 3026 0, 3027 q_vector->name, 3028 q_vector); 3029 if (err) { 3030 dev_info(&pf->pdev->dev, 3031 "%s: request_irq failed, error: %d\n", 3032 __func__, err); 3033 goto free_queue_irqs; 3034 } 3035 /* assign the mask for this irq */ 3036 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 3037 &q_vector->affinity_mask); 3038 } 3039 3040 vsi->irqs_ready = true; 3041 return 0; 3042 3043 free_queue_irqs: 3044 while (vector) { 3045 vector--; 3046 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 3047 NULL); 3048 free_irq(pf->msix_entries[base + vector].vector, 3049 &(vsi->q_vectors[vector])); 3050 } 3051 return err; 3052 } 3053 3054 /** 3055 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI 3056 * @vsi: the VSI being un-configured 3057 **/ 3058 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) 3059 { 3060 struct i40e_pf *pf = vsi->back; 3061 struct i40e_hw *hw = &pf->hw; 3062 int base = vsi->base_vector; 3063 int i; 3064 3065 for (i = 0; i < vsi->num_queue_pairs; i++) { 3066 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); 3067 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); 3068 } 3069 3070 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3071 for (i = vsi->base_vector; 3072 i < (vsi->num_q_vectors + vsi->base_vector); i++) 3073 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); 3074 3075 i40e_flush(hw); 3076 for (i = 0; i < vsi->num_q_vectors; i++) 3077 synchronize_irq(pf->msix_entries[i + base].vector); 3078 } else { 3079 /* Legacy and MSI mode - this stops all interrupt handling */ 3080 
wr32(hw, I40E_PFINT_ICR0_ENA, 0); 3081 wr32(hw, I40E_PFINT_DYN_CTL0, 0); 3082 i40e_flush(hw); 3083 synchronize_irq(pf->pdev->irq); 3084 } 3085 } 3086 3087 /** 3088 * i40e_vsi_enable_irq - Enable IRQ for the given VSI 3089 * @vsi: the VSI being configured 3090 **/ 3091 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) 3092 { 3093 struct i40e_pf *pf = vsi->back; 3094 int i; 3095 3096 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3097 for (i = vsi->base_vector; 3098 i < (vsi->num_q_vectors + vsi->base_vector); i++) 3099 i40e_irq_dynamic_enable(vsi, i); 3100 } else { 3101 i40e_irq_dynamic_enable_icr0(pf); 3102 } 3103 3104 i40e_flush(&pf->hw); 3105 return 0; 3106 } 3107 3108 /** 3109 * i40e_stop_misc_vector - Stop the vector that handles non-queue events 3110 * @pf: board private structure 3111 **/ 3112 static void i40e_stop_misc_vector(struct i40e_pf *pf) 3113 { 3114 /* Disable ICR 0 */ 3115 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 3116 i40e_flush(&pf->hw); 3117 } 3118 3119 /** 3120 * i40e_intr - MSI/Legacy and non-queue interrupt handler 3121 * @irq: interrupt number 3122 * @data: pointer to a q_vector 3123 * 3124 * This is the handler used for all MSI/Legacy interrupts, and deals 3125 * with both queue and non-queue interrupts. This is also used in 3126 * MSIX mode to handle the non-queue interrupts. 3127 **/ 3128 static irqreturn_t i40e_intr(int irq, void *data) 3129 { 3130 struct i40e_pf *pf = (struct i40e_pf *)data; 3131 struct i40e_hw *hw = &pf->hw; 3132 irqreturn_t ret = IRQ_NONE; 3133 u32 icr0, icr0_remaining; 3134 u32 val, ena_mask; 3135 3136 icr0 = rd32(hw, I40E_PFINT_ICR0); 3137 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 3138 3139 /* if sharing a legacy IRQ, we might get called w/o an intr pending */ 3140 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 3141 goto enable_intr; 3142 3143 /* if interrupt but no bits showing, must be SWINT */ 3144 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || 3145 (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) 3146 pf->sw_int_count++; 3147 3148 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 3149 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 3150 3151 /* temporarily disable queue cause for NAPI processing */ 3152 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); 3153 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; 3154 wr32(hw, I40E_QINT_RQCTL(0), qval); 3155 3156 qval = rd32(hw, I40E_QINT_TQCTL(0)); 3157 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 3158 wr32(hw, I40E_QINT_TQCTL(0), qval); 3159 3160 if (!test_bit(__I40E_DOWN, &pf->state)) 3161 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); 3162 } 3163 3164 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 3165 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3166 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 3167 } 3168 3169 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 3170 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 3171 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 3172 } 3173 3174 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 3175 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 3176 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 3177 } 3178 3179 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 3180 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 3181 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 3182 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 3183 val = rd32(hw, I40E_GLGEN_RSTAT); 3184 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3185 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3186 if (val == I40E_RESET_CORER) { 3187 pf->corer_count++; 3188 } else if (val == I40E_RESET_GLOBR) { 3189 pf->globr_count++; 
3190 } else if (val == I40E_RESET_EMPR) {
3191 pf->empr_count++;
3192 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3193 }
3194 }
3195
3196 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3197 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3198 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3199 }
3200
3201 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3202 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3203
3204 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3205 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3206 i40e_ptp_tx_hwtstamp(pf);
3207 }
3208 }
3209
3210 /* If a critical error is pending we have no choice but to reset the
3211 * device.
3212 * Report and mask out any remaining unexpected interrupts.
3213 */
3214 icr0_remaining = icr0 & ena_mask;
3215 if (icr0_remaining) {
3216 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3217 icr0_remaining);
3218 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3219 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3220 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3221 dev_info(&pf->pdev->dev, "device will be reset\n");
3222 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3223 i40e_service_event_schedule(pf);
3224 }
3225 ena_mask &= ~icr0_remaining;
3226 }
3227 ret = IRQ_HANDLED;
3228
3229 enable_intr:
3230 /* re-enable interrupt causes */
3231 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3232 if (!test_bit(__I40E_DOWN, &pf->state)) {
3233 i40e_service_event_schedule(pf);
3234 i40e_irq_dynamic_enable_icr0(pf);
3235 }
3236
3237 return ret;
3238 }
3239
3240 /**
3241 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3242 * @tx_ring: tx ring to clean
3243 * @budget: how many cleans we're allowed
3244 *
3245 * Returns true if there's any budget left (i.e.
the clean is finished) 3246 **/ 3247 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3248 { 3249 struct i40e_vsi *vsi = tx_ring->vsi; 3250 u16 i = tx_ring->next_to_clean; 3251 struct i40e_tx_buffer *tx_buf; 3252 struct i40e_tx_desc *tx_desc; 3253 3254 tx_buf = &tx_ring->tx_bi[i]; 3255 tx_desc = I40E_TX_DESC(tx_ring, i); 3256 i -= tx_ring->count; 3257 3258 do { 3259 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3260 3261 /* if next_to_watch is not set then there is no work pending */ 3262 if (!eop_desc) 3263 break; 3264 3265 /* prevent any other reads prior to eop_desc */ 3266 read_barrier_depends(); 3267 3268 /* if the descriptor isn't done, no work yet to do */ 3269 if (!(eop_desc->cmd_type_offset_bsz & 3270 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3271 break; 3272 3273 /* clear next_to_watch to prevent false hangs */ 3274 tx_buf->next_to_watch = NULL; 3275 3276 tx_desc->buffer_addr = 0; 3277 tx_desc->cmd_type_offset_bsz = 0; 3278 /* move past filter desc */ 3279 tx_buf++; 3280 tx_desc++; 3281 i++; 3282 if (unlikely(!i)) { 3283 i -= tx_ring->count; 3284 tx_buf = tx_ring->tx_bi; 3285 tx_desc = I40E_TX_DESC(tx_ring, 0); 3286 } 3287 /* unmap skb header data */ 3288 dma_unmap_single(tx_ring->dev, 3289 dma_unmap_addr(tx_buf, dma), 3290 dma_unmap_len(tx_buf, len), 3291 DMA_TO_DEVICE); 3292 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3293 kfree(tx_buf->raw_buf); 3294 3295 tx_buf->raw_buf = NULL; 3296 tx_buf->tx_flags = 0; 3297 tx_buf->next_to_watch = NULL; 3298 dma_unmap_len_set(tx_buf, len, 0); 3299 tx_desc->buffer_addr = 0; 3300 tx_desc->cmd_type_offset_bsz = 0; 3301 3302 /* move us past the eop_desc for start of next FD desc */ 3303 tx_buf++; 3304 tx_desc++; 3305 i++; 3306 if (unlikely(!i)) { 3307 i -= tx_ring->count; 3308 tx_buf = tx_ring->tx_bi; 3309 tx_desc = I40E_TX_DESC(tx_ring, 0); 3310 } 3311 3312 /* update budget accounting */ 3313 budget--; 3314 } while (likely(budget)); 3315 3316 i += tx_ring->count; 3317 tx_ring->next_to_clean = i; 3318 3319 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 3320 i40e_irq_dynamic_enable(vsi, 3321 tx_ring->q_vector->v_idx + vsi->base_vector); 3322 } 3323 return budget > 0; 3324 } 3325 3326 /** 3327 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3328 * @irq: interrupt number 3329 * @data: pointer to a q_vector 3330 **/ 3331 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3332 { 3333 struct i40e_q_vector *q_vector = data; 3334 struct i40e_vsi *vsi; 3335 3336 if (!q_vector->tx.ring) 3337 return IRQ_HANDLED; 3338 3339 vsi = q_vector->tx.ring->vsi; 3340 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3341 3342 return IRQ_HANDLED; 3343 } 3344 3345 /** 3346 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3347 * @vsi: the VSI being configured 3348 * @v_idx: vector index 3349 * @qp_idx: queue pair index 3350 **/ 3351 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3352 { 3353 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3354 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3355 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3356 3357 tx_ring->q_vector = q_vector; 3358 tx_ring->next = q_vector->tx.ring; 3359 q_vector->tx.ring = tx_ring; 3360 q_vector->tx.count++; 3361 3362 rx_ring->q_vector = q_vector; 3363 rx_ring->next = q_vector->rx.ring; 3364 q_vector->rx.ring = rx_ring; 3365 q_vector->rx.count++; 3366 } 3367 3368 /** 3369 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3370 * @vsi: the VSI being configured 
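*
* Worked example of the split (a sketch): with 10 queue pairs on 4
* vectors, each pass assigns DIV_ROUND_UP(qp_remaining, vectors left):
*	v0: ceil(10/4) = 3, v1: ceil(7/3) = 3, v2: ceil(4/2) = 2, v3: ceil(2/1) = 2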
3371 *
3372 * This function maps descriptor rings to the queue-specific vectors
3373 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3374 * one vector per queue pair, but on a constrained vector budget, we
3375 * group the queue pairs as "efficiently" as possible.
3376 **/
3377 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3378 {
3379 int qp_remaining = vsi->num_queue_pairs;
3380 int q_vectors = vsi->num_q_vectors;
3381 int num_ringpairs;
3382 int v_start = 0;
3383 int qp_idx = 0;
3384
3385 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3386 * group them so there are multiple queues per vector.
3387 * It is also important to go through all the vectors available to be
3388 * sure that if we don't use all the vectors, that the remaining vectors
3389 * are cleared. This is especially important when decreasing the
3390 * number of queues in use.
3391 */
3392 for (; v_start < q_vectors; v_start++) {
3393 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3394
3395 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3396
3397 q_vector->num_ringpairs = num_ringpairs;
3398
3399 q_vector->rx.count = 0;
3400 q_vector->tx.count = 0;
3401 q_vector->rx.ring = NULL;
3402 q_vector->tx.ring = NULL;
3403
3404 while (num_ringpairs--) {
3405 map_vector_to_qp(vsi, v_start, qp_idx);
3406 qp_idx++;
3407 qp_remaining--;
3408 }
3409 }
3410 }
3411
3412 /**
3413 * i40e_vsi_request_irq - Request IRQ from the OS
3414 * @vsi: the VSI being configured
3415 * @basename: name for the vector
3416 **/
3417 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3418 {
3419 struct i40e_pf *pf = vsi->back;
3420 int err;
3421
3422 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3423 err = i40e_vsi_request_irq_msix(vsi, basename);
3424 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3425 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3426 pf->int_name, pf);
3427 else
3428 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3429 pf->int_name, pf);
3430
3431 if (err)
3432 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3433
3434 return err;
3435 }
3436
3437 #ifdef CONFIG_NET_POLL_CONTROLLER
3438 /**
3439 * i40e_netpoll - A Polling 'interrupt' handler
3440 * @netdev: network interface device structure
3441 *
3442 * This is used by netconsole to send skbs without having to re-enable
3443 * interrupts. It's not called while the normal interrupt routine is executing.
3444 **/
3445 #ifdef I40E_FCOE
3446 void i40e_netpoll(struct net_device *netdev)
3447 #else
3448 static void i40e_netpoll(struct net_device *netdev)
3449 #endif
3450 {
3451 struct i40e_netdev_priv *np = netdev_priv(netdev);
3452 struct i40e_vsi *vsi = np->vsi;
3453 struct i40e_pf *pf = vsi->back;
3454 int i;
3455
3456 /* if interface is down do nothing */
3457 if (test_bit(__I40E_DOWN, &vsi->state))
3458 return;
3459
3460 pf->flags |= I40E_FLAG_IN_NETPOLL;
3461 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3462 for (i = 0; i < vsi->num_q_vectors; i++)
3463 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3464 } else {
3465 /* i40e_intr() expects the PF as its data cookie */
3466 i40e_intr(pf->pdev->irq, pf);
3467 }
3467 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3468 }
3469 #endif
3470
3471 /**
3472 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3473 * @pf: the PF being configured
3474 * @pf_q: the PF queue
3475 * @enable: enable or disable state of the queue
3476 *
3477 * This routine will wait for the given Tx queue of the PF to reach the
3478 * enabled or disabled state.
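*
* Typical use from i40e_vsi_control_tx() below (sketch): toggle the
* request bit, then wait for the status bit to follow it:
*
*	tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
*	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
*	ret = i40e_pf_txq_wait(pf, pf_q, true);
*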
3479 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3480 * multiple retries; else will return 0 in case of success. 3481 **/ 3482 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3483 { 3484 int i; 3485 u32 tx_reg; 3486 3487 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3488 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3489 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3490 break; 3491 3492 usleep_range(10, 20); 3493 } 3494 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3495 return -ETIMEDOUT; 3496 3497 return 0; 3498 } 3499 3500 /** 3501 * i40e_vsi_control_tx - Start or stop a VSI's rings 3502 * @vsi: the VSI being configured 3503 * @enable: start or stop the rings 3504 **/ 3505 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3506 { 3507 struct i40e_pf *pf = vsi->back; 3508 struct i40e_hw *hw = &pf->hw; 3509 int i, j, pf_q, ret = 0; 3510 u32 tx_reg; 3511 3512 pf_q = vsi->base_queue; 3513 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3514 3515 /* warn the TX unit of coming changes */ 3516 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 3517 if (!enable) 3518 usleep_range(10, 20); 3519 3520 for (j = 0; j < 50; j++) { 3521 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3522 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3523 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3524 break; 3525 usleep_range(1000, 2000); 3526 } 3527 /* Skip if the queue is already in the requested state */ 3528 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3529 continue; 3530 3531 /* turn on/off the queue */ 3532 if (enable) { 3533 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3534 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3535 } else { 3536 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3537 } 3538 3539 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3540 /* No waiting for the Tx queue to disable */ 3541 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) 3542 continue; 3543 3544 /* wait for the change to finish */ 3545 ret = i40e_pf_txq_wait(pf, pf_q, enable); 3546 if (ret) { 3547 dev_info(&pf->pdev->dev, 3548 "%s: VSI seid %d Tx ring %d %sable timeout\n", 3549 __func__, vsi->seid, pf_q, 3550 (enable ? "en" : "dis")); 3551 break; 3552 } 3553 } 3554 3555 if (hw->revision_id == 0) 3556 mdelay(50); 3557 return ret; 3558 } 3559 3560 /** 3561 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 3562 * @pf: the PF being configured 3563 * @pf_q: the PF queue 3564 * @enable: enable or disable state of the queue 3565 * 3566 * This routine will wait for the given Rx queue of the PF to reach the 3567 * enabled or disabled state. 3568 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3569 * multiple retries; else will return 0 in case of success. 
3570 **/ 3571 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3572 { 3573 int i; 3574 u32 rx_reg; 3575 3576 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3577 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); 3578 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3579 break; 3580 3581 usleep_range(10, 20); 3582 } 3583 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3584 return -ETIMEDOUT; 3585 3586 return 0; 3587 } 3588 3589 /** 3590 * i40e_vsi_control_rx - Start or stop a VSI's rings 3591 * @vsi: the VSI being configured 3592 * @enable: start or stop the rings 3593 **/ 3594 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) 3595 { 3596 struct i40e_pf *pf = vsi->back; 3597 struct i40e_hw *hw = &pf->hw; 3598 int i, j, pf_q, ret = 0; 3599 u32 rx_reg; 3600 3601 pf_q = vsi->base_queue; 3602 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3603 for (j = 0; j < 50; j++) { 3604 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3605 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == 3606 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) 3607 break; 3608 usleep_range(1000, 2000); 3609 } 3610 3611 /* Skip if the queue is already in the requested state */ 3612 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3613 continue; 3614 3615 /* turn on/off the queue */ 3616 if (enable) 3617 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; 3618 else 3619 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; 3620 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); 3621 3622 /* wait for the change to finish */ 3623 ret = i40e_pf_rxq_wait(pf, pf_q, enable); 3624 if (ret) { 3625 dev_info(&pf->pdev->dev, 3626 "%s: VSI seid %d Rx ring %d %sable timeout\n", 3627 __func__, vsi->seid, pf_q, 3628 (enable ? "en" : "dis")); 3629 break; 3630 } 3631 } 3632 3633 return ret; 3634 } 3635 3636 /** 3637 * i40e_vsi_control_rings - Start or stop a VSI's rings 3638 * @vsi: the VSI being configured 3639 * @enable: start or stop the rings 3640 **/ 3641 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) 3642 { 3643 int ret = 0; 3644 3645 /* do rx first for enable and last for disable */ 3646 if (request) { 3647 ret = i40e_vsi_control_rx(vsi, request); 3648 if (ret) 3649 return ret; 3650 ret = i40e_vsi_control_tx(vsi, request); 3651 } else { 3652 /* Ignore return value, we need to shutdown whatever we can */ 3653 i40e_vsi_control_tx(vsi, request); 3654 i40e_vsi_control_rx(vsi, request); 3655 } 3656 3657 return ret; 3658 } 3659 3660 /** 3661 * i40e_vsi_free_irq - Free the irq association with the OS 3662 * @vsi: the VSI being configured 3663 **/ 3664 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) 3665 { 3666 struct i40e_pf *pf = vsi->back; 3667 struct i40e_hw *hw = &pf->hw; 3668 int base = vsi->base_vector; 3669 u32 val, qp; 3670 int i; 3671 3672 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3673 if (!vsi->q_vectors) 3674 return; 3675 3676 if (!vsi->irqs_ready) 3677 return; 3678 3679 vsi->irqs_ready = false; 3680 for (i = 0; i < vsi->num_q_vectors; i++) { 3681 u16 vector = i + base; 3682 3683 /* free only the irqs that were actually requested */ 3684 if (!vsi->q_vectors[i] || 3685 !vsi->q_vectors[i]->num_ringpairs) 3686 continue; 3687 3688 /* clear the affinity_mask in the IRQ descriptor */ 3689 irq_set_affinity_hint(pf->msix_entries[vector].vector, 3690 NULL); 3691 free_irq(pf->msix_entries[vector].vector, 3692 vsi->q_vectors[i]); 3693 3694 /* Tear down the interrupt queue link list 3695 * 3696 * We know that they come in pairs and always 3697 * the Rx first, then the Tx. 
To clear the 3698 * link list, stick the EOL value into the 3699 * next_q field of the registers. 3700 */ 3701 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3702 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3703 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3704 val |= I40E_QUEUE_END_OF_LIST 3705 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3706 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3707 3708 while (qp != I40E_QUEUE_END_OF_LIST) { 3709 u32 next; 3710 3711 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3712 3713 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3714 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3715 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3716 I40E_QINT_RQCTL_INTEVENT_MASK); 3717 3718 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3719 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3720 3721 wr32(hw, I40E_QINT_RQCTL(qp), val); 3722 3723 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3724 3725 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 3726 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 3727 3728 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3729 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3730 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3731 I40E_QINT_TQCTL_INTEVENT_MASK); 3732 3733 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3734 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3735 3736 wr32(hw, I40E_QINT_TQCTL(qp), val); 3737 qp = next; 3738 } 3739 } 3740 } else { 3741 free_irq(pf->pdev->irq, pf); 3742 3743 val = rd32(hw, I40E_PFINT_LNKLST0); 3744 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3745 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3746 val |= I40E_QUEUE_END_OF_LIST 3747 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 3748 wr32(hw, I40E_PFINT_LNKLST0, val); 3749 3750 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3751 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3752 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3753 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3754 I40E_QINT_RQCTL_INTEVENT_MASK); 3755 3756 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3757 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3758 3759 wr32(hw, I40E_QINT_RQCTL(qp), val); 3760 3761 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3762 3763 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3764 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3765 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3766 I40E_QINT_TQCTL_INTEVENT_MASK); 3767 3768 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3769 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3770 3771 wr32(hw, I40E_QINT_TQCTL(qp), val); 3772 } 3773 } 3774 3775 /** 3776 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 3777 * @vsi: the VSI being configured 3778 * @v_idx: Index of vector to be freed 3779 * 3780 * This function frees the memory allocated to the q_vector. In addition if 3781 * NAPI is enabled it will delete any references to the NAPI struct prior 3782 * to freeing the q_vector. 3783 **/ 3784 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 3785 { 3786 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3787 struct i40e_ring *ring; 3788 3789 if (!q_vector) 3790 return; 3791 3792 /* disassociate q_vector from rings */ 3793 i40e_for_each_ring(ring, q_vector->tx) 3794 ring->q_vector = NULL; 3795 3796 i40e_for_each_ring(ring, q_vector->rx) 3797 ring->q_vector = NULL; 3798 3799 /* only VSI w/ an associated netdev is set up w/ NAPI */ 3800 if (vsi->netdev) 3801 netif_napi_del(&q_vector->napi); 3802 3803 vsi->q_vectors[v_idx] = NULL; 3804 3805 kfree_rcu(q_vector, rcu); 3806 } 3807 3808 /** 3809 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3810 * @vsi: the VSI being un-configured 3811 * 3812 * This frees the memory allocated to the q_vectors and 3813 * deletes references to the NAPI struct. 
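*
* Teardown ordering sketch, as used by i40e_clear_interrupt_scheme()
* below: every IRQ that might still reference a q_vector is released
* first, and only then are the vectors freed (via kfree_rcu):
*
*	i40e_stop_misc_vector(pf);
*	free_irq(pf->msix_entries[0].vector, pf);
*	i40e_vsi_free_q_vectors(pf->vsi[i]);
*	i40e_reset_interrupt_capability(pf);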
3814 **/ 3815 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 3816 { 3817 int v_idx; 3818 3819 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 3820 i40e_free_q_vector(vsi, v_idx); 3821 } 3822 3823 /** 3824 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 3825 * @pf: board private structure 3826 **/ 3827 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 3828 { 3829 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 3830 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3831 pci_disable_msix(pf->pdev); 3832 kfree(pf->msix_entries); 3833 pf->msix_entries = NULL; 3834 kfree(pf->irq_pile); 3835 pf->irq_pile = NULL; 3836 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 3837 pci_disable_msi(pf->pdev); 3838 } 3839 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 3840 } 3841 3842 /** 3843 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 3844 * @pf: board private structure 3845 * 3846 * We go through and clear interrupt specific resources and reset the structure 3847 * to pre-load conditions 3848 **/ 3849 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 3850 { 3851 int i; 3852 3853 i40e_stop_misc_vector(pf); 3854 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3855 synchronize_irq(pf->msix_entries[0].vector); 3856 free_irq(pf->msix_entries[0].vector, pf); 3857 } 3858 3859 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3860 for (i = 0; i < pf->num_alloc_vsi; i++) 3861 if (pf->vsi[i]) 3862 i40e_vsi_free_q_vectors(pf->vsi[i]); 3863 i40e_reset_interrupt_capability(pf); 3864 } 3865 3866 /** 3867 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 3868 * @vsi: the VSI being configured 3869 **/ 3870 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 3871 { 3872 int q_idx; 3873 3874 if (!vsi->netdev) 3875 return; 3876 3877 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3878 napi_enable(&vsi->q_vectors[q_idx]->napi); 3879 } 3880 3881 /** 3882 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 3883 * @vsi: the VSI being configured 3884 **/ 3885 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 3886 { 3887 int q_idx; 3888 3889 if (!vsi->netdev) 3890 return; 3891 3892 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3893 napi_disable(&vsi->q_vectors[q_idx]->napi); 3894 } 3895 3896 /** 3897 * i40e_vsi_close - Shut down a VSI 3898 * @vsi: the vsi to be quelled 3899 **/ 3900 static void i40e_vsi_close(struct i40e_vsi *vsi) 3901 { 3902 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 3903 i40e_down(vsi); 3904 i40e_vsi_free_irq(vsi); 3905 i40e_vsi_free_tx_resources(vsi); 3906 i40e_vsi_free_rx_resources(vsi); 3907 } 3908 3909 /** 3910 * i40e_quiesce_vsi - Pause a given VSI 3911 * @vsi: the VSI being paused 3912 **/ 3913 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 3914 { 3915 if (test_bit(__I40E_DOWN, &vsi->state)) 3916 return; 3917 3918 /* No need to disable FCoE VSI when Tx suspended */ 3919 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && 3920 vsi->type == I40E_VSI_FCOE) { 3921 dev_dbg(&vsi->back->pdev->dev, 3922 "%s: VSI seid %d skipping FCoE VSI disable\n", 3923 __func__, vsi->seid); 3924 return; 3925 } 3926 3927 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 3928 if (vsi->netdev && netif_running(vsi->netdev)) { 3929 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3930 } else { 3931 i40e_vsi_close(vsi); 3932 } 3933 } 3934 3935 /** 3936 * i40e_unquiesce_vsi - Resume a given VSI 3937 * @vsi: the VSI being resumed 3938 **/ 3939 static void 
i40e_unquiesce_vsi(struct i40e_vsi *vsi) 3940 { 3941 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 3942 return; 3943 3944 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 3945 if (vsi->netdev && netif_running(vsi->netdev)) 3946 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3947 else 3948 i40e_vsi_open(vsi); /* this clears the DOWN bit */ 3949 } 3950 3951 /** 3952 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 3953 * @pf: the PF 3954 **/ 3955 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 3956 { 3957 int v; 3958 3959 for (v = 0; v < pf->num_alloc_vsi; v++) { 3960 if (pf->vsi[v]) 3961 i40e_quiesce_vsi(pf->vsi[v]); 3962 } 3963 } 3964 3965 /** 3966 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 3967 * @pf: the PF 3968 **/ 3969 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 3970 { 3971 int v; 3972 3973 for (v = 0; v < pf->num_alloc_vsi; v++) { 3974 if (pf->vsi[v]) 3975 i40e_unquiesce_vsi(pf->vsi[v]); 3976 } 3977 } 3978 3979 #ifdef CONFIG_I40E_DCB 3980 /** 3981 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled 3982 * @vsi: the VSI being configured 3983 * 3984 * This function waits for the given VSI's Tx queues to be disabled. 3985 **/ 3986 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) 3987 { 3988 struct i40e_pf *pf = vsi->back; 3989 int i, pf_q, ret; 3990 3991 pf_q = vsi->base_queue; 3992 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3993 /* Check and wait for the disable status of the queue */ 3994 ret = i40e_pf_txq_wait(pf, pf_q, false); 3995 if (ret) { 3996 dev_info(&pf->pdev->dev, 3997 "%s: VSI seid %d Tx ring %d disable timeout\n", 3998 __func__, vsi->seid, pf_q); 3999 return ret; 4000 } 4001 } 4002 4003 return 0; 4004 } 4005 4006 /** 4007 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled 4008 * @pf: the PF 4009 * 4010 * This function waits for the Tx queues to be in disabled state for all the 4011 * VSIs that are managed by this PF. 4012 **/ 4013 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) 4014 { 4015 int v, ret = 0; 4016 4017 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4018 /* No need to wait for FCoE VSI queues */ 4019 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { 4020 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); 4021 if (ret) 4022 break; 4023 } 4024 } 4025 4026 return ret; 4027 } 4028 4029 #endif 4030 /** 4031 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP 4032 * @pf: pointer to PF 4033 * 4034 * Get TC map for ISCSI PF type that will include iSCSI TC 4035 * and LAN TC. 
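 *
 * For example, if the iSCSI APP TLV carries user priority 4 and the
 * ETS priority table maps priority 4 to TC2, the returned map is
 * 0x05 (TC0 | TC2), since TC0 is always enabled.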
4036 **/ 4037 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) 4038 { 4039 struct i40e_dcb_app_priority_table app; 4040 struct i40e_hw *hw = &pf->hw; 4041 u8 enabled_tc = 1; /* TC0 is always enabled */ 4042 u8 tc, i; 4043 /* Get the iSCSI APP TLV */ 4044 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4045 4046 for (i = 0; i < dcbcfg->numapps; i++) { 4047 app = dcbcfg->app[i]; 4048 if (app.selector == I40E_APP_SEL_TCPIP && 4049 app.protocolid == I40E_APP_PROTOID_ISCSI) { 4050 tc = dcbcfg->etscfg.prioritytable[app.priority]; 4051 enabled_tc |= (1 << tc); 4052 break; 4053 } 4054 } 4055 4056 return enabled_tc; 4057 } 4058 4059 /** 4060 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 4061 * @dcbcfg: the corresponding DCBx configuration structure 4062 * 4063 * Return the number of TCs from given DCBx configuration 4064 **/ 4065 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 4066 { 4067 u8 num_tc = 0; 4068 int i; 4069 4070 /* Scan the ETS Config Priority Table to find 4071 * traffic class enabled for a given priority 4072 * and use the traffic class index to get the 4073 * number of traffic classes enabled 4074 */ 4075 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 4076 if (dcbcfg->etscfg.prioritytable[i] > num_tc) 4077 num_tc = dcbcfg->etscfg.prioritytable[i]; 4078 } 4079 4080 /* Traffic class index starts from zero so 4081 * increment to return the actual count 4082 */ 4083 return num_tc + 1; 4084 } 4085 4086 /** 4087 * i40e_dcb_get_enabled_tc - Get enabled traffic classes 4088 * @dcbcfg: the corresponding DCBx configuration structure 4089 * 4090 * Query the current DCB configuration and return the number of 4091 * traffic classes enabled from the given DCBX config 4092 **/ 4093 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) 4094 { 4095 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); 4096 u8 enabled_tc = 1; 4097 u8 i; 4098 4099 for (i = 0; i < num_tc; i++) 4100 enabled_tc |= 1 << i; 4101 4102 return enabled_tc; 4103 } 4104 4105 /** 4106 * i40e_pf_get_num_tc - Get enabled traffic classes for PF 4107 * @pf: PF being queried 4108 * 4109 * Return number of traffic classes enabled for the given PF 4110 **/ 4111 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) 4112 { 4113 struct i40e_hw *hw = &pf->hw; 4114 u8 i, enabled_tc; 4115 u8 num_tc = 0; 4116 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4117 4118 /* If DCB is not enabled then always in single TC */ 4119 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4120 return 1; 4121 4122 /* SFP mode will be enabled for all TCs on port */ 4123 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 4124 return i40e_dcb_get_num_tc(dcbcfg); 4125 4126 /* MFP mode return count of enabled TCs for this PF */ 4127 if (pf->hw.func_caps.iscsi) 4128 enabled_tc = i40e_get_iscsi_tc_map(pf); 4129 else 4130 return 1; /* Only TC0 */ 4131 4132 /* At least have TC0 */ 4133 enabled_tc = (enabled_tc ? enabled_tc : 0x1); 4134 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4135 if (enabled_tc & (1 << i)) 4136 num_tc++; 4137 } 4138 return num_tc; 4139 } 4140 4141 /** 4142 * i40e_pf_get_default_tc - Get bitmap for first enabled TC 4143 * @pf: PF being queried 4144 * 4145 * Return a bitmap for first enabled traffic class for this PF. 
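 *
 * For example, an enabled_tcmap of 0x6 (TC1 and TC2) yields 0x2,
 * the bit for TC1, the lowest-numbered enabled TC.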
 **/
static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
{
	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
	u8 i = 0;

	if (!enabled_tc)
		return 0x1; /* TC0 */

	/* Find the first enabled TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			break;
	}

	return 1 << i;
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return i40e_pf_get_default_tc(pf);

	/* In SFP mode we want the PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return i40e_pf_get_default_tc(pf);
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
						  NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status aq_ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
					  NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x09 (binary 1001); the
		 * driver will set the numtc for netdev as 2, which will
		 * be referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & (1 << i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_config.tc_info[i].netdev_tc,
					    vsi->tc_config.tc_info[i].qcount,
					    vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
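 *
 * A minimal calling sketch (this mirrors i40e_setup_tc() later in
 * this file):
 *
 *	i40e_quiesce_vsi(vsi);
 *	ret = i40e_vsi_config_tc(vsi, enabled_tc);
 *	i40e_unquiesce_vsi(vsi);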
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Return early if the requested TC map matches the current one */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs to configure, or TCs already enabled: just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "veb bw config failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller will have quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct
i40e_pf *pf) 4482 { 4483 u8 tc_map = 0; 4484 int ret; 4485 u8 v; 4486 4487 /* Enable the TCs available on PF to all VEBs */ 4488 tc_map = i40e_pf_get_tc_map(pf); 4489 for (v = 0; v < I40E_MAX_VEB; v++) { 4490 if (!pf->veb[v]) 4491 continue; 4492 ret = i40e_veb_config_tc(pf->veb[v], tc_map); 4493 if (ret) { 4494 dev_info(&pf->pdev->dev, 4495 "Failed configuring TC for VEB seid=%d\n", 4496 pf->veb[v]->seid); 4497 /* Will try to configure as many components */ 4498 } 4499 } 4500 4501 /* Update each VSI */ 4502 for (v = 0; v < pf->num_alloc_vsi; v++) { 4503 if (!pf->vsi[v]) 4504 continue; 4505 4506 /* - Enable all TCs for the LAN VSI 4507 #ifdef I40E_FCOE 4508 * - For FCoE VSI only enable the TC configured 4509 * as per the APP TLV 4510 #endif 4511 * - For all others keep them at TC0 for now 4512 */ 4513 if (v == pf->lan_vsi) 4514 tc_map = i40e_pf_get_tc_map(pf); 4515 else 4516 tc_map = i40e_pf_get_default_tc(pf); 4517 #ifdef I40E_FCOE 4518 if (pf->vsi[v]->type == I40E_VSI_FCOE) 4519 tc_map = i40e_get_fcoe_tc_map(pf); 4520 #endif /* #ifdef I40E_FCOE */ 4521 4522 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); 4523 if (ret) { 4524 dev_info(&pf->pdev->dev, 4525 "Failed configuring TC for VSI seid=%d\n", 4526 pf->vsi[v]->seid); 4527 /* Will try to configure as many components */ 4528 } else { 4529 /* Re-configure VSI vectors based on updated TC map */ 4530 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); 4531 if (pf->vsi[v]->netdev) 4532 i40e_dcbnl_set_all(pf->vsi[v]); 4533 } 4534 } 4535 } 4536 4537 /** 4538 * i40e_resume_port_tx - Resume port Tx 4539 * @pf: PF struct 4540 * 4541 * Resume a port's Tx and issue a PF reset in case of failure to 4542 * resume. 4543 **/ 4544 static int i40e_resume_port_tx(struct i40e_pf *pf) 4545 { 4546 struct i40e_hw *hw = &pf->hw; 4547 int ret; 4548 4549 ret = i40e_aq_resume_port_tx(hw, NULL); 4550 if (ret) { 4551 dev_info(&pf->pdev->dev, 4552 "AQ command Resume Port Tx failed = %d\n", 4553 pf->hw.aq.asq_last_status); 4554 /* Schedule PF reset to recover */ 4555 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 4556 i40e_service_event_schedule(pf); 4557 } 4558 4559 return ret; 4560 } 4561 4562 /** 4563 * i40e_init_pf_dcb - Initialize DCB configuration 4564 * @pf: PF being configured 4565 * 4566 * Query the current DCB configuration and cache it 4567 * in the hardware structure 4568 **/ 4569 static int i40e_init_pf_dcb(struct i40e_pf *pf) 4570 { 4571 struct i40e_hw *hw = &pf->hw; 4572 int err = 0; 4573 4574 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ 4575 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 4576 (pf->hw.aq.fw_maj_ver < 4)) 4577 goto out; 4578 4579 /* Get the initial DCB configuration */ 4580 err = i40e_init_dcb(hw); 4581 if (!err) { 4582 /* Device/Function is not DCBX capable */ 4583 if ((!hw->func_caps.dcb) || 4584 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { 4585 dev_info(&pf->pdev->dev, 4586 "DCBX offload is not supported or is disabled for this PF.\n"); 4587 4588 if (pf->flags & I40E_FLAG_MFP_ENABLED) 4589 goto out; 4590 4591 } else { 4592 /* When status is not DISABLED then DCBX in FW */ 4593 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | 4594 DCB_CAP_DCBX_VER_IEEE; 4595 4596 pf->flags |= I40E_FLAG_DCB_CAPABLE; 4597 /* Enable DCB tagging only when more than one TC */ 4598 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 4599 pf->flags |= I40E_FLAG_DCB_ENABLED; 4600 dev_dbg(&pf->pdev->dev, 4601 "DCBX offload is supported for this PF.\n"); 4602 } 4603 } else { 4604 dev_info(&pf->pdev->dev, 4605 "AQ Querying 
DCB configuration failed: aq_err %d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true for link up, false for link down
 **/
static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	char speed[SPEED_SIZE] = "Unknown";
	char fc[FC_SIZE] = "RX/TX";

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (vsi->back->hw.func_caps.npar_enable &&
	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		strlcpy(speed, "40 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_10GB:
		strlcpy(speed, "10 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_1GB:
		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_100MB:
		strlcpy(speed, "100 Mbps", SPEED_SIZE);
		break;
	default:
		break;
	}

	switch (vsi->back->hw.fc.current_mode) {
	case I40E_FC_FULL:
		strlcpy(fc, "RX/TX", FC_SIZE);
		break;
	case I40E_FC_TX_PAUSE:
		strlcpy(fc, "TX", FC_SIZE);
		break;
	case I40E_FC_RX_PAUSE:
		strlcpy(fc, "RX", FC_SIZE);
		break;
	default:
		strlcpy(fc, "None", FC_SIZE);
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
		    speed, fc);
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here */
		if ((pf->hw.phy.link_info.link_info &
		     I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
		       I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.\n");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		if (pf->fd_tcp_rule > 0) {
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 *
i40e_vsi_reinit_locked - Reset the VSI 4731 * @vsi: the VSI being configured 4732 * 4733 * Rebuild the ring structs after some configuration 4734 * has changed, e.g. MTU size. 4735 **/ 4736 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) 4737 { 4738 struct i40e_pf *pf = vsi->back; 4739 4740 WARN_ON(in_interrupt()); 4741 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 4742 usleep_range(1000, 2000); 4743 i40e_down(vsi); 4744 4745 /* Give a VF some time to respond to the reset. The 4746 * two second wait is based upon the watchdog cycle in 4747 * the VF driver. 4748 */ 4749 if (vsi->type == I40E_VSI_SRIOV) 4750 msleep(2000); 4751 i40e_up(vsi); 4752 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 4753 } 4754 4755 /** 4756 * i40e_up - Bring the connection back up after being down 4757 * @vsi: the VSI being configured 4758 **/ 4759 int i40e_up(struct i40e_vsi *vsi) 4760 { 4761 int err; 4762 4763 err = i40e_vsi_configure(vsi); 4764 if (!err) 4765 err = i40e_up_complete(vsi); 4766 4767 return err; 4768 } 4769 4770 /** 4771 * i40e_down - Shutdown the connection processing 4772 * @vsi: the VSI being stopped 4773 **/ 4774 void i40e_down(struct i40e_vsi *vsi) 4775 { 4776 int i; 4777 4778 /* It is assumed that the caller of this function 4779 * sets the vsi->state __I40E_DOWN bit. 4780 */ 4781 if (vsi->netdev) { 4782 netif_carrier_off(vsi->netdev); 4783 netif_tx_disable(vsi->netdev); 4784 } 4785 i40e_vsi_disable_irq(vsi); 4786 i40e_vsi_control_rings(vsi, false); 4787 i40e_napi_disable_all(vsi); 4788 4789 for (i = 0; i < vsi->num_queue_pairs; i++) { 4790 i40e_clean_tx_ring(vsi->tx_rings[i]); 4791 i40e_clean_rx_ring(vsi->rx_rings[i]); 4792 } 4793 } 4794 4795 /** 4796 * i40e_setup_tc - configure multiple traffic classes 4797 * @netdev: net device to configure 4798 * @tc: number of traffic classes to enable 4799 **/ 4800 #ifdef I40E_FCOE 4801 int i40e_setup_tc(struct net_device *netdev, u8 tc) 4802 #else 4803 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 4804 #endif 4805 { 4806 struct i40e_netdev_priv *np = netdev_priv(netdev); 4807 struct i40e_vsi *vsi = np->vsi; 4808 struct i40e_pf *pf = vsi->back; 4809 u8 enabled_tc = 0; 4810 int ret = -EINVAL; 4811 int i; 4812 4813 /* Check if DCB enabled to continue */ 4814 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 4815 netdev_info(netdev, "DCB is not enabled for adapter\n"); 4816 goto exit; 4817 } 4818 4819 /* Check if MFP enabled */ 4820 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4821 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 4822 goto exit; 4823 } 4824 4825 /* Check whether tc count is within enabled limit */ 4826 if (tc > i40e_pf_get_num_tc(pf)) { 4827 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 4828 goto exit; 4829 } 4830 4831 /* Generate TC map for number of tc requested */ 4832 for (i = 0; i < tc; i++) 4833 enabled_tc |= (1 << i); 4834 4835 /* Requesting same TC configuration as already enabled */ 4836 if (enabled_tc == vsi->tc_config.enabled_tc) 4837 return 0; 4838 4839 /* Quiesce VSI queues */ 4840 i40e_quiesce_vsi(vsi); 4841 4842 /* Configure VSI for enabled TCs */ 4843 ret = i40e_vsi_config_tc(vsi, enabled_tc); 4844 if (ret) { 4845 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 4846 vsi->seid); 4847 goto exit; 4848 } 4849 4850 /* Unquiesce VSI */ 4851 i40e_unquiesce_vsi(vsi); 4852 4853 exit: 4854 return ret; 4855 } 4856 4857 /** 4858 * i40e_open - Called when a network interface is made active 4859 * @netdev: network interface device structure 4860 * 4861 * 
The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
#endif

	return 0;
}

/**
 * i40e_vsi_open - Bring up a VSI and allocate its resources
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return err;
}

/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
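 *
 * The _safe variant of the hlist iterator is required here because
 * each node is unlinked and freed while the list is being walked.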
4983 **/ 4984 static void i40e_fdir_filter_exit(struct i40e_pf *pf) 4985 { 4986 struct i40e_fdir_filter *filter; 4987 struct hlist_node *node2; 4988 4989 hlist_for_each_entry_safe(filter, node2, 4990 &pf->fdir_filter_list, fdir_node) { 4991 hlist_del(&filter->fdir_node); 4992 kfree(filter); 4993 } 4994 pf->fdir_pf_active_filters = 0; 4995 } 4996 4997 /** 4998 * i40e_close - Disables a network interface 4999 * @netdev: network interface device structure 5000 * 5001 * The close entry point is called when an interface is de-activated 5002 * by the OS. The hardware is still under the driver's control, but 5003 * this netdev interface is disabled. 5004 * 5005 * Returns 0, this is not allowed to fail 5006 **/ 5007 #ifdef I40E_FCOE 5008 int i40e_close(struct net_device *netdev) 5009 #else 5010 static int i40e_close(struct net_device *netdev) 5011 #endif 5012 { 5013 struct i40e_netdev_priv *np = netdev_priv(netdev); 5014 struct i40e_vsi *vsi = np->vsi; 5015 5016 i40e_vsi_close(vsi); 5017 5018 return 0; 5019 } 5020 5021 /** 5022 * i40e_do_reset - Start a PF or Core Reset sequence 5023 * @pf: board private structure 5024 * @reset_flags: which reset is requested 5025 * 5026 * The essential difference in resets is that the PF Reset 5027 * doesn't clear the packet buffers, doesn't reset the PE 5028 * firmware, and doesn't bother the other PFs on the chip. 5029 **/ 5030 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 5031 { 5032 u32 val; 5033 5034 WARN_ON(in_interrupt()); 5035 5036 if (i40e_check_asq_alive(&pf->hw)) 5037 i40e_vc_notify_reset(pf); 5038 5039 /* do the biggest reset indicated */ 5040 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { 5041 5042 /* Request a Global Reset 5043 * 5044 * This will start the chip's countdown to the actual full 5045 * chip reset event, and a warning interrupt to be sent 5046 * to all PFs, including the requestor. Our handler 5047 * for the warning interrupt will deal with the shutdown 5048 * and recovery of the switch setup. 5049 */ 5050 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 5051 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5052 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 5053 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5054 5055 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { 5056 5057 /* Request a Core Reset 5058 * 5059 * Same as Global Reset, except does *not* include the MAC/PHY 5060 */ 5061 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 5062 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5063 val |= I40E_GLGEN_RTRIG_CORER_MASK; 5064 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5065 i40e_flush(&pf->hw); 5066 5067 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { 5068 5069 /* Request a PF Reset 5070 * 5071 * Resets only the PF-specific registers 5072 * 5073 * This goes directly to the tear-down and rebuild of 5074 * the switch, since we need to do all the recovery as 5075 * for the Core Reset. 
5076 */ 5077 dev_dbg(&pf->pdev->dev, "PFR requested\n"); 5078 i40e_handle_reset_warning(pf); 5079 5080 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { 5081 int v; 5082 5083 /* Find the VSI(s) that requested a re-init */ 5084 dev_info(&pf->pdev->dev, 5085 "VSI reinit requested\n"); 5086 for (v = 0; v < pf->num_alloc_vsi; v++) { 5087 struct i40e_vsi *vsi = pf->vsi[v]; 5088 if (vsi != NULL && 5089 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 5090 i40e_vsi_reinit_locked(pf->vsi[v]); 5091 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); 5092 } 5093 } 5094 5095 /* no further action needed, so return now */ 5096 return; 5097 } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) { 5098 int v; 5099 5100 /* Find the VSI(s) that needs to be brought down */ 5101 dev_info(&pf->pdev->dev, "VSI down requested\n"); 5102 for (v = 0; v < pf->num_alloc_vsi; v++) { 5103 struct i40e_vsi *vsi = pf->vsi[v]; 5104 if (vsi != NULL && 5105 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { 5106 set_bit(__I40E_DOWN, &vsi->state); 5107 i40e_down(vsi); 5108 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); 5109 } 5110 } 5111 5112 /* no further action needed, so return now */ 5113 return; 5114 } else { 5115 dev_info(&pf->pdev->dev, 5116 "bad reset request 0x%08x\n", reset_flags); 5117 return; 5118 } 5119 } 5120 5121 #ifdef CONFIG_I40E_DCB 5122 /** 5123 * i40e_dcb_need_reconfig - Check if DCB needs reconfig 5124 * @pf: board private structure 5125 * @old_cfg: current DCB config 5126 * @new_cfg: new DCB config 5127 **/ 5128 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 5129 struct i40e_dcbx_config *old_cfg, 5130 struct i40e_dcbx_config *new_cfg) 5131 { 5132 bool need_reconfig = false; 5133 5134 /* Check if ETS configuration has changed */ 5135 if (memcmp(&new_cfg->etscfg, 5136 &old_cfg->etscfg, 5137 sizeof(new_cfg->etscfg))) { 5138 /* If Priority Table has changed reconfig is needed */ 5139 if (memcmp(&new_cfg->etscfg.prioritytable, 5140 &old_cfg->etscfg.prioritytable, 5141 sizeof(new_cfg->etscfg.prioritytable))) { 5142 need_reconfig = true; 5143 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); 5144 } 5145 5146 if (memcmp(&new_cfg->etscfg.tcbwtable, 5147 &old_cfg->etscfg.tcbwtable, 5148 sizeof(new_cfg->etscfg.tcbwtable))) 5149 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 5150 5151 if (memcmp(&new_cfg->etscfg.tsatable, 5152 &old_cfg->etscfg.tsatable, 5153 sizeof(new_cfg->etscfg.tsatable))) 5154 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); 5155 } 5156 5157 /* Check if PFC configuration has changed */ 5158 if (memcmp(&new_cfg->pfc, 5159 &old_cfg->pfc, 5160 sizeof(new_cfg->pfc))) { 5161 need_reconfig = true; 5162 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); 5163 } 5164 5165 /* Check if APP Table has changed */ 5166 if (memcmp(&new_cfg->app, 5167 &old_cfg->app, 5168 sizeof(new_cfg->app))) { 5169 need_reconfig = true; 5170 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); 5171 } 5172 5173 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, 5174 need_reconfig); 5175 return need_reconfig; 5176 } 5177 5178 /** 5179 * i40e_handle_lldp_event - Handle LLDP Change MIB event 5180 * @pf: board private structure 5181 * @e: event info posted on ARQ 5182 **/ 5183 static int i40e_handle_lldp_event(struct i40e_pf *pf, 5184 struct i40e_arq_event_info *e) 5185 { 5186 struct i40e_aqc_lldp_get_mib *mib = 5187 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; 5188 struct i40e_hw *hw = &pf->hw; 5189 struct i40e_dcbx_config tmp_dcbx_cfg; 5190 bool need_reconfig = false; 5191 int ret = 0; 
5192 u8 type; 5193 5194 /* Not DCB capable or capability disabled */ 5195 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) 5196 return ret; 5197 5198 /* Ignore if event is not for Nearest Bridge */ 5199 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) 5200 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 5201 dev_dbg(&pf->pdev->dev, 5202 "%s: LLDP event mib bridge type 0x%x\n", __func__, type); 5203 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) 5204 return ret; 5205 5206 /* Check MIB Type and return if event for Remote MIB update */ 5207 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; 5208 dev_dbg(&pf->pdev->dev, 5209 "%s: LLDP event mib type %s\n", __func__, 5210 type ? "remote" : "local"); 5211 if (type == I40E_AQ_LLDP_MIB_REMOTE) { 5212 /* Update the remote cached instance and return */ 5213 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, 5214 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, 5215 &hw->remote_dcbx_config); 5216 goto exit; 5217 } 5218 5219 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); 5220 /* Store the old configuration */ 5221 memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg)); 5222 5223 /* Reset the old DCBx configuration data */ 5224 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); 5225 /* Get updated DCBX data from firmware */ 5226 ret = i40e_get_dcb_config(&pf->hw); 5227 if (ret) { 5228 dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); 5229 goto exit; 5230 } 5231 5232 /* No change detected in DCBX configs */ 5233 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, 5234 sizeof(tmp_dcbx_cfg))) { 5235 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 5236 goto exit; 5237 } 5238 5239 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, 5240 &hw->local_dcbx_config); 5241 5242 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); 5243 5244 if (!need_reconfig) 5245 goto exit; 5246 5247 /* Enable DCB tagging only when more than one TC */ 5248 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 5249 pf->flags |= I40E_FLAG_DCB_ENABLED; 5250 else 5251 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 5252 5253 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); 5254 /* Reconfiguration needed quiesce all VSIs */ 5255 i40e_pf_quiesce_all_vsi(pf); 5256 5257 /* Changes in configuration update VEB/VSI */ 5258 i40e_dcb_reconfigure(pf); 5259 5260 ret = i40e_resume_port_tx(pf); 5261 5262 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); 5263 /* In case of error no point in resuming VSIs */ 5264 if (ret) 5265 goto exit; 5266 5267 /* Wait for the PF's Tx queues to be disabled */ 5268 ret = i40e_pf_wait_txq_disabled(pf); 5269 if (ret) { 5270 /* Schedule PF reset to recover */ 5271 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 5272 i40e_service_event_schedule(pf); 5273 } else { 5274 i40e_pf_unquiesce_all_vsi(pf); 5275 } 5276 5277 exit: 5278 return ret; 5279 } 5280 #endif /* CONFIG_I40E_DCB */ 5281 5282 /** 5283 * i40e_do_reset_safe - Protected reset path for userland calls. 
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	     >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		     I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if it was disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	u32 fcnt_prog, fcnt_avail;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if FD
SB or ATR was auto disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Flush all FD filters and replay SB rules
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if (time_after(jiffies, pf->fd_flush_timestamp +
		       (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
		/* If the flush is happening too quickly and we have mostly
		 * SB rules we should not re-enable ATR for some time.
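		 * For example, with I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE at 30,
		 * ATR stays off when this flush started within 30 seconds of
		 * the previous one and sideband rules have consumed most of
		 * the filter space.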
		 */
		min_flush_time = pf->fd_flush_timestamp
				 + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
		fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

		if (!(time_after(jiffies, min_flush_time)) &&
		    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
			disable_atr = true;
		}

		pf->fd_flush_timestamp = jiffies;
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		/* flush all filters */
		wr32(&pf->hw, I40E_PFQF_CTL_1,
		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
		i40e_flush(&pf->hw);
		pf->fd_flush_cnt++;
		pf->fd_add_err = 0;
		do {
			/* Check FD flush status every 5-6msec */
			usleep_range(5000, 6000);
			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
				break;
		} while (flush_wait_retry--);
		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
		} else {
			/* replay sideband filters */
			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
			if (!disable_atr)
				pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
		}
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first
 * filter miss error on Rx queue 0. Accumulating enough error messages
 * before reacting will make sure we don't cause a flush too often.
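 * For example, a burst of errors reported against descriptors that
 * were already queued when sideband space ran out should not trigger
 * an immediate flush; the threshold below absorbs that worst case.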
5495 */ 5496 #define I40E_MAX_FD_PROGRAM_ERROR 256 5497 5498 /** 5499 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 5500 * @pf: board private structure 5501 **/ 5502 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 5503 { 5504 5505 /* if interface is down do nothing */ 5506 if (test_bit(__I40E_DOWN, &pf->state)) 5507 return; 5508 5509 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) 5510 return; 5511 5512 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 5513 i40e_fdir_flush_and_replay(pf); 5514 5515 i40e_fdir_check_and_reenable(pf); 5516 5517 } 5518 5519 /** 5520 * i40e_vsi_link_event - notify VSI of a link event 5521 * @vsi: vsi to be notified 5522 * @link_up: link up or down 5523 **/ 5524 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 5525 { 5526 if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) 5527 return; 5528 5529 switch (vsi->type) { 5530 case I40E_VSI_MAIN: 5531 #ifdef I40E_FCOE 5532 case I40E_VSI_FCOE: 5533 #endif 5534 if (!vsi->netdev || !vsi->netdev_registered) 5535 break; 5536 5537 if (link_up) { 5538 netif_carrier_on(vsi->netdev); 5539 netif_tx_wake_all_queues(vsi->netdev); 5540 } else { 5541 netif_carrier_off(vsi->netdev); 5542 netif_tx_stop_all_queues(vsi->netdev); 5543 } 5544 break; 5545 5546 case I40E_VSI_SRIOV: 5547 case I40E_VSI_VMDQ2: 5548 case I40E_VSI_CTRL: 5549 case I40E_VSI_MIRROR: 5550 default: 5551 /* there is no notification for other VSIs */ 5552 break; 5553 } 5554 } 5555 5556 /** 5557 * i40e_veb_link_event - notify elements on the veb of a link event 5558 * @veb: veb to be notified 5559 * @link_up: link up or down 5560 **/ 5561 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 5562 { 5563 struct i40e_pf *pf; 5564 int i; 5565 5566 if (!veb || !veb->pf) 5567 return; 5568 pf = veb->pf; 5569 5570 /* depth first... */ 5571 for (i = 0; i < I40E_MAX_VEB; i++) 5572 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 5573 i40e_veb_link_event(pf->veb[i], link_up); 5574 5575 /* ... now the local VSIs */ 5576 for (i = 0; i < pf->num_alloc_vsi; i++) 5577 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 5578 i40e_vsi_link_event(pf->vsi[i], link_up); 5579 } 5580 5581 /** 5582 * i40e_link_event - Update netif_carrier status 5583 * @pf: board private structure 5584 **/ 5585 static void i40e_link_event(struct i40e_pf *pf) 5586 { 5587 bool new_link, old_link; 5588 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 5589 u8 new_link_speed, old_link_speed; 5590 5591 /* set this to force the get_link_status call to refresh state */ 5592 pf->hw.phy.get_link_info = true; 5593 5594 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 5595 new_link = i40e_get_link_status(&pf->hw); 5596 old_link_speed = pf->hw.phy.link_info_old.link_speed; 5597 new_link_speed = pf->hw.phy.link_info.link_speed; 5598 5599 if (new_link == old_link && 5600 new_link_speed == old_link_speed && 5601 (test_bit(__I40E_DOWN, &vsi->state) || 5602 new_link == netif_carrier_ok(vsi->netdev))) 5603 return; 5604 5605 if (!test_bit(__I40E_DOWN, &vsi->state)) 5606 i40e_print_link_message(vsi, new_link); 5607 5608 /* Notify the base of the switch tree connected to 5609 * the link. Floating VEBs are not notified. 
5610 */ 5611 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 5612 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 5613 else 5614 i40e_vsi_link_event(vsi, new_link); 5615 5616 if (pf->vf) 5617 i40e_vc_notify_link_state(pf); 5618 5619 if (pf->flags & I40E_FLAG_PTP) 5620 i40e_ptp_set_increment(pf); 5621 } 5622 5623 /** 5624 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts 5625 * @pf: board private structure 5626 * 5627 * Set the per-queue flags to request a check for stuck queues in the irq 5628 * clean functions, then force interrupts to be sure the irq clean is called. 5629 **/ 5630 static void i40e_check_hang_subtask(struct i40e_pf *pf) 5631 { 5632 int i, v; 5633 5634 /* If we're down or resetting, just bail */ 5635 if (test_bit(__I40E_DOWN, &pf->state) || 5636 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5637 return; 5638 5639 /* for each VSI/netdev 5640 * for each Tx queue 5641 * set the check flag 5642 * for each q_vector 5643 * force an interrupt 5644 */ 5645 for (v = 0; v < pf->num_alloc_vsi; v++) { 5646 struct i40e_vsi *vsi = pf->vsi[v]; 5647 int armed = 0; 5648 5649 if (!pf->vsi[v] || 5650 test_bit(__I40E_DOWN, &vsi->state) || 5651 (vsi->netdev && !netif_carrier_ok(vsi->netdev))) 5652 continue; 5653 5654 for (i = 0; i < vsi->num_queue_pairs; i++) { 5655 set_check_for_tx_hang(vsi->tx_rings[i]); 5656 if (test_bit(__I40E_HANG_CHECK_ARMED, 5657 &vsi->tx_rings[i]->state)) 5658 armed++; 5659 } 5660 5661 if (armed) { 5662 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 5663 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, 5664 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 5665 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | 5666 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | 5667 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | 5668 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); 5669 } else { 5670 u16 vec = vsi->base_vector - 1; 5671 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 5672 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 5673 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | 5674 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | 5675 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); 5676 for (i = 0; i < vsi->num_q_vectors; i++, vec++) 5677 wr32(&vsi->back->hw, 5678 I40E_PFINT_DYN_CTLN(vec), val); 5679 } 5680 i40e_flush(&vsi->back->hw); 5681 } 5682 } 5683 } 5684 5685 /** 5686 * i40e_watchdog_subtask - periodic checks not using event driven response 5687 * @pf: board private structure 5688 **/ 5689 static void i40e_watchdog_subtask(struct i40e_pf *pf) 5690 { 5691 int i; 5692 5693 /* if interface is down do nothing */ 5694 if (test_bit(__I40E_DOWN, &pf->state) || 5695 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5696 return; 5697 5698 /* make sure we don't do these things too often */ 5699 if (time_before(jiffies, (pf->service_timer_previous + 5700 pf->service_timer_period))) 5701 return; 5702 pf->service_timer_previous = jiffies; 5703 5704 i40e_check_hang_subtask(pf); 5705 i40e_link_event(pf); 5706 5707 /* Update the stats for active netdevs so the network stack 5708 * can look at updated numbers whenever it cares to 5709 */ 5710 for (i = 0; i < pf->num_alloc_vsi; i++) 5711 if (pf->vsi[i] && pf->vsi[i]->netdev) 5712 i40e_update_stats(pf->vsi[i]); 5713 5714 /* Update the stats for the active switching components */ 5715 for (i = 0; i < I40E_MAX_VEB; i++) 5716 if (pf->veb[i]) 5717 i40e_update_veb_stats(pf->veb[i]); 5718 5719 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 5720 } 5721 5722 /** 5723 * i40e_reset_subtask - Set up for resetting the device and driver 5724 * @pf: board private structure 5725 **/ 5726 static void i40e_reset_subtask(struct i40e_pf 
*pf)
5727 {
5728 	u32 reset_flags = 0;
5729 
5730 	rtnl_lock();
5731 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5732 		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
5733 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5734 	}
5735 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5736 		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
5737 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5738 	}
5739 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5740 		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
5741 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5742 	}
5743 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5744 		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
5745 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5746 	}
5747 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5748 		reset_flags |= (1 << __I40E_DOWN_REQUESTED);
5749 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5750 	}
5751 
5752 	/* If there's a recovery already waiting, it takes
5753 	 * precedence over starting a new reset sequence.
5754 	 */
5755 	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5756 		i40e_handle_reset_warning(pf);
5757 		goto unlock;
5758 	}
5759 
5760 	/* If we're already down or resetting, just bail */
5761 	if (reset_flags &&
5762 	    !test_bit(__I40E_DOWN, &pf->state) &&
5763 	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5764 		i40e_do_reset(pf, reset_flags);
5765 
5766 unlock:
5767 	rtnl_unlock();
5768 }
5769 
5770 /**
5771  * i40e_handle_link_event - Handle link event
5772  * @pf: board private structure
5773  * @e: event info posted on ARQ
5774  **/
5775 static void i40e_handle_link_event(struct i40e_pf *pf,
5776 				   struct i40e_arq_event_info *e)
5777 {
5778 	struct i40e_hw *hw = &pf->hw;
5779 	struct i40e_aqc_get_link_status *status =
5780 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5781 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
5782 
5783 	/* save off old link status information */
5784 	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
5785 	       sizeof(pf->hw.phy.link_info_old));
5786 
5787 	/* Do a new status request to re-enable LSE reporting
5788 	 * and load new status information into the hw struct.
5789 	 * This completely ignores any state information
5790 	 * in the ARQ event info and instead always issues
5791 	 * the AQ update link status command.
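	 * (i40e_link_event() re-queries the firmware and compares the old
	 * and new link state before notifying the stack, so a stale or
	 * reordered ARQ event cannot leave the driver with a wrong view.)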
5792 */ 5793 i40e_link_event(pf); 5794 5795 /* check for unqualified module, if link is down */ 5796 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 5797 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 5798 (!(status->link_info & I40E_AQ_LINK_UP))) 5799 dev_err(&pf->pdev->dev, 5800 "The driver failed to link because an unqualified module was detected.\n"); 5801 } 5802 5803 /** 5804 * i40e_clean_adminq_subtask - Clean the AdminQ rings 5805 * @pf: board private structure 5806 **/ 5807 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 5808 { 5809 struct i40e_arq_event_info event; 5810 struct i40e_hw *hw = &pf->hw; 5811 u16 pending, i = 0; 5812 i40e_status ret; 5813 u16 opcode; 5814 u32 oldval; 5815 u32 val; 5816 5817 /* Do not run clean AQ when PF reset fails */ 5818 if (test_bit(__I40E_RESET_FAILED, &pf->state)) 5819 return; 5820 5821 /* check for error indications */ 5822 val = rd32(&pf->hw, pf->hw.aq.arq.len); 5823 oldval = val; 5824 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 5825 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 5826 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 5827 } 5828 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 5829 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 5830 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 5831 } 5832 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 5833 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 5834 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 5835 } 5836 if (oldval != val) 5837 wr32(&pf->hw, pf->hw.aq.arq.len, val); 5838 5839 val = rd32(&pf->hw, pf->hw.aq.asq.len); 5840 oldval = val; 5841 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 5842 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 5843 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 5844 } 5845 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 5846 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 5847 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 5848 } 5849 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 5850 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 5851 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 5852 } 5853 if (oldval != val) 5854 wr32(&pf->hw, pf->hw.aq.asq.len, val); 5855 5856 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 5857 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 5858 if (!event.msg_buf) 5859 return; 5860 5861 do { 5862 ret = i40e_clean_arq_element(hw, &event, &pending); 5863 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 5864 break; 5865 else if (ret) { 5866 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 5867 break; 5868 } 5869 5870 opcode = le16_to_cpu(event.desc.opcode); 5871 switch (opcode) { 5872 5873 case i40e_aqc_opc_get_link_status: 5874 i40e_handle_link_event(pf, &event); 5875 break; 5876 case i40e_aqc_opc_send_msg_to_pf: 5877 ret = i40e_vc_process_vf_msg(pf, 5878 le16_to_cpu(event.desc.retval), 5879 le32_to_cpu(event.desc.cookie_high), 5880 le32_to_cpu(event.desc.cookie_low), 5881 event.msg_buf, 5882 event.msg_len); 5883 break; 5884 case i40e_aqc_opc_lldp_update_mib: 5885 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 5886 #ifdef CONFIG_I40E_DCB 5887 rtnl_lock(); 5888 ret = i40e_handle_lldp_event(pf, &event); 5889 rtnl_unlock(); 5890 #endif /* CONFIG_I40E_DCB */ 5891 break; 5892 case i40e_aqc_opc_event_lan_overflow: 5893 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 5894 i40e_handle_lan_overflow_event(pf, &event); 5895 break; 5896 case i40e_aqc_opc_send_msg_to_peer: 5897 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 5898 break; 5899 default: 5900 dev_info(&pf->pdev->dev, 5901 "ARQ Error: Unknown event 0x%04x received\n", 
5902 opcode); 5903 break; 5904 } 5905 } while (pending && (i++ < pf->adminq_work_limit)); 5906 5907 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 5908 /* re-enable Admin queue interrupt cause */ 5909 val = rd32(hw, I40E_PFINT_ICR0_ENA); 5910 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 5911 wr32(hw, I40E_PFINT_ICR0_ENA, val); 5912 i40e_flush(hw); 5913 5914 kfree(event.msg_buf); 5915 } 5916 5917 /** 5918 * i40e_verify_eeprom - make sure eeprom is good to use 5919 * @pf: board private structure 5920 **/ 5921 static void i40e_verify_eeprom(struct i40e_pf *pf) 5922 { 5923 int err; 5924 5925 err = i40e_diag_eeprom_test(&pf->hw); 5926 if (err) { 5927 /* retry in case of garbage read */ 5928 err = i40e_diag_eeprom_test(&pf->hw); 5929 if (err) { 5930 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 5931 err); 5932 set_bit(__I40E_BAD_EEPROM, &pf->state); 5933 } 5934 } 5935 5936 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 5937 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 5938 clear_bit(__I40E_BAD_EEPROM, &pf->state); 5939 } 5940 } 5941 5942 /** 5943 * i40e_enable_pf_switch_lb 5944 * @pf: pointer to the PF structure 5945 * 5946 * enable switch loop back or die - no point in a return value 5947 **/ 5948 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 5949 { 5950 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 5951 struct i40e_vsi_context ctxt; 5952 int aq_ret; 5953 5954 ctxt.seid = pf->main_vsi_seid; 5955 ctxt.pf_num = pf->hw.pf_id; 5956 ctxt.vf_num = 0; 5957 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 5958 if (aq_ret) { 5959 dev_info(&pf->pdev->dev, 5960 "%s couldn't get PF vsi config, err %d, aq_err %d\n", 5961 __func__, aq_ret, pf->hw.aq.asq_last_status); 5962 return; 5963 } 5964 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 5965 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 5966 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 5967 5968 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 5969 if (aq_ret) { 5970 dev_info(&pf->pdev->dev, 5971 "%s: update vsi switch failed, aq_err=%d\n", 5972 __func__, vsi->back->hw.aq.asq_last_status); 5973 } 5974 } 5975 5976 /** 5977 * i40e_disable_pf_switch_lb 5978 * @pf: pointer to the PF structure 5979 * 5980 * disable switch loop back or die - no point in a return value 5981 **/ 5982 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 5983 { 5984 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 5985 struct i40e_vsi_context ctxt; 5986 int aq_ret; 5987 5988 ctxt.seid = pf->main_vsi_seid; 5989 ctxt.pf_num = pf->hw.pf_id; 5990 ctxt.vf_num = 0; 5991 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 5992 if (aq_ret) { 5993 dev_info(&pf->pdev->dev, 5994 "%s couldn't get PF vsi config, err %d, aq_err %d\n", 5995 __func__, aq_ret, pf->hw.aq.asq_last_status); 5996 return; 5997 } 5998 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 5999 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6000 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6001 6002 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6003 if (aq_ret) { 6004 dev_info(&pf->pdev->dev, 6005 "%s: update vsi switch failed, aq_err=%d\n", 6006 __func__, vsi->back->hw.aq.asq_last_status); 6007 } 6008 } 6009 6010 /** 6011 * i40e_config_bridge_mode - Configure the HW bridge mode 6012 * @veb: pointer to the bridge instance 6013 * 6014 * Configure the loop back mode for the LAN VSI that is downlink to the 6015 * specified HW bridge 
instance. It is expected this function is called 6016 * when a new HW bridge is instantiated. 6017 **/ 6018 static void i40e_config_bridge_mode(struct i40e_veb *veb) 6019 { 6020 struct i40e_pf *pf = veb->pf; 6021 6022 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 6023 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 6024 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 6025 i40e_disable_pf_switch_lb(pf); 6026 else 6027 i40e_enable_pf_switch_lb(pf); 6028 } 6029 6030 /** 6031 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 6032 * @veb: pointer to the VEB instance 6033 * 6034 * This is a recursive function that first builds the attached VSIs then 6035 * recurses in to build the next layer of VEB. We track the connections 6036 * through our own index numbers because the seid's from the HW could 6037 * change across the reset. 6038 **/ 6039 static int i40e_reconstitute_veb(struct i40e_veb *veb) 6040 { 6041 struct i40e_vsi *ctl_vsi = NULL; 6042 struct i40e_pf *pf = veb->pf; 6043 int v, veb_idx; 6044 int ret; 6045 6046 /* build VSI that owns this VEB, temporarily attached to base VEB */ 6047 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 6048 if (pf->vsi[v] && 6049 pf->vsi[v]->veb_idx == veb->idx && 6050 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 6051 ctl_vsi = pf->vsi[v]; 6052 break; 6053 } 6054 } 6055 if (!ctl_vsi) { 6056 dev_info(&pf->pdev->dev, 6057 "missing owner VSI for veb_idx %d\n", veb->idx); 6058 ret = -ENOENT; 6059 goto end_reconstitute; 6060 } 6061 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 6062 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 6063 ret = i40e_add_vsi(ctl_vsi); 6064 if (ret) { 6065 dev_info(&pf->pdev->dev, 6066 "rebuild of owner VSI failed: %d\n", ret); 6067 goto end_reconstitute; 6068 } 6069 i40e_vsi_reset_stats(ctl_vsi); 6070 6071 /* create the VEB in the switch and move the VSI onto the VEB */ 6072 ret = i40e_add_veb(veb, ctl_vsi); 6073 if (ret) 6074 goto end_reconstitute; 6075 6076 i40e_config_bridge_mode(veb); 6077 6078 /* create the remaining VSIs attached to this VEB */ 6079 for (v = 0; v < pf->num_alloc_vsi; v++) { 6080 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 6081 continue; 6082 6083 if (pf->vsi[v]->veb_idx == veb->idx) { 6084 struct i40e_vsi *vsi = pf->vsi[v]; 6085 vsi->uplink_seid = veb->seid; 6086 ret = i40e_add_vsi(vsi); 6087 if (ret) { 6088 dev_info(&pf->pdev->dev, 6089 "rebuild of vsi_idx %d failed: %d\n", 6090 v, ret); 6091 goto end_reconstitute; 6092 } 6093 i40e_vsi_reset_stats(vsi); 6094 } 6095 } 6096 6097 /* create any VEBs attached to this VEB - RECURSION */ 6098 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 6099 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 6100 pf->veb[veb_idx]->uplink_seid = veb->seid; 6101 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 6102 if (ret) 6103 break; 6104 } 6105 } 6106 6107 end_reconstitute: 6108 return ret; 6109 } 6110 6111 /** 6112 * i40e_get_capabilities - get info about the HW 6113 * @pf: the PF struct 6114 **/ 6115 static int i40e_get_capabilities(struct i40e_pf *pf) 6116 { 6117 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 6118 u16 data_size; 6119 int buf_len; 6120 int err; 6121 6122 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 6123 do { 6124 cap_buf = kzalloc(buf_len, GFP_KERNEL); 6125 if (!cap_buf) 6126 return -ENOMEM; 6127 6128 /* this loads the data into the hw struct for us */ 6129 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 6130 &data_size, 6131 
i40e_aqc_opc_list_func_capabilities, 6132 NULL); 6133 /* data loaded, buffer no longer needed */ 6134 kfree(cap_buf); 6135 6136 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 6137 /* retry with a larger buffer */ 6138 buf_len = data_size; 6139 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 6140 dev_info(&pf->pdev->dev, 6141 "capability discovery failed: aq=%d\n", 6142 pf->hw.aq.asq_last_status); 6143 return -ENODEV; 6144 } 6145 } while (err); 6146 6147 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || 6148 (pf->hw.aq.fw_maj_ver < 2)) { 6149 pf->hw.func_caps.num_msix_vectors++; 6150 pf->hw.func_caps.num_msix_vectors_vf++; 6151 } 6152 6153 if (pf->hw.debug_mask & I40E_DEBUG_USER) 6154 dev_info(&pf->pdev->dev, 6155 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 6156 pf->hw.pf_id, pf->hw.func_caps.num_vfs, 6157 pf->hw.func_caps.num_msix_vectors, 6158 pf->hw.func_caps.num_msix_vectors_vf, 6159 pf->hw.func_caps.fd_filters_guaranteed, 6160 pf->hw.func_caps.fd_filters_best_effort, 6161 pf->hw.func_caps.num_tx_qp, 6162 pf->hw.func_caps.num_vsis); 6163 6164 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 6165 + pf->hw.func_caps.num_vfs) 6166 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 6167 dev_info(&pf->pdev->dev, 6168 "got num_vsis %d, setting num_vsis to %d\n", 6169 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 6170 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 6171 } 6172 6173 return 0; 6174 } 6175 6176 static int i40e_vsi_clear(struct i40e_vsi *vsi); 6177 6178 /** 6179 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 6180 * @pf: board private structure 6181 **/ 6182 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 6183 { 6184 struct i40e_vsi *vsi; 6185 int i; 6186 6187 /* quick workaround for an NVM issue that leaves a critical register 6188 * uninitialized 6189 */ 6190 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 6191 static const u32 hkey[] = { 6192 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 6193 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 6194 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 6195 0x95b3a76d}; 6196 6197 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 6198 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 6199 } 6200 6201 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6202 return; 6203 6204 /* find existing VSI and see if it needs configuring */ 6205 vsi = NULL; 6206 for (i = 0; i < pf->num_alloc_vsi; i++) { 6207 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6208 vsi = pf->vsi[i]; 6209 break; 6210 } 6211 } 6212 6213 /* create a new VSI if none exists */ 6214 if (!vsi) { 6215 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 6216 pf->vsi[pf->lan_vsi]->seid, 0); 6217 if (!vsi) { 6218 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 6219 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6220 return; 6221 } 6222 } 6223 6224 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 6225 } 6226 6227 /** 6228 * i40e_fdir_teardown - release the Flow Director resources 6229 * @pf: board private structure 6230 **/ 6231 static void i40e_fdir_teardown(struct i40e_pf *pf) 6232 { 6233 int i; 6234 6235 i40e_fdir_filter_exit(pf); 6236 for (i = 0; i < pf->num_alloc_vsi; i++) { 6237 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6238 i40e_vsi_release(pf->vsi[i]); 6239 break; 6240 } 6241 } 6242 } 6243 6244 /** 6245 * i40e_prep_for_reset - prep for the core to reset 6246 * @pf: board private structure 6247 * 6248 * Close up the VFs and other things in prep for PF 
Reset.
6249  **/
6250 static void i40e_prep_for_reset(struct i40e_pf *pf)
6251 {
6252 	struct i40e_hw *hw = &pf->hw;
6253 	i40e_status ret = 0;
6254 	u32 v;
6255 
6256 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6257 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6258 		return;
6259 
6260 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6261 
6262 	/* quiesce the VSIs and their queues that are not already DOWN */
6263 	i40e_pf_quiesce_all_vsi(pf);
6264 
6265 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6266 		if (pf->vsi[v])
6267 			pf->vsi[v]->seid = 0;
6268 	}
6269 
6270 	i40e_shutdown_adminq(&pf->hw);
6271 
6272 	/* call shutdown HMC */
6273 	if (hw->hmc.hmc_obj) {
6274 		ret = i40e_shutdown_lan_hmc(hw);
6275 		if (ret)
6276 			dev_warn(&pf->pdev->dev,
6277 				 "shutdown_lan_hmc failed: %d\n", ret);
6278 	}
6279 }
6280 
6281 /**
6282  * i40e_send_version - update firmware with driver version
6283  * @pf: PF struct
6284  */
6285 static void i40e_send_version(struct i40e_pf *pf)
6286 {
6287 	struct i40e_driver_version dv;
6288 
6289 	dv.major_version = DRV_VERSION_MAJOR;
6290 	dv.minor_version = DRV_VERSION_MINOR;
6291 	dv.build_version = DRV_VERSION_BUILD;
6292 	dv.subbuild_version = 0;
6293 	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6294 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6295 }
6296 
6297 /**
6298  * i40e_reset_and_rebuild - reset and rebuild using a saved config
6299  * @pf: board private structure
6300  * @reinit: true if the Main VSI needs to be re-initialized
6301  **/
6302 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6303 {
6304 	struct i40e_hw *hw = &pf->hw;
6305 	u8 set_fc_aq_fail = 0;
6306 	i40e_status ret;
6307 	u32 v;
6308 
6309 	/* Now we wait for GRST to settle out.
6310 	 * We don't have to delete the VEBs or VSIs from the hw switch
6311 	 * because the reset will make them disappear.
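	 * The driver's own picture of those elements survives in pf->vsi[]
	 * and pf->veb[], and i40e_reconstitute_veb() below uses it to
	 * rebuild the hardware switch once the reset completes.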
6312 	 */
6313 	ret = i40e_pf_reset(hw);
6314 	if (ret) {
6315 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6316 		set_bit(__I40E_RESET_FAILED, &pf->state);
6317 		goto clear_recovery;
6318 	}
6319 	pf->pfr_count++;
6320 
6321 	if (test_bit(__I40E_DOWN, &pf->state))
6322 		goto clear_recovery;
6323 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6324 
6325 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6326 	ret = i40e_init_adminq(&pf->hw);
6327 	if (ret) {
6328 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
6329 		goto clear_recovery;
6330 	}
6331 
6332 	/* re-verify the eeprom if we just had an EMP reset */
6333 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6334 		i40e_verify_eeprom(pf);
6335 
6336 	i40e_clear_pxe_mode(hw);
6337 	ret = i40e_get_capabilities(pf);
6338 	if (ret) {
6339 		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
6340 			 ret);
6341 		goto end_core_reset;
6342 	}
6343 
6344 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6345 				hw->func_caps.num_rx_qp,
6346 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6347 	if (ret) {
6348 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6349 		goto end_core_reset;
6350 	}
6351 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6352 	if (ret) {
6353 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6354 		goto end_core_reset;
6355 	}
6356 
6357 #ifdef CONFIG_I40E_DCB
6358 	ret = i40e_init_pf_dcb(pf);
6359 	if (ret) {
6360 		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6361 		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6362 		/* Continue without DCB enabled */
6363 	}
6364 #endif /* CONFIG_I40E_DCB */
6365 #ifdef I40E_FCOE
6366 	ret = i40e_init_pf_fcoe(pf);
6367 	if (ret)
6368 		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6369 
6370 #endif
6371 	/* do basic switch setup */
6372 	ret = i40e_setup_pf_switch(pf, reinit);
6373 	if (ret)
6374 		goto end_core_reset;
6375 
6376 	/* driver is only interested in link up/down and module qualification
6377 	 * reports from firmware
6378 	 */
6379 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
6380 				       I40E_AQ_EVENT_LINK_UPDOWN |
6381 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6382 	if (ret)
6383 		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
6384 
6385 	/* make sure our flow control settings are restored */
6386 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6387 	if (ret)
6388 		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
6389 
6390 	/* Rebuild the VSIs and VEBs that existed before reset.
6391 	 * They are still in our local switch element arrays, so only
6392 	 * need to rebuild the switch model in the HW.
6393 	 *
6394 	 * If there were VEBs but the reconstitution failed, we'll try
6395 	 * to recover minimal use by getting the basic PF VSI working.
6396 	 */
6397 	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6398 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6399 		/* find the one VEB connected to the MAC, and find orphans */
6400 		for (v = 0; v < I40E_MAX_VEB; v++) {
6401 			if (!pf->veb[v])
6402 				continue;
6403 
6404 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6405 			    pf->veb[v]->uplink_seid == 0) {
6406 				ret = i40e_reconstitute_veb(pf->veb[v]);
6407 
6408 				if (!ret)
6409 					continue;
6410 
6411 				/* If Main VEB failed, we're in deep doodoo,
6412 				 * so give up rebuilding the switch and set up
6413 				 * for minimal rebuild of PF VSI.
6414 				 * If orphan failed, we'll report the error
6415 				 * but try to keep going.
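				 * (A VEB whose uplink_seid matches
				 * pf->mac_seid is the Main VEB; an
				 * uplink_seid of 0 marks an orphan left
				 * behind by the reset.)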
6416 */ 6417 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 6418 dev_info(&pf->pdev->dev, 6419 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 6420 ret); 6421 pf->vsi[pf->lan_vsi]->uplink_seid 6422 = pf->mac_seid; 6423 break; 6424 } else if (pf->veb[v]->uplink_seid == 0) { 6425 dev_info(&pf->pdev->dev, 6426 "rebuild of orphan VEB failed: %d\n", 6427 ret); 6428 } 6429 } 6430 } 6431 } 6432 6433 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 6434 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 6435 /* no VEB, so rebuild only the Main VSI */ 6436 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 6437 if (ret) { 6438 dev_info(&pf->pdev->dev, 6439 "rebuild of Main VSI failed: %d\n", ret); 6440 goto end_core_reset; 6441 } 6442 } 6443 6444 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 6445 (pf->hw.aq.fw_maj_ver < 4)) { 6446 msleep(75); 6447 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 6448 if (ret) 6449 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", 6450 pf->hw.aq.asq_last_status); 6451 } 6452 /* reinit the misc interrupt */ 6453 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6454 ret = i40e_setup_misc_vector(pf); 6455 6456 /* restart the VSIs that were rebuilt and running before the reset */ 6457 i40e_pf_unquiesce_all_vsi(pf); 6458 6459 if (pf->num_alloc_vfs) { 6460 for (v = 0; v < pf->num_alloc_vfs; v++) 6461 i40e_reset_vf(&pf->vf[v], true); 6462 } 6463 6464 /* tell the firmware that we're starting */ 6465 i40e_send_version(pf); 6466 6467 end_core_reset: 6468 clear_bit(__I40E_RESET_FAILED, &pf->state); 6469 clear_recovery: 6470 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 6471 } 6472 6473 /** 6474 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 6475 * @pf: board private structure 6476 * 6477 * Close up the VFs and other things in prep for a Core Reset, 6478 * then get ready to rebuild the world. 
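 *
 * This is the __I40E_RESET_INTR_RECEIVED path taken by i40e_reset_subtask();
 * driver-requested resets go through i40e_do_reset() instead. A typical
 * request looks like this (illustrative, as i40e_handle_mdd_event() does
 * below):
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 *	i40e_service_event_schedule(pf);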
6479 **/ 6480 static void i40e_handle_reset_warning(struct i40e_pf *pf) 6481 { 6482 i40e_prep_for_reset(pf); 6483 i40e_reset_and_rebuild(pf, false); 6484 } 6485 6486 /** 6487 * i40e_handle_mdd_event 6488 * @pf: pointer to the PF structure 6489 * 6490 * Called from the MDD irq handler to identify possibly malicious vfs 6491 **/ 6492 static void i40e_handle_mdd_event(struct i40e_pf *pf) 6493 { 6494 struct i40e_hw *hw = &pf->hw; 6495 bool mdd_detected = false; 6496 bool pf_mdd_detected = false; 6497 struct i40e_vf *vf; 6498 u32 reg; 6499 int i; 6500 6501 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 6502 return; 6503 6504 /* find what triggered the MDD event */ 6505 reg = rd32(hw, I40E_GL_MDET_TX); 6506 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 6507 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 6508 I40E_GL_MDET_TX_PF_NUM_SHIFT; 6509 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 6510 I40E_GL_MDET_TX_VF_NUM_SHIFT; 6511 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 6512 I40E_GL_MDET_TX_EVENT_SHIFT; 6513 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6514 I40E_GL_MDET_TX_QUEUE_SHIFT) - 6515 pf->hw.func_caps.base_queue; 6516 if (netif_msg_tx_err(pf)) 6517 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 6518 event, queue, pf_num, vf_num); 6519 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 6520 mdd_detected = true; 6521 } 6522 reg = rd32(hw, I40E_GL_MDET_RX); 6523 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 6524 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 6525 I40E_GL_MDET_RX_FUNCTION_SHIFT; 6526 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 6527 I40E_GL_MDET_RX_EVENT_SHIFT; 6528 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6529 I40E_GL_MDET_RX_QUEUE_SHIFT) - 6530 pf->hw.func_caps.base_queue; 6531 if (netif_msg_rx_err(pf)) 6532 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 6533 event, queue, func); 6534 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 6535 mdd_detected = true; 6536 } 6537 6538 if (mdd_detected) { 6539 reg = rd32(hw, I40E_PF_MDET_TX); 6540 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 6541 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 6542 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 6543 pf_mdd_detected = true; 6544 } 6545 reg = rd32(hw, I40E_PF_MDET_RX); 6546 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 6547 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 6548 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 6549 pf_mdd_detected = true; 6550 } 6551 /* Queue belongs to the PF, initiate a reset */ 6552 if (pf_mdd_detected) { 6553 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 6554 i40e_service_event_schedule(pf); 6555 } 6556 } 6557 6558 /* see if one of the VFs needs its hand slapped */ 6559 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 6560 vf = &(pf->vf[i]); 6561 reg = rd32(hw, I40E_VP_MDET_TX(i)); 6562 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 6563 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 6564 vf->num_mdd_events++; 6565 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 6566 i); 6567 } 6568 6569 reg = rd32(hw, I40E_VP_MDET_RX(i)); 6570 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 6571 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 6572 vf->num_mdd_events++; 6573 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 6574 i); 6575 } 6576 6577 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 6578 dev_info(&pf->pdev->dev, 6579 "Too many MDD events on VF %d, disabled\n", i); 6580 
dev_info(&pf->pdev->dev,
6581 				 "Use PF Control I/F to re-enable the VF\n");
6582 			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6583 		}
6584 	}
6585 
6586 	/* re-enable mdd interrupt cause */
6587 	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6588 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6589 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6590 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6591 	i40e_flush(hw);
6592 }
6593 
6594 #ifdef CONFIG_I40E_VXLAN
6595 /**
6596  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6597  * @pf: board private structure
6598  **/
6599 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6600 {
6601 	struct i40e_hw *hw = &pf->hw;
6602 	i40e_status ret;
6603 	u8 filter_index;
6604 	__be16 port;
6605 	int i;
6606 
6607 	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6608 		return;
6609 
6610 	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6611 
6612 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6613 		if (pf->pending_vxlan_bitmap & (1 << i)) {
6614 			pf->pending_vxlan_bitmap &= ~(1 << i);
6615 			port = pf->vxlan_ports[i];
6616 			ret = port ?
6617 			      i40e_aq_add_udp_tunnel(hw, ntohs(port),
6618 						     I40E_AQC_TUNNEL_TYPE_VXLAN,
6619 						     &filter_index, NULL)
6620 			      : i40e_aq_del_udp_tunnel(hw, i, NULL);
6621 
6622 			if (ret) {
6623 				dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
6624 					 port ? "adding" : "deleting",
6625 					 ntohs(port), i);
6626 
6627 				pf->vxlan_ports[i] = 0;
6628 			} else {
6629 				dev_info(&pf->pdev->dev, "%s port %d with index %d\n",
6630 					 port ? "Added" : "Deleted",
6631 					 ntohs(port), port ? filter_index : i);
6632 			}
6633 		}
6634 	}
6635 }
6636 
6637 #endif
6638 /**
6639  * i40e_service_task - Run the driver's async subtasks
6640  * @work: pointer to work_struct containing our data
6641  **/
6642 static void i40e_service_task(struct work_struct *work)
6643 {
6644 	struct i40e_pf *pf = container_of(work,
6645 					  struct i40e_pf,
6646 					  service_task);
6647 	unsigned long start_time = jiffies;
6648 
6649 	/* don't bother with service tasks if a reset is in progress */
6650 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6651 		i40e_service_event_complete(pf);
6652 		return;
6653 	}
6654 
6655 	i40e_reset_subtask(pf);
6656 	i40e_handle_mdd_event(pf);
6657 	i40e_vc_process_vflr_event(pf);
6658 	i40e_watchdog_subtask(pf);
6659 	i40e_fdir_reinit_subtask(pf);
6660 	i40e_sync_filters_subtask(pf);
6661 #ifdef CONFIG_I40E_VXLAN
6662 	i40e_sync_vxlan_filters_subtask(pf);
6663 #endif
6664 	i40e_clean_adminq_subtask(pf);
6665 
6666 	i40e_service_event_complete(pf);
6667 
6668 	/* If the tasks have taken longer than one timer cycle or there
6669 	 * is more work to be done, reschedule the service task now
6670 	 * rather than wait for the timer to tick again.
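	 * (i40e_service_timer() below re-arms the timer every
	 * pf->service_timer_period jiffies regardless, so rescheduling
	 * here only shortens the wait, never lengthens it.)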
6671 	 */
6672 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6673 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
6674 	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
6675 	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6676 		i40e_service_event_schedule(pf);
6677 }
6678 
6679 /**
6680  * i40e_service_timer - timer callback
6681  * @data: pointer to PF struct
6682  **/
6683 static void i40e_service_timer(unsigned long data)
6684 {
6685 	struct i40e_pf *pf = (struct i40e_pf *)data;
6686 
6687 	mod_timer(&pf->service_timer,
6688 		  round_jiffies(jiffies + pf->service_timer_period));
6689 	i40e_service_event_schedule(pf);
6690 }
6691 
6692 /**
6693  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6694  * @vsi: the VSI being configured
6695  **/
6696 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6697 {
6698 	struct i40e_pf *pf = vsi->back;
6699 
6700 	switch (vsi->type) {
6701 	case I40E_VSI_MAIN:
6702 		vsi->alloc_queue_pairs = pf->num_lan_qps;
6703 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6704 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6705 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6706 			vsi->num_q_vectors = pf->num_lan_msix;
6707 		else
6708 			vsi->num_q_vectors = 1;
6709 
6710 		break;
6711 
6712 	case I40E_VSI_FDIR:
6713 		vsi->alloc_queue_pairs = 1;
6714 		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6715 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6716 		vsi->num_q_vectors = 1;
6717 		break;
6718 
6719 	case I40E_VSI_VMDQ2:
6720 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6721 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6722 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6723 		vsi->num_q_vectors = pf->num_vmdq_msix;
6724 		break;
6725 
6726 	case I40E_VSI_SRIOV:
6727 		vsi->alloc_queue_pairs = pf->num_vf_qps;
6728 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6729 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6730 		break;
6731 
6732 #ifdef I40E_FCOE
6733 	case I40E_VSI_FCOE:
6734 		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6735 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6736 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
6737 		vsi->num_q_vectors = pf->num_fcoe_msix;
6738 		break;
6739 
6740 #endif /* I40E_FCOE */
6741 	default:
6742 		WARN_ON(1);
6743 		return -ENODATA;
6744 	}
6745 
6746 	return 0;
6747 }
6748 
6749 /**
6750  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
6751  * @vsi: pointer to the VSI
6752  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
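 *
 * The Tx and Rx ring pointer arrays come from a single allocation:
 * tx_rings holds 2 * alloc_queue_pairs pointers and rx_rings points at
 * its second half, which is why only tx_rings is ever freed.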
6753 * 6754 * On error: returns error code (negative) 6755 * On success: returns 0 6756 **/ 6757 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) 6758 { 6759 int size; 6760 int ret = 0; 6761 6762 /* allocate memory for both Tx and Rx ring pointers */ 6763 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; 6764 vsi->tx_rings = kzalloc(size, GFP_KERNEL); 6765 if (!vsi->tx_rings) 6766 return -ENOMEM; 6767 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; 6768 6769 if (alloc_qvectors) { 6770 /* allocate memory for q_vector pointers */ 6771 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; 6772 vsi->q_vectors = kzalloc(size, GFP_KERNEL); 6773 if (!vsi->q_vectors) { 6774 ret = -ENOMEM; 6775 goto err_vectors; 6776 } 6777 } 6778 return ret; 6779 6780 err_vectors: 6781 kfree(vsi->tx_rings); 6782 return ret; 6783 } 6784 6785 /** 6786 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF 6787 * @pf: board private structure 6788 * @type: type of VSI 6789 * 6790 * On error: returns error code (negative) 6791 * On success: returns vsi index in PF (positive) 6792 **/ 6793 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) 6794 { 6795 int ret = -ENODEV; 6796 struct i40e_vsi *vsi; 6797 int vsi_idx; 6798 int i; 6799 6800 /* Need to protect the allocation of the VSIs at the PF level */ 6801 mutex_lock(&pf->switch_mutex); 6802 6803 /* VSI list may be fragmented if VSI creation/destruction has 6804 * been happening. We can afford to do a quick scan to look 6805 * for any free VSIs in the list. 6806 * 6807 * find next empty vsi slot, looping back around if necessary 6808 */ 6809 i = pf->next_vsi; 6810 while (i < pf->num_alloc_vsi && pf->vsi[i]) 6811 i++; 6812 if (i >= pf->num_alloc_vsi) { 6813 i = 0; 6814 while (i < pf->next_vsi && pf->vsi[i]) 6815 i++; 6816 } 6817 6818 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { 6819 vsi_idx = i; /* Found one! */ 6820 } else { 6821 ret = -ENODEV; 6822 goto unlock_pf; /* out of VSI slots! */ 6823 } 6824 pf->next_vsi = ++i; 6825 6826 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); 6827 if (!vsi) { 6828 ret = -ENOMEM; 6829 goto unlock_pf; 6830 } 6831 vsi->type = type; 6832 vsi->back = pf; 6833 set_bit(__I40E_DOWN, &vsi->state); 6834 vsi->flags = 0; 6835 vsi->idx = vsi_idx; 6836 vsi->rx_itr_setting = pf->rx_itr_default; 6837 vsi->tx_itr_setting = pf->tx_itr_default; 6838 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 6839 pf->rss_table_size : 64; 6840 vsi->netdev_registered = false; 6841 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; 6842 INIT_LIST_HEAD(&vsi->mac_filter_list); 6843 vsi->irqs_ready = false; 6844 6845 ret = i40e_set_num_rings_in_vsi(vsi); 6846 if (ret) 6847 goto err_rings; 6848 6849 ret = i40e_vsi_alloc_arrays(vsi, true); 6850 if (ret) 6851 goto err_rings; 6852 6853 /* Setup default MSIX irq handler for VSI */ 6854 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); 6855 6856 pf->vsi[vsi_idx] = vsi; 6857 ret = vsi_idx; 6858 goto unlock_pf; 6859 6860 err_rings: 6861 pf->next_vsi = i - 1; 6862 kfree(vsi); 6863 unlock_pf: 6864 mutex_unlock(&pf->switch_mutex); 6865 return ret; 6866 } 6867 6868 /** 6869 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI 6870 * @type: VSI pointer 6871 * @free_qvectors: a bool to specify if q_vectors need to be freed. 
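 *
 * Note that rx_rings aliases the second half of the tx_rings allocation
 * (see i40e_vsi_alloc_arrays() above), so it is only NULLed here while
 * the single kfree() of tx_rings releases both arrays.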
6872 * 6873 * On error: returns error code (negative) 6874 * On success: returns 0 6875 **/ 6876 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) 6877 { 6878 /* free the ring and vector containers */ 6879 if (free_qvectors) { 6880 kfree(vsi->q_vectors); 6881 vsi->q_vectors = NULL; 6882 } 6883 kfree(vsi->tx_rings); 6884 vsi->tx_rings = NULL; 6885 vsi->rx_rings = NULL; 6886 } 6887 6888 /** 6889 * i40e_vsi_clear - Deallocate the VSI provided 6890 * @vsi: the VSI being un-configured 6891 **/ 6892 static int i40e_vsi_clear(struct i40e_vsi *vsi) 6893 { 6894 struct i40e_pf *pf; 6895 6896 if (!vsi) 6897 return 0; 6898 6899 if (!vsi->back) 6900 goto free_vsi; 6901 pf = vsi->back; 6902 6903 mutex_lock(&pf->switch_mutex); 6904 if (!pf->vsi[vsi->idx]) { 6905 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", 6906 vsi->idx, vsi->idx, vsi, vsi->type); 6907 goto unlock_vsi; 6908 } 6909 6910 if (pf->vsi[vsi->idx] != vsi) { 6911 dev_err(&pf->pdev->dev, 6912 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", 6913 pf->vsi[vsi->idx]->idx, 6914 pf->vsi[vsi->idx], 6915 pf->vsi[vsi->idx]->type, 6916 vsi->idx, vsi, vsi->type); 6917 goto unlock_vsi; 6918 } 6919 6920 /* updates the PF for this cleared vsi */ 6921 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 6922 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 6923 6924 i40e_vsi_free_arrays(vsi, true); 6925 6926 pf->vsi[vsi->idx] = NULL; 6927 if (vsi->idx < pf->next_vsi) 6928 pf->next_vsi = vsi->idx; 6929 6930 unlock_vsi: 6931 mutex_unlock(&pf->switch_mutex); 6932 free_vsi: 6933 kfree(vsi); 6934 6935 return 0; 6936 } 6937 6938 /** 6939 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI 6940 * @vsi: the VSI being cleaned 6941 **/ 6942 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) 6943 { 6944 int i; 6945 6946 if (vsi->tx_rings && vsi->tx_rings[0]) { 6947 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 6948 kfree_rcu(vsi->tx_rings[i], rcu); 6949 vsi->tx_rings[i] = NULL; 6950 vsi->rx_rings[i] = NULL; 6951 } 6952 } 6953 } 6954 6955 /** 6956 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 6957 * @vsi: the VSI being configured 6958 **/ 6959 static int i40e_alloc_rings(struct i40e_vsi *vsi) 6960 { 6961 struct i40e_ring *tx_ring, *rx_ring; 6962 struct i40e_pf *pf = vsi->back; 6963 int i; 6964 6965 /* Set basic values in the rings to be used later during open() */ 6966 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 6967 /* allocate space for both Tx and Rx in one shot */ 6968 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 6969 if (!tx_ring) 6970 goto err_out; 6971 6972 tx_ring->queue_index = i; 6973 tx_ring->reg_idx = vsi->base_queue + i; 6974 tx_ring->ring_active = false; 6975 tx_ring->vsi = vsi; 6976 tx_ring->netdev = vsi->netdev; 6977 tx_ring->dev = &pf->pdev->dev; 6978 tx_ring->count = vsi->num_desc; 6979 tx_ring->size = 0; 6980 tx_ring->dcb_tc = 0; 6981 vsi->tx_rings[i] = tx_ring; 6982 6983 rx_ring = &tx_ring[1]; 6984 rx_ring->queue_index = i; 6985 rx_ring->reg_idx = vsi->base_queue + i; 6986 rx_ring->ring_active = false; 6987 rx_ring->vsi = vsi; 6988 rx_ring->netdev = vsi->netdev; 6989 rx_ring->dev = &pf->pdev->dev; 6990 rx_ring->count = vsi->num_desc; 6991 rx_ring->size = 0; 6992 rx_ring->dcb_tc = 0; 6993 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) 6994 set_ring_16byte_desc_enabled(rx_ring); 6995 else 6996 clear_ring_16byte_desc_enabled(rx_ring); 6997 vsi->rx_rings[i] = rx_ring; 6998 } 6999 7000 return 0; 
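	/* error unwind: i40e_vsi_clear_rings() below releases any ring
	 * pairs that were allocated before the failing iteration
	 */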
7001 7002 err_out: 7003 i40e_vsi_clear_rings(vsi); 7004 return -ENOMEM; 7005 } 7006 7007 /** 7008 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 7009 * @pf: board private structure 7010 * @vectors: the number of MSI-X vectors to request 7011 * 7012 * Returns the number of vectors reserved, or error 7013 **/ 7014 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 7015 { 7016 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 7017 I40E_MIN_MSIX, vectors); 7018 if (vectors < 0) { 7019 dev_info(&pf->pdev->dev, 7020 "MSI-X vector reservation failed: %d\n", vectors); 7021 vectors = 0; 7022 } 7023 7024 return vectors; 7025 } 7026 7027 /** 7028 * i40e_init_msix - Setup the MSIX capability 7029 * @pf: board private structure 7030 * 7031 * Work with the OS to set up the MSIX vectors needed. 7032 * 7033 * Returns the number of vectors reserved or negative on failure 7034 **/ 7035 static int i40e_init_msix(struct i40e_pf *pf) 7036 { 7037 struct i40e_hw *hw = &pf->hw; 7038 int vectors_left; 7039 int v_budget, i; 7040 int v_actual; 7041 7042 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7043 return -ENODEV; 7044 7045 /* The number of vectors we'll request will be comprised of: 7046 * - Add 1 for "other" cause for Admin Queue events, etc. 7047 * - The number of LAN queue pairs 7048 * - Queues being used for RSS. 7049 * We don't need as many as max_rss_size vectors. 7050 * use rss_size instead in the calculation since that 7051 * is governed by number of cpus in the system. 7052 * - assumes symmetric Tx/Rx pairing 7053 * - The number of VMDq pairs 7054 #ifdef I40E_FCOE 7055 * - The number of FCOE qps. 7056 #endif 7057 * Once we count this up, try the request. 7058 * 7059 * If we can't get what we want, we'll simplify to nearly nothing 7060 * and try again. If that still fails, we punt. 7061 */ 7062 vectors_left = hw->func_caps.num_msix_vectors; 7063 v_budget = 0; 7064 7065 /* reserve one vector for miscellaneous handler */ 7066 if (vectors_left) { 7067 v_budget++; 7068 vectors_left--; 7069 } 7070 7071 /* reserve vectors for the main PF traffic queues */ 7072 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7073 vectors_left -= pf->num_lan_msix; 7074 v_budget += pf->num_lan_msix; 7075 7076 /* reserve one vector for sideband flow director */ 7077 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7078 if (vectors_left) { 7079 v_budget++; 7080 vectors_left--; 7081 } else { 7082 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7083 } 7084 } 7085 7086 #ifdef I40E_FCOE 7087 /* can we reserve enough for FCoE? */ 7088 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7089 if (!vectors_left) 7090 pf->num_fcoe_msix = 0; 7091 else if (vectors_left >= pf->num_fcoe_qps) 7092 pf->num_fcoe_msix = pf->num_fcoe_qps; 7093 else 7094 pf->num_fcoe_msix = 1; 7095 v_budget += pf->num_fcoe_msix; 7096 vectors_left -= pf->num_fcoe_msix; 7097 } 7098 7099 #endif 7100 /* any vectors left over go for VMDq support */ 7101 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7102 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7103 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 7104 7105 /* if we're short on vectors for what's desired, we limit 7106 * the queues per vmdq. 
If this is still more than are 7107 * available, the user will need to change the number of 7108 * queues/vectors used by the PF later with the ethtool 7109 * channels command 7110 */ 7111 if (vmdq_vecs < vmdq_vecs_wanted) 7112 pf->num_vmdq_qps = 1; 7113 pf->num_vmdq_msix = pf->num_vmdq_qps; 7114 7115 v_budget += vmdq_vecs; 7116 vectors_left -= vmdq_vecs; 7117 } 7118 7119 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7120 GFP_KERNEL); 7121 if (!pf->msix_entries) 7122 return -ENOMEM; 7123 7124 for (i = 0; i < v_budget; i++) 7125 pf->msix_entries[i].entry = i; 7126 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 7127 7128 if (v_actual != v_budget) { 7129 /* If we have limited resources, we will start with no vectors 7130 * for the special features and then allocate vectors to some 7131 * of these features based on the policy and at the end disable 7132 * the features that did not get any vectors. 7133 */ 7134 #ifdef I40E_FCOE 7135 pf->num_fcoe_qps = 0; 7136 pf->num_fcoe_msix = 0; 7137 #endif 7138 pf->num_vmdq_msix = 0; 7139 } 7140 7141 if (v_actual < I40E_MIN_MSIX) { 7142 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7143 kfree(pf->msix_entries); 7144 pf->msix_entries = NULL; 7145 return -ENODEV; 7146 7147 } else if (v_actual == I40E_MIN_MSIX) { 7148 /* Adjust for minimal MSIX use */ 7149 pf->num_vmdq_vsis = 0; 7150 pf->num_vmdq_qps = 0; 7151 pf->num_lan_qps = 1; 7152 pf->num_lan_msix = 1; 7153 7154 } else if (v_actual != v_budget) { 7155 int vec; 7156 7157 /* reserve the misc vector */ 7158 vec = v_actual - 1; 7159 7160 /* Scale vector usage down */ 7161 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 7162 pf->num_vmdq_vsis = 1; 7163 pf->num_vmdq_qps = 1; 7164 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7165 7166 /* partition out the remaining vectors */ 7167 switch (vec) { 7168 case 2: 7169 pf->num_lan_msix = 1; 7170 break; 7171 case 3: 7172 #ifdef I40E_FCOE 7173 /* give one vector to FCoE */ 7174 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7175 pf->num_lan_msix = 1; 7176 pf->num_fcoe_msix = 1; 7177 } 7178 #else 7179 pf->num_lan_msix = 2; 7180 #endif 7181 break; 7182 default: 7183 #ifdef I40E_FCOE 7184 /* give one vector to FCoE */ 7185 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7186 pf->num_fcoe_msix = 1; 7187 vec--; 7188 } 7189 #endif 7190 /* give the rest to the PF */ 7191 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); 7192 break; 7193 } 7194 } 7195 7196 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 7197 (pf->num_vmdq_msix == 0)) { 7198 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7199 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7200 } 7201 #ifdef I40E_FCOE 7202 7203 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7204 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 7205 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 7206 } 7207 #endif 7208 return v_actual; 7209 } 7210 7211 /** 7212 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 7213 * @vsi: the VSI being configured 7214 * @v_idx: index of the vector in the vsi struct 7215 * 7216 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
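 *
 * The vector is also hooked into NAPI here: when the VSI has a netdev,
 * netif_napi_add() registers i40e_napi_poll() with the default
 * NAPI_POLL_WEIGHT budget.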
7217 **/ 7218 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 7219 { 7220 struct i40e_q_vector *q_vector; 7221 7222 /* allocate q_vector */ 7223 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 7224 if (!q_vector) 7225 return -ENOMEM; 7226 7227 q_vector->vsi = vsi; 7228 q_vector->v_idx = v_idx; 7229 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 7230 if (vsi->netdev) 7231 netif_napi_add(vsi->netdev, &q_vector->napi, 7232 i40e_napi_poll, NAPI_POLL_WEIGHT); 7233 7234 q_vector->rx.latency_range = I40E_LOW_LATENCY; 7235 q_vector->tx.latency_range = I40E_LOW_LATENCY; 7236 7237 /* tie q_vector and vsi together */ 7238 vsi->q_vectors[v_idx] = q_vector; 7239 7240 return 0; 7241 } 7242 7243 /** 7244 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 7245 * @vsi: the VSI being configured 7246 * 7247 * We allocate one q_vector per queue interrupt. If allocation fails we 7248 * return -ENOMEM. 7249 **/ 7250 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 7251 { 7252 struct i40e_pf *pf = vsi->back; 7253 int v_idx, num_q_vectors; 7254 int err; 7255 7256 /* if not MSIX, give the one vector only to the LAN VSI */ 7257 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7258 num_q_vectors = vsi->num_q_vectors; 7259 else if (vsi == pf->vsi[pf->lan_vsi]) 7260 num_q_vectors = 1; 7261 else 7262 return -EINVAL; 7263 7264 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 7265 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 7266 if (err) 7267 goto err_out; 7268 } 7269 7270 return 0; 7271 7272 err_out: 7273 while (v_idx--) 7274 i40e_free_q_vector(vsi, v_idx); 7275 7276 return err; 7277 } 7278 7279 /** 7280 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 7281 * @pf: board private structure to initialize 7282 **/ 7283 static void i40e_init_interrupt_scheme(struct i40e_pf *pf) 7284 { 7285 int vectors = 0; 7286 ssize_t size; 7287 7288 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 7289 vectors = i40e_init_msix(pf); 7290 if (vectors < 0) { 7291 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 7292 #ifdef I40E_FCOE 7293 I40E_FLAG_FCOE_ENABLED | 7294 #endif 7295 I40E_FLAG_RSS_ENABLED | 7296 I40E_FLAG_DCB_CAPABLE | 7297 I40E_FLAG_SRIOV_ENABLED | 7298 I40E_FLAG_FD_SB_ENABLED | 7299 I40E_FLAG_FD_ATR_ENABLED | 7300 I40E_FLAG_VMDQ_ENABLED); 7301 7302 /* rework the queue expectations without MSIX */ 7303 i40e_determine_queue_usage(pf); 7304 } 7305 } 7306 7307 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 7308 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 7309 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 7310 vectors = pci_enable_msi(pf->pdev); 7311 if (vectors < 0) { 7312 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 7313 vectors); 7314 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 7315 } 7316 vectors = 1; /* one MSI or Legacy vector */ 7317 } 7318 7319 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 7320 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 7321 7322 /* set up vector assignment tracking */ 7323 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 7324 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7325 pf->irq_pile->num_entries = vectors; 7326 pf->irq_pile->search_hint = 0; 7327 7328 /* track first vector for misc interrupts */ 7329 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); 7330 } 7331 7332 /** 7333 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events 7334 * @pf: board private structure 7335 * 7336 * This sets up the handler for MSIX 0, which is 
used to manage the 7337 * non-queue interrupts, e.g. AdminQ and errors. This is not used 7338 * when in MSI or Legacy interrupt mode. 7339 **/ 7340 static int i40e_setup_misc_vector(struct i40e_pf *pf) 7341 { 7342 struct i40e_hw *hw = &pf->hw; 7343 int err = 0; 7344 7345 /* Only request the irq if this is the first time through, and 7346 * not when we're rebuilding after a Reset 7347 */ 7348 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7349 err = request_irq(pf->msix_entries[0].vector, 7350 i40e_intr, 0, pf->int_name, pf); 7351 if (err) { 7352 dev_info(&pf->pdev->dev, 7353 "request_irq for %s failed: %d\n", 7354 pf->int_name, err); 7355 return -EFAULT; 7356 } 7357 } 7358 7359 i40e_enable_misc_int_causes(pf); 7360 7361 /* associate no queues to the misc vector */ 7362 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 7363 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); 7364 7365 i40e_flush(hw); 7366 7367 i40e_irq_dynamic_enable_icr0(pf); 7368 7369 return err; 7370 } 7371 7372 /** 7373 * i40e_config_rss - Prepare for RSS if used 7374 * @pf: board private structure 7375 **/ 7376 static int i40e_config_rss(struct i40e_pf *pf) 7377 { 7378 u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; 7379 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 7380 struct i40e_hw *hw = &pf->hw; 7381 u32 lut = 0; 7382 int i, j; 7383 u64 hena; 7384 u32 reg_val; 7385 7386 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 7387 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 7388 wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); 7389 7390 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ 7391 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | 7392 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); 7393 hena |= I40E_DEFAULT_RSS_HENA; 7394 wr32(hw, I40E_PFQF_HENA(0), (u32)hena); 7395 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 7396 7397 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); 7398 7399 /* Check capability and Set table size and register per hw expectation*/ 7400 reg_val = rd32(hw, I40E_PFQF_CTL_0); 7401 if (pf->rss_table_size == 512) 7402 reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; 7403 else 7404 reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; 7405 wr32(hw, I40E_PFQF_CTL_0, reg_val); 7406 7407 /* Populate the LUT with max no. of queues in round robin fashion */ 7408 for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) { 7409 7410 /* The assumption is that lan qp count will be the highest 7411 * qp count for any PF VSI that needs RSS. 7412 * If multiple VSIs need RSS support, all the qp counts 7413 * for those VSIs should be a power of 2 for RSS to work. 7414 * If LAN VSI is the only consumer for RSS then this requirement 7415 * is not necessary. 7416 */ 7417 if (j == vsi->rss_size) 7418 j = 0; 7419 /* lut = 4-byte sliding window of 4 lut entries */ 7420 lut = (lut << 8) | (j & 7421 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); 7422 /* On i = 3, we have 4 entries in lut; write to the register */ 7423 if ((i & 3) == 3) 7424 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); 7425 } 7426 i40e_flush(hw); 7427 7428 return 0; 7429 } 7430 7431 /** 7432 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 7433 * @pf: board private structure 7434 * @queue_count: the requested queue count for rss. 7435 * 7436 * returns 0 if rss is not enabled, if enabled returns the final rss queue 7437 * count which may be different from the requested queue count. 
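 *
 * This is the path behind the ethtool channels interface, e.g.
 * (illustrative):
 *
 *	ethtool -L eth0 combined 8
 *
 * The request is clamped to rss_size_max and, if it changes the queue
 * count, triggers a full PF reset/rebuild with the new RSS sizing.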
7438 **/ 7439 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 7440 { 7441 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 7442 int new_rss_size; 7443 7444 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 7445 return 0; 7446 7447 new_rss_size = min_t(int, queue_count, pf->rss_size_max); 7448 7449 if (queue_count != vsi->num_queue_pairs) { 7450 vsi->req_queue_pairs = queue_count; 7451 i40e_prep_for_reset(pf); 7452 7453 pf->rss_size = new_rss_size; 7454 7455 i40e_reset_and_rebuild(pf, true); 7456 i40e_config_rss(pf); 7457 } 7458 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); 7459 return pf->rss_size; 7460 } 7461 7462 /** 7463 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition 7464 * @pf: board private structure 7465 **/ 7466 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) 7467 { 7468 i40e_status status; 7469 bool min_valid, max_valid; 7470 u32 max_bw, min_bw; 7471 7472 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, 7473 &min_valid, &max_valid); 7474 7475 if (!status) { 7476 if (min_valid) 7477 pf->npar_min_bw = min_bw; 7478 if (max_valid) 7479 pf->npar_max_bw = max_bw; 7480 } 7481 7482 return status; 7483 } 7484 7485 /** 7486 * i40e_set_npar_bw_setting - Set BW settings for this PF partition 7487 * @pf: board private structure 7488 **/ 7489 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) 7490 { 7491 struct i40e_aqc_configure_partition_bw_data bw_data; 7492 i40e_status status; 7493 7494 /* Set the valid bit for this PF */ 7495 bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); 7496 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; 7497 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; 7498 7499 /* Set the new bandwidths */ 7500 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); 7501 7502 return status; 7503 } 7504 7505 /** 7506 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition 7507 * @pf: board private structure 7508 **/ 7509 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) 7510 { 7511 /* Commit temporary BW setting to permanent NVM image */ 7512 enum i40e_admin_queue_err last_aq_status; 7513 i40e_status ret; 7514 u16 nvm_word; 7515 7516 if (pf->hw.partition_id != 1) { 7517 dev_info(&pf->pdev->dev, 7518 "Commit BW only works on partition 1! 
This is partition %d", 7519 pf->hw.partition_id); 7520 ret = I40E_NOT_SUPPORTED; 7521 goto bw_commit_out; 7522 } 7523 7524 /* Acquire NVM for read access */ 7525 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 7526 last_aq_status = pf->hw.aq.asq_last_status; 7527 if (ret) { 7528 dev_info(&pf->pdev->dev, 7529 "Cannot acquire NVM for read access, err %d: aq_err %d\n", 7530 ret, last_aq_status); 7531 goto bw_commit_out; 7532 } 7533 7534 /* Read word 0x10 of NVM - SW compatibility word 1 */ 7535 ret = i40e_aq_read_nvm(&pf->hw, 7536 I40E_SR_NVM_CONTROL_WORD, 7537 0x10, sizeof(nvm_word), &nvm_word, 7538 false, NULL); 7539 /* Save off last admin queue command status before releasing 7540 * the NVM 7541 */ 7542 last_aq_status = pf->hw.aq.asq_last_status; 7543 i40e_release_nvm(&pf->hw); 7544 if (ret) { 7545 dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", 7546 ret, last_aq_status); 7547 goto bw_commit_out; 7548 } 7549 7550 /* Wait a bit for NVM release to complete */ 7551 msleep(50); 7552 7553 /* Acquire NVM for write access */ 7554 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 7555 last_aq_status = pf->hw.aq.asq_last_status; 7556 if (ret) { 7557 dev_info(&pf->pdev->dev, 7558 "Cannot acquire NVM for write access, err %d: aq_err %d\n", 7559 ret, last_aq_status); 7560 goto bw_commit_out; 7561 } 7562 /* Write it back out unchanged to initiate update NVM, 7563 * which will force a write of the shadow (alt) RAM to 7564 * the NVM - thus storing the bandwidth values permanently. 7565 */ 7566 ret = i40e_aq_update_nvm(&pf->hw, 7567 I40E_SR_NVM_CONTROL_WORD, 7568 0x10, sizeof(nvm_word), 7569 &nvm_word, true, NULL); 7570 /* Save off last admin queue command status before releasing 7571 * the NVM 7572 */ 7573 last_aq_status = pf->hw.aq.asq_last_status; 7574 i40e_release_nvm(&pf->hw); 7575 if (ret) 7576 dev_info(&pf->pdev->dev, 7577 "BW settings NOT SAVED, err %d aq_err %d\n", 7578 ret, last_aq_status); 7579 bw_commit_out: 7580 7581 return ret; 7582 } 7583 7584 /** 7585 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 7586 * @pf: board private structure to initialize 7587 * 7588 * i40e_sw_init initializes the Adapter private data structure. 7589 * Fields are initialized based on PCI device information and 7590 * OS network device settings (MTU size). 
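 *
 * (The 'debug' module parameter, when set, also seeds msg_enable and the
 * shared-code debug mask; see the netif_msg_init() logic at the top of
 * the function.)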

/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED |
		    I40E_FLAG_MSIX_ENABLED;

	if (iommu_present(&pci_bus_type))
		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
	else
		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
	pf->rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_npar_bw_setting(pf))
			dev_warn(&pf->pdev->dev,
				 "Could not get NPAR bw settings\n");
		else
			dev_info(&pf->pdev->dev,
				 "Min BW = %8.8x, Max BW = %8.8x\n",
				 pf->npar_min_bw, pf->npar_max_bw);
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Setup a counter for fd_atr per PF */
		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			/* Setup a counter for fd_sb per PF */
			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
		} else {
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		}
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
	}

#ifdef I40E_FCOE
	err = i40e_init_pf_fcoe(pf);
	if (err)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;
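
	/* The queue-pair pile below is one allocation: a tracking header
	 * followed by one u16 ownership entry per queue pair.  Worked
	 * example (numbers illustrative): with num_tx_qp = 128 the list
	 * portion alone is 128 * sizeof(u16) = 256 bytes on top of the
	 * header.
	 */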
	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
	     + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

	/* If NPAR is enabled nudge the Tx scheduler */
	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
		i40e_set_npar_bw_setting(pf);

sw_init_done:
	return err;
}

/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
		pf->fdir_pf_active_filters = 0;
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		/* if ATR was auto disabled it can be re-enabled. */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	}
	return need_reset;
}
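
/* Sketch of how this is reached (illustrative): the stack toggles
 * NETIF_F_NTUPLE via ethtool and i40e_set_features() below consults
 * i40e_set_ntuple() to decide whether a PF reset is needed:
 *
 *	# ethtool -K <ifname> ntuple on    - enables FD sideband filters
 *	# ethtool -K <ifname> ntuple off   - drops SB filters, re-enables ATR
 */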

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return 0;
}

#ifdef CONFIG_I40E_VXLAN
/**
 * i40e_get_vxlan_port_idx - Lookup a possibly offloaded Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->vxlan_ports[i] == port)
			return i;
	}

	return i;
}

/**
 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 **/
static void i40e_add_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 next_idx;
	u8 idx;

	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_vxlan_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_vxlan_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->vxlan_ports[next_idx] = port;
	pf->pending_vxlan_bitmap |= (1 << next_idx);

	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
}

/**
 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: UDP port number that VXLAN stopped listening to
 **/
static void i40e_del_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 idx;

	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_vxlan_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		/* if port exists, set it to 0 (mark for deletion)
		 * and make it pending
		 */
		pf->vxlan_ports[idx] = 0;

		pf->pending_vxlan_bitmap |= (1 << idx);

		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
	} else {
		netdev_warn(netdev, "Port %d was not found, not deleting\n",
			    ntohs(port));
	}
}
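
/* Both notifiers above only record intent: the touched index is flagged
 * in pending_vxlan_bitmap and I40E_FLAG_VXLAN_FILTER_SYNC is set, and the
 * actual admin queue filter update is performed later by the service
 * task.  Simplified shape of that deferred sync (illustrative):
 *
 *	for each bit idx set in pf->pending_vxlan_bitmap:
 *		program or remove pf->vxlan_ports[idx] via the AQ;
 *		clear the pending bit;
 */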

#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}

/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
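
/* Userspace reaches the handler above through the RTNL FDB path, e.g.
 * (interface name and MAC illustrative):
 *
 *	# bridge fdb add 52:54:00:12:34:56 dev eth0
 *
 * Only static (NUD_PERMANENT) entries are accepted, and only while
 * SR-IOV is enabled on the PF.
 */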

#ifdef HAVE_BRIDGE_ATTRIBS
/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
			break;
		}
	}

	return 0;
}
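
/* Illustrative userspace trigger for the handler above (device name is a
 * placeholder):
 *
 *	# bridge link set dev eth0 hwmode vepa
 *
 * Switching an existing bridge between VEB and VEPA requires the PF reset
 * requested above so the switch components are rebuilt in the new mode.
 */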

/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 *
 * Return the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
 **/
#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask)
#else
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev)
#endif /* HAVE_BRIDGE_FILTER */
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
}
#endif /* HAVE_BRIDGE_ATTRIBS */

static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
#ifdef HAVE_BRIDGE_ATTRIBS
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
#endif /* HAVE_BRIDGE_ATTRIBS */
};

/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM |
				   NETIF_F_GSO_UDP_TUNNEL |
				   NETIF_F_TSO;

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_SCTP_CSUM |
			   NETIF_F_HIGHDMA |
			   NETIF_F_GSO_UDP_TUNNEL |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_TSO |
			   NETIF_F_TSO_ECN |
			   NETIF_F_TSO6 |
			   NETIF_F_RXCSUM |
			   NETIF_F_RXHASH |
			   0;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr))
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
	}
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	/* Uplink is a bridge in VEPA mode */
	if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}
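
/* Sketch of how the check above is consumed during VSI creation (see
 * i40e_add_vsi() below): VEB-attached VSIs request loopback support in
 * their switch section, VEPA-attached VSIs do not:
 *
 *	if (i40e_is_vsi_uplink_mode_veb(vsi)) {
 *		ctxt.info.valid_sections |=
 *			cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 *		ctxt.info.switch_id =
 *			cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 *	}
 */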

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %d, aq_err %d\n",
				 ret, pf->hw.aq.asq_last_status);
			return -ENOENT;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, aq_err=%d\n",
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
					 enabled_tc, ret,
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
		if (ret) {
			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
			return ret;
		}
		break;

#endif /* I40E_FCOE */
	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, aq_err=%d\n",
				 vsi->back->hw.aq.asq_last_status);
			ret = -ENOENT;
			goto err;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;

		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
			struct i40e_aqc_remove_macvlan_element_data element;

			memset(&element, 0, sizeof(element));
			ether_addr_copy(element.mac_addr, f->macaddr);
			element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			ret = i40e_aq_remove_macvlan(hw, vsi->seid,
						     &element, 1, NULL);
			if (ret) {
				/* some older FW has a different default */
				element.flags |=
					       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				i40e_aq_remove_macvlan(hw, vsi->seid,
						       &element, 1, NULL);
			}

			i40e_aq_mac_address_write(hw,
						  I40E_AQC_WRITE_TYPE_LAA_WOL,
						  f->macaddr, NULL);
		}
	}
	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
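
/* The filter walk at the end of i40e_add_vsi() only marks work: each
 * filter is flagged changed and I40E_FLAG_FILTER_SYNC is set, so the
 * admin queue writes are batched later through i40e_sync_vsi_filters().
 * Minimal sketch of the follow-up (illustrative):
 *
 *	if (pf->flags & I40E_FLAG_FILTER_SYNC)
 *		i40e_sync_vsi_filters(vsi);
 */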

/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
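
/* Note the teardown order above: the netdev is unregistered (or the VSI
 * closed) and IRQs disabled before the filters are removed and the
 * element is deleted from the HW switch; only then are vectors, rings
 * and the SW struct freed.  Illustrative caller, where v is a
 * hypothetical VSI index:
 *
 *	ret = i40e_vsi_release(pf->vsi[v]);
 */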

/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}

/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
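
/* Note the lump discipline in i40e_vsi_reinit_setup() above: the old
 * queue range is handed back with i40e_put_lump() before the arrays are
 * resized, and a fresh range is re-acquired with i40e_get_lump() for the
 * new alloc_queue_pairs.  As a worked example (numbers illustrative), a
 * VSI growing from 4 to 8 pairs returns base_queue..base_queue+3 to the
 * pile and may be assigned a different base_queue for the 8-pair request.
 */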

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds
 * the VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
					 __func__);
				return NULL;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
				vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
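
/* Illustrative use of i40e_vsi_setup() above, e.g. creating a VMDq VSI
 * attached via the LAN VSI's seid (a VEB is created if needed; error
 * handling elided):
 *
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vsi)
 *		return -ENOMEM;
 */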

/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
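
/* Worked example of the tc_bw_max unpacking above: the two le16 words
 * form one 32-bit value carrying a 4-bit field per TC (masked to 3
 * bits), so for tc_bw_max = 0x3210 (value illustrative):
 *
 *	TC0 = (0x3210 >>  0) & 0x7 = 0
 *	TC1 = (0x3210 >>  4) & 0x7 = 1
 *	TC2 = (0x3210 >>  8) & 0x7 = 2
 *	TC3 = (0x3210 >> 12) & 0x7 = 3
 */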

/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}

/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	bool is_default = false;
	bool is_cloud = false;
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default,
			      is_cloud, &veb->seid, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't add VEB, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB bw info, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
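
/* Illustrative call into i40e_veb_setup() above, creating an uplink VEB
 * for a VSI (mirrors the calls made elsewhere in this file); passing
 * both uplink_seid and vsi_seid as 0 would instead request a floating
 * VEB:
 *
 *	veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 */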

/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}

/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed %d aq_err=%x\n",
				 ret, pf->hw.aq.asq_last_status);
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
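
/* The get_switch_config response is paginated: each AQ call fills at
 * most I40E_AQ_LARGE_BUF bytes of elements and returns a continuation
 * seid, so the loop above keeps calling until next_seid comes back 0.
 * Trimmed sketch of the loop shape (illustrative):
 *
 *	do {
 *		i40e_aq_get_switch_config(hw, buf, len, &next_seid, NULL);
 *		parse buf->element[0 .. num_reported - 1];
 *	} while (next_seid != 0);
 */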

/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	return ret;
}

/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;

	pf->num_lan_qps = 0;
#ifdef I40E_FCOE
	pf->num_fcoe_qps = 0;
#endif

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED |
#endif
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED |
#endif
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}
		pf->num_lan_qps = max_t(int, pf->rss_size_max,
					num_online_cpus());
		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
					pf->hw.func_caps.num_tx_qp);

		queues_left -= pf->num_lan_qps;
	}

#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (I40E_DEFAULT_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
		} else if (I40E_MINIMUM_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
		} else {
			pf->num_fcoe_qps = 0;
			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
		}

		queues_left -= pf->num_fcoe_qps;
	}

#endif
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
#ifdef I40E_FCOE
	dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
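
/* Worked example of the distribution above (all numbers illustrative):
 * num_tx_qp = 64, 8 online CPUs, rss_size_max = 8, FD sideband on,
 * 4 requested VFs at 4 qps each, 8 VMDq VSIs at 2 qps each:
 *
 *	LAN:  max(rss_size_max, ncpus) = 8   -> 56 left
 *	FD:   1 sideband queue               -> 55 left
 *	VFs:  4 VFs * 4 qps = 16             -> 39 left
 *	VMDq: 8 VSIs * 2 qps = 16            -> 23 left
 *
 * leaving pf->queues_left = 23 for later VF/VMDq growth.
 */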
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

#define INFO_STRING_LEN 255
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf, *string;

	string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!string) {
		dev_err(&pf->pdev->dev, "Features string allocation failed\n");
		return;
	}

	buf = string;

	buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
#endif
	buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
		       pf->hw.func_caps.num_vsis,
		       pf->vsi[pf->lan_vsi]->num_queue_pairs,
		       pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");

	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		buf += sprintf(buf, "RSS ");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		buf += sprintf(buf, "FD_ATR ");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		buf += sprintf(buf, "FD_SB ");
		buf += sprintf(buf, "NTUPLE ");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		buf += sprintf(buf, "DCB ");
	if (pf->flags & I40E_FLAG_PTP)
		buf += sprintf(buf, "PTP ");
#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
		buf += sprintf(buf, "FCOE ");
#endif

	BUG_ON(buf > (string + INFO_STRING_LEN));
	dev_info(&pf->pdev->dev, "%s\n", string);
	kfree(string);
}
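
/* For illustration only (not part of the driver): the sprintf() chain
 * above relies on INFO_STRING_LEN being generous and on the BUG_ON()
 * to catch an overflow after the fact.  A bounds-checked variant could
 * track remaining space instead, e.g.:
 *
 *	int len = 0;
 *
 *	len += scnprintf(string + len, INFO_STRING_LEN - len, "RSS ");
 *	len += scnprintf(string + len, INFO_STRING_LEN - len, "FD_ATR ");
 *
 * scnprintf() returns the number of characters actually written, so the
 * running total can never walk past the end of the buffer.
 */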
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	unsigned long ioremap_len;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 link_status;
	int err = 0;
	u32 len;
	u32 i;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, &pf->state);

	hw = &pf->hw;
	hw->back = pf;

	ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
			    I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 (unsigned int)pci_resource_len(pdev, 0), err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	pf->instance = pfs_found;

	if (debug != -1)
		pf->msg_enable = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
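
	/* Everything from here on talks to the firmware through the admin
	 * queue pair sized just above: the send queue (ASQ) carries
	 * commands and the receive queue (ARQ) carries events, and
	 * i40e_init_adminq() below brings both up before any capability
	 * queries are issued.
	 */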
dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); 9691 goto err_pf_reset; 9692 } 9693 9694 /* set up a default setting for link flow control */ 9695 pf->hw.fc.requested_mode = I40E_FC_NONE; 9696 9697 err = i40e_init_adminq(hw); 9698 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); 9699 if (err) { 9700 dev_info(&pdev->dev, 9701 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); 9702 goto err_pf_reset; 9703 } 9704 9705 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 9706 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) 9707 dev_info(&pdev->dev, 9708 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 9709 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 9710 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) 9711 dev_info(&pdev->dev, 9712 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 9713 9714 i40e_verify_eeprom(pf); 9715 9716 /* Rev 0 hardware was never productized */ 9717 if (hw->revision_id < 1) 9718 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 9719 9720 i40e_clear_pxe_mode(hw); 9721 err = i40e_get_capabilities(pf); 9722 if (err) 9723 goto err_adminq_setup; 9724 9725 err = i40e_sw_init(pf); 9726 if (err) { 9727 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 9728 goto err_sw_init; 9729 } 9730 9731 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 9732 hw->func_caps.num_rx_qp, 9733 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 9734 if (err) { 9735 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 9736 goto err_init_lan_hmc; 9737 } 9738 9739 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 9740 if (err) { 9741 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 9742 err = -ENOENT; 9743 goto err_configure_lan_hmc; 9744 } 9745 9746 /* Disable LLDP for NICs that have firmware versions lower than v4.3. 
	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	i40e_get_mac_addr(hw, hw->mac.addr);
	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->flags |= I40E_FLAG_PORT_ID_VALID;
#ifdef I40E_FCOE
	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
	if (err)
		dev_info(&pdev->dev,
			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
	if (!is_valid_ether_addr(hw->mac.san_addr)) {
		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
			 hw->mac.san_addr);
		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
	}
	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
#endif /* I40E_FCOE */

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
	pf->link_check_timeout = jiffies;

	/* WoL defaults to disabled */
	pf->wol_en = false;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
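
	/* Ordering matters in the two calls that follow: queue usage must
	 * be settled first, because the interrupt scheme sizes its MSI-X
	 * vector request from the queue counts just computed (see
	 * i40e_init_interrupt_scheme()).
	 */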
	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	i40e_init_interrupt_scheme(pf);

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
	pf->vsi = kzalloc(len, GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);

	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
				 pf->hw.aq.asq_last_status);
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, &pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		u32 val;

		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	pfs_found++;

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

#ifdef I40E_FCOE
	/* create FCoE interface */
	i40e_fcoe_vsi_setup(pf);

#endif
	/* Get the negotiated link width and speed from PCI config space */
	pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);

	i40e_set_pci_config_data(hw, link_status);
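
	/* The XL710 is a PCIe gen3 x8 part; the check below warns when the
	 * slot negotiated fewer lanes or a slower rate, since (for example)
	 * a dual-40GbE configuration can outrun a x4 or gen2 link.
	 */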
"Speed 2.5GT/s" : 9926 "Unknown"), 9927 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : 9928 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : 9929 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : 9930 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" : 9931 "Unknown")); 9932 9933 if (hw->bus.width < i40e_bus_width_pcie_x8 || 9934 hw->bus.speed < i40e_bus_speed_8000) { 9935 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 9936 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 9937 } 9938 9939 /* get the requested speeds from the fw */ 9940 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); 9941 if (err) 9942 dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", 9943 err); 9944 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; 9945 9946 /* print a string summarizing features */ 9947 i40e_print_features(pf); 9948 9949 return 0; 9950 9951 /* Unwind what we've done if something failed in the setup */ 9952 err_vsis: 9953 set_bit(__I40E_DOWN, &pf->state); 9954 i40e_clear_interrupt_scheme(pf); 9955 kfree(pf->vsi); 9956 err_switch_setup: 9957 i40e_reset_interrupt_capability(pf); 9958 del_timer_sync(&pf->service_timer); 9959 err_mac_addr: 9960 err_configure_lan_hmc: 9961 (void)i40e_shutdown_lan_hmc(hw); 9962 err_init_lan_hmc: 9963 kfree(pf->qp_pile); 9964 err_sw_init: 9965 err_adminq_setup: 9966 (void)i40e_shutdown_adminq(hw); 9967 err_pf_reset: 9968 iounmap(hw->hw_addr); 9969 err_ioremap: 9970 kfree(pf); 9971 err_pf_alloc: 9972 pci_disable_pcie_error_reporting(pdev); 9973 pci_release_selected_regions(pdev, 9974 pci_select_bars(pdev, IORESOURCE_MEM)); 9975 err_pci_reg: 9976 err_dma: 9977 pci_disable_device(pdev); 9978 return err; 9979 } 9980 9981 /** 9982 * i40e_remove - Device removal routine 9983 * @pdev: PCI device information struct 9984 * 9985 * i40e_remove is called by the PCI subsystem to alert the driver 9986 * that is should release a PCI device. This could be caused by a 9987 * Hot-Plug event, or because the driver is going to be removed from 9988 * memory. 9989 **/ 9990 static void i40e_remove(struct pci_dev *pdev) 9991 { 9992 struct i40e_pf *pf = pci_get_drvdata(pdev); 9993 i40e_status ret_code; 9994 int i; 9995 9996 i40e_dbg_pf_exit(pf); 9997 9998 i40e_ptp_stop(pf); 9999 10000 /* no more scheduling of any task */ 10001 set_bit(__I40E_DOWN, &pf->state); 10002 del_timer_sync(&pf->service_timer); 10003 cancel_work_sync(&pf->service_task); 10004 i40e_fdir_teardown(pf); 10005 10006 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 10007 i40e_free_vfs(pf); 10008 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; 10009 } 10010 10011 i40e_fdir_teardown(pf); 10012 10013 /* If there is a switch structure or any orphans, remove them. 10014 * This will leave only the PF's VSI remaining. 10015 */ 10016 for (i = 0; i < I40E_MAX_VEB; i++) { 10017 if (!pf->veb[i]) 10018 continue; 10019 10020 if (pf->veb[i]->uplink_seid == pf->mac_seid || 10021 pf->veb[i]->uplink_seid == 0) 10022 i40e_switch_branch_release(pf->veb[i]); 10023 } 10024 10025 /* Now we can shutdown the PF's VSI, just before we kill 10026 * adminq and hmc. 
	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* shutdown and destroy the HMC */
	if (pf->hw.hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
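
/* The three callbacks registered in i40e_err_handler below follow the
 * kernel's AER recovery contract: error_detected() quiesces the device
 * and asks for a slot reset, slot_reset() re-enables PCI access and
 * sanity-checks a register, and resume() rebuilds the device state once
 * recovery has finished.
 */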
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_info(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
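
/* Both shutdown and suspend program the wake registers the same way:
 * I40E_PFPM_APM arms APM wake and I40E_PFPM_WUFC enables the
 * magic-packet wake-up filter, but only when the user has enabled WoL
 * (pf->wol_en); otherwise both registers are cleared before entering D3.
 */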
#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"%s: Cannot enable PCI device from suspend\n",
			__func__);
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);
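
/* The registration order above is deliberate and symmetric: debugfs
 * support (i40e_dbg_init) comes up before pci_register_driver() so that
 * every probed PF can create its entries, and it is torn down only after
 * pci_unregister_driver() has finished removing all bound devices.
 */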