/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

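	/* restart at recovery level 1 if the last recovery was more than
	 * 20 seconds ago; otherwise keep escalating through the levels
	 */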
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
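		/* Rx queue is part of the same block as Tx queue */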
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

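	/* the QEMU-emulated device doesn't support 64-bit register reads,
	 * so assemble the 48-bit value from two 32-bit reads there
	 */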
	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
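	/* each VSI owns a slot in the per-VSI (GLV_*) stat register arrays;
	 * stat_counter_idx selects that slot
	 */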
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

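	/* an XOFF means the link partner paused our Tx, so a stalled queue
	 * is expected rather than a sign of a hang
	 */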
	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the pf statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

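	/* port-level stats live in the GLPRT_* register block, indexed
	 * by the device's physical port number
	 */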
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_discards,
			   &nsd->eth.tx_discards);

	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);

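	/* EEE (Energy Efficient Ethernet) LPI status and event counts */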
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status aq_ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (aq_ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

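	/* the counter tracks how many consumers (VF, netdev, other)
	 * reference this filter; it is only pushed for removal once the
	 * count drops back to zero
	 */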
	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

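	/* a user-supplied address on the Main VSI is a locally administered
	 * address; the LAA_WOL write type hands it to the firmware so it
	 * is also used for wake-on-LAN
	 */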
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
	}

	i40e_sync_vsi_filters(vsi);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs/numtc;
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the power-of-2 of the number of queue pairs */
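			/* e.g. qcount == 5 gives pow == 3 (2^3 = 8 >= 5) */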
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(vsi, f->macaddr,
					I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

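	/* two passes: flush pending deletions to the firmware first, then
	 * push the additions, each batched up to the AdminQ buffer size
	 */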
	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			sizeof(struct i40e_aqc_remove_macvlan_element_data),
			GFP_KERNEL);
		if (!del_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
						vsi->seid, del_list, num_del,
						NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret &&
				    pf->hw.aq.asq_last_status !=
							I40E_AQ_RC_ENOENT)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			num_del = 0;

			if (aq_ret &&
			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			sizeof(struct i40e_aqc_add_macvlan_element_data),
			GFP_KERNEL);
		if (!add_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			add_list[num_add].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && aq_ret &&
		    pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}

/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}

/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1.
This signifies that we should from now 2127 * on accept any traffic (with any tag present, or untagged) 2128 */ 2129 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2130 if (is_netdev) { 2131 if (f->vlan && 2132 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2133 filter_count++; 2134 } 2135 2136 if (f->vlan) 2137 filter_count++; 2138 } 2139 2140 if (!filter_count && is_netdev) { 2141 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 2142 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 2143 is_vf, is_netdev); 2144 if (!f) { 2145 dev_info(&vsi->back->pdev->dev, 2146 "Could not add filter %d for %pM\n", 2147 I40E_VLAN_ANY, netdev->dev_addr); 2148 return -ENOMEM; 2149 } 2150 } 2151 2152 if (!filter_count) { 2153 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2154 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 2155 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2156 is_vf, is_netdev); 2157 if (!add_f) { 2158 dev_info(&vsi->back->pdev->dev, 2159 "Could not add filter %d for %pM\n", 2160 I40E_VLAN_ANY, f->macaddr); 2161 return -ENOMEM; 2162 } 2163 } 2164 } 2165 2166 if (test_bit(__I40E_DOWN, &vsi->back->state) || 2167 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 2168 return 0; 2169 2170 return i40e_sync_vsi_filters(vsi); 2171 } 2172 2173 /** 2174 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2175 * @netdev: network interface to be adjusted 2176 * @vid: vlan id to be added 2177 * 2178 * net_device_ops implementation for adding vlan ids 2179 **/ 2180 #ifdef I40E_FCOE 2181 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2182 __always_unused __be16 proto, u16 vid) 2183 #else 2184 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2185 __always_unused __be16 proto, u16 vid) 2186 #endif 2187 { 2188 struct i40e_netdev_priv *np = netdev_priv(netdev); 2189 struct i40e_vsi *vsi = np->vsi; 2190 int ret = 0; 2191 2192 if (vid > 4095) 2193 return -EINVAL; 2194 2195 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 2196 2197 /* If the network stack called us with vid = 0 then 2198 * it is asking to receive priority tagged packets with 2199 * vlan id 0. Our HW receives them by default when configured 2200 * to receive untagged packets so there is no need to add an 2201 * extra filter for vlan 0 tagged packets. 
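	 * (An 802.1p priority-tagged frame is simply a VLAN tag whose VID
	 * field is 0 and whose PCP bits carry the priority.)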
	 */
	if (vid)
		ret = i40e_vsi_add_vlan(vsi, vid);

	if (!ret && (vid < VLAN_N_VID))
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
			  __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}

/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status aq_ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			i40e_free_tx_resources(vsi->tx_rings[i]);
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_setup_ddp_resources(vsi);
#endif
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_free_ddp_resources(vsi);
#endif
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	if (ring->q_vector && ring->netdev) {
		/* Single TC mode enable XPS */
		if (vsi->tc_config.numtc <= 1 &&
		    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
		} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			/* Disable XPS to allow selection based on TC */
			bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
			netif_set_xps_queue(ring->netdev, mask,
					    ring->queue_index);
			free_cpumask_var(mask);
		}
	}
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
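 * The context carries the ring base address, length, head write-back
 * address, and the TC ready-list handle; once the context is written,
 * the queue is associated with this PF through QTX_CTL.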
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);

	/* cache the tail offset for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
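 * Buffer and header sizes, the descriptor format, and the header-split
 * policy are taken from the VSI; the queue is then re-armed by zeroing
 * its tail register and allocating receive buffers.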
2515 **/ 2516 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2517 { 2518 struct i40e_vsi *vsi = ring->vsi; 2519 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2520 u16 pf_q = vsi->base_queue + ring->queue_index; 2521 struct i40e_hw *hw = &vsi->back->hw; 2522 struct i40e_hmc_obj_rxq rx_ctx; 2523 i40e_status err = 0; 2524 2525 ring->state = 0; 2526 2527 /* clear the context structure first */ 2528 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2529 2530 ring->rx_buf_len = vsi->rx_buf_len; 2531 ring->rx_hdr_len = vsi->rx_hdr_len; 2532 2533 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2534 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2535 2536 rx_ctx.base = (ring->dma / 128); 2537 rx_ctx.qlen = ring->count; 2538 2539 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2540 set_ring_16byte_desc_enabled(ring); 2541 rx_ctx.dsize = 0; 2542 } else { 2543 rx_ctx.dsize = 1; 2544 } 2545 2546 rx_ctx.dtype = vsi->dtype; 2547 if (vsi->dtype) { 2548 set_ring_ps_enabled(ring); 2549 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2550 I40E_RX_SPLIT_IP | 2551 I40E_RX_SPLIT_TCP_UDP | 2552 I40E_RX_SPLIT_SCTP; 2553 } else { 2554 rx_ctx.hsplit_0 = 0; 2555 } 2556 2557 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2558 (chain_len * ring->rx_buf_len)); 2559 if (hw->revision_id == 0) 2560 rx_ctx.lrxqthresh = 0; 2561 else 2562 rx_ctx.lrxqthresh = 2; 2563 rx_ctx.crcstrip = 1; 2564 rx_ctx.l2tsel = 1; 2565 rx_ctx.showiv = 1; 2566 #ifdef I40E_FCOE 2567 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2568 #endif 2569 /* set the prefena field to 1 because the manual says to */ 2570 rx_ctx.prefena = 1; 2571 2572 /* clear the context in the HMC */ 2573 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2574 if (err) { 2575 dev_info(&vsi->back->pdev->dev, 2576 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2577 ring->queue_index, pf_q, err); 2578 return -ENOMEM; 2579 } 2580 2581 /* set the context in the HMC */ 2582 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2583 if (err) { 2584 dev_info(&vsi->back->pdev->dev, 2585 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2586 ring->queue_index, pf_q, err); 2587 return -ENOMEM; 2588 } 2589 2590 /* cache tail for quicker writes, and clear the reg before use */ 2591 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2592 writel(0, ring->tail); 2593 2594 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 2595 2596 return 0; 2597 } 2598 2599 /** 2600 * i40e_vsi_configure_tx - Configure the VSI for Tx 2601 * @vsi: VSI structure describing this set of rings and resources 2602 * 2603 * Configure the Tx VSI for operation. 2604 **/ 2605 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2606 { 2607 int err = 0; 2608 u16 i; 2609 2610 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2611 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2612 2613 return err; 2614 } 2615 2616 /** 2617 * i40e_vsi_configure_rx - Configure the VSI for Rx 2618 * @vsi: the VSI being configured 2619 * 2620 * Configure the Rx VSI for operation. 
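 * The frame size and buffer-length policy (single buffer vs. header
 * split) are derived once for the VSI here, then applied per Rx ring.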
2621 **/ 2622 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2623 { 2624 int err = 0; 2625 u16 i; 2626 2627 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2628 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2629 + ETH_FCS_LEN + VLAN_HLEN; 2630 else 2631 vsi->max_frame = I40E_RXBUFFER_2048; 2632 2633 /* figure out correct receive buffer length */ 2634 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2635 I40E_FLAG_RX_PS_ENABLED)) { 2636 case I40E_FLAG_RX_1BUF_ENABLED: 2637 vsi->rx_hdr_len = 0; 2638 vsi->rx_buf_len = vsi->max_frame; 2639 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2640 break; 2641 case I40E_FLAG_RX_PS_ENABLED: 2642 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2643 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2644 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2645 break; 2646 default: 2647 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2648 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2649 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2650 break; 2651 } 2652 2653 #ifdef I40E_FCOE 2654 /* setup rx buffer for FCoE */ 2655 if ((vsi->type == I40E_VSI_FCOE) && 2656 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 2657 vsi->rx_hdr_len = 0; 2658 vsi->rx_buf_len = I40E_RXBUFFER_3072; 2659 vsi->max_frame = I40E_RXBUFFER_3072; 2660 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2661 } 2662 2663 #endif /* I40E_FCOE */ 2664 /* round up for the chip's needs */ 2665 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2666 (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); 2667 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2668 (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); 2669 2670 /* set up individual rings */ 2671 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2672 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2673 2674 return err; 2675 } 2676 2677 /** 2678 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2679 * @vsi: ptr to the VSI 2680 **/ 2681 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2682 { 2683 struct i40e_ring *tx_ring, *rx_ring; 2684 u16 qoffset, qcount; 2685 int i, n; 2686 2687 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2688 return; 2689 2690 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2691 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2692 continue; 2693 2694 qoffset = vsi->tc_config.tc_info[n].qoffset; 2695 qcount = vsi->tc_config.tc_info[n].qcount; 2696 for (i = qoffset; i < (qoffset + qcount); i++) { 2697 rx_ring = vsi->rx_rings[i]; 2698 tx_ring = vsi->tx_rings[i]; 2699 rx_ring->dcb_tc = n; 2700 tx_ring->dcb_tc = n; 2701 } 2702 } 2703 } 2704 2705 /** 2706 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 2707 * @vsi: ptr to the VSI 2708 **/ 2709 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 2710 { 2711 if (vsi->netdev) 2712 i40e_set_rx_mode(vsi->netdev); 2713 } 2714 2715 /** 2716 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 2717 * @vsi: Pointer to the targeted VSI 2718 * 2719 * This function replays the hlist on the hw where all the SB Flow Director 2720 * filters were saved. 
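 * This is a no-op when the sideband Flow Director feature is disabled.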
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_q_vector *q_vector;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 val;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					<< I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2837 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2838 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2839 2840 if (pf->flags & I40E_FLAG_PTP) 2841 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2842 2843 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2844 2845 /* SW_ITR_IDX = 0, but don't change INTENA */ 2846 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 2847 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 2848 2849 /* OTHER_ITR_IDX = 0 */ 2850 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2851 } 2852 2853 /** 2854 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 2855 * @vsi: the VSI being configured 2856 **/ 2857 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2858 { 2859 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 2860 struct i40e_pf *pf = vsi->back; 2861 struct i40e_hw *hw = &pf->hw; 2862 u32 val; 2863 2864 /* set the ITR configuration */ 2865 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2866 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2867 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 2868 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2869 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2870 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2871 2872 i40e_enable_misc_int_causes(pf); 2873 2874 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2875 wr32(hw, I40E_PFINT_LNKLST0, 0); 2876 2877 /* Associate the queue pair to the vector and enable the queue int */ 2878 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2879 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2880 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2881 2882 wr32(hw, I40E_QINT_RQCTL(0), val); 2883 2884 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2885 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2886 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2887 2888 wr32(hw, I40E_QINT_TQCTL(0), val); 2889 i40e_flush(hw); 2890 } 2891 2892 /** 2893 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 2894 * @pf: board private structure 2895 **/ 2896 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 2897 { 2898 struct i40e_hw *hw = &pf->hw; 2899 2900 wr32(hw, I40E_PFINT_DYN_CTL0, 2901 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2902 i40e_flush(hw); 2903 } 2904 2905 /** 2906 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 2907 * @pf: board private structure 2908 **/ 2909 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2910 { 2911 struct i40e_hw *hw = &pf->hw; 2912 u32 val; 2913 2914 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 2915 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2916 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 2917 2918 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2919 i40e_flush(hw); 2920 } 2921 2922 /** 2923 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 2924 * @vsi: pointer to a vsi 2925 * @vector: enable a particular Hw Interrupt vector 2926 **/ 2927 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) 2928 { 2929 struct i40e_pf *pf = vsi->back; 2930 struct i40e_hw *hw = &pf->hw; 2931 u32 val; 2932 2933 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2934 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2935 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2936 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2937 /* skip the flush */ 2938 } 2939 2940 /** 2941 * i40e_irq_dynamic_disable - Disable default interrupt generation settings 2942 * @vsi: pointer to a vsi 2943 * @vector: disable a particular Hw Interrupt vector 2944 **/ 2945 void 
i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
	i40e_flush(hw);
}

/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(pf->msix_entries[base + vector].vector,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "%s: request_irq failed, error: %d\n",
				 __func__, err);
			goto free_queue_irqs;
		}
		/* assign the mask for this irq */
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      NULL);
		/* the cookie must match what was handed to request_irq()
		 * above: the q_vector itself, not its address in the array
		 */
		free_irq(pf->msix_entries[base + vector].vector,
			 vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
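		/* masking every cause in ICR0_ENA and clearing INTENA via
		 * DYN_CTL0 silences the device before synchronize_irq()
		 */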
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}

/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}

/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_irq_dynamic_enable(vsi,
				tx_ring->q_vector->v_idx + vsi->base_vector);
	}
	return budget > 0;
}

/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}

/**
 * map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/
static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}

/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
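	 *
	 * The pairs are spread with DIV_ROUND_UP over the vectors that are
	 * left: e.g. 5 queue pairs on 2 vectors places DIV_ROUND_UP(5, 2) = 3
	 * pairs on the first vector and the remaining 2 on the second.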
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}

/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	pf->flags |= I40E_FLAG_IN_NETPOLL;
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* i40e_intr() expects the PF as its cookie, matching what
		 * i40e_vsi_request_irq() hands to request_irq()
		 */
		i40e_intr(pf->pdev->irq, pf);
	}
	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
}
#endif

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
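 * The wait polls the QTX_ENA register and compares the QENA_STAT bit
 * against the requested state, sleeping 10-20 usecs between reads.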
3463 **/ 3464 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3465 { 3466 int i; 3467 u32 tx_reg; 3468 3469 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3470 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3471 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3472 break; 3473 3474 usleep_range(10, 20); 3475 } 3476 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3477 return -ETIMEDOUT; 3478 3479 return 0; 3480 } 3481 3482 /** 3483 * i40e_vsi_control_tx - Start or stop a VSI's rings 3484 * @vsi: the VSI being configured 3485 * @enable: start or stop the rings 3486 **/ 3487 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3488 { 3489 struct i40e_pf *pf = vsi->back; 3490 struct i40e_hw *hw = &pf->hw; 3491 int i, j, pf_q, ret = 0; 3492 u32 tx_reg; 3493 3494 pf_q = vsi->base_queue; 3495 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3496 3497 /* warn the TX unit of coming changes */ 3498 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 3499 if (!enable) 3500 usleep_range(10, 20); 3501 3502 for (j = 0; j < 50; j++) { 3503 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3504 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3505 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3506 break; 3507 usleep_range(1000, 2000); 3508 } 3509 /* Skip if the queue is already in the requested state */ 3510 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3511 continue; 3512 3513 /* turn on/off the queue */ 3514 if (enable) { 3515 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3516 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3517 } else { 3518 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3519 } 3520 3521 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3522 /* No waiting for the Tx queue to disable */ 3523 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) 3524 continue; 3525 3526 /* wait for the change to finish */ 3527 ret = i40e_pf_txq_wait(pf, pf_q, enable); 3528 if (ret) { 3529 dev_info(&pf->pdev->dev, 3530 "%s: VSI seid %d Tx ring %d %sable timeout\n", 3531 __func__, vsi->seid, pf_q, 3532 (enable ? "en" : "dis")); 3533 break; 3534 } 3535 } 3536 3537 if (hw->revision_id == 0) 3538 mdelay(50); 3539 return ret; 3540 } 3541 3542 /** 3543 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 3544 * @pf: the PF being configured 3545 * @pf_q: the PF queue 3546 * @enable: enable or disable state of the queue 3547 * 3548 * This routine will wait for the given Rx queue of the PF to reach the 3549 * enabled or disabled state. 3550 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3551 * multiple retries; else will return 0 in case of success. 
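 * As with the Tx variant above, the QENA_STAT bit of QRX_ENA is polled
 * with a 10-20 usec sleep between reads until it matches the request.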
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "%s: VSI seid %d Rx ring %d %sable timeout\n",
				 __func__, vsi->seid, pf_q,
				 (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}

/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: start or stop the rings
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	if (request) {
		ret = i40e_vsi_control_rx(vsi, request);
		if (ret)
			return ret;
		ret = i40e_vsi_control_tx(vsi, request);
	} else {
		/* Ignore return value, we need to shutdown whatever we can */
		i40e_vsi_control_tx(vsi, request);
		i40e_vsi_control_rx(vsi, request);
	}

	return ret;
}

/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.
To clear the 3680 * link list, stick the EOL value into the 3681 * next_q field of the registers. 3682 */ 3683 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3684 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3685 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3686 val |= I40E_QUEUE_END_OF_LIST 3687 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3688 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3689 3690 while (qp != I40E_QUEUE_END_OF_LIST) { 3691 u32 next; 3692 3693 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3694 3695 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3696 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3697 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3698 I40E_QINT_RQCTL_INTEVENT_MASK); 3699 3700 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3701 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3702 3703 wr32(hw, I40E_QINT_RQCTL(qp), val); 3704 3705 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3706 3707 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 3708 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 3709 3710 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3711 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3712 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3713 I40E_QINT_TQCTL_INTEVENT_MASK); 3714 3715 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3716 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3717 3718 wr32(hw, I40E_QINT_TQCTL(qp), val); 3719 qp = next; 3720 } 3721 } 3722 } else { 3723 free_irq(pf->pdev->irq, pf); 3724 3725 val = rd32(hw, I40E_PFINT_LNKLST0); 3726 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3727 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3728 val |= I40E_QUEUE_END_OF_LIST 3729 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 3730 wr32(hw, I40E_PFINT_LNKLST0, val); 3731 3732 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3733 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3734 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3735 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3736 I40E_QINT_RQCTL_INTEVENT_MASK); 3737 3738 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3739 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3740 3741 wr32(hw, I40E_QINT_RQCTL(qp), val); 3742 3743 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3744 3745 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3746 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3747 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3748 I40E_QINT_TQCTL_INTEVENT_MASK); 3749 3750 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3751 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3752 3753 wr32(hw, I40E_QINT_TQCTL(qp), val); 3754 } 3755 } 3756 3757 /** 3758 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 3759 * @vsi: the VSI being configured 3760 * @v_idx: Index of vector to be freed 3761 * 3762 * This function frees the memory allocated to the q_vector. In addition if 3763 * NAPI is enabled it will delete any references to the NAPI struct prior 3764 * to freeing the q_vector. 3765 **/ 3766 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 3767 { 3768 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3769 struct i40e_ring *ring; 3770 3771 if (!q_vector) 3772 return; 3773 3774 /* disassociate q_vector from rings */ 3775 i40e_for_each_ring(ring, q_vector->tx) 3776 ring->q_vector = NULL; 3777 3778 i40e_for_each_ring(ring, q_vector->rx) 3779 ring->q_vector = NULL; 3780 3781 /* only VSI w/ an associated netdev is set up w/ NAPI */ 3782 if (vsi->netdev) 3783 netif_napi_del(&q_vector->napi); 3784 3785 vsi->q_vectors[v_idx] = NULL; 3786 3787 kfree_rcu(q_vector, rcu); 3788 } 3789 3790 /** 3791 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3792 * @vsi: the VSI being un-configured 3793 * 3794 * This frees the memory allocated to the q_vectors and 3795 * deletes references to the NAPI struct. 
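 * The per-vector teardown in i40e_free_q_vector() releases each vector
 * with kfree_rcu(), so concurrent RCU readers remain safe.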
3796 **/ 3797 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 3798 { 3799 int v_idx; 3800 3801 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 3802 i40e_free_q_vector(vsi, v_idx); 3803 } 3804 3805 /** 3806 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 3807 * @pf: board private structure 3808 **/ 3809 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 3810 { 3811 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 3812 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3813 pci_disable_msix(pf->pdev); 3814 kfree(pf->msix_entries); 3815 pf->msix_entries = NULL; 3816 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 3817 pci_disable_msi(pf->pdev); 3818 } 3819 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 3820 } 3821 3822 /** 3823 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 3824 * @pf: board private structure 3825 * 3826 * We go through and clear interrupt specific resources and reset the structure 3827 * to pre-load conditions 3828 **/ 3829 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 3830 { 3831 int i; 3832 3833 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3834 for (i = 0; i < pf->num_alloc_vsi; i++) 3835 if (pf->vsi[i]) 3836 i40e_vsi_free_q_vectors(pf->vsi[i]); 3837 i40e_reset_interrupt_capability(pf); 3838 } 3839 3840 /** 3841 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 3842 * @vsi: the VSI being configured 3843 **/ 3844 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 3845 { 3846 int q_idx; 3847 3848 if (!vsi->netdev) 3849 return; 3850 3851 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3852 napi_enable(&vsi->q_vectors[q_idx]->napi); 3853 } 3854 3855 /** 3856 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 3857 * @vsi: the VSI being configured 3858 **/ 3859 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 3860 { 3861 int q_idx; 3862 3863 if (!vsi->netdev) 3864 return; 3865 3866 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3867 napi_disable(&vsi->q_vectors[q_idx]->napi); 3868 } 3869 3870 /** 3871 * i40e_vsi_close - Shut down a VSI 3872 * @vsi: the vsi to be quelled 3873 **/ 3874 static void i40e_vsi_close(struct i40e_vsi *vsi) 3875 { 3876 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 3877 i40e_down(vsi); 3878 i40e_vsi_free_irq(vsi); 3879 i40e_vsi_free_tx_resources(vsi); 3880 i40e_vsi_free_rx_resources(vsi); 3881 } 3882 3883 /** 3884 * i40e_quiesce_vsi - Pause a given VSI 3885 * @vsi: the VSI being paused 3886 **/ 3887 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 3888 { 3889 if (test_bit(__I40E_DOWN, &vsi->state)) 3890 return; 3891 3892 /* No need to disable FCoE VSI when Tx suspended */ 3893 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && 3894 vsi->type == I40E_VSI_FCOE) { 3895 dev_dbg(&vsi->back->pdev->dev, 3896 "%s: VSI seid %d skipping FCoE VSI disable\n", 3897 __func__, vsi->seid); 3898 return; 3899 } 3900 3901 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 3902 if (vsi->netdev && netif_running(vsi->netdev)) { 3903 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3904 } else { 3905 i40e_vsi_close(vsi); 3906 } 3907 } 3908 3909 /** 3910 * i40e_unquiesce_vsi - Resume a given VSI 3911 * @vsi: the VSI being resumed 3912 **/ 3913 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 3914 { 3915 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 3916 return; 3917 3918 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 3919 if (vsi->netdev && netif_running(vsi->netdev)) 3920 
vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3921 else 3922 i40e_vsi_open(vsi); /* this clears the DOWN bit */ 3923 } 3924 3925 /** 3926 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 3927 * @pf: the PF 3928 **/ 3929 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 3930 { 3931 int v; 3932 3933 for (v = 0; v < pf->num_alloc_vsi; v++) { 3934 if (pf->vsi[v]) 3935 i40e_quiesce_vsi(pf->vsi[v]); 3936 } 3937 } 3938 3939 /** 3940 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 3941 * @pf: the PF 3942 **/ 3943 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 3944 { 3945 int v; 3946 3947 for (v = 0; v < pf->num_alloc_vsi; v++) { 3948 if (pf->vsi[v]) 3949 i40e_unquiesce_vsi(pf->vsi[v]); 3950 } 3951 } 3952 3953 #ifdef CONFIG_I40E_DCB 3954 /** 3955 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled 3956 * @vsi: the VSI being configured 3957 * 3958 * This function waits for the given VSI's Tx queues to be disabled. 3959 **/ 3960 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) 3961 { 3962 struct i40e_pf *pf = vsi->back; 3963 int i, pf_q, ret; 3964 3965 pf_q = vsi->base_queue; 3966 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3967 /* Check and wait for the disable status of the queue */ 3968 ret = i40e_pf_txq_wait(pf, pf_q, false); 3969 if (ret) { 3970 dev_info(&pf->pdev->dev, 3971 "%s: VSI seid %d Tx ring %d disable timeout\n", 3972 __func__, vsi->seid, pf_q); 3973 return ret; 3974 } 3975 } 3976 3977 return 0; 3978 } 3979 3980 /** 3981 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled 3982 * @pf: the PF 3983 * 3984 * This function waits for the Tx queues to be in disabled state for all the 3985 * VSIs that are managed by this PF. 3986 **/ 3987 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) 3988 { 3989 int v, ret = 0; 3990 3991 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3992 /* No need to wait for FCoE VSI queues */ 3993 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { 3994 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); 3995 if (ret) 3996 break; 3997 } 3998 } 3999 4000 return ret; 4001 } 4002 4003 #endif 4004 /** 4005 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP 4006 * @pf: pointer to pf 4007 * 4008 * Get TC map for ISCSI PF type that will include iSCSI TC 4009 * and LAN TC. 
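 *
 * As an illustrative example (hypothetical values): if the iSCSI APP
 * TLV carries user priority 4 and the ETS priority table maps UP 4 to
 * TC 2, the returned map is 0x5 (TC0, which is always set, plus TC2).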
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= (1 << tc);
			break;
		}
	}

	return enabled_tc;
}

/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = 0;
	int i;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and use the traffic class index to get the
	 * number of traffic classes enabled
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
			num_tc = dcbcfg->etscfg.prioritytable[i];
	}

	/* Traffic class index starts from zero so
	 * increment to return the actual count
	 */
	return num_tc + 1;
}

/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the given DCBX configuration and return a bitmap of the
 * traffic classes it enables
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= 1 << i;

	return enabled_tc;
}

/**
 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always in single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* In MFP mode return the count of TCs enabled for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		enabled_tc = pf->hw.func_caps.enabled_tcmap;

	/* At least have TC0 */
	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			num_tc++;
	}
	return num_tc;
}

/**
 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
 * @pf: PF being queried
 *
 * Return a bitmap for the first enabled traffic class for this PF.
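 *
 * For example (illustrative values): enabled_tcmap = 0xC (TC2 and TC3)
 * yields 0x4, the bit for TC2, the lowest-numbered enabled TC; an
 * empty map falls back to 0x1 (TC0).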
 **/
static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
{
	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
	u8 i = 0;

	if (!enabled_tc)
		return 0x1; /* TC0 */

	/* Find the first enabled TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			break;
	}

	return 1 << i;
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return i40e_pf_get_default_tc(pf);

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return pf->hw.func_caps.enabled_tcmap;
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
						  NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status aq_ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
					  NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & (1 << i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap.  It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
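 *
 * For example (illustrative values): enabled_tc = 0x3 configures TC0
 * and TC1, each with the same default bandwidth share of 1 credit.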
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "veb bw config failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components as possible */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components as possible */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "AQ command Resume Port Tx failed = %d\n",
			 pf->hw.aq.asq_last_status);
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX is in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "AQ Querying DCB configuration failed: aq_err %d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
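/* Note: SPEED_SIZE must fit the longest speed string used below
 * ("1000 Mbps" plus the terminating NUL), and FC_SIZE the longest
 * flow control string ("RX/TX").
 */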
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
 **/
static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	char speed[SPEED_SIZE] = "Unknown";
	char fc[FC_SIZE] = "RX/TX";

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (vsi->back->hw.func_caps.npar_enable &&
	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected a link speed of less than 10Gbps\n");

	switch (vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		strlcpy(speed, "40 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_10GB:
		strlcpy(speed, "10 Gbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_1GB:
		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
		break;
	case I40E_LINK_SPEED_100MB:
		strlcpy(speed, "100 Mbps", SPEED_SIZE);
		break;
	default:
		break;
	}

	switch (vsi->back->hw.fc.current_mode) {
	case I40E_FC_FULL:
		strlcpy(fc, "RX/TX", FC_SIZE);
		break;
	case I40E_FC_TX_PAUSE:
		strlcpy(fc, "TX", FC_SIZE);
		break;
	case I40E_FC_RX_PAUSE:
		strlcpy(fc, "RX", FC_SIZE);
		break;
	default:
		strlcpy(fc, "None", FC_SIZE);
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
		    speed, fc);
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here */
		if ((pf->hw.phy.link_info.link_info &
		     I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
		       I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "The driver failed to link because an unqualified module was detected.\n");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		if (pf->fd_tcp_rule > 0) {
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
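 *
 * A typical caller is an MTU change: update netdev->mtu, then call
 * this to rebuild the rings with the new buffer sizes (an assumed
 * usage pattern; see the callers for the authoritative list).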
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset.  The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
#ifdef I40E_FCOE
int i40e_setup_tc(struct net_device *netdev, u8 tc)
#else
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= (1 << i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}

/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev)
#else
static int i40e_open(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
#endif

	return 0;
}

/**
 * i40e_vsi_open - Bring up a VSI
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return err;
}

/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to pf
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
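 *
 * Note that the _safe variant of the hlist iterator is used so each
 * node can be freed while the list is being walked.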
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	pf->fdir_pf_active_filters = 0;
}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
#else
static int i40e_close(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}

/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());

	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	/* do the biggest reset indicated */
	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {

		/* Request a Firmware Reset
		 *
		 * Same as Global reset, plus restarting the
		 * embedded firmware engine.
		 */
		/* enable EMP Reset */
		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);

		/* force the reset */
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
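		 *
		 * For example, i40e_vsi_open() requests this path on its
		 * error exit with:
		 *	i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));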
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
		return;
	}
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
		need_reconfig);
	return need_reconfig;
}

/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev,
		"%s: LLDP event mib bridge type 0x%x\n", __func__, type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"%s: LLDP event mib type %s\n", __func__,
		type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
	/* Store the old configuration */
	tmp_dcbx_cfg = *dcbx_cfg;

	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);

	i40e_dcbnl_flush_apps(pf, dcbx_cfg);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* Reconfiguration needed: quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's Tx queues to be disabled */
	ret = i40e_pf_wait_txq_disabled(pf);
	if (!ret)
		i40e_pf_unquiesce_all_vsi(pf);
exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */

/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	int val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get the count of total FD filters programmed
 * @pf: board private structure
 **/
int i40e_get_current_fd_count(struct i40e_pf *pf)
{
	int val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	u32 fcnt_prog, fcnt_avail;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if FD SB or ATR was auto-disabled and if there is enough
	 * room to re-enable
	 */
	fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	int flush_wait_retry = 50;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if (time_after(jiffies, pf->fd_flush_timestamp +
				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
		set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		pf->fd_flush_timestamp = jiffies;
		pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		/* flush all filters */
		wr32(&pf->hw, I40E_PFQF_CTL_1,
		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
		i40e_flush(&pf->hw);
		pf->fd_flush_cnt++;
		pf->fd_add_err = 0;
		do {
			/* Check FD flush status every 5-6 msec */
			usleep_range(5000, 6000);
			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
				break;
		} while (flush_wait_retry--);
		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
		} else {
			/* replay sideband filters */
			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);

			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
		}
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
int i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first filter
 * miss error on Rx queue 0.  Accumulating enough error messages before
 * reacting ensures that we don't trigger a flush too often.
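 *
 * With I40E_MAX_FD_PROGRAM_ERROR below set to 256, a flush is only
 * considered after at least that many programming errors have
 * accumulated.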
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state))
		return;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
	    (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
	    (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
#endif
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}

/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	bool new_link, old_link;
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
	new_link = i40e_get_link_status(&pf->hw);
	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_DOWN, &vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_DOWN, &vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
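	 * (Floating VEBs have no uplink to the physical port, so there
	 * is no port link state to propagate to them.)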
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}

/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i]->state))
				armed++;
		}

		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
				      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
			} else {
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
					   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
					   I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
					   I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
					   I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}

/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	i40e_check_hang_subtask(pf);
	i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	/* Update the stats for the active switching components */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i])
			i40e_update_veb_stats(pf->veb[i]);

	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

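	/* Collect all outstanding reset requests under the rtnl lock so
	 * that a single pass can service them, biggest reset first.
	 */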
5684 rtnl_lock(); 5685 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 5686 reset_flags |= (1 << __I40E_REINIT_REQUESTED); 5687 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 5688 } 5689 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 5690 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); 5691 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 5692 } 5693 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 5694 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); 5695 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 5696 } 5697 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 5698 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); 5699 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 5700 } 5701 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { 5702 reset_flags |= (1 << __I40E_DOWN_REQUESTED); 5703 clear_bit(__I40E_DOWN_REQUESTED, &pf->state); 5704 } 5705 5706 /* If there's a recovery already waiting, it takes 5707 * precedence before starting a new reset sequence. 5708 */ 5709 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 5710 i40e_handle_reset_warning(pf); 5711 goto unlock; 5712 } 5713 5714 /* If we're already down or resetting, just bail */ 5715 if (reset_flags && 5716 !test_bit(__I40E_DOWN, &pf->state) && 5717 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5718 i40e_do_reset(pf, reset_flags); 5719 5720 unlock: 5721 rtnl_unlock(); 5722 } 5723 5724 /** 5725 * i40e_handle_link_event - Handle link event 5726 * @pf: board private structure 5727 * @e: event info posted on ARQ 5728 **/ 5729 static void i40e_handle_link_event(struct i40e_pf *pf, 5730 struct i40e_arq_event_info *e) 5731 { 5732 struct i40e_hw *hw = &pf->hw; 5733 struct i40e_aqc_get_link_status *status = 5734 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 5735 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 5736 5737 /* save off old link status information */ 5738 memcpy(&pf->hw.phy.link_info_old, hw_link_info, 5739 sizeof(pf->hw.phy.link_info_old)); 5740 5741 /* Do a new status request to re-enable LSE reporting 5742 * and load new status information into the hw struct 5743 * This completely ignores any state information 5744 * in the ARQ event info, instead choosing to always 5745 * issue the AQ update link status command. 
	 */
	i40e_link_event(pf);

	/* check for an unqualified module if link is down */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		dev_err(&pf->pdev->dev,
			"The driver failed to link because an unqualified module was detected.\n");
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, &pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, &pf->state);
	}
}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the SEIDs from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* Enable LB mode for the main VSI now that it is on a VEB */
	i40e_enable_pf_switch_lb(pf);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}

/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
	    (pf->hw.aq.fw_maj_ver < 2)) {
		pf->hw.func_caps.num_msix_vectors++;
		pf->hw.func_caps.num_msix_vectors_vf++;
	}

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

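	/* Illustrative sketch (not part of the driver): the loop above is a
	 * common AQ "grow and retry" pattern -- ask with a guessed buffer
	 * size and, if the firmware answers ENOMEM, retry once with the size
	 * it reported.  The helper name and query callback are hypothetical.
	 */
#if 0
	static void *example_grow_and_retry(size_t guess, size_t *needed,
					    int (*query)(void *buf, size_t len,
							 size_t *needed))
	{
		void *buf;

		for (;;) {
			buf = kzalloc(guess, GFP_KERNEL);
			if (!buf)
				return NULL;
			if (!query(buf, guess, needed))
				return buf;	/* success; caller frees */
			kfree(buf);
			if (*needed <= guess)
				return NULL;	/* hard failure, not a size issue */
			guess = *needed;	/* retry with the reported size */
		}
	}
#endif
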
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		     + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}

static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	int i;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			vsi = pf->vsi[i];
			break;
		}
	}

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	int i;

	i40e_fdir_filter_exit(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_release(pf->vsi[i]);
			break;
		}
	}
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for pf Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
		i40e_verify_eeprom(pf);
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	ret = i40e_init_pf_fcoe(pf);
	if (ret)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	msleep(75);
	ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}

/**
 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the pf structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
	}

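	/* Illustrative sketch (not part of the driver): the decoding above is
	 * the usual mask-and-shift idiom for pulling a bit field out of a
	 * 32-bit register value.  The macro and helper names below are
	 * hypothetical.
	 */
#if 0
#define EXAMPLE_FIELD_MASK	0x00000F00
#define EXAMPLE_FIELD_SHIFT	8

	static u8 example_get_field(u32 reg)
	{
		/* the mask selects the bits, the shift right-justifies them */
		return (reg & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT;
	}
#endif
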
	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

#ifdef CONFIG_I40E_VXLAN
/**
 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 filter_index;
	__be16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_vxlan_bitmap & (1 << i)) {
			pf->pending_vxlan_bitmap &= ~(1 << i);
			port = pf->vxlan_ports[i];
			ret = port ?
			      i40e_aq_add_udp_tunnel(hw, ntohs(port),
						     I40E_AQC_TUNNEL_TYPE_VXLAN,
						     &filter_index, NULL)
			      : i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
					 port ? "adding" : "deleting",
					 ntohs(port), i);

				pf->vxlan_ports[i] = 0;
			} else {
				dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
					 port ? "Added" : "Deleted",
					 ntohs(port), port ? i : filter_index);
			}
		}
	}
}

#endif
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_sync_filters_subtask(pf);
#ifdef CONFIG_I40E_VXLAN
	i40e_sync_vxlan_filters_subtask(pf);
#endif
	i40e_clean_adminq_subtask(pf);

	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;	/* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;	/* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

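/* Illustrative sketch (not part of the driver): the slot search above scans
 * forward from a moving hint and wraps around once, which finds a free slot
 * in O(n) without maintaining a free list.  The helper below is
 * hypothetical.
 */
#if 0
static int example_find_free_slot(void **table, int num_entries, int hint)
{
	int i;

	/* scan from the hint to the end of the table */
	for (i = hint; i < num_entries; i++)
		if (!table[i])
			return i;
	/* wrap around and scan the part before the hint */
	for (i = 0; i < hint; i++)
		if (!table[i])
			return i;
	return -1;	/* table is full */
}
#endif
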
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the pf for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
		if (!tx_ring)
			goto err_out;

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;
		vsi->tx_rings[i] = tx_ring;

		rx_ring = &tx_ring[1];
		rx_ring->queue_index = i;
		rx_ring->reg_idx = vsi->base_queue + i;
		rx_ring->ring_active = false;
		rx_ring->vsi = vsi;
		rx_ring->netdev = vsi->netdev;
		rx_ring->dev = &pf->pdev->dev;
		rx_ring->count = vsi->num_desc;
		rx_ring->size = 0;
		rx_ring->dcb_tc = 0;
		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
			set_ring_16byte_desc_enabled(rx_ring);
		else
			clear_ring_16byte_desc_enabled(rx_ring);
		vsi->rx_rings[i] = rx_ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}

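/* Illustrative sketch (not part of the driver): i40e_alloc_rings() above
 * carves one kzalloc() into a Tx/Rx ring pair, so the Rx ring is simply the
 * array element after the Tx ring and a single kfree() of the Tx pointer
 * releases both.  The struct and helper below are hypothetical.
 */
#if 0
struct example_ring { int id; };

static struct example_ring *example_alloc_pair(void)
{
	/* index 0 is Tx, index 1 is Rx; freed together via the Tx pointer */
	struct example_ring *tx = kzalloc(sizeof(*tx) * 2, GFP_KERNEL);

	if (tx)
		tx[1].id = 1;	/* &tx[1] is the Rx ring */
	return tx;
}
#endif
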
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}

/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	i40e_status err = 0;
	struct i40e_hw *hw = &pf->hw;
	int other_vecs = 0;
	int v_budget, i;
	int vec;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
#ifdef I40E_FCOE
	 *   - The number of FCOE qps.
#endif
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
	pf->num_vmdq_msix = pf->num_vmdq_qps;
	other_vecs = 1;
	other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
		other_vecs++;

	/* Scale down if necessary, and the rings will share vectors */
	pf->num_lan_msix = min_t(int, pf->num_lan_msix,
				 (hw->func_caps.num_msix_vectors - other_vecs));
	v_budget = pf->num_lan_msix + other_vecs;

#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		pf->num_fcoe_msix = pf->num_fcoe_qps;
		v_budget += pf->num_fcoe_msix;
	}
#endif

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	vec = i40e_reserve_msix_vectors(pf, v_budget);

	if (vec != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif
		pf->num_vmdq_msix = 0;
	}

	if (vec < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (vec == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (vec != v_budget) {
		/* reserve the misc vector */
		vec--;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;	/* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#else
			pf->num_lan_msix = 2;
#endif
			break;
		default:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			pf->num_lan_msix = min_t(int, (vec / 2),
						 pf->num_lan_qps);
			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	return err;
}

/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

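/* Illustrative sketch (not part of the driver): when fewer vectors are
 * granted than budgeted, the partitioning above reserves one vector for
 * "other" causes, then splits the rest roughly half to LAN (capped by queue
 * count) and the remainder to VMDq.  The helper below is a hypothetical
 * restatement of that default-case policy.
 */
#if 0
static void example_split_vectors(int vec, int lan_qps,
				  int *lan_msix, int *vmdq_vsis)
{
	vec--;					/* one vector for misc causes */
	*lan_msix = min(vec / 2, lan_qps);	/* half to LAN, capped by queues */
	*vmdq_vsis = vec - *lan_msix;		/* the rest goes to VMDq VSIs */
}
#endif
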
/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int v_idx, num_q_vectors;
	int err;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int err = 0;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_init_msix(pf);
		if (err) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED |
#endif
				       I40E_FLAG_RSS_ENABLED |
				       I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_SRIOV_ENABLED |
				       I40E_FLAG_FD_SB_ENABLED |
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		err = pci_enable_msi(pf->pdev);
		if (err) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* track first vector for misc interrupts */
	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
	struct i40e_hw *hw = &pf->hw;
	u32 lut = 0;
	int i, j;
	u64 hena;
	u32 reg_val;

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
	       ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= I40E_DEFAULT_RSS_HENA;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Check capability and set table size and register per hw expectation */
	reg_val = rd32(hw, I40E_PFQF_CTL_0);
	if (hw->func_caps.rss_table_size == 512) {
		reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
		pf->rss_table_size = 512;
	} else {
		pf->rss_table_size = 128;
		reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
	}
	wr32(hw, I40E_PFQF_CTL_0, reg_val);

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {

		/* The assumption is that lan qp count will be the highest
		 * qp count for any PF VSI that needs RSS.
		 * If multiple VSIs need RSS support, all the qp counts
		 * for those VSIs should be a power of 2 for RSS to work.
		 * If LAN VSI is the only consumer for RSS then this requirement
		 * is not necessary.
		 */
		if (j == pf->rss_size)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);

	return 0;
}

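/* Illustrative sketch (not part of the driver): how the sliding-window LUT
 * packing above fills one 32-bit HLUT register.  With a hypothetical
 * rss_size of 4 and 8-bit entries, queue indices 0..3 pack to 0x00010203.
 */
#if 0
static u32 example_pack_hlut_word(u16 rss_size)
{
	u32 lut = 0;
	int j;

	/* four 8-bit queue indices per register, oldest in the top byte */
	for (j = 0; j < 4; j++)
		lut = (lut << 8) | (j % rss_size);
	return lut;	/* rss_size == 4 gives 0x00010203 */
}
#endif
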
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	queue_count = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != pf->rss_size) {
		i40e_prep_for_reset(pf);

		pf->rss_size = queue_count;

		i40e_reset_and_rebuild(pf, true);
		i40e_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
	return pf->rss_size;
}

/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED |
		    I40E_FLAG_MSIX_ENABLED |
		    I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
	pf->rss_size = 1;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Setup a counter for fd_atr per pf */
		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			/* Setup a counter for fd_sb per pf */
			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
		} else {
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		}
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
	}

#ifdef I40E_FCOE
	err = i40e_init_pf_fcoe(pf);
	if (err)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		kfree(pf->qp_pile);
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
	pf->irq_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}

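/* Illustrative sketch (not part of the driver): the two pile allocations
 * above size a header struct plus a trailing u16 array in one kzalloc(),
 * the classic header-plus-flexible-array layout.  The struct below is a
 * hypothetical stand-in for struct i40e_lump_tracking.
 */
#if 0
struct example_pile {
	u16 num_entries;
	u16 search_hint;
	u16 list[];			/* one u16 of state per tracked item */
};

static struct example_pile *example_pile_alloc(u16 n)
{
	struct example_pile *pile;

	/* header and n-entry array come from a single allocation */
	pile = kzalloc(sizeof(*pile) + n * sizeof(u16), GFP_KERNEL);
	if (pile)
		pile->num_entries = n;
	return pile;
}
#endif
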
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
		pf->fdir_pf_active_filters = 0;
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		/* if ATR was auto disabled it can be re-enabled. */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	}
	return need_reset;
}

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return 0;
}

#ifdef CONFIG_I40E_VXLAN
/**
 * i40e_get_vxlan_port_idx - Look up a possibly offloaded UDP port for Rx
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->vxlan_ports[i] == port)
			return i;
	}

	return i;
}

/**
 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 **/
static void i40e_add_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 next_idx;
	u8 idx;

	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_vxlan_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_vxlan_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->vxlan_ports[next_idx] = port;
	pf->pending_vxlan_bitmap |= (1 << next_idx);

	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
}

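/* Illustrative sketch (not part of the driver): the add path above only
 * records the port and sets a pending bit; the service task later pushes
 * the change to firmware.  The helper below is a hypothetical restatement
 * of that deferred-sync pattern.
 */
#if 0
static void example_mark_pending(__be16 *ports, u16 *pending_bitmap,
				 u8 idx, __be16 port)
{
	ports[idx] = port;		/* port 0 means "delete this slot" */
	*pending_bitmap |= (1 << idx);	/* service task syncs marked slots */
}
#endif
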
/**
 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: UDP port number that VXLAN stopped listening to
 **/
static void i40e_del_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 idx;

	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_vxlan_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		/* if port exists, set it to 0 (mark for deletion)
		 * and make it pending
		 */
		pf->vxlan_ports[idx] = 0;

		pf->pending_vxlan_bitmap |= (1 << idx);

		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
	} else {
		netdev_warn(netdev, "Port %d was not found, not deleting\n",
			    ntohs(port));
	}
}

#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}

/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

7661 .ndo_del_vxlan_port = i40e_del_vxlan_port,
7662 #endif
7663 .ndo_get_phys_port_id = i40e_get_phys_port_id,
7664 .ndo_fdb_add = i40e_ndo_fdb_add,
7665 };
7666
7667 /**
7668 * i40e_config_netdev - Setup the netdev flags
7669 * @vsi: the VSI being configured
7670 *
7671 * Returns 0 on success, negative value on failure
7672 **/
7673 static int i40e_config_netdev(struct i40e_vsi *vsi)
7674 {
7675 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
7676 struct i40e_pf *pf = vsi->back;
7677 struct i40e_hw *hw = &pf->hw;
7678 struct i40e_netdev_priv *np;
7679 struct net_device *netdev;
7680 u8 mac_addr[ETH_ALEN];
7681 int etherdev_size;
7682
7683 etherdev_size = sizeof(struct i40e_netdev_priv);
7684 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
7685 if (!netdev)
7686 return -ENOMEM;
7687
7688 vsi->netdev = netdev;
7689 np = netdev_priv(netdev);
7690 np->vsi = vsi;
7691
7692 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
7693 NETIF_F_GSO_UDP_TUNNEL |
7694 NETIF_F_TSO;
7695
7696 netdev->features = NETIF_F_SG |
7697 NETIF_F_IP_CSUM |
7698 NETIF_F_SCTP_CSUM |
7699 NETIF_F_HIGHDMA |
7700 NETIF_F_GSO_UDP_TUNNEL |
7701 NETIF_F_HW_VLAN_CTAG_TX |
7702 NETIF_F_HW_VLAN_CTAG_RX |
7703 NETIF_F_HW_VLAN_CTAG_FILTER |
7704 NETIF_F_IPV6_CSUM |
7705 NETIF_F_TSO |
7706 NETIF_F_TSO_ECN |
7707 NETIF_F_TSO6 |
7708 NETIF_F_RXCSUM |
7709 NETIF_F_RXHASH |
7710 0;
7711
7712 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
7713 netdev->features |= NETIF_F_NTUPLE;
7714
7715 /* copy netdev features into list of user selectable features */
7716 netdev->hw_features |= netdev->features;
7717
7718 if (vsi->type == I40E_VSI_MAIN) {
7719 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
7720 ether_addr_copy(mac_addr, hw->mac.perm_addr);
7721 /* The following steps are necessary to prevent reception
7722 * of tagged packets - some older NVM configurations load a
7723 * default MAC-VLAN filter that accepts any tagged packet,
7724 * which must be replaced by a normal filter.
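 * The exact-match replacement below is only added when
 * i40e_rm_default_mac_filter() reports success (returns 0) in
 * removing such a default filter.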
7725 */
7726 if (!i40e_rm_default_mac_filter(vsi, mac_addr))
7727 i40e_add_filter(vsi, mac_addr,
7728 I40E_VLAN_ANY, false, true);
7729 } else {
7730 /* relate the VSI_VMDQ name to the VSI_MAIN name */
7731 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
7732 pf->vsi[pf->lan_vsi]->netdev->name);
7733 random_ether_addr(mac_addr);
7734 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
7735 }
7736 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
7737
7738 ether_addr_copy(netdev->dev_addr, mac_addr);
7739 ether_addr_copy(netdev->perm_addr, mac_addr);
7740 /* vlan gets same features (except vlan offload)
7741 * after any tweaks for specific VSI types
7742 */
7743 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
7744 NETIF_F_HW_VLAN_CTAG_RX |
7745 NETIF_F_HW_VLAN_CTAG_FILTER);
7746 netdev->priv_flags |= IFF_UNICAST_FLT;
7747 netdev->priv_flags |= IFF_SUPP_NOFCS;
7748 /* Setup netdev TC information */
7749 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
7750
7751 netdev->netdev_ops = &i40e_netdev_ops;
7752 netdev->watchdog_timeo = 5 * HZ;
7753 i40e_set_ethtool_ops(netdev);
7754 #ifdef I40E_FCOE
7755 i40e_fcoe_config_netdev(netdev, vsi);
7756 #endif
7757
7758 return 0;
7759 }
7760
7761 /**
7762 * i40e_vsi_delete - Delete a VSI from the switch
7763 * @vsi: the VSI being removed
7764 *
7765 * Note that the PF's default/main VSI is not allowed to be deleted.
7766 **/
7767 static void i40e_vsi_delete(struct i40e_vsi *vsi)
7768 {
7769 /* removing the default VSI is not allowed */
7770 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
7771 return;
7772
7773 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
7774 }
7775
7776 /**
7777 * i40e_add_vsi - Add a VSI to the switch
7778 * @vsi: the VSI being configured
7779 *
7780 * This initializes a VSI context depending on the VSI type to be added and
7781 * passes it down to the add_vsi aq command.
7782 **/
7783 static int i40e_add_vsi(struct i40e_vsi *vsi)
7784 {
7785 int ret = -ENODEV;
7786 struct i40e_mac_filter *f, *ftmp;
7787 struct i40e_pf *pf = vsi->back;
7788 struct i40e_hw *hw = &pf->hw;
7789 struct i40e_vsi_context ctxt;
7790 u8 enabled_tc = 0x1; /* TC0 enabled */
7791 int f_count = 0;
7792
7793 memset(&ctxt, 0, sizeof(ctxt));
7794 switch (vsi->type) {
7795 case I40E_VSI_MAIN:
7796 /* The PF's main VSI is already setup as part of the
7797 * device initialization, so we'll not bother with
7798 * the add_vsi call, but we will retrieve the current
7799 * VSI context.
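 * (The firmware creates this VSI during PF initialization, so rather
 * than adding it again, i40e_aq_get_vsi_params() below just fetches
 * the parameters it already has.)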
7800 */ 7801 ctxt.seid = pf->main_vsi_seid; 7802 ctxt.pf_num = pf->hw.pf_id; 7803 ctxt.vf_num = 0; 7804 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 7805 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 7806 if (ret) { 7807 dev_info(&pf->pdev->dev, 7808 "couldn't get pf vsi config, err %d, aq_err %d\n", 7809 ret, pf->hw.aq.asq_last_status); 7810 return -ENOENT; 7811 } 7812 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 7813 vsi->info.valid_sections = 0; 7814 7815 vsi->seid = ctxt.seid; 7816 vsi->id = ctxt.vsi_number; 7817 7818 enabled_tc = i40e_pf_get_tc_map(pf); 7819 7820 /* MFP mode setup queue map and update VSI */ 7821 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && 7822 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ 7823 memset(&ctxt, 0, sizeof(ctxt)); 7824 ctxt.seid = pf->main_vsi_seid; 7825 ctxt.pf_num = pf->hw.pf_id; 7826 ctxt.vf_num = 0; 7827 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 7828 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 7829 if (ret) { 7830 dev_info(&pf->pdev->dev, 7831 "update vsi failed, aq_err=%d\n", 7832 pf->hw.aq.asq_last_status); 7833 ret = -ENOENT; 7834 goto err; 7835 } 7836 /* update the local VSI info queue map */ 7837 i40e_vsi_update_queue_map(vsi, &ctxt); 7838 vsi->info.valid_sections = 0; 7839 } else { 7840 /* Default/Main VSI is only enabled for TC0 7841 * reconfigure it to enable all TCs that are 7842 * available on the port in SFP mode. 7843 * For MFP case the iSCSI PF would use this 7844 * flow to enable LAN+iSCSI TC. 7845 */ 7846 ret = i40e_vsi_config_tc(vsi, enabled_tc); 7847 if (ret) { 7848 dev_info(&pf->pdev->dev, 7849 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", 7850 enabled_tc, ret, 7851 pf->hw.aq.asq_last_status); 7852 ret = -ENOENT; 7853 } 7854 } 7855 break; 7856 7857 case I40E_VSI_FDIR: 7858 ctxt.pf_num = hw->pf_id; 7859 ctxt.vf_num = 0; 7860 ctxt.uplink_seid = vsi->uplink_seid; 7861 ctxt.connection_type = 0x1; /* regular data port */ 7862 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 7863 ctxt.info.valid_sections |= 7864 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 7865 ctxt.info.switch_id = 7866 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 7867 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7868 break; 7869 7870 case I40E_VSI_VMDQ2: 7871 ctxt.pf_num = hw->pf_id; 7872 ctxt.vf_num = 0; 7873 ctxt.uplink_seid = vsi->uplink_seid; 7874 ctxt.connection_type = 0x1; /* regular data port */ 7875 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 7876 7877 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 7878 7879 /* This VSI is connected to VEB so the switch_id 7880 * should be set to zero by default. 7881 */ 7882 ctxt.info.switch_id = 0; 7883 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 7884 7885 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7886 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7887 break; 7888 7889 case I40E_VSI_SRIOV: 7890 ctxt.pf_num = hw->pf_id; 7891 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 7892 ctxt.uplink_seid = vsi->uplink_seid; 7893 ctxt.connection_type = 0x1; /* regular data port */ 7894 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 7895 7896 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 7897 7898 /* This VSI is connected to VEB so the switch_id 7899 * should be set to zero by default. 
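 * The ALLOW_LB flag is still set, though: it lets the VEB loop
 * traffic back locally, which is what allows, for example, two VFs
 * on the same VEB to reach each other without the frames ever
 * leaving the device.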
7900 */ 7901 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 7902 7903 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 7904 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 7905 if (pf->vf[vsi->vf_id].spoofchk) { 7906 ctxt.info.valid_sections |= 7907 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 7908 ctxt.info.sec_flags |= 7909 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 7910 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 7911 } 7912 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7913 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7914 break; 7915 7916 #ifdef I40E_FCOE 7917 case I40E_VSI_FCOE: 7918 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 7919 if (ret) { 7920 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 7921 return ret; 7922 } 7923 break; 7924 7925 #endif /* I40E_FCOE */ 7926 default: 7927 return -ENODEV; 7928 } 7929 7930 if (vsi->type != I40E_VSI_MAIN) { 7931 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 7932 if (ret) { 7933 dev_info(&vsi->back->pdev->dev, 7934 "add vsi failed, aq_err=%d\n", 7935 vsi->back->hw.aq.asq_last_status); 7936 ret = -ENOENT; 7937 goto err; 7938 } 7939 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 7940 vsi->info.valid_sections = 0; 7941 vsi->seid = ctxt.seid; 7942 vsi->id = ctxt.vsi_number; 7943 } 7944 7945 /* If macvlan filters already exist, force them to get loaded */ 7946 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 7947 f->changed = true; 7948 f_count++; 7949 7950 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 7951 struct i40e_aqc_remove_macvlan_element_data element; 7952 7953 memset(&element, 0, sizeof(element)); 7954 ether_addr_copy(element.mac_addr, f->macaddr); 7955 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 7956 ret = i40e_aq_remove_macvlan(hw, vsi->seid, 7957 &element, 1, NULL); 7958 if (ret) { 7959 /* some older FW has a different default */ 7960 element.flags |= 7961 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 7962 i40e_aq_remove_macvlan(hw, vsi->seid, 7963 &element, 1, NULL); 7964 } 7965 7966 i40e_aq_mac_address_write(hw, 7967 I40E_AQC_WRITE_TYPE_LAA_WOL, 7968 f->macaddr, NULL); 7969 } 7970 } 7971 if (f_count) { 7972 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 7973 pf->flags |= I40E_FLAG_FILTER_SYNC; 7974 } 7975 7976 /* Update VSI BW information */ 7977 ret = i40e_vsi_get_bw_info(vsi); 7978 if (ret) { 7979 dev_info(&pf->pdev->dev, 7980 "couldn't get vsi bw info, err %d, aq_err %d\n", 7981 ret, pf->hw.aq.asq_last_status); 7982 /* VSI is already added so not tearing that up */ 7983 ret = 0; 7984 } 7985 7986 err: 7987 return ret; 7988 } 7989 7990 /** 7991 * i40e_vsi_release - Delete a VSI and free its resources 7992 * @vsi: the VSI being removed 7993 * 7994 * Returns 0 on success or < 0 on error 7995 **/ 7996 int i40e_vsi_release(struct i40e_vsi *vsi) 7997 { 7998 struct i40e_mac_filter *f, *ftmp; 7999 struct i40e_veb *veb = NULL; 8000 struct i40e_pf *pf; 8001 u16 uplink_seid; 8002 int i, n; 8003 8004 pf = vsi->back; 8005 8006 /* release of a VEB-owner or last VSI is not allowed */ 8007 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 8008 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 8009 vsi->seid, vsi->uplink_seid); 8010 return -ENODEV; 8011 } 8012 if (vsi == pf->vsi[pf->lan_vsi] && 8013 !test_bit(__I40E_DOWN, &pf->state)) { 8014 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 8015 return -ENODEV; 8016 } 8017 8018 uplink_seid = vsi->uplink_seid; 8019 if (vsi->type != I40E_VSI_SRIOV) { 8020 if (vsi->netdev_registered) { 8021 vsi->netdev_registered = false; 
8022 if (vsi->netdev) { 8023 /* results in a call to i40e_close() */ 8024 unregister_netdev(vsi->netdev); 8025 } 8026 } else { 8027 i40e_vsi_close(vsi); 8028 } 8029 i40e_vsi_disable_irq(vsi); 8030 } 8031 8032 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 8033 i40e_del_filter(vsi, f->macaddr, f->vlan, 8034 f->is_vf, f->is_netdev); 8035 i40e_sync_vsi_filters(vsi); 8036 8037 i40e_vsi_delete(vsi); 8038 i40e_vsi_free_q_vectors(vsi); 8039 if (vsi->netdev) { 8040 free_netdev(vsi->netdev); 8041 vsi->netdev = NULL; 8042 } 8043 i40e_vsi_clear_rings(vsi); 8044 i40e_vsi_clear(vsi); 8045 8046 /* If this was the last thing on the VEB, except for the 8047 * controlling VSI, remove the VEB, which puts the controlling 8048 * VSI onto the next level down in the switch. 8049 * 8050 * Well, okay, there's one more exception here: don't remove 8051 * the orphan VEBs yet. We'll wait for an explicit remove request 8052 * from up the network stack. 8053 */ 8054 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 8055 if (pf->vsi[i] && 8056 pf->vsi[i]->uplink_seid == uplink_seid && 8057 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 8058 n++; /* count the VSIs */ 8059 } 8060 } 8061 for (i = 0; i < I40E_MAX_VEB; i++) { 8062 if (!pf->veb[i]) 8063 continue; 8064 if (pf->veb[i]->uplink_seid == uplink_seid) 8065 n++; /* count the VEBs */ 8066 if (pf->veb[i]->seid == uplink_seid) 8067 veb = pf->veb[i]; 8068 } 8069 if (n == 0 && veb && veb->uplink_seid != 0) 8070 i40e_veb_release(veb); 8071 8072 return 0; 8073 } 8074 8075 /** 8076 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 8077 * @vsi: ptr to the VSI 8078 * 8079 * This should only be called after i40e_vsi_mem_alloc() which allocates the 8080 * corresponding SW VSI structure and initializes num_queue_pairs for the 8081 * newly allocated VSI. 8082 * 8083 * Returns 0 on success or negative on failure 8084 **/ 8085 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 8086 { 8087 int ret = -ENOENT; 8088 struct i40e_pf *pf = vsi->back; 8089 8090 if (vsi->q_vectors[0]) { 8091 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 8092 vsi->seid); 8093 return -EEXIST; 8094 } 8095 8096 if (vsi->base_vector) { 8097 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 8098 vsi->seid, vsi->base_vector); 8099 return -EEXIST; 8100 } 8101 8102 ret = i40e_vsi_alloc_q_vectors(vsi); 8103 if (ret) { 8104 dev_info(&pf->pdev->dev, 8105 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 8106 vsi->num_q_vectors, vsi->seid, ret); 8107 vsi->num_q_vectors = 0; 8108 goto vector_setup_out; 8109 } 8110 8111 if (vsi->num_q_vectors) 8112 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 8113 vsi->num_q_vectors, vsi->idx); 8114 if (vsi->base_vector < 0) { 8115 dev_info(&pf->pdev->dev, 8116 "failed to get tracking for %d vectors for VSI %d, err=%d\n", 8117 vsi->num_q_vectors, vsi->seid, vsi->base_vector); 8118 i40e_vsi_free_q_vectors(vsi); 8119 ret = -ENOENT; 8120 goto vector_setup_out; 8121 } 8122 8123 vector_setup_out: 8124 return ret; 8125 } 8126 8127 /** 8128 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 8129 * @vsi: pointer to the vsi. 8130 * 8131 * This re-allocates a vsi's queue resources. 8132 * 8133 * Returns pointer to the successfully allocated and configured VSI sw struct 8134 * on success, otherwise returns NULL on failure. 
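 *
 * This is used by i40e_setup_pf_switch() to rebuild the main LAN VSI
 * when the switch is set up again with its reinit argument true.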
8135 **/
8136 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
8137 {
8138 struct i40e_pf *pf = vsi->back;
8139 u8 enabled_tc;
8140 int ret;
8141
8142 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
8143 i40e_vsi_clear_rings(vsi);
8144
8145 i40e_vsi_free_arrays(vsi, false);
8146 i40e_set_num_rings_in_vsi(vsi);
8147 ret = i40e_vsi_alloc_arrays(vsi, false);
8148 if (ret)
8149 goto err_vsi;
8150
8151 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
8152 if (ret < 0) {
8153 dev_info(&pf->pdev->dev,
8154 "failed to get tracking for %d queues for VSI %d err=%d\n",
8155 vsi->alloc_queue_pairs, vsi->seid, ret);
8156 goto err_vsi;
8157 }
8158 vsi->base_queue = ret;
8159
8160 /* Update the FW view of the VSI. Force a reset of TC and queue
8161 * layout configurations.
8162 */
8163 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8164 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8165 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8166 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8167
8168 /* assign it some queues */
8169 ret = i40e_alloc_rings(vsi);
8170 if (ret)
8171 goto err_rings;
8172
8173 /* map all of the rings to the q_vectors */
8174 i40e_vsi_map_rings_to_vectors(vsi);
8175 return vsi;
8176
8177 err_rings:
8178 i40e_vsi_free_q_vectors(vsi);
8179 if (vsi->netdev_registered) {
8180 vsi->netdev_registered = false;
8181 unregister_netdev(vsi->netdev);
8182 free_netdev(vsi->netdev);
8183 vsi->netdev = NULL;
8184 }
8185 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8186 err_vsi:
8187 i40e_vsi_clear(vsi);
8188 return NULL;
8189 }
8190
8191 /**
8192 * i40e_vsi_setup - Set up a VSI by a given type
8193 * @pf: board private structure
8194 * @type: VSI type
8195 * @uplink_seid: the switch element to link to
8196 * @param1: usage depends upon VSI type. For VF types, indicates VF id
8197 *
8198 * This allocates the sw VSI structure and its queue resources, then adds a
8199 * VSI to the identified VEB.
8200 *
8201 * Returns pointer to the successfully allocated and configured VSI sw struct
8202 * on success, otherwise returns NULL on failure.
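 *
 * For example, the main LAN VSI is created during switch setup with
 *   vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
 * while the VF setup path passes the VF id in param1, roughly:
 *   vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, uplink_seid, vf->vf_id);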
8203 **/ 8204 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, 8205 u16 uplink_seid, u32 param1) 8206 { 8207 struct i40e_vsi *vsi = NULL; 8208 struct i40e_veb *veb = NULL; 8209 int ret, i; 8210 int v_idx; 8211 8212 /* The requested uplink_seid must be either 8213 * - the PF's port seid 8214 * no VEB is needed because this is the PF 8215 * or this is a Flow Director special case VSI 8216 * - seid of an existing VEB 8217 * - seid of a VSI that owns an existing VEB 8218 * - seid of a VSI that doesn't own a VEB 8219 * a new VEB is created and the VSI becomes the owner 8220 * - seid of the PF VSI, which is what creates the first VEB 8221 * this is a special case of the previous 8222 * 8223 * Find which uplink_seid we were given and create a new VEB if needed 8224 */ 8225 for (i = 0; i < I40E_MAX_VEB; i++) { 8226 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { 8227 veb = pf->veb[i]; 8228 break; 8229 } 8230 } 8231 8232 if (!veb && uplink_seid != pf->mac_seid) { 8233 8234 for (i = 0; i < pf->num_alloc_vsi; i++) { 8235 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 8236 vsi = pf->vsi[i]; 8237 break; 8238 } 8239 } 8240 if (!vsi) { 8241 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", 8242 uplink_seid); 8243 return NULL; 8244 } 8245 8246 if (vsi->uplink_seid == pf->mac_seid) 8247 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, 8248 vsi->tc_config.enabled_tc); 8249 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) 8250 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 8251 vsi->tc_config.enabled_tc); 8252 if (veb) { 8253 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { 8254 dev_info(&vsi->back->pdev->dev, 8255 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", 8256 __func__); 8257 return NULL; 8258 } 8259 i40e_enable_pf_switch_lb(pf); 8260 } 8261 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8262 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 8263 veb = pf->veb[i]; 8264 } 8265 if (!veb) { 8266 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); 8267 return NULL; 8268 } 8269 8270 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 8271 uplink_seid = veb->seid; 8272 } 8273 8274 /* get vsi sw struct */ 8275 v_idx = i40e_vsi_mem_alloc(pf, type); 8276 if (v_idx < 0) 8277 goto err_alloc; 8278 vsi = pf->vsi[v_idx]; 8279 if (!vsi) 8280 goto err_alloc; 8281 vsi->type = type; 8282 vsi->veb_idx = (veb ? 
veb->idx : I40E_NO_VEB); 8283 8284 if (type == I40E_VSI_MAIN) 8285 pf->lan_vsi = v_idx; 8286 else if (type == I40E_VSI_SRIOV) 8287 vsi->vf_id = param1; 8288 /* assign it some queues */ 8289 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, 8290 vsi->idx); 8291 if (ret < 0) { 8292 dev_info(&pf->pdev->dev, 8293 "failed to get tracking for %d queues for VSI %d err=%d\n", 8294 vsi->alloc_queue_pairs, vsi->seid, ret); 8295 goto err_vsi; 8296 } 8297 vsi->base_queue = ret; 8298 8299 /* get a VSI from the hardware */ 8300 vsi->uplink_seid = uplink_seid; 8301 ret = i40e_add_vsi(vsi); 8302 if (ret) 8303 goto err_vsi; 8304 8305 switch (vsi->type) { 8306 /* setup the netdev if needed */ 8307 case I40E_VSI_MAIN: 8308 case I40E_VSI_VMDQ2: 8309 case I40E_VSI_FCOE: 8310 ret = i40e_config_netdev(vsi); 8311 if (ret) 8312 goto err_netdev; 8313 ret = register_netdev(vsi->netdev); 8314 if (ret) 8315 goto err_netdev; 8316 vsi->netdev_registered = true; 8317 netif_carrier_off(vsi->netdev); 8318 #ifdef CONFIG_I40E_DCB 8319 /* Setup DCB netlink interface */ 8320 i40e_dcbnl_setup(vsi); 8321 #endif /* CONFIG_I40E_DCB */ 8322 /* fall through */ 8323 8324 case I40E_VSI_FDIR: 8325 /* set up vectors and rings if needed */ 8326 ret = i40e_vsi_setup_vectors(vsi); 8327 if (ret) 8328 goto err_msix; 8329 8330 ret = i40e_alloc_rings(vsi); 8331 if (ret) 8332 goto err_rings; 8333 8334 /* map all of the rings to the q_vectors */ 8335 i40e_vsi_map_rings_to_vectors(vsi); 8336 8337 i40e_vsi_reset_stats(vsi); 8338 break; 8339 8340 default: 8341 /* no netdev or rings for the other VSI types */ 8342 break; 8343 } 8344 8345 return vsi; 8346 8347 err_rings: 8348 i40e_vsi_free_q_vectors(vsi); 8349 err_msix: 8350 if (vsi->netdev_registered) { 8351 vsi->netdev_registered = false; 8352 unregister_netdev(vsi->netdev); 8353 free_netdev(vsi->netdev); 8354 vsi->netdev = NULL; 8355 } 8356 err_netdev: 8357 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 8358 err_vsi: 8359 i40e_vsi_clear(vsi); 8360 err_alloc: 8361 return NULL; 8362 } 8363 8364 /** 8365 * i40e_veb_get_bw_info - Query VEB BW information 8366 * @veb: the veb to query 8367 * 8368 * Query the Tx scheduler BW configuration data for given VEB 8369 **/ 8370 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 8371 { 8372 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 8373 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; 8374 struct i40e_pf *pf = veb->pf; 8375 struct i40e_hw *hw = &pf->hw; 8376 u32 tc_bw_max; 8377 int ret = 0; 8378 int i; 8379 8380 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 8381 &bw_data, NULL); 8382 if (ret) { 8383 dev_info(&pf->pdev->dev, 8384 "query veb bw config failed, aq_err=%d\n", 8385 hw->aq.asq_last_status); 8386 goto out; 8387 } 8388 8389 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 8390 &ets_data, NULL); 8391 if (ret) { 8392 dev_info(&pf->pdev->dev, 8393 "query veb bw ets config failed, aq_err=%d\n", 8394 hw->aq.asq_last_status); 8395 goto out; 8396 } 8397 8398 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 8399 veb->bw_max_quanta = ets_data.tc_bw_max; 8400 veb->is_abs_credits = bw_data.absolute_credits_enable; 8401 veb->enabled_tc = ets_data.tc_valid_bits; 8402 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 8403 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 8404 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 8405 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; 8406 veb->bw_tc_limit_credits[i] = 8407 le16_to_cpu(bw_data.tc_bw_limits[i]); 8408 veb->bw_tc_max_quanta[i] = 
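/* tc_bw_max packs one 4-bit field per TC; only the low
 * three bits of each field carry the max-quanta value
 */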
((tc_bw_max >> (i*4)) & 0x7);
8409 }
8410
8411 out:
8412 return ret;
8413 }
8414
8415 /**
8416 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
8417 * @pf: board private structure
8418 *
8419 * On error: returns error code (negative)
8420 * On success: returns veb index in PF (non-negative)
8421 **/
8422 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
8423 {
8424 int ret = -ENOENT;
8425 struct i40e_veb *veb;
8426 int i;
8427
8428 /* Need to protect the allocation of switch elements at the PF level */
8429 mutex_lock(&pf->switch_mutex);
8430
8431 /* VEB list may be fragmented if VEB creation/destruction has
8432 * been happening. We can afford to do a quick scan to look
8433 * for any free slots in the list.
8434 *
8435 * find the next empty veb slot
8436 */
8437 i = 0;
8438 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
8439 i++;
8440 if (i >= I40E_MAX_VEB) {
8441 ret = -ENOMEM;
8442 goto err_alloc_veb; /* out of VEB slots! */
8443 }
8444
8445 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
8446 if (!veb) {
8447 ret = -ENOMEM;
8448 goto err_alloc_veb;
8449 }
8450 veb->pf = pf;
8451 veb->idx = i;
8452 veb->enabled_tc = 1;
8453
8454 pf->veb[i] = veb;
8455 ret = i;
8456 err_alloc_veb:
8457 mutex_unlock(&pf->switch_mutex);
8458 return ret;
8459 }
8460
8461 /**
8462 * i40e_switch_branch_release - Delete a branch of the switch tree
8463 * @branch: where to start deleting
8464 *
8465 * This uses recursion to find the tips of the branch to be
8466 * removed, deleting until we get back to and can delete this VEB.
8467 **/
8468 static void i40e_switch_branch_release(struct i40e_veb *branch)
8469 {
8470 struct i40e_pf *pf = branch->pf;
8471 u16 branch_seid = branch->seid;
8472 u16 veb_idx = branch->idx;
8473 int i;
8474
8475 /* release any VEBs on this VEB - RECURSION */
8476 for (i = 0; i < I40E_MAX_VEB; i++) {
8477 if (!pf->veb[i])
8478 continue;
8479 if (pf->veb[i]->uplink_seid == branch->seid)
8480 i40e_switch_branch_release(pf->veb[i]);
8481 }
8482
8483 /* Release the VSIs on this VEB, but not the owner VSI.
8484 *
8485 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
8486 * the VEB itself, so don't use (*branch) after this loop.
8487 */
8488 for (i = 0; i < pf->num_alloc_vsi; i++) {
8489 if (!pf->vsi[i])
8490 continue;
8491 if (pf->vsi[i]->uplink_seid == branch_seid &&
8492 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8493 i40e_vsi_release(pf->vsi[i]);
8494 }
8495 }
8496
8497 /* There's one corner case where the VEB might not have been
8498 * removed, so double check it here and remove it if needed.
8499 * This case happens if the veb was created from the debugfs
8500 * commands and no VSIs were added to it.
8501 */ 8502 if (pf->veb[veb_idx]) 8503 i40e_veb_release(pf->veb[veb_idx]); 8504 } 8505 8506 /** 8507 * i40e_veb_clear - remove veb struct 8508 * @veb: the veb to remove 8509 **/ 8510 static void i40e_veb_clear(struct i40e_veb *veb) 8511 { 8512 if (!veb) 8513 return; 8514 8515 if (veb->pf) { 8516 struct i40e_pf *pf = veb->pf; 8517 8518 mutex_lock(&pf->switch_mutex); 8519 if (pf->veb[veb->idx] == veb) 8520 pf->veb[veb->idx] = NULL; 8521 mutex_unlock(&pf->switch_mutex); 8522 } 8523 8524 kfree(veb); 8525 } 8526 8527 /** 8528 * i40e_veb_release - Delete a VEB and free its resources 8529 * @veb: the VEB being removed 8530 **/ 8531 void i40e_veb_release(struct i40e_veb *veb) 8532 { 8533 struct i40e_vsi *vsi = NULL; 8534 struct i40e_pf *pf; 8535 int i, n = 0; 8536 8537 pf = veb->pf; 8538 8539 /* find the remaining VSI and check for extras */ 8540 for (i = 0; i < pf->num_alloc_vsi; i++) { 8541 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 8542 n++; 8543 vsi = pf->vsi[i]; 8544 } 8545 } 8546 if (n != 1) { 8547 dev_info(&pf->pdev->dev, 8548 "can't remove VEB %d with %d VSIs left\n", 8549 veb->seid, n); 8550 return; 8551 } 8552 8553 /* move the remaining VSI to uplink veb */ 8554 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 8555 if (veb->uplink_seid) { 8556 vsi->uplink_seid = veb->uplink_seid; 8557 if (veb->uplink_seid == pf->mac_seid) 8558 vsi->veb_idx = I40E_NO_VEB; 8559 else 8560 vsi->veb_idx = veb->veb_idx; 8561 } else { 8562 /* floating VEB */ 8563 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 8564 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 8565 } 8566 8567 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 8568 i40e_veb_clear(veb); 8569 } 8570 8571 /** 8572 * i40e_add_veb - create the VEB in the switch 8573 * @veb: the VEB to be instantiated 8574 * @vsi: the controlling VSI 8575 **/ 8576 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 8577 { 8578 bool is_default = false; 8579 bool is_cloud = false; 8580 int ret; 8581 8582 /* get a VEB from the hardware */ 8583 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, 8584 veb->enabled_tc, is_default, 8585 is_cloud, &veb->seid, NULL); 8586 if (ret) { 8587 dev_info(&veb->pf->pdev->dev, 8588 "couldn't add VEB, err %d, aq_err %d\n", 8589 ret, veb->pf->hw.aq.asq_last_status); 8590 return -EPERM; 8591 } 8592 8593 /* get statistics counter */ 8594 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, 8595 &veb->stats_idx, NULL, NULL, NULL); 8596 if (ret) { 8597 dev_info(&veb->pf->pdev->dev, 8598 "couldn't get VEB statistics idx, err %d, aq_err %d\n", 8599 ret, veb->pf->hw.aq.asq_last_status); 8600 return -EPERM; 8601 } 8602 ret = i40e_veb_get_bw_info(veb); 8603 if (ret) { 8604 dev_info(&veb->pf->pdev->dev, 8605 "couldn't get VEB bw info, err %d, aq_err %d\n", 8606 ret, veb->pf->hw.aq.asq_last_status); 8607 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); 8608 return -ENOENT; 8609 } 8610 8611 vsi->uplink_seid = veb->seid; 8612 vsi->veb_idx = veb->idx; 8613 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 8614 8615 return 0; 8616 } 8617 8618 /** 8619 * i40e_veb_setup - Set up a VEB 8620 * @pf: board private structure 8621 * @flags: VEB setup flags 8622 * @uplink_seid: the switch element to link to 8623 * @vsi_seid: the initial VSI seid 8624 * @enabled_tc: Enabled TC bit-map 8625 * 8626 * This allocates the sw VEB structure and links it into the switch 8627 * It is possible and legal for this to be a duplicate of an already 8628 * existing VEB. 
It is also possible for both uplink and vsi seids 8629 * to be zero, in order to create a floating VEB. 8630 * 8631 * Returns pointer to the successfully allocated VEB sw struct on 8632 * success, otherwise returns NULL on failure. 8633 **/ 8634 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 8635 u16 uplink_seid, u16 vsi_seid, 8636 u8 enabled_tc) 8637 { 8638 struct i40e_veb *veb, *uplink_veb = NULL; 8639 int vsi_idx, veb_idx; 8640 int ret; 8641 8642 /* if one seid is 0, the other must be 0 to create a floating relay */ 8643 if ((uplink_seid == 0 || vsi_seid == 0) && 8644 (uplink_seid + vsi_seid != 0)) { 8645 dev_info(&pf->pdev->dev, 8646 "one, not both seid's are 0: uplink=%d vsi=%d\n", 8647 uplink_seid, vsi_seid); 8648 return NULL; 8649 } 8650 8651 /* make sure there is such a vsi and uplink */ 8652 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) 8653 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 8654 break; 8655 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { 8656 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 8657 vsi_seid); 8658 return NULL; 8659 } 8660 8661 if (uplink_seid && uplink_seid != pf->mac_seid) { 8662 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 8663 if (pf->veb[veb_idx] && 8664 pf->veb[veb_idx]->seid == uplink_seid) { 8665 uplink_veb = pf->veb[veb_idx]; 8666 break; 8667 } 8668 } 8669 if (!uplink_veb) { 8670 dev_info(&pf->pdev->dev, 8671 "uplink seid %d not found\n", uplink_seid); 8672 return NULL; 8673 } 8674 } 8675 8676 /* get veb sw struct */ 8677 veb_idx = i40e_veb_mem_alloc(pf); 8678 if (veb_idx < 0) 8679 goto err_alloc; 8680 veb = pf->veb[veb_idx]; 8681 veb->flags = flags; 8682 veb->uplink_seid = uplink_seid; 8683 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 8684 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 8685 8686 /* create the VEB in the switch */ 8687 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 8688 if (ret) 8689 goto err_veb; 8690 if (vsi_idx == pf->lan_vsi) 8691 pf->lan_veb = veb->idx; 8692 8693 return veb; 8694 8695 err_veb: 8696 i40e_veb_clear(veb); 8697 err_alloc: 8698 return NULL; 8699 } 8700 8701 /** 8702 * i40e_setup_pf_switch_element - set pf vars based on switch type 8703 * @pf: board private structure 8704 * @ele: element we are building info from 8705 * @num_reported: total number of elements 8706 * @printconfig: should we print the contents 8707 * 8708 * helper function to assist in extracting a few useful SEID values. 8709 **/ 8710 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 8711 struct i40e_aqc_switch_config_element_resp *ele, 8712 u16 num_reported, bool printconfig) 8713 { 8714 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 8715 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 8716 u8 element_type = ele->element_type; 8717 u16 seid = le16_to_cpu(ele->seid); 8718 8719 if (printconfig) 8720 dev_info(&pf->pdev->dev, 8721 "type=%d seid=%d uplink=%d downlink=%d\n", 8722 element_type, seid, uplink_seid, downlink_seid); 8723 8724 switch (element_type) { 8725 case I40E_SWITCH_ELEMENT_TYPE_MAC: 8726 pf->mac_seid = seid; 8727 break; 8728 case I40E_SWITCH_ELEMENT_TYPE_VEB: 8729 /* Main VEB? 
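 * (i.e. a VEB whose uplink is the port's MAC seid; only such a
 * VEB is tracked as pf->lan_veb below)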
*/
8730 if (uplink_seid != pf->mac_seid)
8731 break;
8732 if (pf->lan_veb == I40E_NO_VEB) {
8733 int v;
8734
8735 /* find existing or else empty VEB */
8736 for (v = 0; v < I40E_MAX_VEB; v++) {
8737 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
8738 pf->lan_veb = v;
8739 break;
8740 }
8741 }
8742 if (pf->lan_veb == I40E_NO_VEB) {
8743 v = i40e_veb_mem_alloc(pf);
8744 if (v < 0)
8745 break;
8746 pf->lan_veb = v;
8747 }
8748 }
8749
8750 pf->veb[pf->lan_veb]->seid = seid;
8751 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
8752 pf->veb[pf->lan_veb]->pf = pf;
8753 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
8754 break;
8755 case I40E_SWITCH_ELEMENT_TYPE_VSI:
8756 if (num_reported != 1)
8757 break;
8758 /* This is immediately after a reset so we can assume this is
8759 * the PF's VSI
8760 */
8761 pf->mac_seid = uplink_seid;
8762 pf->pf_seid = downlink_seid;
8763 pf->main_vsi_seid = seid;
8764 if (printconfig)
8765 dev_info(&pf->pdev->dev,
8766 "pf_seid=%d main_vsi_seid=%d\n",
8767 pf->pf_seid, pf->main_vsi_seid);
8768 break;
8769 case I40E_SWITCH_ELEMENT_TYPE_PF:
8770 case I40E_SWITCH_ELEMENT_TYPE_VF:
8771 case I40E_SWITCH_ELEMENT_TYPE_EMP:
8772 case I40E_SWITCH_ELEMENT_TYPE_BMC:
8773 case I40E_SWITCH_ELEMENT_TYPE_PE:
8774 case I40E_SWITCH_ELEMENT_TYPE_PA:
8775 /* ignore these for now */
8776 break;
8777 default:
8778 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
8779 element_type, seid);
8780 break;
8781 }
8782 }
8783
8784 /**
8785 * i40e_fetch_switch_configuration - Get switch config from firmware
8786 * @pf: board private structure
8787 * @printconfig: should we print the contents
8788 *
8789 * Get the current switch configuration from the device and
8790 * extract a few useful SEID values.
8791 **/
8792 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
8793 {
8794 struct i40e_aqc_get_switch_config_resp *sw_config;
8795 u16 next_seid = 0;
8796 int ret = 0;
8797 u8 *aq_buf;
8798 int i;
8799
8800 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
8801 if (!aq_buf)
8802 return -ENOMEM;
8803
8804 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
8805 do {
8806 u16 num_reported, num_total;
8807
8808 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
8809 I40E_AQ_LARGE_BUF,
8810 &next_seid, NULL);
8811 if (ret) {
8812 dev_info(&pf->pdev->dev,
8813 "get switch config failed %d aq_err=%x\n",
8814 ret, pf->hw.aq.asq_last_status);
8815 kfree(aq_buf);
8816 return -ENOENT;
8817 }
8818
8819 num_reported = le16_to_cpu(sw_config->header.num_reported);
8820 num_total = le16_to_cpu(sw_config->header.num_total);
8821
8822 if (printconfig)
8823 dev_info(&pf->pdev->dev,
8824 "header: %d reported %d total\n",
8825 num_reported, num_total);
8826
8827 for (i = 0; i < num_reported; i++) {
8828 struct i40e_aqc_switch_config_element_resp *ele =
8829 &sw_config->element[i];
8830
8831 i40e_setup_pf_switch_element(pf, ele, num_reported,
8832 printconfig);
8833 }
8834 } while (next_seid != 0);
8835
8836 kfree(aq_buf);
8837 return ret;
8838 }
8839
8840 /**
8841 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
8842 * @pf: board private structure
8843 * @reinit: if the Main VSI needs to be re-initialized.
8844 *
8845 * Returns 0 on success, negative value on failure
8846 **/
8847 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
8848 {
8849 int ret;
8850
8851 /* find out what's out there already */
8852 ret = i40e_fetch_switch_configuration(pf, false);
8853 if (ret) {
8854 dev_info(&pf->pdev->dev,
8855 "couldn't fetch switch config, err %d, aq_err %d\n",
8856 ret, pf->hw.aq.asq_last_status);
8857 return ret;
8858 }
8859 i40e_pf_reset_stats(pf);
8860
8861 /* first time setup */
8862 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
8863 struct i40e_vsi *vsi = NULL;
8864 u16 uplink_seid;
8865
8866 /* Set up the PF VSI associated with the PF's main VSI
8867 * that is already in the HW switch
8868 */
8869 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8870 uplink_seid = pf->veb[pf->lan_veb]->seid;
8871 else
8872 uplink_seid = pf->mac_seid;
8873 if (pf->lan_vsi == I40E_NO_VSI)
8874 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
8875 else if (reinit)
8876 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
8877 if (!vsi) {
8878 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
8879 i40e_fdir_teardown(pf);
8880 return -EAGAIN;
8881 }
8882 } else {
8883 /* force a reset of TC and queue layout configurations */
8884 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8885 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8886 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8887 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8888 }
8889 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
8890
8891 i40e_fdir_sb_setup(pf);
8892
8893 /* Setup static PF queue filter control settings */
8894 ret = i40e_setup_pf_filter_control(pf);
8895 if (ret) {
8896 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
8897 ret);
8898 /* Failure here should not stop continuing other steps */
8899 }
8900
8901 /* enable RSS in the HW, even for only one queue, as the stack can use
8902 * the hash
8903 */
8904 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
8905 i40e_config_rss(pf);
8906
8907 /* fill in link information and enable LSE reporting */
8908 i40e_update_link_info(&pf->hw, true);
8909 i40e_link_event(pf);
8910
8911 /* Initialize user-specific link properties */
8912 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8913 I40E_AQ_AN_COMPLETED) ? true : false);
8914
8923 i40e_ptp_init(pf);
8924
8925 return ret;
8926 }
8927
8928 /**
8929 * i40e_determine_queue_usage - Work out queue distribution
8930 * @pf: board private structure
8931 **/
8932 static void i40e_determine_queue_usage(struct i40e_pf *pf)
8933 {
8934 int queues_left;
8935
8936 pf->num_lan_qps = 0;
8937 #ifdef I40E_FCOE
8938 pf->num_fcoe_qps = 0;
8939 #endif
8940
8941 /* Find the max queues to be put into basic use. We'll always be
8942 * using TC0, whether or not DCB is running, and TC0 will get the
8943 * big RSS set.
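 *
 * The carving order below matters: LAN/RSS queues come out of the
 * pool first, then FCoE, then one queue for Flow Director, then the
 * VF and VMDq queue sets, with each feature trimmed or disabled as
 * the pool runs dry.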
8944 */ 8945 queues_left = pf->hw.func_caps.num_tx_qp; 8946 8947 if ((queues_left == 1) || 8948 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 8949 /* one qp for PF, no queues for anything else */ 8950 queues_left = 0; 8951 pf->rss_size = pf->num_lan_qps = 1; 8952 8953 /* make sure all the fancies are disabled */ 8954 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8955 #ifdef I40E_FCOE 8956 I40E_FLAG_FCOE_ENABLED | 8957 #endif 8958 I40E_FLAG_FD_SB_ENABLED | 8959 I40E_FLAG_FD_ATR_ENABLED | 8960 I40E_FLAG_DCB_CAPABLE | 8961 I40E_FLAG_SRIOV_ENABLED | 8962 I40E_FLAG_VMDQ_ENABLED); 8963 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 8964 I40E_FLAG_FD_SB_ENABLED | 8965 I40E_FLAG_FD_ATR_ENABLED | 8966 I40E_FLAG_DCB_CAPABLE))) { 8967 /* one qp for PF */ 8968 pf->rss_size = pf->num_lan_qps = 1; 8969 queues_left -= pf->num_lan_qps; 8970 8971 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 8972 #ifdef I40E_FCOE 8973 I40E_FLAG_FCOE_ENABLED | 8974 #endif 8975 I40E_FLAG_FD_SB_ENABLED | 8976 I40E_FLAG_FD_ATR_ENABLED | 8977 I40E_FLAG_DCB_ENABLED | 8978 I40E_FLAG_VMDQ_ENABLED); 8979 } else { 8980 /* Not enough queues for all TCs */ 8981 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 8982 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 8983 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 8984 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 8985 } 8986 pf->num_lan_qps = pf->rss_size_max; 8987 queues_left -= pf->num_lan_qps; 8988 } 8989 8990 #ifdef I40E_FCOE 8991 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 8992 if (I40E_DEFAULT_FCOE <= queues_left) { 8993 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; 8994 } else if (I40E_MINIMUM_FCOE <= queues_left) { 8995 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; 8996 } else { 8997 pf->num_fcoe_qps = 0; 8998 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 8999 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); 9000 } 9001 9002 queues_left -= pf->num_fcoe_qps; 9003 } 9004 9005 #endif 9006 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 9007 if (queues_left > 1) { 9008 queues_left -= 1; /* save 1 queue for FD */ 9009 } else { 9010 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 9011 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); 9012 } 9013 } 9014 9015 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 9016 pf->num_vf_qps && pf->num_req_vfs && queues_left) { 9017 pf->num_req_vfs = min_t(int, pf->num_req_vfs, 9018 (queues_left / pf->num_vf_qps)); 9019 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); 9020 } 9021 9022 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 9023 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { 9024 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, 9025 (queues_left / pf->num_vmdq_qps)); 9026 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); 9027 } 9028 9029 pf->queues_left = queues_left; 9030 #ifdef I40E_FCOE 9031 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); 9032 #endif 9033 } 9034 9035 /** 9036 * i40e_setup_pf_filter_control - Setup PF static filter control 9037 * @pf: PF to be setup 9038 * 9039 * i40e_setup_pf_filter_control sets up a pf's initial filter control 9040 * settings. If PE/FCoE are enabled then it will also set the per PF 9041 * based filter sizes required for them. It also enables Flow director, 9042 * ethertype and macvlan type filter settings for the pf. 
9043 * 9044 * Returns 0 on success, negative on failure 9045 **/ 9046 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) 9047 { 9048 struct i40e_filter_control_settings *settings = &pf->filter_settings; 9049 9050 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; 9051 9052 /* Flow Director is enabled */ 9053 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) 9054 settings->enable_fdir = true; 9055 9056 /* Ethtype and MACVLAN filters enabled for PF */ 9057 settings->enable_ethtype = true; 9058 settings->enable_macvlan = true; 9059 9060 if (i40e_set_filter_control(&pf->hw, settings)) 9061 return -ENOENT; 9062 9063 return 0; 9064 } 9065 9066 #define INFO_STRING_LEN 255 9067 static void i40e_print_features(struct i40e_pf *pf) 9068 { 9069 struct i40e_hw *hw = &pf->hw; 9070 char *buf, *string; 9071 9072 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); 9073 if (!string) { 9074 dev_err(&pf->pdev->dev, "Features string allocation failed\n"); 9075 return; 9076 } 9077 9078 buf = string; 9079 9080 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); 9081 #ifdef CONFIG_PCI_IOV 9082 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); 9083 #endif 9084 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, 9085 pf->vsi[pf->lan_vsi]->num_queue_pairs); 9086 9087 if (pf->flags & I40E_FLAG_RSS_ENABLED) 9088 buf += sprintf(buf, "RSS "); 9089 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 9090 buf += sprintf(buf, "FD_ATR "); 9091 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 9092 buf += sprintf(buf, "FD_SB "); 9093 buf += sprintf(buf, "NTUPLE "); 9094 } 9095 if (pf->flags & I40E_FLAG_DCB_CAPABLE) 9096 buf += sprintf(buf, "DCB "); 9097 if (pf->flags & I40E_FLAG_PTP) 9098 buf += sprintf(buf, "PTP "); 9099 #ifdef I40E_FCOE 9100 if (pf->flags & I40E_FLAG_FCOE_ENABLED) 9101 buf += sprintf(buf, "FCOE "); 9102 #endif 9103 9104 BUG_ON(buf > (string + INFO_STRING_LEN)); 9105 dev_info(&pf->pdev->dev, "%s\n", string); 9106 kfree(string); 9107 } 9108 9109 /** 9110 * i40e_probe - Device initialization routine 9111 * @pdev: PCI device information struct 9112 * @ent: entry in i40e_pci_tbl 9113 * 9114 * i40e_probe initializes a pf identified by a pci_dev structure. 9115 * The OS initialization, configuring of the pf private structure, 9116 * and a hardware reset occur. 9117 * 9118 * Returns 0 on success, negative on failure 9119 **/ 9120 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 9121 { 9122 struct i40e_pf *pf; 9123 struct i40e_hw *hw; 9124 static u16 pfs_found; 9125 u16 link_status; 9126 int err = 0; 9127 u32 len; 9128 u32 i; 9129 9130 err = pci_enable_device_mem(pdev); 9131 if (err) 9132 return err; 9133 9134 /* set up for high or low dma */ 9135 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9136 if (err) { 9137 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9138 if (err) { 9139 dev_err(&pdev->dev, 9140 "DMA configuration failed: 0x%x\n", err); 9141 goto err_dma; 9142 } 9143 } 9144 9145 /* set up pci connections */ 9146 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 9147 IORESOURCE_MEM), i40e_driver_name); 9148 if (err) { 9149 dev_info(&pdev->dev, 9150 "pci_request_selected_regions failed %d\n", err); 9151 goto err_pci_reg; 9152 } 9153 9154 pci_enable_pcie_error_reporting(pdev); 9155 pci_set_master(pdev); 9156 9157 /* Now that we have a PCI connection, we need to do the 9158 * low level device setup. 
This is primarily setting up 9159 * the Admin Queue structures and then querying for the 9160 * device's current profile information. 9161 */ 9162 pf = kzalloc(sizeof(*pf), GFP_KERNEL); 9163 if (!pf) { 9164 err = -ENOMEM; 9165 goto err_pf_alloc; 9166 } 9167 pf->next_vsi = 0; 9168 pf->pdev = pdev; 9169 set_bit(__I40E_DOWN, &pf->state); 9170 9171 hw = &pf->hw; 9172 hw->back = pf; 9173 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 9174 pci_resource_len(pdev, 0)); 9175 if (!hw->hw_addr) { 9176 err = -EIO; 9177 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", 9178 (unsigned int)pci_resource_start(pdev, 0), 9179 (unsigned int)pci_resource_len(pdev, 0), err); 9180 goto err_ioremap; 9181 } 9182 hw->vendor_id = pdev->vendor; 9183 hw->device_id = pdev->device; 9184 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 9185 hw->subsystem_vendor_id = pdev->subsystem_vendor; 9186 hw->subsystem_device_id = pdev->subsystem_device; 9187 hw->bus.device = PCI_SLOT(pdev->devfn); 9188 hw->bus.func = PCI_FUNC(pdev->devfn); 9189 pf->instance = pfs_found; 9190 9191 if (debug != -1) { 9192 pf->msg_enable = pf->hw.debug_mask; 9193 pf->msg_enable = debug; 9194 } 9195 9196 /* do a special CORER for clearing PXE mode once at init */ 9197 if (hw->revision_id == 0 && 9198 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { 9199 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 9200 i40e_flush(hw); 9201 msleep(200); 9202 pf->corer_count++; 9203 9204 i40e_clear_pxe_mode(hw); 9205 } 9206 9207 /* Reset here to make sure all is clean and to define PF 'n' */ 9208 i40e_clear_hw(hw); 9209 err = i40e_pf_reset(hw); 9210 if (err) { 9211 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 9212 goto err_pf_reset; 9213 } 9214 pf->pfr_count++; 9215 9216 hw->aq.num_arq_entries = I40E_AQ_LEN; 9217 hw->aq.num_asq_entries = I40E_AQ_LEN; 9218 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; 9219 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 9220 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 9221 9222 snprintf(pf->int_name, sizeof(pf->int_name) - 1, 9223 "%s-%s:misc", 9224 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); 9225 9226 err = i40e_init_shared_code(hw); 9227 if (err) { 9228 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); 9229 goto err_pf_reset; 9230 } 9231 9232 /* set up a default setting for link flow control */ 9233 pf->hw.fc.requested_mode = I40E_FC_NONE; 9234 9235 err = i40e_init_adminq(hw); 9236 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); 9237 if (err) { 9238 dev_info(&pdev->dev, 9239 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); 9240 goto err_pf_reset; 9241 } 9242 9243 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 9244 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) 9245 dev_info(&pdev->dev, 9246 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 9247 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 9248 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) 9249 dev_info(&pdev->dev, 9250 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 9251 9252 9253 i40e_verify_eeprom(pf); 9254 9255 /* Rev 0 hardware was never productized */ 9256 if (hw->revision_id < 1) 9257 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. 
Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 9258 9259 i40e_clear_pxe_mode(hw); 9260 err = i40e_get_capabilities(pf); 9261 if (err) 9262 goto err_adminq_setup; 9263 9264 err = i40e_sw_init(pf); 9265 if (err) { 9266 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 9267 goto err_sw_init; 9268 } 9269 9270 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 9271 hw->func_caps.num_rx_qp, 9272 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 9273 if (err) { 9274 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 9275 goto err_init_lan_hmc; 9276 } 9277 9278 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 9279 if (err) { 9280 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 9281 err = -ENOENT; 9282 goto err_configure_lan_hmc; 9283 } 9284 9285 /* Disable LLDP for NICs that have firmware versions lower than v4.3. 9286 * Ignore error return codes because if it was already disabled via 9287 * hardware settings this will fail 9288 */ 9289 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || 9290 (pf->hw.aq.fw_maj_ver < 4)) { 9291 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); 9292 i40e_aq_stop_lldp(hw, true, NULL); 9293 } 9294 9295 i40e_get_mac_addr(hw, hw->mac.addr); 9296 if (!is_valid_ether_addr(hw->mac.addr)) { 9297 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 9298 err = -EIO; 9299 goto err_mac_addr; 9300 } 9301 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 9302 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); 9303 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 9304 if (is_valid_ether_addr(hw->mac.port_addr)) 9305 pf->flags |= I40E_FLAG_PORT_ID_VALID; 9306 #ifdef I40E_FCOE 9307 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); 9308 if (err) 9309 dev_info(&pdev->dev, 9310 "(non-fatal) SAN MAC retrieval failed: %d\n", err); 9311 if (!is_valid_ether_addr(hw->mac.san_addr)) { 9312 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", 9313 hw->mac.san_addr); 9314 ether_addr_copy(hw->mac.san_addr, hw->mac.addr); 9315 } 9316 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); 9317 #endif /* I40E_FCOE */ 9318 9319 pci_set_drvdata(pdev, pf); 9320 pci_save_state(pdev); 9321 #ifdef CONFIG_I40E_DCB 9322 err = i40e_init_pf_dcb(pf); 9323 if (err) { 9324 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); 9325 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 9326 /* Continue without DCB enabled */ 9327 } 9328 #endif /* CONFIG_I40E_DCB */ 9329 9330 /* set up periodic task facility */ 9331 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 9332 pf->service_timer_period = HZ; 9333 9334 INIT_WORK(&pf->service_task, i40e_service_task); 9335 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 9336 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 9337 pf->link_check_timeout = jiffies; 9338 9339 /* WoL defaults to disabled */ 9340 pf->wol_en = false; 9341 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 9342 9343 /* set up the main switch operations */ 9344 i40e_determine_queue_usage(pf); 9345 i40e_init_interrupt_scheme(pf); 9346 9347 /* The number of VSIs reported by the FW is the minimum guaranteed 9348 * to us; HW supports far more and we share the remaining pool with 9349 * the other PFs. We allocate space for more than the guarantee with 9350 * the understanding that we might not get them all later. 
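 * The net effect of the check below is simply
 * num_alloc_vsi = max(func_caps.num_vsis, I40E_MIN_VSI_ALLOC).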
9351 */ 9352 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) 9353 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; 9354 else 9355 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; 9356 9357 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ 9358 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; 9359 pf->vsi = kzalloc(len, GFP_KERNEL); 9360 if (!pf->vsi) { 9361 err = -ENOMEM; 9362 goto err_switch_setup; 9363 } 9364 9365 err = i40e_setup_pf_switch(pf, false); 9366 if (err) { 9367 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 9368 goto err_vsis; 9369 } 9370 /* if FDIR VSI was set up, start it now */ 9371 for (i = 0; i < pf->num_alloc_vsi; i++) { 9372 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 9373 i40e_vsi_open(pf->vsi[i]); 9374 break; 9375 } 9376 } 9377 9378 /* driver is only interested in link up/down and module qualification 9379 * reports from firmware 9380 */ 9381 err = i40e_aq_set_phy_int_mask(&pf->hw, 9382 I40E_AQ_EVENT_LINK_UPDOWN | 9383 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); 9384 if (err) 9385 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); 9386 9387 msleep(75); 9388 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 9389 if (err) { 9390 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", 9391 pf->hw.aq.asq_last_status); 9392 } 9393 9394 /* The main driver is (mostly) up and happy. We need to set this state 9395 * before setting up the misc vector or we get a race and the vector 9396 * ends up disabled forever. 9397 */ 9398 clear_bit(__I40E_DOWN, &pf->state); 9399 9400 /* In case of MSIX we are going to setup the misc vector right here 9401 * to handle admin queue events etc. In case of legacy and MSI 9402 * the misc functionality and queue processing is combined in 9403 * the same vector and that gets setup at open. 9404 */ 9405 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 9406 err = i40e_setup_misc_vector(pf); 9407 if (err) { 9408 dev_info(&pdev->dev, 9409 "setup of misc vector failed: %d\n", err); 9410 goto err_vsis; 9411 } 9412 } 9413 9414 #ifdef CONFIG_PCI_IOV 9415 /* prep for VF support */ 9416 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 9417 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 9418 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 9419 u32 val; 9420 9421 /* disable link interrupts for VFs */ 9422 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); 9423 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 9424 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 9425 i40e_flush(hw); 9426 9427 if (pci_num_vf(pdev)) { 9428 dev_info(&pdev->dev, 9429 "Active VFs found, allocating resources.\n"); 9430 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); 9431 if (err) 9432 dev_info(&pdev->dev, 9433 "Error %d allocating resources for existing VFs\n", 9434 err); 9435 } 9436 } 9437 #endif /* CONFIG_PCI_IOV */ 9438 9439 pfs_found++; 9440 9441 i40e_dbg_pf_init(pf); 9442 9443 /* tell the firmware that we're starting */ 9444 i40e_send_version(pf); 9445 9446 /* since everything's happy, start the service_task timer */ 9447 mod_timer(&pf->service_timer, 9448 round_jiffies(jiffies + pf->service_timer_period)); 9449 9450 #ifdef I40E_FCOE 9451 /* create FCoE interface */ 9452 i40e_fcoe_vsi_setup(pf); 9453 9454 #endif 9455 /* Get the negotiated link width and speed from PCI config space */ 9456 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); 9457 9458 i40e_set_pci_config_data(hw, link_status); 9459 9460 dev_info(&pdev->dev, "PCI-Express: %s %s\n", 9461 (hw->bus.speed == i40e_bus_speed_8000 ? 
"Speed 8.0GT/s" : 9462 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 9463 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : 9464 "Unknown"), 9465 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : 9466 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : 9467 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : 9468 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" : 9469 "Unknown")); 9470 9471 if (hw->bus.width < i40e_bus_width_pcie_x8 || 9472 hw->bus.speed < i40e_bus_speed_8000) { 9473 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 9474 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 9475 } 9476 9477 /* print a string summarizing features */ 9478 i40e_print_features(pf); 9479 9480 return 0; 9481 9482 /* Unwind what we've done if something failed in the setup */ 9483 err_vsis: 9484 set_bit(__I40E_DOWN, &pf->state); 9485 i40e_clear_interrupt_scheme(pf); 9486 kfree(pf->vsi); 9487 err_switch_setup: 9488 i40e_reset_interrupt_capability(pf); 9489 del_timer_sync(&pf->service_timer); 9490 err_mac_addr: 9491 err_configure_lan_hmc: 9492 (void)i40e_shutdown_lan_hmc(hw); 9493 err_init_lan_hmc: 9494 kfree(pf->qp_pile); 9495 kfree(pf->irq_pile); 9496 err_sw_init: 9497 err_adminq_setup: 9498 (void)i40e_shutdown_adminq(hw); 9499 err_pf_reset: 9500 iounmap(hw->hw_addr); 9501 err_ioremap: 9502 kfree(pf); 9503 err_pf_alloc: 9504 pci_disable_pcie_error_reporting(pdev); 9505 pci_release_selected_regions(pdev, 9506 pci_select_bars(pdev, IORESOURCE_MEM)); 9507 err_pci_reg: 9508 err_dma: 9509 pci_disable_device(pdev); 9510 return err; 9511 } 9512 9513 /** 9514 * i40e_remove - Device removal routine 9515 * @pdev: PCI device information struct 9516 * 9517 * i40e_remove is called by the PCI subsystem to alert the driver 9518 * that is should release a PCI device. This could be caused by a 9519 * Hot-Plug event, or because the driver is going to be removed from 9520 * memory. 9521 **/ 9522 static void i40e_remove(struct pci_dev *pdev) 9523 { 9524 struct i40e_pf *pf = pci_get_drvdata(pdev); 9525 i40e_status ret_code; 9526 int i; 9527 9528 i40e_dbg_pf_exit(pf); 9529 9530 i40e_ptp_stop(pf); 9531 9532 /* no more scheduling of any task */ 9533 set_bit(__I40E_DOWN, &pf->state); 9534 del_timer_sync(&pf->service_timer); 9535 cancel_work_sync(&pf->service_task); 9536 9537 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 9538 i40e_free_vfs(pf); 9539 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; 9540 } 9541 9542 i40e_fdir_teardown(pf); 9543 9544 /* If there is a switch structure or any orphans, remove them. 9545 * This will leave only the PF's VSI remaining. 9546 */ 9547 for (i = 0; i < I40E_MAX_VEB; i++) { 9548 if (!pf->veb[i]) 9549 continue; 9550 9551 if (pf->veb[i]->uplink_seid == pf->mac_seid || 9552 pf->veb[i]->uplink_seid == 0) 9553 i40e_switch_branch_release(pf->veb[i]); 9554 } 9555 9556 /* Now we can shutdown the PF's VSI, just before we kill 9557 * adminq and hmc. 
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the channel error state reported by the PCI core
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things and be ready
 * for remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_info(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}
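/* Illustrative sketch (hypothetical helper, not wired into the driver):
 * i40e_pci_error_detected() above requests a slot reset unconditionally,
 * but the AER core passes a channel state that drivers may use to pick a
 * gentler or harsher answer. One conventional mapping:
 */
static pci_ers_result_t __maybe_unused
i40e_example_map_channel_state(enum pci_channel_state state)
{
	switch (state) {
	case pci_channel_io_normal:
		/* non-fatal error, I/O is still possible */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* I/O is blocked, ask for a slot reset */
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
	default:
		/* device is gone for good */
		return PCI_ERS_RESULT_DISCONNECT;
	}
}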
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: PM state to transition to
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"%s: Cannot enable PCI device from suspend\n",
			__func__);
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif /* CONFIG_PM */
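/* Illustrative sketch (hypothetical callback, not registered anywhere):
 * the restore-then-save dance in i40e_resume() above is needed because
 * pci_restore_state() consumes the saved state (it clears
 * dev->state_saved), so a driver that wants the next suspend cycle to
 * restore the same config space must call pci_save_state() again right
 * away. The bare-bones ordering:
 */
static int __maybe_unused i40e_example_resume(struct pci_dev *pdev)
{
	int err;

	pci_set_power_state(pdev, PCI_D0);	/* back to full power */
	pci_restore_state(pdev);		/* clears dev->state_saved */
	pci_save_state(pdev);			/* re-arm for next suspend */

	err = pci_enable_device_mem(pdev);	/* re-enable memory resources */
	if (err)
		return err;
	pci_set_master(pdev);			/* restore bus mastering */
	pci_wake_from_d3(pdev, false);		/* no wake events while up */
	return 0;
}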
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);
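/* Usage note (illustrative): once built, the module is typically loaded
 * with "modprobe i40e"; "modinfo i40e" reports the MODULE_VERSION and
 * MODULE_DESCRIPTION strings declared above, and the banner printed by
 * i40e_init_module() shows up in "dmesg" as, e.g.:
 *
 *   i40e: Intel(R) Ethernet Connection XL710 Network Driver - version 1.2.6-k
 *   i40e: Copyright (c) 2013 - 2014 Intel Corporation.
 */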