/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 46
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
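/* Editorial note (not in the original source): with the values above, the
 * __stringify() pastes expand DRV_VERSION to the string literal "1.3.46-k",
 * which is what MODULE_VERSION() and ethtool driver info report.
 */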
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
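/* Illustrative usage of the shared-code memory callbacks above (an editorial
 * sketch, not part of the driver; the 4096-byte size and alignment are
 * invented for the example):
 *
 *	struct i40e_dma_mem ring;
 *
 *	if (i40e_allocate_dma_mem_d(hw, &ring, 4096, 4096))
 *		return -ENOMEM;
 *	... hand ring.pa to the hardware, access ring.va from the CPU ...
 *	i40e_free_dma_mem_d(hw, &ring);
 */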
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
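/* Illustrative pairing of the lump allocator calls above (an editorial
 * sketch, not part of the driver): carve 4 queue pairs out of pf->qp_pile
 * for a VSI, then return them later; the count of 4 is invented.
 *
 *	int base = i40e_get_lump(pf, pf->qp_pile, 4, vsi->idx);
 *
 *	if (base < 0)
 *		return base;
 *	vsi->base_queue = base;
 *	...
 *	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 */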
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start ? : netdev->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new next_to_use index (also written to the ring's tail register)
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: stats structure to be filled in
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
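/* A note on the fetch/retry loops above (editorial, not in the original):
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() form a seqcount
 * read side, so the 64-bit packet/byte pair is read coherently on 32-bit
 * machines without locking out the hot Tx/Rx paths. The canonical shape is:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		packets = ring->stats.packets;
 *		bytes = ring->stats.bytes;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 */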
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
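/* Worked example for i40e_stat_update48() (editorial, numbers invented):
 * suppose the first read after driver load returns 0xFFFFFFFFF000, so
 * *offset = 0xFFFFFFFFF000. A later read of 0x1000 is smaller than the
 * offset, meaning the 48-bit counter wrapped, so the reported value is
 * (0x1000 + 2^48) - 0xFFFFFFFFF000 = 0x2000, and the final mask keeps the
 * result within 48 bits.
 */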
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* Collect Link XOFF stats when PFC is disabled */
	if (!dcb_cfg->pfc.pfcenable) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];

		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}
}
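/* Editorial example for the priority-to-TC lookup above (values invented):
 * with PFC enabled and dcb_cfg->etscfg.prioritytable = {0, 0, 0, 1, 1, 2,
 * 3, 3}, a new XOFF counted for user priority 4 marks xoff[1] = true, i.e.
 * the pause is attributed to traffic class 1 rather than to the link as a
 * whole.
 */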
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
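/* Editorial note on the `p = &p[1]` step above: it relies on the driver
 * allocating each queue pair's Tx and Rx rings back to back in one block,
 * so the Rx ring sits immediately after its Tx sibling in memory. The same
 * assumption appears in i40e_get_netdev_stats_struct() as `&tx_ring[1]`.
 */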
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0 || vsi->info.pvid)
			return true;
	}

	return false;
}
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (ret)
		return -ENOENT;

	return 0;
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
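/* Illustrative usage of i40e_add_filter() (an editorial sketch, not part
 * of the driver): callers take mac_filter_list_lock around the list update
 * and then push the result to the hardware via i40e_sync_vsi_filters().
 *
 *	spin_lock_bh(&vsi->mac_filter_list_lock);
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);
 *	spin_unlock_bh(&vsi->mac_filter_list_lock);
 *	if (!f)
 *		return -ENOMEM;
 *	i40e_sync_vsi_filters(vsi, false);
 */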
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	i40e_sync_vsi_filters(vsi, false);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSIx
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	else
		qcount = vsi->alloc_queue_pairs;
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
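/* Worked example for the qmap encoding above (editorial, values invented):
 * for a TC at queue offset 8 with qcount 12, the power-of-2 loop yields
 * pow = 4 (the next power of two >= 12 is 16), so
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (4 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the hardware is told "16 queues starting at queue 8", of which the
 * driver actually uses the first 12.
 */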
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {

		if (!f->is_netdev)
			continue;

		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);

bottom_of_search_loop:
		continue;
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_mac_filter_entry_clone - Clones a MAC filter entry
 * @src: source MAC filter entry to be cloned
 *
 * Returns the pointer to newly cloned MAC filter entry or NULL
 * in case of error
 **/
static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
					struct i40e_mac_filter *src)
{
	struct i40e_mac_filter *f;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return NULL;
	*f = *src;

	INIT_LIST_HEAD(&f->list);

	return f;
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from list were slated to be removed from device.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct list_head *from)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, from, list) {
		f->changed = true;
		/* Move the element back into MAC filter list */
		list_move_tail(&f->list, &vsi->mac_filter_list);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 *
 * MAC filter entries from list were slated to be added to device.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		if (!f->changed && f->counter)
			f->changed = true;
	}
}

/**
 * i40e_cleanup_add_list - Deletes the element from add list and releases
 *			   memory
 * @add_list: Pointer to list which contains MAC filter entries
 **/
static void i40e_cleanup_add_list(struct list_head *add_list)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, add_list, list) {
		list_del(&f->list);
		kfree(f);
	}
}
1857 * 1858 * Returns 0 or error value 1859 **/ 1860 int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl) 1861 { 1862 struct list_head tmp_del_list, tmp_add_list; 1863 struct i40e_mac_filter *f, *ftmp, *fclone; 1864 bool promisc_forced_on = false; 1865 bool add_happened = false; 1866 int filter_list_len = 0; 1867 u32 changed_flags = 0; 1868 bool err_cond = false; 1869 i40e_status ret = 0; 1870 struct i40e_pf *pf; 1871 int num_add = 0; 1872 int num_del = 0; 1873 int aq_err = 0; 1874 u16 cmd_flags; 1875 1876 /* empty array typed pointers, kcalloc later */ 1877 struct i40e_aqc_add_macvlan_element_data *add_list; 1878 struct i40e_aqc_remove_macvlan_element_data *del_list; 1879 1880 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) 1881 usleep_range(1000, 2000); 1882 pf = vsi->back; 1883 1884 if (vsi->netdev) { 1885 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 1886 vsi->current_netdev_flags = vsi->netdev->flags; 1887 } 1888 1889 INIT_LIST_HEAD(&tmp_del_list); 1890 INIT_LIST_HEAD(&tmp_add_list); 1891 1892 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 1893 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 1894 1895 spin_lock_bh(&vsi->mac_filter_list_lock); 1896 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1897 if (!f->changed) 1898 continue; 1899 1900 if (f->counter != 0) 1901 continue; 1902 f->changed = false; 1903 1904 /* Move the element into temporary del_list */ 1905 list_move_tail(&f->list, &tmp_del_list); 1906 } 1907 1908 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1909 if (!f->changed) 1910 continue; 1911 1912 if (f->counter == 0) 1913 continue; 1914 f->changed = false; 1915 1916 /* Clone MAC filter entry and add into temporary list */ 1917 fclone = i40e_mac_filter_entry_clone(f); 1918 if (!fclone) { 1919 err_cond = true; 1920 break; 1921 } 1922 list_add_tail(&fclone->list, &tmp_add_list); 1923 } 1924 1925 /* if failed to clone MAC filter entry - undo */ 1926 if (err_cond) { 1927 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1928 i40e_undo_add_filter_entries(vsi); 1929 } 1930 spin_unlock_bh(&vsi->mac_filter_list_lock); 1931 1932 if (err_cond) 1933 i40e_cleanup_add_list(&tmp_add_list); 1934 } 1935 1936 /* Now process 'del_list' outside the lock */ 1937 if (!list_empty(&tmp_del_list)) { 1938 filter_list_len = pf->hw.aq.asq_buf_size / 1939 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1940 del_list = kcalloc(filter_list_len, 1941 sizeof(struct i40e_aqc_remove_macvlan_element_data), 1942 GFP_KERNEL); 1943 if (!del_list) { 1944 i40e_cleanup_add_list(&tmp_add_list); 1945 1946 /* Undo VSI's MAC filter entry element updates */ 1947 spin_lock_bh(&vsi->mac_filter_list_lock); 1948 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1949 i40e_undo_add_filter_entries(vsi); 1950 spin_unlock_bh(&vsi->mac_filter_list_lock); 1951 return -ENOMEM; 1952 } 1953 1954 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { 1955 cmd_flags = 0; 1956 1957 /* add to delete list */ 1958 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); 1959 del_list[num_del].vlan_tag = 1960 cpu_to_le16((u16)(f->vlan == 1961 I40E_VLAN_ANY ? 
0 : f->vlan));
1962
1963 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1964 del_list[num_del].flags = cmd_flags;
1965 num_del++;
1966
1967 /* flush a full buffer */
1968 if (num_del == filter_list_len) {
1969 ret = i40e_aq_remove_macvlan(&pf->hw,
1970 vsi->seid, del_list, num_del,
1971 NULL);
1972 aq_err = pf->hw.aq.asq_last_status;
1973 num_del = 0;
1974 memset(del_list, 0, sizeof(*del_list));
1975
1976 if (ret && aq_err != I40E_AQ_RC_ENOENT)
1977 dev_err(&pf->pdev->dev,
1978 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
1979 i40e_stat_str(&pf->hw, ret),
1980 i40e_aq_str(&pf->hw, aq_err));
1981 }
1982 /* Release memory for MAC filter entries which were
1983 * synced up with HW.
1984 */
1985 list_del(&f->list);
1986 kfree(f);
1987 }
1988
1989 if (num_del) {
1990 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1991 del_list, num_del, NULL);
1992 aq_err = pf->hw.aq.asq_last_status;
1993 num_del = 0;
1994
1995 if (ret && aq_err != I40E_AQ_RC_ENOENT)
1996 dev_info(&pf->pdev->dev,
1997 "ignoring delete macvlan error, err %s aq_err %s\n",
1998 i40e_stat_str(&pf->hw, ret),
1999 i40e_aq_str(&pf->hw, aq_err));
2000 }
2001
2002 kfree(del_list);
2003 del_list = NULL;
2004 }
2005
2006 if (!list_empty(&tmp_add_list)) {
2007
2008 /* do all the adds now */
2009 filter_list_len = pf->hw.aq.asq_buf_size /
2010 sizeof(struct i40e_aqc_add_macvlan_element_data);
2011 add_list = kcalloc(filter_list_len,
2012 sizeof(struct i40e_aqc_add_macvlan_element_data),
2013 GFP_KERNEL);
2014 if (!add_list) {
2015 /* Purge element from temporary lists */
2016 i40e_cleanup_add_list(&tmp_add_list);
2017
2018 /* Undo add filter entries from VSI MAC filter list */
2019 spin_lock_bh(&vsi->mac_filter_list_lock);
2020 i40e_undo_add_filter_entries(vsi);
2021 spin_unlock_bh(&vsi->mac_filter_list_lock);
2022 return -ENOMEM;
2023 }
2024
2025 list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
2026
2027 add_happened = true;
2028 cmd_flags = 0;
2029
2030 /* add to add array */
2031 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
2032 add_list[num_add].vlan_tag =
2033 cpu_to_le16(
2034 (u16)(f->vlan == I40E_VLAN_ANY ?
0 : f->vlan)); 2035 add_list[num_add].queue_number = 0; 2036 2037 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 2038 add_list[num_add].flags = cpu_to_le16(cmd_flags); 2039 num_add++; 2040 2041 /* flush a full buffer */ 2042 if (num_add == filter_list_len) { 2043 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2044 add_list, num_add, 2045 NULL); 2046 aq_err = pf->hw.aq.asq_last_status; 2047 num_add = 0; 2048 2049 if (ret) 2050 break; 2051 memset(add_list, 0, sizeof(*add_list)); 2052 } 2053 /* Entries from tmp_add_list were cloned from MAC 2054 * filter list, hence clean those cloned entries 2055 */ 2056 list_del(&f->list); 2057 kfree(f); 2058 } 2059 2060 if (num_add) { 2061 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2062 add_list, num_add, NULL); 2063 aq_err = pf->hw.aq.asq_last_status; 2064 num_add = 0; 2065 } 2066 kfree(add_list); 2067 add_list = NULL; 2068 2069 if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { 2070 dev_info(&pf->pdev->dev, 2071 "add filter failed, err %s aq_err %s\n", 2072 i40e_stat_str(&pf->hw, ret), 2073 i40e_aq_str(&pf->hw, aq_err)); 2074 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 2075 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2076 &vsi->state)) { 2077 promisc_forced_on = true; 2078 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2079 &vsi->state); 2080 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); 2081 } 2082 } 2083 } 2084 2085 /* check for changes in promiscuous modes */ 2086 if (changed_flags & IFF_ALLMULTI) { 2087 bool cur_multipromisc; 2088 2089 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 2090 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 2091 vsi->seid, 2092 cur_multipromisc, 2093 NULL); 2094 if (ret) 2095 dev_info(&pf->pdev->dev, 2096 "set multi promisc failed, err %s aq_err %s\n", 2097 i40e_stat_str(&pf->hw, ret), 2098 i40e_aq_str(&pf->hw, 2099 pf->hw.aq.asq_last_status)); 2100 } 2101 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 2102 bool cur_promisc; 2103 2104 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 2105 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2106 &vsi->state)); 2107 if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { 2108 /* set defport ON for Main VSI instead of true promisc 2109 * this way we will get all unicast/multicast and VLAN 2110 * promisc behavior but will not get VF or VMDq traffic 2111 * replicated on the Main VSI. 
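 * Flipping defport means the switch configuration must be rebuilt,
 * which is why a change in cur_promisc triggers a PF reset request
 * below instead of a set-promiscuous AQ call for this VSI.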
2112 */ 2113 if (pf->cur_promisc != cur_promisc) { 2114 pf->cur_promisc = cur_promisc; 2115 if (grab_rtnl) 2116 i40e_do_reset_safe(pf, 2117 BIT(__I40E_PF_RESET_REQUESTED)); 2118 else 2119 i40e_do_reset(pf, 2120 BIT(__I40E_PF_RESET_REQUESTED)); 2121 } 2122 } else { 2123 ret = i40e_aq_set_vsi_unicast_promiscuous( 2124 &vsi->back->hw, 2125 vsi->seid, 2126 cur_promisc, NULL); 2127 if (ret) 2128 dev_info(&pf->pdev->dev, 2129 "set unicast promisc failed, err %d, aq_err %d\n", 2130 ret, pf->hw.aq.asq_last_status); 2131 ret = i40e_aq_set_vsi_multicast_promiscuous( 2132 &vsi->back->hw, 2133 vsi->seid, 2134 cur_promisc, NULL); 2135 if (ret) 2136 dev_info(&pf->pdev->dev, 2137 "set multicast promisc failed, err %d, aq_err %d\n", 2138 ret, pf->hw.aq.asq_last_status); 2139 } 2140 ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 2141 vsi->seid, 2142 cur_promisc, NULL); 2143 if (ret) 2144 dev_info(&pf->pdev->dev, 2145 "set brdcast promisc failed, err %s, aq_err %s\n", 2146 i40e_stat_str(&pf->hw, ret), 2147 i40e_aq_str(&pf->hw, 2148 pf->hw.aq.asq_last_status)); 2149 } 2150 2151 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 2152 return 0; 2153 } 2154 2155 /** 2156 * i40e_sync_filters_subtask - Sync the VSI filter list with HW 2157 * @pf: board private structure 2158 **/ 2159 static void i40e_sync_filters_subtask(struct i40e_pf *pf) 2160 { 2161 int v; 2162 2163 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) 2164 return; 2165 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 2166 2167 for (v = 0; v < pf->num_alloc_vsi; v++) { 2168 if (pf->vsi[v] && 2169 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) 2170 i40e_sync_vsi_filters(pf->vsi[v], true); 2171 } 2172 } 2173 2174 /** 2175 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit 2176 * @netdev: network interface device structure 2177 * @new_mtu: new value for maximum frame size 2178 * 2179 * Returns 0 on success, negative on failure 2180 **/ 2181 static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 2182 { 2183 struct i40e_netdev_priv *np = netdev_priv(netdev); 2184 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2185 struct i40e_vsi *vsi = np->vsi; 2186 2187 /* MTU < 68 is an error and causes problems on some kernels */ 2188 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) 2189 return -EINVAL; 2190 2191 netdev_info(netdev, "changing MTU from %d to %d\n", 2192 netdev->mtu, new_mtu); 2193 netdev->mtu = new_mtu; 2194 if (netif_running(netdev)) 2195 i40e_vsi_reinit_locked(vsi); 2196 2197 return 0; 2198 } 2199 2200 /** 2201 * i40e_ioctl - Access the hwtstamp interface 2202 * @netdev: network interface device structure 2203 * @ifr: interface request data 2204 * @cmd: ioctl command 2205 **/ 2206 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2207 { 2208 struct i40e_netdev_priv *np = netdev_priv(netdev); 2209 struct i40e_pf *pf = np->vsi->back; 2210 2211 switch (cmd) { 2212 case SIOCGHWTSTAMP: 2213 return i40e_ptp_get_ts_config(pf, ifr); 2214 case SIOCSHWTSTAMP: 2215 return i40e_ptp_set_ts_config(pf, ifr); 2216 default: 2217 return -EOPNOTSUPP; 2218 } 2219 } 2220 2221 /** 2222 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 2223 * @vsi: the vsi being adjusted 2224 **/ 2225 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 2226 { 2227 struct i40e_vsi_context ctxt; 2228 i40e_status ret; 2229 2230 if ((vsi->info.valid_sections & 2231 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2232 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 2233 return; /* already enabled */ 
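	/* Stripping is enabled by selecting the EMOD "strip into the Rx
	 * descriptor" mode together with MODE_ALL, then pushing the
	 * updated VLAN section through an Update VSI AQ command below;
	 * the disable path does the same with EMOD_NOTHING.
	 */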
2234
2235 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2236 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2237 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2238
2239 ctxt.seid = vsi->seid;
2240 ctxt.info = vsi->info;
2241 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2242 if (ret) {
2243 dev_info(&vsi->back->pdev->dev,
2244 "update vlan stripping failed, err %s aq_err %s\n",
2245 i40e_stat_str(&vsi->back->hw, ret),
2246 i40e_aq_str(&vsi->back->hw,
2247 vsi->back->hw.aq.asq_last_status));
2248 }
2249 }
2250
2251 /**
2252 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2253 * @vsi: the vsi being adjusted
2254 **/
2255 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2256 {
2257 struct i40e_vsi_context ctxt;
2258 i40e_status ret;
2259
2260 if ((vsi->info.valid_sections &
2261 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2262 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2263 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2264 return; /* already disabled */
2265
2266 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2267 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2268 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2269
2270 ctxt.seid = vsi->seid;
2271 ctxt.info = vsi->info;
2272 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2273 if (ret) {
2274 dev_info(&vsi->back->pdev->dev,
2275 "update vlan stripping failed, err %s aq_err %s\n",
2276 i40e_stat_str(&vsi->back->hw, ret),
2277 i40e_aq_str(&vsi->back->hw,
2278 vsi->back->hw.aq.asq_last_status));
2279 }
2280 }
2281
2282 /**
2283 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2284 * @netdev: network interface to be adjusted
2285 * @features: netdev features to test if VLAN offload is enabled or not
2286 **/
2287 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2288 {
2289 struct i40e_netdev_priv *np = netdev_priv(netdev);
2290 struct i40e_vsi *vsi = np->vsi;
2291
2292 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2293 i40e_vlan_stripping_enable(vsi);
2294 else
2295 i40e_vlan_stripping_disable(vsi);
2296 }
2297
2298 /**
2299 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2300 * @vsi: the vsi being configured
2301 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2302 **/
2303 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2304 {
2305 struct i40e_mac_filter *f, *add_f;
2306 bool is_netdev, is_vf;
2307
2308 is_vf = (vsi->type == I40E_VSI_SRIOV);
2309 is_netdev = !!(vsi->netdev);
2310
2311 /* Locked once because all functions invoked below iterate the list */
2312 spin_lock_bh(&vsi->mac_filter_list_lock);
2313
2314 if (is_netdev) {
2315 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2316 is_vf, is_netdev);
2317 if (!add_f) {
2318 dev_info(&vsi->back->pdev->dev,
2319 "Could not add vlan filter %d for %pM\n",
2320 vid, vsi->netdev->dev_addr);
2321 spin_unlock_bh(&vsi->mac_filter_list_lock);
2322 return -ENOMEM;
2323 }
2324 }
2325
2326 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2327 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2328 if (!add_f) {
2329 dev_info(&vsi->back->pdev->dev,
2330 "Could not add vlan filter %d for %pM\n",
2331 vid, f->macaddr);
2332 spin_unlock_bh(&vsi->mac_filter_list_lock);
2333 return -ENOMEM;
2334 }
2335 }
2336
2337 /* Now if we add a vlan tag, make sure to check if it is the first
2338 * tag (i.e.
a "tag" -1 does exist) and if so replace the -1 "tag" 2339 * with 0, so we now accept untagged and specified tagged traffic 2340 * (and not any taged and untagged) 2341 */ 2342 if (vid > 0) { 2343 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, 2344 I40E_VLAN_ANY, 2345 is_vf, is_netdev)) { 2346 i40e_del_filter(vsi, vsi->netdev->dev_addr, 2347 I40E_VLAN_ANY, is_vf, is_netdev); 2348 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0, 2349 is_vf, is_netdev); 2350 if (!add_f) { 2351 dev_info(&vsi->back->pdev->dev, 2352 "Could not add filter 0 for %pM\n", 2353 vsi->netdev->dev_addr); 2354 spin_unlock_bh(&vsi->mac_filter_list_lock); 2355 return -ENOMEM; 2356 } 2357 } 2358 } 2359 2360 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ 2361 if (vid > 0 && !vsi->info.pvid) { 2362 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2363 if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2364 is_vf, is_netdev)) 2365 continue; 2366 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2367 is_vf, is_netdev); 2368 add_f = i40e_add_filter(vsi, f->macaddr, 2369 0, is_vf, is_netdev); 2370 if (!add_f) { 2371 dev_info(&vsi->back->pdev->dev, 2372 "Could not add filter 0 for %pM\n", 2373 f->macaddr); 2374 spin_unlock_bh(&vsi->mac_filter_list_lock); 2375 return -ENOMEM; 2376 } 2377 } 2378 } 2379 2380 /* Make sure to release before sync_vsi_filter because that 2381 * function will lock/unlock as necessary 2382 */ 2383 spin_unlock_bh(&vsi->mac_filter_list_lock); 2384 2385 if (test_bit(__I40E_DOWN, &vsi->back->state) || 2386 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 2387 return 0; 2388 2389 return i40e_sync_vsi_filters(vsi, false); 2390 } 2391 2392 /** 2393 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan 2394 * @vsi: the vsi being configured 2395 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 2396 * 2397 * Return: 0 on success or negative otherwise 2398 **/ 2399 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 2400 { 2401 struct net_device *netdev = vsi->netdev; 2402 struct i40e_mac_filter *f, *add_f; 2403 bool is_vf, is_netdev; 2404 int filter_count = 0; 2405 2406 is_vf = (vsi->type == I40E_VSI_SRIOV); 2407 is_netdev = !!(netdev); 2408 2409 /* Locked once because all functions invoked below iterates list */ 2410 spin_lock_bh(&vsi->mac_filter_list_lock); 2411 2412 if (is_netdev) 2413 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); 2414 2415 list_for_each_entry(f, &vsi->mac_filter_list, list) 2416 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); 2417 2418 /* go through all the filters for this VSI and if there is only 2419 * vid == 0 it means there are no other filters, so vid 0 must 2420 * be replaced with -1. 
This signifies that we should from now
2421 * on accept any traffic (with any tag present, or untagged)
2422 */
2423 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2424 if (is_netdev) {
2425 if (f->vlan &&
2426 ether_addr_equal(netdev->dev_addr, f->macaddr))
2427 filter_count++;
2428 }
2429
2430 if (f->vlan)
2431 filter_count++;
2432 }
2433
2434 if (!filter_count && is_netdev) {
2435 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2436 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2437 is_vf, is_netdev);
2438 if (!f) {
2439 dev_info(&vsi->back->pdev->dev,
2440 "Could not add filter %d for %pM\n",
2441 I40E_VLAN_ANY, netdev->dev_addr);
2442 spin_unlock_bh(&vsi->mac_filter_list_lock);
2443 return -ENOMEM;
2444 }
2445 }
2446
2447 if (!filter_count) {
2448 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2449 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2450 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2451 is_vf, is_netdev);
2452 if (!add_f) {
2453 dev_info(&vsi->back->pdev->dev,
2454 "Could not add filter %d for %pM\n",
2455 I40E_VLAN_ANY, f->macaddr);
2456 spin_unlock_bh(&vsi->mac_filter_list_lock);
2457 return -ENOMEM;
2458 }
2459 }
2460 }
2461
2462 /* Make sure to release before i40e_sync_vsi_filters() because that
2463 * function will lock/unlock as necessary
2464 */
2465 spin_unlock_bh(&vsi->mac_filter_list_lock);
2466
2467 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2468 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2469 return 0;
2470
2471 return i40e_sync_vsi_filters(vsi, false);
2472 }
2473
2474 /**
2475 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2476 * @netdev: network interface to be adjusted
2477 * @vid: vlan id to be added
2478 *
2479 * net_device_ops implementation for adding vlan ids
2480 **/
2481 #ifdef I40E_FCOE
2482 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2483 __always_unused __be16 proto, u16 vid)
2484 #else
2485 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2486 __always_unused __be16 proto, u16 vid)
2487 #endif
2488 {
2489 struct i40e_netdev_priv *np = netdev_priv(netdev);
2490 struct i40e_vsi *vsi = np->vsi;
2491 int ret = 0;
2492
2493 if (vid > 4095)
2494 return -EINVAL;
2495
2496 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2497
2498 /* If the network stack called us with vid = 0 then
2499 * it is asking to receive priority tagged packets with
2500 * vlan id 0. Our HW receives them by default when configured
2501 * to receive untagged packets so there is no need to add an
2502 * extra filter for vlan 0 tagged packets.
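 * (The 8021q core typically registers VID 0 on every VLAN-filtering
 * capable device as it comes up, so this is the common path rather
 * than a corner case.)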
2503 */
2504 if (vid)
2505 ret = i40e_vsi_add_vlan(vsi, vid);
2506
2507 if (!ret && (vid < VLAN_N_VID))
2508 set_bit(vid, vsi->active_vlans);
2509
2510 return ret;
2511 }
2512
2513 /**
2514 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2515 * @netdev: network interface to be adjusted
2516 * @vid: vlan id to be removed
2517 *
2518 * net_device_ops implementation for removing vlan ids
2519 **/
2520 #ifdef I40E_FCOE
2521 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2522 __always_unused __be16 proto, u16 vid)
2523 #else
2524 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2525 __always_unused __be16 proto, u16 vid)
2526 #endif
2527 {
2528 struct i40e_netdev_priv *np = netdev_priv(netdev);
2529 struct i40e_vsi *vsi = np->vsi;
2530
2531 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2532
2533 /* return code is ignored as there is nothing a user
2534 * can do about failure to remove and a log message was
2535 * already printed from the other function
2536 */
2537 i40e_vsi_kill_vlan(vsi, vid);
2538
2539 clear_bit(vid, vsi->active_vlans);
2540
2541 return 0;
2542 }
2543
2544 /**
2545 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2546 * @vsi: the vsi being brought back up
2547 **/
2548 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2549 {
2550 u16 vid;
2551
2552 if (!vsi->netdev)
2553 return;
2554
2555 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2556
2557 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2558 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2559 vid);
2560 }
2561
2562 /**
2563 * i40e_vsi_add_pvid - Add pvid for the VSI
2564 * @vsi: the vsi being adjusted
2565 * @vid: the vlan id to set as a PVID
2566 **/
2567 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2568 {
2569 struct i40e_vsi_context ctxt;
2570 i40e_status ret;
2571
2572 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2573 vsi->info.pvid = cpu_to_le16(vid);
2574 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2575 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2576 I40E_AQ_VSI_PVLAN_EMOD_STR;
2577
2578 ctxt.seid = vsi->seid;
2579 ctxt.info = vsi->info;
2580 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2581 if (ret) {
2582 dev_info(&vsi->back->pdev->dev,
2583 "add pvid failed, err %s aq_err %s\n",
2584 i40e_stat_str(&vsi->back->hw, ret),
2585 i40e_aq_str(&vsi->back->hw,
2586 vsi->back->hw.aq.asq_last_status));
2587 return -ENOENT;
2588 }
2589
2590 return 0;
2591 }
2592
2593 /**
2594 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2595 * @vsi: the vsi being adjusted
2596 *
2597 * Just use the vlan_rx_register() service to put it back to normal
2598 **/
2599 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2600 {
2601 i40e_vlan_stripping_disable(vsi);
2602
2603 vsi->info.pvid = 0;
2604 }
2605
2606 /**
2607 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2608 * @vsi: ptr to the VSI
2609 *
2610 * If this function returns with an error, then it's possible one or
2611 * more of the rings is populated (while the rest are not). It is the
2612 * caller's duty to clean those orphaned rings.
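 * (Callers are expected to free all of the VSI's rings on failure,
 * e.g. the way i40e_vsi_open() unwinds, rather than track which
 * individual rings were successfully set up.)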
2613 *
2614 * Return 0 on success, negative on failure
2615 **/
2616 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2617 {
2618 int i, err = 0;
2619
2620 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2621 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2622
2623 return err;
2624 }
2625
2626 /**
2627 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2628 * @vsi: ptr to the VSI
2629 *
2630 * Free VSI's transmit software resources
2631 **/
2632 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2633 {
2634 int i;
2635
2636 if (!vsi->tx_rings)
2637 return;
2638
2639 for (i = 0; i < vsi->num_queue_pairs; i++)
2640 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2641 i40e_free_tx_resources(vsi->tx_rings[i]);
2642 }
2643
2644 /**
2645 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2646 * @vsi: ptr to the VSI
2647 *
2648 * If this function returns with an error, then it's possible one or
2649 * more of the rings is populated (while the rest are not). It is the
2650 * caller's duty to clean those orphaned rings.
2651 *
2652 * Return 0 on success, negative on failure
2653 **/
2654 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2655 {
2656 int i, err = 0;
2657
2658 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2659 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2660 #ifdef I40E_FCOE
2661 i40e_fcoe_setup_ddp_resources(vsi);
2662 #endif
2663 return err;
2664 }
2665
2666 /**
2667 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2668 * @vsi: ptr to the VSI
2669 *
2670 * Free all receive software resources
2671 **/
2672 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2673 {
2674 int i;
2675
2676 if (!vsi->rx_rings)
2677 return;
2678
2679 for (i = 0; i < vsi->num_queue_pairs; i++)
2680 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2681 i40e_free_rx_resources(vsi->rx_rings[i]);
2682 #ifdef I40E_FCOE
2683 i40e_fcoe_free_ddp_resources(vsi);
2684 #endif
2685 }
2686
2687 /**
2688 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2689 * @ring: The Tx ring to configure
2690 *
2691 * This enables/disables XPS for a given Tx descriptor ring
2692 * based on the TCs enabled for the VSI that ring belongs to.
2693 **/
2694 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2695 {
2696 struct i40e_vsi *vsi = ring->vsi;
2697 cpumask_var_t mask;
2698
2699 if (!ring->q_vector || !ring->netdev)
2700 return;
2701
2702 /* In single-TC mode, enable XPS */
2703 if (vsi->tc_config.numtc <= 1) {
2704 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2705 netif_set_xps_queue(ring->netdev,
2706 &ring->q_vector->affinity_mask,
2707 ring->queue_index);
2708 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2709 /* Disable XPS to allow selection based on TC */
2710 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2711 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2712 free_cpumask_var(mask);
2713 }
2714 }
2715
2716 /**
2717 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of its resources
2718 * @ring: The Tx ring to configure
2719 *
2720 * Configure the Tx descriptor ring in the HMC context.
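 * Among other things this programs the ring base address in 128-byte
 * units, the ring length, and (except for the FDIR VSI) a head
 * writeback area placed directly after the descriptors in the same
 * DMA region.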
2721 **/
2722 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2723 {
2724 struct i40e_vsi *vsi = ring->vsi;
2725 u16 pf_q = vsi->base_queue + ring->queue_index;
2726 struct i40e_hw *hw = &vsi->back->hw;
2727 struct i40e_hmc_obj_txq tx_ctx;
2728 i40e_status err = 0;
2729 u32 qtx_ctl = 0;
2730
2731 /* some ATR related tx ring init */
2732 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2733 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2734 ring->atr_count = 0;
2735 } else {
2736 ring->atr_sample_rate = 0;
2737 }
2738
2739 /* configure XPS */
2740 i40e_config_xps_tx_ring(ring);
2741
2742 /* clear the context structure first */
2743 memset(&tx_ctx, 0, sizeof(tx_ctx));
2744
2745 tx_ctx.new_context = 1;
2746 tx_ctx.base = (ring->dma / 128);
2747 tx_ctx.qlen = ring->count;
2748 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2749 I40E_FLAG_FD_ATR_ENABLED));
2750 #ifdef I40E_FCOE
2751 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2752 #endif
2753 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2754 /* FDIR VSI tx ring can still use RS bit and writebacks */
2755 if (vsi->type != I40E_VSI_FDIR)
2756 tx_ctx.head_wb_ena = 1;
2757 tx_ctx.head_wb_addr = ring->dma +
2758 (ring->count * sizeof(struct i40e_tx_desc));
2759
2760 /* As part of VSI creation/update, FW allocates certain
2761 * Tx arbitration queue sets for each TC enabled for
2762 * the VSI. The FW returns the handles to these queue
2763 * sets as part of the response buffer to Add VSI,
2764 * Update VSI, etc. AQ commands. It is expected that
2765 * these queue set handles be associated with the Tx
2766 * queues by the driver as part of the TX queue context
2767 * initialization. This has to be done regardless of
2768 * DCB as by default everything is mapped to TC0.
2769 */
2770 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2771 tx_ctx.rdylist_act = 0;
2772
2773 /* clear the context in the HMC */
2774 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2775 if (err) {
2776 dev_info(&vsi->back->pdev->dev,
2777 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2778 ring->queue_index, pf_q, err);
2779 return -ENOMEM;
2780 }
2781
2782 /* set the context in the HMC */
2783 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2784 if (err) {
2785 dev_info(&vsi->back->pdev->dev,
2786 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2787 ring->queue_index, pf_q, err);
2788 return -ENOMEM;
2789 }
2790
2791 /* Now associate this queue with this PCI function */
2792 if (vsi->type == I40E_VSI_VMDQ2) {
2793 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2794 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2795 I40E_QTX_CTL_VFVM_INDX_MASK;
2796 } else {
2797 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2798 }
2799
2800 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2801 I40E_QTX_CTL_PF_INDX_MASK);
2802 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2803 i40e_flush(hw);
2804
2805 /* cache the tail offset for easier writes later */
2806 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2807
2808 return 0;
2809 }
2810
2811 /**
2812 * i40e_configure_rx_ring - Configure a receive ring context
2813 * @ring: The Rx ring to configure
2814 *
2815 * Configure the Rx descriptor ring in the HMC context.
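 * Buffer lengths are handed to hardware in coarse units via the
 * DBUFF/HBUFF shifts (e.g. rx_ctx.dbuff = rx_buf_len >>
 * I40E_RXQ_CTX_DBUFF_SHIFT), which is why i40e_vsi_configure_rx()
 * aligns the lengths to those granularities first.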
2816 **/ 2817 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2818 { 2819 struct i40e_vsi *vsi = ring->vsi; 2820 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2821 u16 pf_q = vsi->base_queue + ring->queue_index; 2822 struct i40e_hw *hw = &vsi->back->hw; 2823 struct i40e_hmc_obj_rxq rx_ctx; 2824 i40e_status err = 0; 2825 2826 ring->state = 0; 2827 2828 /* clear the context structure first */ 2829 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2830 2831 ring->rx_buf_len = vsi->rx_buf_len; 2832 ring->rx_hdr_len = vsi->rx_hdr_len; 2833 2834 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2835 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2836 2837 rx_ctx.base = (ring->dma / 128); 2838 rx_ctx.qlen = ring->count; 2839 2840 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2841 set_ring_16byte_desc_enabled(ring); 2842 rx_ctx.dsize = 0; 2843 } else { 2844 rx_ctx.dsize = 1; 2845 } 2846 2847 rx_ctx.dtype = vsi->dtype; 2848 if (vsi->dtype) { 2849 set_ring_ps_enabled(ring); 2850 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2851 I40E_RX_SPLIT_IP | 2852 I40E_RX_SPLIT_TCP_UDP | 2853 I40E_RX_SPLIT_SCTP; 2854 } else { 2855 rx_ctx.hsplit_0 = 0; 2856 } 2857 2858 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2859 (chain_len * ring->rx_buf_len)); 2860 if (hw->revision_id == 0) 2861 rx_ctx.lrxqthresh = 0; 2862 else 2863 rx_ctx.lrxqthresh = 2; 2864 rx_ctx.crcstrip = 1; 2865 rx_ctx.l2tsel = 1; 2866 /* this controls whether VLAN is stripped from inner headers */ 2867 rx_ctx.showiv = 0; 2868 #ifdef I40E_FCOE 2869 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2870 #endif 2871 /* set the prefena field to 1 because the manual says to */ 2872 rx_ctx.prefena = 1; 2873 2874 /* clear the context in the HMC */ 2875 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2876 if (err) { 2877 dev_info(&vsi->back->pdev->dev, 2878 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2879 ring->queue_index, pf_q, err); 2880 return -ENOMEM; 2881 } 2882 2883 /* set the context in the HMC */ 2884 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2885 if (err) { 2886 dev_info(&vsi->back->pdev->dev, 2887 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2888 ring->queue_index, pf_q, err); 2889 return -ENOMEM; 2890 } 2891 2892 /* cache tail for quicker writes, and clear the reg before use */ 2893 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2894 writel(0, ring->tail); 2895 2896 if (ring_is_ps_enabled(ring)) { 2897 i40e_alloc_rx_headers(ring); 2898 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); 2899 } else { 2900 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); 2901 } 2902 2903 return 0; 2904 } 2905 2906 /** 2907 * i40e_vsi_configure_tx - Configure the VSI for Tx 2908 * @vsi: VSI structure describing this set of rings and resources 2909 * 2910 * Configure the Tx VSI for operation. 2911 **/ 2912 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2913 { 2914 int err = 0; 2915 u16 i; 2916 2917 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2918 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2919 2920 return err; 2921 } 2922 2923 /** 2924 * i40e_vsi_configure_rx - Configure the VSI for Rx 2925 * @vsi: the VSI being configured 2926 * 2927 * Configure the Rx VSI for operation. 
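 * For example, a jumbo MTU of 9000 yields max_frame = 9000 +
 * ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 9022 bytes,
 * while MTUs at or below ETH_DATA_LEN keep the default 2048-byte
 * buffer budget.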
2928 **/ 2929 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2930 { 2931 int err = 0; 2932 u16 i; 2933 2934 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2935 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2936 + ETH_FCS_LEN + VLAN_HLEN; 2937 else 2938 vsi->max_frame = I40E_RXBUFFER_2048; 2939 2940 /* figure out correct receive buffer length */ 2941 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2942 I40E_FLAG_RX_PS_ENABLED)) { 2943 case I40E_FLAG_RX_1BUF_ENABLED: 2944 vsi->rx_hdr_len = 0; 2945 vsi->rx_buf_len = vsi->max_frame; 2946 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2947 break; 2948 case I40E_FLAG_RX_PS_ENABLED: 2949 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2950 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2951 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2952 break; 2953 default: 2954 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2955 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2956 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2957 break; 2958 } 2959 2960 #ifdef I40E_FCOE 2961 /* setup rx buffer for FCoE */ 2962 if ((vsi->type == I40E_VSI_FCOE) && 2963 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 2964 vsi->rx_hdr_len = 0; 2965 vsi->rx_buf_len = I40E_RXBUFFER_3072; 2966 vsi->max_frame = I40E_RXBUFFER_3072; 2967 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2968 } 2969 2970 #endif /* I40E_FCOE */ 2971 /* round up for the chip's needs */ 2972 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2973 BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); 2974 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2975 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); 2976 2977 /* set up individual rings */ 2978 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2979 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2980 2981 return err; 2982 } 2983 2984 /** 2985 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2986 * @vsi: ptr to the VSI 2987 **/ 2988 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2989 { 2990 struct i40e_ring *tx_ring, *rx_ring; 2991 u16 qoffset, qcount; 2992 int i, n; 2993 2994 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 2995 /* Reset the TC information */ 2996 for (i = 0; i < vsi->num_queue_pairs; i++) { 2997 rx_ring = vsi->rx_rings[i]; 2998 tx_ring = vsi->tx_rings[i]; 2999 rx_ring->dcb_tc = 0; 3000 tx_ring->dcb_tc = 0; 3001 } 3002 } 3003 3004 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 3005 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) 3006 continue; 3007 3008 qoffset = vsi->tc_config.tc_info[n].qoffset; 3009 qcount = vsi->tc_config.tc_info[n].qcount; 3010 for (i = qoffset; i < (qoffset + qcount); i++) { 3011 rx_ring = vsi->rx_rings[i]; 3012 tx_ring = vsi->tx_rings[i]; 3013 rx_ring->dcb_tc = n; 3014 tx_ring->dcb_tc = n; 3015 } 3016 } 3017 } 3018 3019 /** 3020 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 3021 * @vsi: ptr to the VSI 3022 **/ 3023 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 3024 { 3025 if (vsi->netdev) 3026 i40e_set_rx_mode(vsi->netdev); 3027 } 3028 3029 /** 3030 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 3031 * @vsi: Pointer to the targeted VSI 3032 * 3033 * This function replays the hlist on the hw where all the SB Flow Director 3034 * filters were saved. 
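 * Each saved filter is re-added through i40e_add_del_fdir(), so the
 * hardware table is rebuilt entirely from the driver's hlist.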
3035 **/
3036 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3037 {
3038 struct i40e_fdir_filter *filter;
3039 struct i40e_pf *pf = vsi->back;
3040 struct hlist_node *node;
3041
3042 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3043 return;
3044
3045 hlist_for_each_entry_safe(filter, node,
3046 &pf->fdir_filter_list, fdir_node) {
3047 i40e_add_del_fdir(vsi, filter, true);
3048 }
3049 }
3050
3051 /**
3052 * i40e_vsi_configure - Set up the VSI for action
3053 * @vsi: the VSI being configured
3054 **/
3055 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3056 {
3057 int err;
3058
3059 i40e_set_vsi_rx_mode(vsi);
3060 i40e_restore_vlan(vsi);
3061 i40e_vsi_config_dcb_rings(vsi);
3062 err = i40e_vsi_configure_tx(vsi);
3063 if (!err)
3064 err = i40e_vsi_configure_rx(vsi);
3065
3066 return err;
3067 }
3068
3069 /**
3070 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3071 * @vsi: the VSI being configured
3072 **/
3073 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3074 {
3075 struct i40e_pf *pf = vsi->back;
3076 struct i40e_hw *hw = &pf->hw;
3077 u16 vector;
3078 int i, q;
3079 u32 qp;
3080
3081 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3082 * and PFINT_LNKLSTn registers, e.g.:
3083 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3084 */
3085 qp = vsi->base_queue;
3086 vector = vsi->base_vector;
3087 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3088 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3089
3090 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3091 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
3092 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3093 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3094 q_vector->rx.itr);
3095 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
3096 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3097 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3098 q_vector->tx.itr);
3099 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3100 INTRL_USEC_TO_REG(vsi->int_rate_limit));
3101
3102 /* Linked list for the queue pairs assigned to this vector */
3103 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3104 for (q = 0; q < q_vector->num_ringpairs; q++) {
3105 u32 val;
3106
3107 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3108 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3109 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3110 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3111 (I40E_QUEUE_TYPE_TX
3112 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3113
3114 wr32(hw, I40E_QINT_RQCTL(qp), val);
3115
3116 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3117 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3118 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3119 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3120 (I40E_QUEUE_TYPE_RX
3121 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3122
3123 /* Terminate the linked list */
3124 if (q == (q_vector->num_ringpairs - 1))
3125 val |= (I40E_QUEUE_END_OF_LIST
3126 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3127
3128 wr32(hw, I40E_QINT_TQCTL(qp), val);
3129 qp++;
3130 }
3131 }
3132
3133 i40e_flush(hw);
3134 }
3135
3136 /**
3137 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3138 * @pf: board private structure
3139 **/
3140 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3141 {
3142 struct i40e_hw *hw = &pf->hw;
3143 u32 val;
3144
3145 /* clear things first */
3146 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3147 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3148
3149 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3150 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
| 3151 I40E_PFINT_ICR0_ENA_GRST_MASK | 3152 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 3153 I40E_PFINT_ICR0_ENA_GPIO_MASK | 3154 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 3155 I40E_PFINT_ICR0_ENA_VFLR_MASK | 3156 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3157 3158 if (pf->flags & I40E_FLAG_IWARP_ENABLED) 3159 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3160 3161 if (pf->flags & I40E_FLAG_PTP) 3162 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3163 3164 wr32(hw, I40E_PFINT_ICR0_ENA, val); 3165 3166 /* SW_ITR_IDX = 0, but don't change INTENA */ 3167 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 3168 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 3169 3170 /* OTHER_ITR_IDX = 0 */ 3171 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 3172 } 3173 3174 /** 3175 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 3176 * @vsi: the VSI being configured 3177 **/ 3178 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 3179 { 3180 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3181 struct i40e_pf *pf = vsi->back; 3182 struct i40e_hw *hw = &pf->hw; 3183 u32 val; 3184 3185 /* set the ITR configuration */ 3186 q_vector->itr_countdown = ITR_COUNTDOWN_START; 3187 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 3188 q_vector->rx.latency_range = I40E_LOW_LATENCY; 3189 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 3190 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 3191 q_vector->tx.latency_range = I40E_LOW_LATENCY; 3192 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 3193 3194 i40e_enable_misc_int_causes(pf); 3195 3196 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 3197 wr32(hw, I40E_PFINT_LNKLST0, 0); 3198 3199 /* Associate the queue pair to the vector and enable the queue int */ 3200 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3201 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3202 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3203 3204 wr32(hw, I40E_QINT_RQCTL(0), val); 3205 3206 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3207 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3208 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3209 3210 wr32(hw, I40E_QINT_TQCTL(0), val); 3211 i40e_flush(hw); 3212 } 3213 3214 /** 3215 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 3216 * @pf: board private structure 3217 **/ 3218 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 3219 { 3220 struct i40e_hw *hw = &pf->hw; 3221 3222 wr32(hw, I40E_PFINT_DYN_CTL0, 3223 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3224 i40e_flush(hw); 3225 } 3226 3227 /** 3228 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 3229 * @pf: board private structure 3230 **/ 3231 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 3232 { 3233 struct i40e_hw *hw = &pf->hw; 3234 u32 val; 3235 3236 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 3237 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 3238 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 3239 3240 wr32(hw, I40E_PFINT_DYN_CTL0, val); 3241 i40e_flush(hw); 3242 } 3243 3244 /** 3245 * i40e_irq_dynamic_disable - Disable default interrupt generation settings 3246 * @vsi: pointer to a vsi 3247 * @vector: disable a particular Hw Interrupt vector 3248 **/ 3249 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) 3250 { 3251 struct i40e_pf *pf = vsi->back; 3252 struct i40e_hw *hw = &pf->hw; 3253 u32 val; 3254 3255 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 3256 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 3257 i40e_flush(hw); 3258 } 3259 3260 
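/* Illustrative sketch (not compiled in): the ICR0 helpers above are
 * intended to be used as a pair around work that should not race with
 * the misc interrupt. The function below is a hypothetical usage
 * example, not part of the driver.
 */
#if 0
static void example_quiesce_icr0(struct i40e_pf *pf)
{
	i40e_irq_dynamic_disable_icr0(pf);	/* mask default ICR0 generation */

	/* ... do work that must not race with the misc vector ... */

	i40e_irq_dynamic_enable_icr0(pf);	/* re-arm ICR0 */
}
#endif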
/**
3261 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3262 * @irq: interrupt number
3263 * @data: pointer to a q_vector
3264 **/
3265 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3266 {
3267 struct i40e_q_vector *q_vector = data;
3268
3269 if (!q_vector->tx.ring && !q_vector->rx.ring)
3270 return IRQ_HANDLED;
3271
3272 napi_schedule_irqoff(&q_vector->napi);
3273
3274 return IRQ_HANDLED;
3275 }
3276
3277 /**
3278 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3279 * @vsi: the VSI being configured
3280 * @basename: name for the vector
3281 *
3282 * Allocates MSI-X vectors and requests interrupts from the kernel.
3283 **/
3284 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3285 {
3286 int q_vectors = vsi->num_q_vectors;
3287 struct i40e_pf *pf = vsi->back;
3288 int base = vsi->base_vector;
3289 int rx_int_idx = 0;
3290 int tx_int_idx = 0;
3291 int vector, err;
3292
3293 for (vector = 0; vector < q_vectors; vector++) {
3294 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3295
3296 if (q_vector->tx.ring && q_vector->rx.ring) {
3297 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3298 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3299 tx_int_idx++;
3300 } else if (q_vector->rx.ring) {
3301 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3302 "%s-%s-%d", basename, "rx", rx_int_idx++);
3303 } else if (q_vector->tx.ring) {
3304 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3305 "%s-%s-%d", basename, "tx", tx_int_idx++);
3306 } else {
3307 /* skip this unused q_vector */
3308 continue;
3309 }
3310 err = request_irq(pf->msix_entries[base + vector].vector,
3311 vsi->irq_handler,
3312 0,
3313 q_vector->name,
3314 q_vector);
3315 if (err) {
3316 dev_info(&pf->pdev->dev,
3317 "MSIX request_irq failed, error: %d\n", err);
3318 goto free_queue_irqs;
3319 }
3320 /* assign the mask for this irq */
3321 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3322 &q_vector->affinity_mask);
3323 }
3324
3325 vsi->irqs_ready = true;
3326 return 0;
3327
3328 free_queue_irqs:
3329 while (vector) {
3330 vector--;
3331 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3332 NULL);
3333 /* free with the same dev_id that request_irq() was given */
3334 free_irq(pf->msix_entries[base + vector].vector,
3335 vsi->q_vectors[vector]);
3336 }
3337 return err;
3338 }
3339
/**
3340 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3341 * @vsi: the VSI being un-configured
3342 **/
3343 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3344 {
3345 struct i40e_pf *pf = vsi->back;
3346 struct i40e_hw *hw = &pf->hw;
3347 int base = vsi->base_vector;
3348 int i;
3349
3350 for (i = 0; i < vsi->num_queue_pairs; i++) {
3351 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3352 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3353 }
3354
3355 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3356 for (i = vsi->base_vector;
3357 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3358 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3359
3360 i40e_flush(hw);
3361 for (i = 0; i < vsi->num_q_vectors; i++)
3362 synchronize_irq(pf->msix_entries[i + base].vector);
3363 } else {
3364 /* Legacy and MSI mode - this stops all interrupt handling */
3365 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3366 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3367 i40e_flush(hw);
3368 synchronize_irq(pf->pdev->irq);
3369 }
3370 }
3371
3372 /**
3373 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3374 * @vsi: the VSI being configured
3375 **/
3376 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3377
{ 3378 struct i40e_pf *pf = vsi->back; 3379 int i; 3380 3381 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3382 for (i = 0; i < vsi->num_q_vectors; i++) 3383 i40e_irq_dynamic_enable(vsi, i); 3384 } else { 3385 i40e_irq_dynamic_enable_icr0(pf); 3386 } 3387 3388 i40e_flush(&pf->hw); 3389 return 0; 3390 } 3391 3392 /** 3393 * i40e_stop_misc_vector - Stop the vector that handles non-queue events 3394 * @pf: board private structure 3395 **/ 3396 static void i40e_stop_misc_vector(struct i40e_pf *pf) 3397 { 3398 /* Disable ICR 0 */ 3399 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 3400 i40e_flush(&pf->hw); 3401 } 3402 3403 /** 3404 * i40e_intr - MSI/Legacy and non-queue interrupt handler 3405 * @irq: interrupt number 3406 * @data: pointer to a q_vector 3407 * 3408 * This is the handler used for all MSI/Legacy interrupts, and deals 3409 * with both queue and non-queue interrupts. This is also used in 3410 * MSIX mode to handle the non-queue interrupts. 3411 **/ 3412 static irqreturn_t i40e_intr(int irq, void *data) 3413 { 3414 struct i40e_pf *pf = (struct i40e_pf *)data; 3415 struct i40e_hw *hw = &pf->hw; 3416 irqreturn_t ret = IRQ_NONE; 3417 u32 icr0, icr0_remaining; 3418 u32 val, ena_mask; 3419 3420 icr0 = rd32(hw, I40E_PFINT_ICR0); 3421 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 3422 3423 /* if sharing a legacy IRQ, we might get called w/o an intr pending */ 3424 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 3425 goto enable_intr; 3426 3427 /* if interrupt but no bits showing, must be SWINT */ 3428 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || 3429 (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) 3430 pf->sw_int_count++; 3431 3432 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 3433 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { 3434 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3435 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3436 dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n"); 3437 } 3438 3439 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 3440 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 3441 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 3442 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3443 3444 /* temporarily disable queue cause for NAPI processing */ 3445 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); 3446 3447 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; 3448 wr32(hw, I40E_QINT_RQCTL(0), qval); 3449 3450 qval = rd32(hw, I40E_QINT_TQCTL(0)); 3451 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 3452 wr32(hw, I40E_QINT_TQCTL(0), qval); 3453 3454 if (!test_bit(__I40E_DOWN, &pf->state)) 3455 napi_schedule_irqoff(&q_vector->napi); 3456 } 3457 3458 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 3459 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3460 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 3461 } 3462 3463 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 3464 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 3465 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 3466 } 3467 3468 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 3469 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 3470 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 3471 } 3472 3473 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 3474 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 3475 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 3476 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 3477 val = rd32(hw, I40E_GLGEN_RSTAT); 3478 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3479 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3480 if (val == I40E_RESET_CORER) { 3481 pf->corer_count++; 3482 } else if (val == I40E_RESET_GLOBR) { 3483 
pf->globr_count++; 3484 } else if (val == I40E_RESET_EMPR) { 3485 pf->empr_count++; 3486 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); 3487 } 3488 } 3489 3490 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3491 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3492 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3493 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", 3494 rd32(hw, I40E_PFHMC_ERRORINFO), 3495 rd32(hw, I40E_PFHMC_ERRORDATA)); 3496 } 3497 3498 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3499 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 3500 3501 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 3502 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3503 i40e_ptp_tx_hwtstamp(pf); 3504 } 3505 } 3506 3507 /* If a critical error is pending we have no choice but to reset the 3508 * device. 3509 * Report and mask out any remaining unexpected interrupts. 3510 */ 3511 icr0_remaining = icr0 & ena_mask; 3512 if (icr0_remaining) { 3513 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 3514 icr0_remaining); 3515 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 3516 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 3517 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 3518 dev_info(&pf->pdev->dev, "device will be reset\n"); 3519 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 3520 i40e_service_event_schedule(pf); 3521 } 3522 ena_mask &= ~icr0_remaining; 3523 } 3524 ret = IRQ_HANDLED; 3525 3526 enable_intr: 3527 /* re-enable interrupt causes */ 3528 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3529 if (!test_bit(__I40E_DOWN, &pf->state)) { 3530 i40e_service_event_schedule(pf); 3531 i40e_irq_dynamic_enable_icr0(pf); 3532 } 3533 3534 return ret; 3535 } 3536 3537 /** 3538 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 3539 * @tx_ring: tx ring to clean 3540 * @budget: how many cleans we're allowed 3541 * 3542 * Returns true if there's any budget left (e.g. 
the clean is finished) 3543 **/ 3544 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3545 { 3546 struct i40e_vsi *vsi = tx_ring->vsi; 3547 u16 i = tx_ring->next_to_clean; 3548 struct i40e_tx_buffer *tx_buf; 3549 struct i40e_tx_desc *tx_desc; 3550 3551 tx_buf = &tx_ring->tx_bi[i]; 3552 tx_desc = I40E_TX_DESC(tx_ring, i); 3553 i -= tx_ring->count; 3554 3555 do { 3556 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3557 3558 /* if next_to_watch is not set then there is no work pending */ 3559 if (!eop_desc) 3560 break; 3561 3562 /* prevent any other reads prior to eop_desc */ 3563 read_barrier_depends(); 3564 3565 /* if the descriptor isn't done, no work yet to do */ 3566 if (!(eop_desc->cmd_type_offset_bsz & 3567 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3568 break; 3569 3570 /* clear next_to_watch to prevent false hangs */ 3571 tx_buf->next_to_watch = NULL; 3572 3573 tx_desc->buffer_addr = 0; 3574 tx_desc->cmd_type_offset_bsz = 0; 3575 /* move past filter desc */ 3576 tx_buf++; 3577 tx_desc++; 3578 i++; 3579 if (unlikely(!i)) { 3580 i -= tx_ring->count; 3581 tx_buf = tx_ring->tx_bi; 3582 tx_desc = I40E_TX_DESC(tx_ring, 0); 3583 } 3584 /* unmap skb header data */ 3585 dma_unmap_single(tx_ring->dev, 3586 dma_unmap_addr(tx_buf, dma), 3587 dma_unmap_len(tx_buf, len), 3588 DMA_TO_DEVICE); 3589 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3590 kfree(tx_buf->raw_buf); 3591 3592 tx_buf->raw_buf = NULL; 3593 tx_buf->tx_flags = 0; 3594 tx_buf->next_to_watch = NULL; 3595 dma_unmap_len_set(tx_buf, len, 0); 3596 tx_desc->buffer_addr = 0; 3597 tx_desc->cmd_type_offset_bsz = 0; 3598 3599 /* move us past the eop_desc for start of next FD desc */ 3600 tx_buf++; 3601 tx_desc++; 3602 i++; 3603 if (unlikely(!i)) { 3604 i -= tx_ring->count; 3605 tx_buf = tx_ring->tx_bi; 3606 tx_desc = I40E_TX_DESC(tx_ring, 0); 3607 } 3608 3609 /* update budget accounting */ 3610 budget--; 3611 } while (likely(budget)); 3612 3613 i += tx_ring->count; 3614 tx_ring->next_to_clean = i; 3615 3616 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) 3617 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); 3618 3619 return budget > 0; 3620 } 3621 3622 /** 3623 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3624 * @irq: interrupt number 3625 * @data: pointer to a q_vector 3626 **/ 3627 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3628 { 3629 struct i40e_q_vector *q_vector = data; 3630 struct i40e_vsi *vsi; 3631 3632 if (!q_vector->tx.ring) 3633 return IRQ_HANDLED; 3634 3635 vsi = q_vector->tx.ring->vsi; 3636 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3637 3638 return IRQ_HANDLED; 3639 } 3640 3641 /** 3642 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3643 * @vsi: the VSI being configured 3644 * @v_idx: vector index 3645 * @qp_idx: queue pair index 3646 **/ 3647 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3648 { 3649 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3650 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3651 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3652 3653 tx_ring->q_vector = q_vector; 3654 tx_ring->next = q_vector->tx.ring; 3655 q_vector->tx.ring = tx_ring; 3656 q_vector->tx.count++; 3657 3658 rx_ring->q_vector = q_vector; 3659 rx_ring->next = q_vector->rx.ring; 3660 q_vector->rx.ring = rx_ring; 3661 q_vector->rx.count++; 3662 } 3663 3664 /** 3665 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3666 * @vsi: the VSI being configured 3667 * 3668 * This 
function maps descriptor rings to the queue-specific vectors
3669 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3670 * one vector per queue pair, but on a constrained vector budget, we
3671 * group the queue pairs as "efficiently" as possible.
3672 **/
3673 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3674 {
3675 int qp_remaining = vsi->num_queue_pairs;
3676 int q_vectors = vsi->num_q_vectors;
3677 int num_ringpairs;
3678 int v_start = 0;
3679 int qp_idx = 0;
3680
3681 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3682 * group them so there are multiple queues per vector.
3683 * It is also important to go through all the vectors available to be
3684 * sure that if we don't use all the vectors, that the remaining vectors
3685 * are cleared. This is especially important when decreasing the
3686 * number of queues in use.
3687 */
3688 for (; v_start < q_vectors; v_start++) {
3689 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3690
3691 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3692
3693 q_vector->num_ringpairs = num_ringpairs;
3694
3695 q_vector->rx.count = 0;
3696 q_vector->tx.count = 0;
3697 q_vector->rx.ring = NULL;
3698 q_vector->tx.ring = NULL;
3699
3700 while (num_ringpairs--) {
3701 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3702 qp_idx++;
3703 qp_remaining--;
3704 }
3705 }
3706 }
3707
3708 /**
3709 * i40e_vsi_request_irq - Request IRQ from the OS
3710 * @vsi: the VSI being configured
3711 * @basename: name for the vector
3712 **/
3713 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3714 {
3715 struct i40e_pf *pf = vsi->back;
3716 int err;
3717
3718 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3719 err = i40e_vsi_request_irq_msix(vsi, basename);
3720 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3721 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3722 pf->int_name, pf);
3723 else
3724 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3725 pf->int_name, pf);
3726
3727 if (err)
3728 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3729
3730 return err;
3731 }
3732
3733 #ifdef CONFIG_NET_POLL_CONTROLLER
3734 /**
3735 * i40e_netpoll - A polling 'interrupt' handler
3736 * @netdev: network interface device structure
3737 *
3738 * This is used by netconsole to send skbs without having to re-enable
3739 * interrupts. It's not called while the normal interrupt routine is executing.
3740 **/
3741 #ifdef I40E_FCOE
3742 void i40e_netpoll(struct net_device *netdev)
3743 #else
3744 static void i40e_netpoll(struct net_device *netdev)
3745 #endif
3746 {
3747 struct i40e_netdev_priv *np = netdev_priv(netdev);
3748 struct i40e_vsi *vsi = np->vsi;
3749 struct i40e_pf *pf = vsi->back;
3750 int i;
3751
3752 /* if interface is down do nothing */
3753 if (test_bit(__I40E_DOWN, &vsi->state))
3754 return;
3755
3756 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3757 for (i = 0; i < vsi->num_q_vectors; i++)
3758 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3759 } else {
3760 i40e_intr(pf->pdev->irq, netdev);
3761 }
3762 }
3763 #endif
3764
3765 /**
3766 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3767 * @pf: the PF being configured
3768 * @pf_q: the PF queue
3769 * @enable: enable or disable state of the queue
3770 *
3771 * This routine will wait for the given Tx queue of the PF to reach the
3772 * enabled or disabled state.
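 * The implementation polls the QENA_STAT bit of QTX_ENA, sleeping
 * 10-20 usecs between reads for up to I40E_QUEUE_WAIT_RETRY_LIMIT
 * attempts.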
3773 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3774 * multiple retries; else will return 0 in case of success. 3775 **/ 3776 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3777 { 3778 int i; 3779 u32 tx_reg; 3780 3781 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3782 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3783 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3784 break; 3785 3786 usleep_range(10, 20); 3787 } 3788 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3789 return -ETIMEDOUT; 3790 3791 return 0; 3792 } 3793 3794 /** 3795 * i40e_vsi_control_tx - Start or stop a VSI's rings 3796 * @vsi: the VSI being configured 3797 * @enable: start or stop the rings 3798 **/ 3799 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3800 { 3801 struct i40e_pf *pf = vsi->back; 3802 struct i40e_hw *hw = &pf->hw; 3803 int i, j, pf_q, ret = 0; 3804 u32 tx_reg; 3805 3806 pf_q = vsi->base_queue; 3807 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3808 3809 /* warn the TX unit of coming changes */ 3810 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 3811 if (!enable) 3812 usleep_range(10, 20); 3813 3814 for (j = 0; j < 50; j++) { 3815 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3816 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3817 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3818 break; 3819 usleep_range(1000, 2000); 3820 } 3821 /* Skip if the queue is already in the requested state */ 3822 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3823 continue; 3824 3825 /* turn on/off the queue */ 3826 if (enable) { 3827 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3828 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3829 } else { 3830 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3831 } 3832 3833 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3834 /* No waiting for the Tx queue to disable */ 3835 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) 3836 continue; 3837 3838 /* wait for the change to finish */ 3839 ret = i40e_pf_txq_wait(pf, pf_q, enable); 3840 if (ret) { 3841 dev_info(&pf->pdev->dev, 3842 "VSI seid %d Tx ring %d %sable timeout\n", 3843 vsi->seid, pf_q, (enable ? "en" : "dis")); 3844 break; 3845 } 3846 } 3847 3848 if (hw->revision_id == 0) 3849 mdelay(50); 3850 return ret; 3851 } 3852 3853 /** 3854 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 3855 * @pf: the PF being configured 3856 * @pf_q: the PF queue 3857 * @enable: enable or disable state of the queue 3858 * 3859 * This routine will wait for the given Rx queue of the PF to reach the 3860 * enabled or disabled state. 3861 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3862 * multiple retries; else will return 0 in case of success. 
3863 **/
3864 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3865 {
3866 int i;
3867 u32 rx_reg;
3868
3869 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3870 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3871 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3872 break;
3873
3874 usleep_range(10, 20);
3875 }
3876 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3877 return -ETIMEDOUT;
3878
3879 return 0;
3880 }
3881
3882 /**
3883 * i40e_vsi_control_rx - Start or stop a VSI's rings
3884 * @vsi: the VSI being configured
3885 * @enable: start or stop the rings
3886 **/
3887 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3888 {
3889 struct i40e_pf *pf = vsi->back;
3890 struct i40e_hw *hw = &pf->hw;
3891 int i, j, pf_q, ret = 0;
3892 u32 rx_reg;
3893
3894 pf_q = vsi->base_queue;
3895 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3896 for (j = 0; j < 50; j++) {
3897 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3898 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3899 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3900 break;
3901 usleep_range(1000, 2000);
3902 }
3903
3904 /* Skip if the queue is already in the requested state */
3905 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3906 continue;
3907
3908 /* turn on/off the queue */
3909 if (enable)
3910 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3911 else
3912 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3913 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3914
3915 /* wait for the change to finish */
3916 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3917 if (ret) {
3918 dev_info(&pf->pdev->dev,
3919 "VSI seid %d Rx ring %d %sable timeout\n",
3920 vsi->seid, pf_q, (enable ? "en" : "dis"));
3921 break;
3922 }
3923 }
3924
3925 return ret;
3926 }
3927
3928 /**
3929 * i40e_vsi_control_rings - Start or stop a VSI's rings
3930 * @vsi: the VSI being configured
3931 * @request: true to start the rings, false to stop them
3932 **/
3933 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3934 {
3935 int ret = 0;
3936
3937 /* do rx first for enable and last for disable */
3938 if (request) {
3939 ret = i40e_vsi_control_rx(vsi, request);
3940 if (ret)
3941 return ret;
3942 ret = i40e_vsi_control_tx(vsi, request);
3943 } else {
3944 /* Ignore return value, we need to shutdown whatever we can */
3945 i40e_vsi_control_tx(vsi, request);
3946 i40e_vsi_control_rx(vsi, request);
3947 }
3948
3949 return ret;
3950 }
3951
3952 /**
3953 * i40e_vsi_free_irq - Free the irq association with the OS
3954 * @vsi: the VSI being configured
3955 **/
3956 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3957 {
3958 struct i40e_pf *pf = vsi->back;
3959 struct i40e_hw *hw = &pf->hw;
3960 int base = vsi->base_vector;
3961 u32 val, qp;
3962 int i;
3963
3964 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3965 if (!vsi->q_vectors)
3966 return;
3967
3968 if (!vsi->irqs_ready)
3969 return;
3970
3971 vsi->irqs_ready = false;
3972 for (i = 0; i < vsi->num_q_vectors; i++) {
3973 u16 vector = i + base;
3974
3975 /* free only the irqs that were actually requested */
3976 if (!vsi->q_vectors[i] ||
3977 !vsi->q_vectors[i]->num_ringpairs)
3978 continue;
3979
3980 /* clear the affinity_mask in the IRQ descriptor */
3981 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3982 NULL);
3983 free_irq(pf->msix_entries[vector].vector,
3984 vsi->q_vectors[i]);
3985
3986 /* Tear down the interrupt queue link list
3987 *
3988 * We know that they come in pairs and always
3989 * the Rx first, then the Tx.
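 * (In the walk below, the next element of the list is read from the
 * Tx queue's QINT_TQCTL NEXTQ_INDX field, one queue pair per
 * iteration.)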
To clear the 3990 * link list, stick the EOL value into the 3991 * next_q field of the registers. 3992 */ 3993 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3994 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3995 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3996 val |= I40E_QUEUE_END_OF_LIST 3997 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3998 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3999 4000 while (qp != I40E_QUEUE_END_OF_LIST) { 4001 u32 next; 4002 4003 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4004 4005 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4006 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4007 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4008 I40E_QINT_RQCTL_INTEVENT_MASK); 4009 4010 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4011 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4012 4013 wr32(hw, I40E_QINT_RQCTL(qp), val); 4014 4015 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4016 4017 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 4018 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 4019 4020 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4021 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4022 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4023 I40E_QINT_TQCTL_INTEVENT_MASK); 4024 4025 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4026 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4027 4028 wr32(hw, I40E_QINT_TQCTL(qp), val); 4029 qp = next; 4030 } 4031 } 4032 } else { 4033 free_irq(pf->pdev->irq, pf); 4034 4035 val = rd32(hw, I40E_PFINT_LNKLST0); 4036 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4037 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4038 val |= I40E_QUEUE_END_OF_LIST 4039 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 4040 wr32(hw, I40E_PFINT_LNKLST0, val); 4041 4042 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4043 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4044 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4045 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4046 I40E_QINT_RQCTL_INTEVENT_MASK); 4047 4048 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4049 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4050 4051 wr32(hw, I40E_QINT_RQCTL(qp), val); 4052 4053 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4054 4055 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4056 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4057 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4058 I40E_QINT_TQCTL_INTEVENT_MASK); 4059 4060 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4061 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4062 4063 wr32(hw, I40E_QINT_TQCTL(qp), val); 4064 } 4065 } 4066 4067 /** 4068 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 4069 * @vsi: the VSI being configured 4070 * @v_idx: Index of vector to be freed 4071 * 4072 * This function frees the memory allocated to the q_vector. In addition if 4073 * NAPI is enabled it will delete any references to the NAPI struct prior 4074 * to freeing the q_vector. 4075 **/ 4076 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 4077 { 4078 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 4079 struct i40e_ring *ring; 4080 4081 if (!q_vector) 4082 return; 4083 4084 /* disassociate q_vector from rings */ 4085 i40e_for_each_ring(ring, q_vector->tx) 4086 ring->q_vector = NULL; 4087 4088 i40e_for_each_ring(ring, q_vector->rx) 4089 ring->q_vector = NULL; 4090 4091 /* only VSI w/ an associated netdev is set up w/ NAPI */ 4092 if (vsi->netdev) 4093 netif_napi_del(&q_vector->napi); 4094 4095 vsi->q_vectors[v_idx] = NULL; 4096 4097 kfree_rcu(q_vector, rcu); 4098 } 4099 4100 /** 4101 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 4102 * @vsi: the VSI being un-configured 4103 * 4104 * This frees the memory allocated to the q_vectors and 4105 * deletes references to the NAPI struct. 
4106 **/ 4107 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 4108 { 4109 int v_idx; 4110 4111 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 4112 i40e_free_q_vector(vsi, v_idx); 4113 } 4114 4115 /** 4116 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 4117 * @pf: board private structure 4118 **/ 4119 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 4120 { 4121 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 4122 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4123 pci_disable_msix(pf->pdev); 4124 kfree(pf->msix_entries); 4125 pf->msix_entries = NULL; 4126 kfree(pf->irq_pile); 4127 pf->irq_pile = NULL; 4128 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 4129 pci_disable_msi(pf->pdev); 4130 } 4131 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 4132 } 4133 4134 /** 4135 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 4136 * @pf: board private structure 4137 * 4138 * We go through and clear interrupt specific resources and reset the structure 4139 * to pre-load conditions 4140 **/ 4141 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 4142 { 4143 int i; 4144 4145 i40e_stop_misc_vector(pf); 4146 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4147 synchronize_irq(pf->msix_entries[0].vector); 4148 free_irq(pf->msix_entries[0].vector, pf); 4149 } 4150 4151 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 4152 for (i = 0; i < pf->num_alloc_vsi; i++) 4153 if (pf->vsi[i]) 4154 i40e_vsi_free_q_vectors(pf->vsi[i]); 4155 i40e_reset_interrupt_capability(pf); 4156 } 4157 4158 /** 4159 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 4160 * @vsi: the VSI being configured 4161 **/ 4162 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 4163 { 4164 int q_idx; 4165 4166 if (!vsi->netdev) 4167 return; 4168 4169 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4170 napi_enable(&vsi->q_vectors[q_idx]->napi); 4171 } 4172 4173 /** 4174 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4175 * @vsi: the VSI being configured 4176 **/ 4177 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 4178 { 4179 int q_idx; 4180 4181 if (!vsi->netdev) 4182 return; 4183 4184 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4185 napi_disable(&vsi->q_vectors[q_idx]->napi); 4186 } 4187 4188 /** 4189 * i40e_vsi_close - Shut down a VSI 4190 * @vsi: the vsi to be quelled 4191 **/ 4192 static void i40e_vsi_close(struct i40e_vsi *vsi) 4193 { 4194 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 4195 i40e_down(vsi); 4196 i40e_vsi_free_irq(vsi); 4197 i40e_vsi_free_tx_resources(vsi); 4198 i40e_vsi_free_rx_resources(vsi); 4199 vsi->current_netdev_flags = 0; 4200 } 4201 4202 /** 4203 * i40e_quiesce_vsi - Pause a given VSI 4204 * @vsi: the VSI being paused 4205 **/ 4206 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 4207 { 4208 if (test_bit(__I40E_DOWN, &vsi->state)) 4209 return; 4210 4211 /* No need to disable FCoE VSI when Tx suspended */ 4212 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && 4213 vsi->type == I40E_VSI_FCOE) { 4214 dev_dbg(&vsi->back->pdev->dev, 4215 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); 4216 return; 4217 } 4218 4219 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 4220 if (vsi->netdev && netif_running(vsi->netdev)) 4221 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 4222 else 4223 i40e_vsi_close(vsi); 4224 } 4225 4226 /** 4227 * i40e_unquiesce_vsi - Resume a given VSI 4228 * @vsi: the VSI being resumed 4229 **/ 4230 static 
void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4231 {
4232 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4233 return;
4234
4235 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4236 if (vsi->netdev && netif_running(vsi->netdev))
4237 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4238 else
4239 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4240 }
4241
4242 /**
4243 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4244 * @pf: the PF
4245 **/
4246 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4247 {
4248 int v;
4249
4250 for (v = 0; v < pf->num_alloc_vsi; v++) {
4251 if (pf->vsi[v])
4252 i40e_quiesce_vsi(pf->vsi[v]);
4253 }
4254 }
4255
4256 /**
4257 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4258 * @pf: the PF
4259 **/
4260 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4261 {
4262 int v;
4263
4264 for (v = 0; v < pf->num_alloc_vsi; v++) {
4265 if (pf->vsi[v])
4266 i40e_unquiesce_vsi(pf->vsi[v]);
4267 }
4268 }
4269
4270 #ifdef CONFIG_I40E_DCB
4271 /**
4272 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
4273 * @vsi: the VSI being configured
4274 *
4275 * This function waits for the given VSI's Tx queues to be disabled.
4276 **/
4277 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4278 {
4279 struct i40e_pf *pf = vsi->back;
4280 int i, pf_q, ret;
4281
4282 pf_q = vsi->base_queue;
4283 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4284 /* Check and wait for the disable status of the queue */
4285 ret = i40e_pf_txq_wait(pf, pf_q, false);
4286 if (ret) {
4287 dev_info(&pf->pdev->dev,
4288 "VSI seid %d Tx ring %d disable timeout\n",
4289 vsi->seid, pf_q);
4290 return ret;
4291 }
4292 }
4293
4294 return 0;
4295 }
4296
4297 /**
4298 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4299 * @pf: the PF
4300 *
4301 * This function waits for the Tx queues to be in disabled state for all the
4302 * VSIs that are managed by this PF.
4303 **/
4304 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4305 {
4306 int v, ret = 0;
4307
4308 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4309 /* No need to wait for FCoE VSI queues */
4310 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4311 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4312 if (ret)
4313 break;
4314 }
4315 }
4316
4317 return ret;
4318 }
4319
4320 #endif
4321
4322 /**
4323 * i40e_detect_recover_hung_queue - Function to detect and recover a hung queue
4324 * @q_idx: TX queue number
4325 * @vsi: Pointer to VSI struct
4326 *
4327 * This function checks the specified queue of the given VSI for a hung
4328 * condition. It sets the 'hung' bit, since detection is a two-step process.
4329 * If napi_poll runs before the next run of the service task, it resets the
4330 * 'hung' bit for the respective q_vector. If not, the hung condition remains
4331 * unchanged and, during the subsequent run, this function issues a SW interrupt to recover from it.
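 * (Illustrative timeline: service-task pass N marks the queue as hung;
 * if no napi_poll cleans it before pass N+1, and the interrupt is found
 * disabled while Tx work is still pending, a software interrupt is
 * forced via i40e_force_wb() below.)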
4332 **/
4333 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4334 {
4335 struct i40e_ring *tx_ring = NULL;
4336 struct i40e_pf *pf;
4337 u32 head, val, tx_pending;
4338 int i;
4339
4340 pf = vsi->back;
4341
4342 /* now that we have an index, find the tx_ring struct */
4343 for (i = 0; i < vsi->num_queue_pairs; i++) {
4344 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4345 if (q_idx == vsi->tx_rings[i]->queue_index) {
4346 tx_ring = vsi->tx_rings[i];
4347 break;
4348 }
4349 }
4350 }
4351
4352 if (!tx_ring)
4353 return;
4354
4355 /* Read interrupt register */
4356 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4357 val = rd32(&pf->hw,
4358 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4359 tx_ring->vsi->base_vector - 1));
4360 else
4361 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4362
4363 head = i40e_get_head(tx_ring);
4364
4365 tx_pending = i40e_get_tx_pending(tx_ring);
4366
4367 /* If interrupts are disabled and Tx work is still pending,
4368 * trigger the SW interrupt right away (don't wait). Worst case
4369 * there will be one extra interrupt that finds nothing to clean
4370 * because the queues were already cleaned.
4371 */
4372 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4373 i40e_force_wb(vsi, tx_ring->q_vector);
4374 }
4375
4376 /**
4377 * i40e_detect_recover_hung - Function to detect and recover hung queues
4378 * @pf: pointer to PF struct
4379 *
4380 * The LAN VSI has a netdev, and the netdev has Tx queues. This function
4381 * checks each of those Tx queues for a hung condition and triggers
4382 * recovery by issuing a SW interrupt.
4383 **/
4384 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4385 {
4386 struct net_device *netdev;
4387 struct i40e_vsi *vsi;
4388 int i;
4389
4390 /* Only for LAN VSI */
4391 vsi = pf->vsi[pf->lan_vsi];
4392
4393 if (!vsi)
4394 return;
4395
4396 /* Make sure VSI state is not DOWN/RECOVERY_PENDING */
4397 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4398 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4399 return;
4400
4401 /* Make sure type is MAIN VSI */
4402 if (vsi->type != I40E_VSI_MAIN)
4403 return;
4404
4405 netdev = vsi->netdev;
4406 if (!netdev)
4407 return;
4408
4409 /* Bail out if netif_carrier is not OK */
4410 if (!netif_carrier_ok(netdev))
4411 return;
4412
4413 /* Go through the netdev's Tx queues */
4414 for (i = 0; i < netdev->num_tx_queues; i++) {
4415 struct netdev_queue *q;
4416
4417 q = netdev_get_tx_queue(netdev, i);
4418 if (q)
4419 i40e_detect_recover_hung_queue(i, vsi);
4420 }
4421 }
4422
4423 /**
4424 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4425 * @pf: pointer to PF
4426 *
4427 * Get the TC map for an iSCSI PF type; it will include the iSCSI TC
4428 * and the LAN TC.
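 * (Worked example with illustrative values: if the iSCSI APP TLV carries
 * priority 4 and the ETS table maps priority 4 to TC1, the returned
 * bitmap is 0x3, i.e. TC0 | TC1.)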
4429 **/
4430 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4431 {
4432 struct i40e_dcb_app_priority_table app;
4433 struct i40e_hw *hw = &pf->hw;
4434 u8 enabled_tc = 1; /* TC0 is always enabled */
4435 u8 tc, i;
4436 /* Get the iSCSI APP TLV */
4437 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4438
4439 for (i = 0; i < dcbcfg->numapps; i++) {
4440 app = dcbcfg->app[i];
4441 if (app.selector == I40E_APP_SEL_TCPIP &&
4442 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4443 tc = dcbcfg->etscfg.prioritytable[app.priority];
4444 enabled_tc |= BIT_ULL(tc);
4445 break;
4446 }
4447 }
4448
4449 return enabled_tc;
4450 }
4451
4452 /**
4453 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4454 * @dcbcfg: the corresponding DCBx configuration structure
4455 *
4456 * Return the number of TCs from the given DCBx configuration
4457 **/
4458 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4459 {
4460 u8 num_tc = 0;
4461 int i;
4462
4463 /* Scan the ETS Config Priority Table to find
4464 * traffic class enabled for a given priority
4465 * and use the traffic class index to get the
4466 * number of traffic classes enabled
4467 */
4468 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4469 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4470 num_tc = dcbcfg->etscfg.prioritytable[i];
4471 }
4472
4473 /* Traffic class index starts from zero so
4474 * increment to return the actual count
4475 */
4476 return num_tc + 1;
4477 }
4478
4479 /**
4480 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4481 * @dcbcfg: the corresponding DCBx configuration structure
4482 *
4483 * Query the given DCBX configuration and return a bitmap of the
4484 * traffic classes enabled in it
4485 **/
4486 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4487 {
4488 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4489 u8 enabled_tc = 1;
4490 u8 i;
4491
4492 for (i = 0; i < num_tc; i++)
4493 enabled_tc |= BIT(i);
4494
4495 return enabled_tc;
4496 }
4497
4498 /**
4499 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4500 * @pf: PF being queried
4501 *
4502 * Return number of traffic classes enabled for the given PF
4503 **/
4504 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4505 {
4506 struct i40e_hw *hw = &pf->hw;
4507 u8 i, enabled_tc;
4508 u8 num_tc = 0;
4509 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4510
4511 /* If DCB is not enabled then always in single TC */
4512 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4513 return 1;
4514
4515 /* SFP mode will be enabled for all TCs on port */
4516 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4517 return i40e_dcb_get_num_tc(dcbcfg);
4518
4519 /* In MFP mode, return the count of enabled TCs for this PF */
4520 if (pf->hw.func_caps.iscsi)
4521 enabled_tc = i40e_get_iscsi_tc_map(pf);
4522 else
4523 return 1; /* Only TC0 */
4524
4525 /* At least have TC0 */
4526 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4527 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4528 if (enabled_tc & BIT_ULL(i))
4529 num_tc++;
4530 }
4531 return num_tc;
4532 }
4533
4534 /**
4535 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4536 * @pf: PF being queried
4537 *
4538 * Return a bitmap for the first enabled traffic class for this PF.
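 * (For example, an enabled_tcmap of 0x0C (TC2 and TC3 enabled) yields
 * BIT(2), i.e. 0x4.)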
4539 **/
4540 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4541 {
4542 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4543 u8 i = 0;
4544
4545 if (!enabled_tc)
4546 return 0x1; /* TC0 */
4547
4548 /* Find the first enabled TC */
4549 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4550 if (enabled_tc & BIT_ULL(i))
4551 break;
4552 }
4553
4554 return BIT(i);
4555 }
4556
4557 /**
4558 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4559 * @pf: PF being queried
4560 *
4561 * Return a bitmap for enabled traffic classes for this PF.
4562 **/
4563 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4564 {
4565 /* If DCB is not enabled for this PF then just return default TC */
4566 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4567 return i40e_pf_get_default_tc(pf);
4568
4569 /* SFP mode we want PF to be enabled for all TCs */
4570 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4571 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4572
4573 /* MFP enabled and iSCSI PF type */
4574 if (pf->hw.func_caps.iscsi)
4575 return i40e_get_iscsi_tc_map(pf);
4576 else
4577 return i40e_pf_get_default_tc(pf);
4578 }
4579
4580 /**
4581 * i40e_vsi_get_bw_info - Query VSI BW Information
4582 * @vsi: the VSI being queried
4583 *
4584 * Returns 0 on success, negative value on failure
4585 **/
4586 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4587 {
4588 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4589 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4590 struct i40e_pf *pf = vsi->back;
4591 struct i40e_hw *hw = &pf->hw;
4592 i40e_status ret;
4593 u32 tc_bw_max;
4594 int i;
4595
4596 /* Get the VSI level BW configuration */
4597 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4598 if (ret) {
4599 dev_info(&pf->pdev->dev,
4600 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4601 i40e_stat_str(&pf->hw, ret),
4602 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4603 return -EINVAL;
4604 }
4605
4606 /* Get the VSI level BW configuration per TC */
4607 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4608 NULL);
4609 if (ret) {
4610 dev_info(&pf->pdev->dev,
4611 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4612 i40e_stat_str(&pf->hw, ret),
4613 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4614 return -EINVAL;
4615 }
4616
4617 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4618 dev_info(&pf->pdev->dev,
4619 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4620 bw_config.tc_valid_bits,
4621 bw_ets_config.tc_valid_bits);
4622 /* Still continuing */
4623 }
4624
4625 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4626 vsi->bw_max_quanta = bw_config.max_bw;
4627 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4628 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4629 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4630 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4631 vsi->bw_ets_limit_credits[i] =
4632 le16_to_cpu(bw_ets_config.credits[i]);
4633 /* 3 bits out of 4 for each TC */
4634 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4635 }
4636
4637 return 0;
4638 }
4639
4640 /**
4641 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4642 * @vsi: the VSI being configured
4643 * @enabled_tc: TC bitmap
4644 * @bw_share: BW shared credits per TC
4645 *
4646 * Returns 0 on success, negative value on failure
4647 **/
4648 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4649 u8
*bw_share)
4650 {
4651 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4652 i40e_status ret;
4653 int i;
4654
4655 bw_data.tc_valid_bits = enabled_tc;
4656 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4657 bw_data.tc_bw_credits[i] = bw_share[i];
4658
4659 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4660 NULL);
4661 if (ret) {
4662 dev_info(&vsi->back->pdev->dev,
4663 "AQ command Config VSI BW allocation per TC failed = %d\n",
4664 vsi->back->hw.aq.asq_last_status);
4665 return -EINVAL;
4666 }
4667
4668 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4669 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4670
4671 return 0;
4672 }
4673
4674 /**
4675 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4676 * @vsi: the VSI being configured
4677 * @enabled_tc: TC map to be enabled
4678 *
4679 **/
4680 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4681 {
4682 struct net_device *netdev = vsi->netdev;
4683 struct i40e_pf *pf = vsi->back;
4684 struct i40e_hw *hw = &pf->hw;
4685 u8 netdev_tc = 0;
4686 int i;
4687 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4688
4689 if (!netdev)
4690 return;
4691
4692 if (!enabled_tc) {
4693 netdev_reset_tc(netdev);
4694 return;
4695 }
4696
4697 /* Set up actual enabled TCs on the VSI */
4698 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4699 return;
4700
4701 /* set per TC queues for the VSI */
4702 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4703 /* Only set TC queues for enabled tcs
4704 *
4705 * e.g. for a VSI that has TC0 and TC3 enabled, the
4706 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
4707 * will set numtc for the netdev to 2, which the netdev
4708 * layer will reference as TC 0 and 1.
4709 */
4710 if (vsi->tc_config.enabled_tc & BIT_ULL(i))
4711 netdev_set_tc_queue(netdev,
4712 vsi->tc_config.tc_info[i].netdev_tc,
4713 vsi->tc_config.tc_info[i].qcount,
4714 vsi->tc_config.tc_info[i].qoffset);
4715 }
4716
4717 /* Assign UP2TC map for the VSI */
4718 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4719 /* Get the actual TC# for the UP */
4720 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4721 /* Get the mapped netdev TC# for the UP */
4722 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4723 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4724 }
4725 }
4726
4727 /**
4728 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4729 * @vsi: the VSI being configured
4730 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4731 **/
4732 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4733 struct i40e_vsi_context *ctxt)
4734 {
4735 /* copy just the sections touched not the entire info
4736 * since not all sections are valid as returned by
4737 * update vsi params
4738 */
4739 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4740 memcpy(&vsi->info.queue_mapping,
4741 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4742 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4743 sizeof(vsi->info.tc_mapping));
4744 }
4745
4746 /**
4747 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4748 * @vsi: VSI to be configured
4749 * @enabled_tc: TC bitmap
4750 *
4751 * This configures a particular VSI for TCs that are mapped to the
4752 * given TC bitmap. It uses default bandwidth share for TCs across
4753 * VSIs to configure TC for a particular VSI.
4754 *
4755 * NOTE:
4756 * It is expected that the VSI queues have been quiesced before calling
4757 * this function.
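 * (Sketch of a typical call, under the equal-share policy used below:
 * enabled_tc = 0x3 enables TC0 and TC1 with a relative bw_share credit
 * of 1 each, then updates the queue mapping and the netdev TC setup.)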
4758 **/
4759 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4760 {
4761 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4762 struct i40e_vsi_context ctxt;
4763 int ret = 0;
4764 int i;
4765
4766 /* Check if enabled_tc is the same as the existing configuration */
4767 if (vsi->tc_config.enabled_tc == enabled_tc)
4768 return ret;
4769
4770 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4771 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4772 if (enabled_tc & BIT_ULL(i))
4773 bw_share[i] = 1;
4774 }
4775
4776 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4777 if (ret) {
4778 dev_info(&vsi->back->pdev->dev,
4779 "Failed configuring TC map %d for VSI %d\n",
4780 enabled_tc, vsi->seid);
4781 goto out;
4782 }
4783
4784 /* Update Queue Pairs Mapping for currently enabled UPs */
4785 ctxt.seid = vsi->seid;
4786 ctxt.pf_num = vsi->back->hw.pf_id;
4787 ctxt.vf_num = 0;
4788 ctxt.uplink_seid = vsi->uplink_seid;
4789 ctxt.info = vsi->info;
4790 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4791
4792 /* Update the VSI after updating the VSI queue-mapping information */
4793 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4794 if (ret) {
4795 dev_info(&vsi->back->pdev->dev,
4796 "Update vsi tc config failed, err %s aq_err %s\n",
4797 i40e_stat_str(&vsi->back->hw, ret),
4798 i40e_aq_str(&vsi->back->hw,
4799 vsi->back->hw.aq.asq_last_status));
4800 goto out;
4801 }
4802 /* update the local VSI info with updated queue map */
4803 i40e_vsi_update_queue_map(vsi, &ctxt);
4804 vsi->info.valid_sections = 0;
4805
4806 /* Update current VSI BW information */
4807 ret = i40e_vsi_get_bw_info(vsi);
4808 if (ret) {
4809 dev_info(&vsi->back->pdev->dev,
4810 "Failed updating vsi bw info, err %s aq_err %s\n",
4811 i40e_stat_str(&vsi->back->hw, ret),
4812 i40e_aq_str(&vsi->back->hw,
4813 vsi->back->hw.aq.asq_last_status));
4814 goto out;
4815 }
4816
4817 /* Update the netdev TC setup */
4818 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4819 out:
4820 return ret;
4821 }
4822
4823 /**
4824 * i40e_veb_config_tc - Configure TCs for given VEB
4825 * @veb: given VEB
4826 * @enabled_tc: TC bitmap
4827 *
4828 * Configures given TC bitmap for VEB (switching) element
4829 **/
4830 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4831 {
4832 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4833 struct i40e_pf *pf = veb->pf;
4834 int ret = 0;
4835 int i;
4836
4837 /* If no TCs are requested, or the TCs are already enabled, just return */
4838 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4839 return ret;
4840
4841 bw_data.tc_valid_bits = enabled_tc;
4842 /* bw_data.absolute_credits is not set (relative) */
4843
4844 /* Enable ETS TCs with equal BW Share for now */
4845 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4846 if (enabled_tc & BIT_ULL(i))
4847 bw_data.tc_bw_share_credits[i] = 1;
4848 }
4849
4850 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4851 &bw_data, NULL);
4852 if (ret) {
4853 dev_info(&pf->pdev->dev,
4854 "VEB bw config failed, err %s aq_err %s\n",
4855 i40e_stat_str(&pf->hw, ret),
4856 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4857 goto out;
4858 }
4859
4860 /* Update the BW information */
4861 ret = i40e_veb_get_bw_info(veb);
4862 if (ret) {
4863 dev_info(&pf->pdev->dev,
4864 "Failed getting veb bw config, err %s aq_err %s\n",
4865 i40e_stat_str(&pf->hw, ret),
4866 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4867 }
4868
4869 out:
4870 return ret;
4871 }
4872
4873 #ifdef CONFIG_I40E_DCB
4874 /**
4875 *
i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4876 * @pf: PF struct
4877 *
4878 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4879 * the caller has quiesced all the VSIs before calling
4880 * this function
4881 **/
4882 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4883 {
4884 u8 tc_map = 0;
4885 int ret;
4886 u8 v;
4887
4888 /* Enable the TCs available on PF to all VEBs */
4889 tc_map = i40e_pf_get_tc_map(pf);
4890 for (v = 0; v < I40E_MAX_VEB; v++) {
4891 if (!pf->veb[v])
4892 continue;
4893 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4894 if (ret) {
4895 dev_info(&pf->pdev->dev,
4896 "Failed configuring TC for VEB seid=%d\n",
4897 pf->veb[v]->seid);
4898 /* Will try to configure as many components as possible */
4899 }
4900 }
4901
4902 /* Update each VSI */
4903 for (v = 0; v < pf->num_alloc_vsi; v++) {
4904 if (!pf->vsi[v])
4905 continue;
4906
4907 /* - Enable all TCs for the LAN VSI
4908 #ifdef I40E_FCOE
4909 * - For FCoE VSI only enable the TC configured
4910 * as per the APP TLV
4911 #endif
4912 * - For all others keep them at TC0 for now
4913 */
4914 if (v == pf->lan_vsi)
4915 tc_map = i40e_pf_get_tc_map(pf);
4916 else
4917 tc_map = i40e_pf_get_default_tc(pf);
4918 #ifdef I40E_FCOE
4919 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4920 tc_map = i40e_get_fcoe_tc_map(pf);
4921 #endif /* #ifdef I40E_FCOE */
4922
4923 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4924 if (ret) {
4925 dev_info(&pf->pdev->dev,
4926 "Failed configuring TC for VSI seid=%d\n",
4927 pf->vsi[v]->seid);
4928 /* Will try to configure as many components as possible */
4929 } else {
4930 /* Re-configure VSI vectors based on updated TC map */
4931 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4932 if (pf->vsi[v]->netdev)
4933 i40e_dcbnl_set_all(pf->vsi[v]);
4934 }
4935 }
4936 }
4937
4938 /**
4939 * i40e_resume_port_tx - Resume port Tx
4940 * @pf: PF struct
4941 *
4942 * Resume a port's Tx and issue a PF reset in case of failure to
4943 * resume.
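 * Returns 0 on success; on failure, requests a PF reset, schedules the
 * service task, and returns the error.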
4944 **/
4945 static int i40e_resume_port_tx(struct i40e_pf *pf)
4946 {
4947 struct i40e_hw *hw = &pf->hw;
4948 int ret;
4949
4950 ret = i40e_aq_resume_port_tx(hw, NULL);
4951 if (ret) {
4952 dev_info(&pf->pdev->dev,
4953 "Resume Port Tx failed, err %s aq_err %s\n",
4954 i40e_stat_str(&pf->hw, ret),
4955 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4956 /* Schedule PF reset to recover */
4957 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4958 i40e_service_event_schedule(pf);
4959 }
4960
4961 return ret;
4962 }
4963
4964 /**
4965 * i40e_init_pf_dcb - Initialize DCB configuration
4966 * @pf: PF being configured
4967 *
4968 * Query the current DCB configuration and cache it
4969 * in the hardware structure
4970 **/
4971 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4972 {
4973 struct i40e_hw *hw = &pf->hw;
4974 int err = 0;
4975
4976 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
4977 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
4978 (pf->hw.aq.fw_maj_ver < 4))
4979 goto out;
4980
4981 /* Get the initial DCB configuration */
4982 err = i40e_init_dcb(hw);
4983 if (!err) {
4984 /* Device/Function is not DCBX capable */
4985 if ((!hw->func_caps.dcb) ||
4986 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4987 dev_info(&pf->pdev->dev,
4988 "DCBX offload is not supported or is disabled for this PF.\n");
4989
4990 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4991 goto out;
4992
4993 } else {
4994 /* When the status is not DISABLED, DCBX is managed in FW */
4995 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4996 DCB_CAP_DCBX_VER_IEEE;
4997
4998 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4999 /* Enable DCB tagging only when more than one TC */
5000 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5001 pf->flags |= I40E_FLAG_DCB_ENABLED;
5002 dev_dbg(&pf->pdev->dev,
5003 "DCBX offload is supported for this PF.\n");
5004 }
5005 } else {
5006 dev_info(&pf->pdev->dev,
5007 "Query for DCB configuration failed, err %s aq_err %s\n",
5008 i40e_stat_str(&pf->hw, err),
5009 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5010 }
5011
5012 out:
5013 return err;
5014 }
5015 #endif /* CONFIG_I40E_DCB */
5016 #define SPEED_SIZE 14
5017 #define FC_SIZE 8
5018 /**
5019 * i40e_print_link_message - print link up or down
5020 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
5021 **/
5022 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5023 {
5024 char *speed = "Unknown";
5025 char *fc = "Unknown";
5026
5027 if (vsi->current_isup == isup)
5028 return;
5029 vsi->current_isup = isup;
5030 if (!isup) {
5031 netdev_info(vsi->netdev, "NIC Link is Down\n");
5032 return;
5033 }
5034
5035 /* Warn user if link speed on NPAR enabled partition is not at
5036 * least 10Gbps
5037 */
5038 if (vsi->back->hw.func_caps.npar_enable &&
5039 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5040 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5041 netdev_warn(vsi->netdev,
5042 "The partition detected link speed that is less than 10Gbps\n");
5043
5044 switch (vsi->back->hw.phy.link_info.link_speed) {
5045 case I40E_LINK_SPEED_40GB:
5046 speed = "40 G";
5047 break;
5048 case I40E_LINK_SPEED_20GB:
5049 speed = "20 G";
5050 break;
5051 case I40E_LINK_SPEED_10GB:
5052 speed = "10 G";
5053 break;
5054 case I40E_LINK_SPEED_1GB:
5055 speed = "1000 M";
5056 break;
5057 case I40E_LINK_SPEED_100MB:
5058 speed = "100 M";
5059 break;
5060 default:
5061 break;
5062 }
5063
5064 switch (vsi->back->hw.fc.current_mode) {
5065 case I40E_FC_FULL:
5066 fc = "RX/TX";
5067 break;
5068 case I40E_FC_TX_PAUSE:
5069 fc = "TX";
5070 break;
5071 case I40E_FC_RX_PAUSE:
5072 fc = "RX";
5073 break;
5074 default:
5075 fc = "None";
5076 break;
5077 }
5078
5079 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5080 speed, fc);
5081 }
5082
5083 /**
5084 * i40e_up_complete - Finish the last steps of bringing up a connection
5085 * @vsi: the VSI being configured
5086 **/
5087 static int i40e_up_complete(struct i40e_vsi *vsi)
5088 {
5089 struct i40e_pf *pf = vsi->back;
5090 int err;
5091
5092 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5093 i40e_vsi_configure_msix(vsi);
5094 else
5095 i40e_configure_msi_and_legacy(vsi);
5096
5097 /* start rings */
5098 err = i40e_vsi_control_rings(vsi, true);
5099 if (err)
5100 return err;
5101
5102 clear_bit(__I40E_DOWN, &vsi->state);
5103 i40e_napi_enable_all(vsi);
5104 i40e_vsi_enable_irq(vsi);
5105
5106 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5107 (vsi->netdev)) {
5108 i40e_print_link_message(vsi, true);
5109 netif_tx_start_all_queues(vsi->netdev);
5110 netif_carrier_on(vsi->netdev);
5111 } else if (vsi->netdev) {
5112 i40e_print_link_message(vsi, false);
5113 /* need to check for qualified module here */
5114 if ((pf->hw.phy.link_info.link_info &
5115 I40E_AQ_MEDIA_AVAILABLE) &&
5116 (!(pf->hw.phy.link_info.an_info &
5117 I40E_AQ_QUALIFIED_MODULE)))
5118 netdev_err(vsi->netdev,
5119 "the driver failed to link because an unqualified module was detected.");
5120 }
5121
5122 /* replay FDIR SB filters */
5123 if (vsi->type == I40E_VSI_FDIR) {
5124 /* reset fd counters */
5125 pf->fd_add_err = pf->fd_atr_cnt = 0;
5126 if (pf->fd_tcp_rule > 0) {
5127 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5128 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5129 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
5130 pf->fd_tcp_rule = 0;
5131 }
5132 i40e_fdir_filter_restore(vsi);
5133 }
5134 i40e_service_event_schedule(pf);
5135
5136 return 0;
5137 }
5138
5139 /**
5140 * i40e_vsi_reinit_locked - Reset the VSI
5141 * @vsi: the VSI being configured
5142 *
5143 * Rebuild the ring structs after some configuration
5144 * has changed, e.g. MTU size.
5145 **/
5146 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5147 {
5148 struct i40e_pf *pf = vsi->back;
5149
5150 WARN_ON(in_interrupt());
5151 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5152 usleep_range(1000, 2000);
5153 i40e_down(vsi);
5154
5155 /* Give a VF some time to respond to the reset. The
5156 * two second wait is based upon the watchdog cycle in
5157 * the VF driver.
5158 */
5159 if (vsi->type == I40E_VSI_SRIOV)
5160 msleep(2000);
5161 i40e_up(vsi);
5162 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5163 }
5164
5165 /**
5166 * i40e_up - Bring the connection back up after being down
5167 * @vsi: the VSI being configured
5168 **/
5169 int i40e_up(struct i40e_vsi *vsi)
5170 {
5171 int err;
5172
5173 err = i40e_vsi_configure(vsi);
5174 if (!err)
5175 err = i40e_up_complete(vsi);
5176
5177 return err;
5178 }
5179
5180 /**
5181 * i40e_down - Shutdown the connection processing
5182 * @vsi: the VSI being stopped
5183 **/
5184 void i40e_down(struct i40e_vsi *vsi)
5185 {
5186 int i;
5187
5188 /* It is assumed that the caller of this function
5189 * sets the vsi->state __I40E_DOWN bit.
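 * (Both i40e_vsi_close() and the __I40E_DOWN_REQUESTED path of
 * i40e_do_reset() set the bit before calling here.)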
5190 */ 5191 if (vsi->netdev) { 5192 netif_carrier_off(vsi->netdev); 5193 netif_tx_disable(vsi->netdev); 5194 } 5195 i40e_vsi_disable_irq(vsi); 5196 i40e_vsi_control_rings(vsi, false); 5197 i40e_napi_disable_all(vsi); 5198 5199 for (i = 0; i < vsi->num_queue_pairs; i++) { 5200 i40e_clean_tx_ring(vsi->tx_rings[i]); 5201 i40e_clean_rx_ring(vsi->rx_rings[i]); 5202 } 5203 } 5204 5205 /** 5206 * i40e_setup_tc - configure multiple traffic classes 5207 * @netdev: net device to configure 5208 * @tc: number of traffic classes to enable 5209 **/ 5210 #ifdef I40E_FCOE 5211 int i40e_setup_tc(struct net_device *netdev, u8 tc) 5212 #else 5213 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 5214 #endif 5215 { 5216 struct i40e_netdev_priv *np = netdev_priv(netdev); 5217 struct i40e_vsi *vsi = np->vsi; 5218 struct i40e_pf *pf = vsi->back; 5219 u8 enabled_tc = 0; 5220 int ret = -EINVAL; 5221 int i; 5222 5223 /* Check if DCB enabled to continue */ 5224 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 5225 netdev_info(netdev, "DCB is not enabled for adapter\n"); 5226 goto exit; 5227 } 5228 5229 /* Check if MFP enabled */ 5230 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 5231 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 5232 goto exit; 5233 } 5234 5235 /* Check whether tc count is within enabled limit */ 5236 if (tc > i40e_pf_get_num_tc(pf)) { 5237 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 5238 goto exit; 5239 } 5240 5241 /* Generate TC map for number of tc requested */ 5242 for (i = 0; i < tc; i++) 5243 enabled_tc |= BIT_ULL(i); 5244 5245 /* Requesting same TC configuration as already enabled */ 5246 if (enabled_tc == vsi->tc_config.enabled_tc) 5247 return 0; 5248 5249 /* Quiesce VSI queues */ 5250 i40e_quiesce_vsi(vsi); 5251 5252 /* Configure VSI for enabled TCs */ 5253 ret = i40e_vsi_config_tc(vsi, enabled_tc); 5254 if (ret) { 5255 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 5256 vsi->seid); 5257 goto exit; 5258 } 5259 5260 /* Unquiesce VSI */ 5261 i40e_unquiesce_vsi(vsi); 5262 5263 exit: 5264 return ret; 5265 } 5266 5267 /** 5268 * i40e_open - Called when a network interface is made active 5269 * @netdev: network interface device structure 5270 * 5271 * The open entry point is called when a network interface is made 5272 * active by the system (IFF_UP). At this point all resources needed 5273 * for transmit and receive operations are allocated, the interrupt 5274 * handler is registered with the OS, the netdev watchdog subtask is 5275 * enabled, and the stack is notified that the interface is ready. 
5276 *
5277 * Returns 0 on success, negative value on failure
5278 **/
5279 int i40e_open(struct net_device *netdev)
5280 {
5281 struct i40e_netdev_priv *np = netdev_priv(netdev);
5282 struct i40e_vsi *vsi = np->vsi;
5283 struct i40e_pf *pf = vsi->back;
5284 int err;
5285
5286 /* disallow open during test or if eeprom is broken */
5287 if (test_bit(__I40E_TESTING, &pf->state) ||
5288 test_bit(__I40E_BAD_EEPROM, &pf->state))
5289 return -EBUSY;
5290
5291 netif_carrier_off(netdev);
5292
5293 err = i40e_vsi_open(vsi);
5294 if (err)
5295 return err;
5296
5297 /* configure global TSO hardware offload settings */
5298 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5299 TCP_FLAG_FIN) >> 16);
5300 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5301 TCP_FLAG_FIN |
5302 TCP_FLAG_CWR) >> 16);
5303 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5304
5305 #ifdef CONFIG_I40E_VXLAN
5306 vxlan_get_rx_port(netdev);
5307 #endif
5308
5309 return 0;
5310 }
5311
5312 /**
5313 * i40e_vsi_open - Bring up a VSI
5314 * @vsi: the VSI to open
5315 *
5316 * Finish initialization of the VSI.
5317 *
5318 * Returns 0 on success, negative value on failure
5319 **/
5320 int i40e_vsi_open(struct i40e_vsi *vsi)
5321 {
5322 struct i40e_pf *pf = vsi->back;
5323 char int_name[I40E_INT_NAME_STR_LEN];
5324 int err;
5325
5326 /* allocate descriptors */
5327 err = i40e_vsi_setup_tx_resources(vsi);
5328 if (err)
5329 goto err_setup_tx;
5330 err = i40e_vsi_setup_rx_resources(vsi);
5331 if (err)
5332 goto err_setup_rx;
5333
5334 err = i40e_vsi_configure(vsi);
5335 if (err)
5336 goto err_setup_rx;
5337
5338 if (vsi->netdev) {
5339 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5340 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5341 err = i40e_vsi_request_irq(vsi, int_name);
5342 if (err)
5343 goto err_setup_rx;
5344
5345 /* Notify the stack of the actual queue counts. */
5346 err = netif_set_real_num_tx_queues(vsi->netdev,
5347 vsi->num_queue_pairs);
5348 if (err)
5349 goto err_set_queues;
5350
5351 err = netif_set_real_num_rx_queues(vsi->netdev,
5352 vsi->num_queue_pairs);
5353 if (err)
5354 goto err_set_queues;
5355
5356 } else if (vsi->type == I40E_VSI_FDIR) {
5357 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5358 dev_driver_string(&pf->pdev->dev),
5359 dev_name(&pf->pdev->dev));
5360 err = i40e_vsi_request_irq(vsi, int_name);
5361
5362 } else {
5363 err = -EINVAL;
5364 goto err_setup_rx;
5365 }
5366
5367 err = i40e_up_complete(vsi);
5368 if (err)
5369 goto err_up_complete;
5370
5371 return 0;
5372
5373 err_up_complete:
5374 i40e_down(vsi);
5375 err_set_queues:
5376 i40e_vsi_free_irq(vsi);
5377 err_setup_rx:
5378 i40e_vsi_free_rx_resources(vsi);
5379 err_setup_tx:
5380 i40e_vsi_free_tx_resources(vsi);
5381 if (vsi == pf->vsi[pf->lan_vsi])
5382 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5383
5384 return err;
5385 }
5386
5387 /**
5388 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5389 * @pf: Pointer to PF
5390 *
5391 * This function destroys the hlist where all the Flow Director
5392 * filters were saved.
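 * (Note: only the driver's software list is emptied here; this function
 * does not touch the filters programmed in hardware.)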
5393 **/ 5394 static void i40e_fdir_filter_exit(struct i40e_pf *pf) 5395 { 5396 struct i40e_fdir_filter *filter; 5397 struct hlist_node *node2; 5398 5399 hlist_for_each_entry_safe(filter, node2, 5400 &pf->fdir_filter_list, fdir_node) { 5401 hlist_del(&filter->fdir_node); 5402 kfree(filter); 5403 } 5404 pf->fdir_pf_active_filters = 0; 5405 } 5406 5407 /** 5408 * i40e_close - Disables a network interface 5409 * @netdev: network interface device structure 5410 * 5411 * The close entry point is called when an interface is de-activated 5412 * by the OS. The hardware is still under the driver's control, but 5413 * this netdev interface is disabled. 5414 * 5415 * Returns 0, this is not allowed to fail 5416 **/ 5417 #ifdef I40E_FCOE 5418 int i40e_close(struct net_device *netdev) 5419 #else 5420 static int i40e_close(struct net_device *netdev) 5421 #endif 5422 { 5423 struct i40e_netdev_priv *np = netdev_priv(netdev); 5424 struct i40e_vsi *vsi = np->vsi; 5425 5426 i40e_vsi_close(vsi); 5427 5428 return 0; 5429 } 5430 5431 /** 5432 * i40e_do_reset - Start a PF or Core Reset sequence 5433 * @pf: board private structure 5434 * @reset_flags: which reset is requested 5435 * 5436 * The essential difference in resets is that the PF Reset 5437 * doesn't clear the packet buffers, doesn't reset the PE 5438 * firmware, and doesn't bother the other PFs on the chip. 5439 **/ 5440 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 5441 { 5442 u32 val; 5443 5444 WARN_ON(in_interrupt()); 5445 5446 if (i40e_check_asq_alive(&pf->hw)) 5447 i40e_vc_notify_reset(pf); 5448 5449 /* do the biggest reset indicated */ 5450 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { 5451 5452 /* Request a Global Reset 5453 * 5454 * This will start the chip's countdown to the actual full 5455 * chip reset event, and a warning interrupt to be sent 5456 * to all PFs, including the requestor. Our handler 5457 * for the warning interrupt will deal with the shutdown 5458 * and recovery of the switch setup. 5459 */ 5460 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 5461 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5462 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 5463 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5464 5465 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { 5466 5467 /* Request a Core Reset 5468 * 5469 * Same as Global Reset, except does *not* include the MAC/PHY 5470 */ 5471 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 5472 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5473 val |= I40E_GLGEN_RTRIG_CORER_MASK; 5474 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5475 i40e_flush(&pf->hw); 5476 5477 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { 5478 5479 /* Request a PF Reset 5480 * 5481 * Resets only the PF-specific registers 5482 * 5483 * This goes directly to the tear-down and rebuild of 5484 * the switch, since we need to do all the recovery as 5485 * for the Core Reset. 
5486 */
5487 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5488 i40e_handle_reset_warning(pf);
5489
5490 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5491 int v;
5492
5493 /* Find the VSI(s) that requested a re-init */
5494 dev_info(&pf->pdev->dev,
5495 "VSI reinit requested\n");
5496 for (v = 0; v < pf->num_alloc_vsi; v++) {
5497 struct i40e_vsi *vsi = pf->vsi[v];
5498
5499 if (vsi != NULL &&
5500 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5501 i40e_vsi_reinit_locked(pf->vsi[v]);
5502 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5503 }
5504 }
5505 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5506 int v;
5507
5508 /* Find the VSI(s) that need to be brought down */
5509 dev_info(&pf->pdev->dev, "VSI down requested\n");
5510 for (v = 0; v < pf->num_alloc_vsi; v++) {
5511 struct i40e_vsi *vsi = pf->vsi[v];
5512
5513 if (vsi != NULL &&
5514 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5515 set_bit(__I40E_DOWN, &vsi->state);
5516 i40e_down(vsi);
5517 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5518 }
5519 }
5520 } else {
5521 dev_info(&pf->pdev->dev,
5522 "bad reset request 0x%08x\n", reset_flags);
5523 }
5524 }
5525
5526 #ifdef CONFIG_I40E_DCB
5527 /**
5528 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5529 * @pf: board private structure
5530 * @old_cfg: current DCB config
5531 * @new_cfg: new DCB config
5532 **/
5533 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5534 struct i40e_dcbx_config *old_cfg,
5535 struct i40e_dcbx_config *new_cfg)
5536 {
5537 bool need_reconfig = false;
5538
5539 /* Check if ETS configuration has changed */
5540 if (memcmp(&new_cfg->etscfg,
5541 &old_cfg->etscfg,
5542 sizeof(new_cfg->etscfg))) {
5543 /* If Priority Table has changed reconfig is needed */
5544 if (memcmp(&new_cfg->etscfg.prioritytable,
5545 &old_cfg->etscfg.prioritytable,
5546 sizeof(new_cfg->etscfg.prioritytable))) {
5547 need_reconfig = true;
5548 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5549 }
5550
5551 if (memcmp(&new_cfg->etscfg.tcbwtable,
5552 &old_cfg->etscfg.tcbwtable,
5553 sizeof(new_cfg->etscfg.tcbwtable)))
5554 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5555
5556 if (memcmp(&new_cfg->etscfg.tsatable,
5557 &old_cfg->etscfg.tsatable,
5558 sizeof(new_cfg->etscfg.tsatable)))
5559 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5560 }
5561
5562 /* Check if PFC configuration has changed */
5563 if (memcmp(&new_cfg->pfc,
5564 &old_cfg->pfc,
5565 sizeof(new_cfg->pfc))) {
5566 need_reconfig = true;
5567 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5568 }
5569
5570 /* Check if APP Table has changed */
5571 if (memcmp(&new_cfg->app,
5572 &old_cfg->app,
5573 sizeof(new_cfg->app))) {
5574 need_reconfig = true;
5575 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5576 }
5577
5578 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5579 return need_reconfig;
5580 }
5581
5582 /**
5583 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5584 * @pf: board private structure
5585 * @e: event info posted on ARQ
5586 **/
5587 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5588 struct i40e_arq_event_info *e)
5589 {
5590 struct i40e_aqc_lldp_get_mib *mib =
5591 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5592 struct i40e_hw *hw = &pf->hw;
5593 struct i40e_dcbx_config tmp_dcbx_cfg;
5594 bool need_reconfig = false;
5595 int ret = 0;
5596 u8 type;
5597
5598 /* Not DCB capable or capability disabled */
5599 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5600 return ret;
5601
5602 /*
Ignore if event is not for Nearest Bridge */
5603 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5604 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5605 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5606 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5607 return ret;
5608
5609 /* Check MIB Type and return if event for Remote MIB update */
5610 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5611 dev_dbg(&pf->pdev->dev,
5612 "LLDP event mib type %s\n", type ? "remote" : "local");
5613 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5614 /* Update the remote cached instance and return */
5615 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5616 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5617 &hw->remote_dcbx_config);
5618 goto exit;
5619 }
5620
5621 /* Store the old configuration */
5622 tmp_dcbx_cfg = hw->local_dcbx_config;
5623
5624 /* Reset the old DCBx configuration data */
5625 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5626 /* Get updated DCBX data from firmware */
5627 ret = i40e_get_dcb_config(&pf->hw);
5628 if (ret) {
5629 dev_info(&pf->pdev->dev,
5630 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5631 i40e_stat_str(&pf->hw, ret),
5632 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5633 goto exit;
5634 }
5635
5636 /* No change detected in DCBX configs */
5637 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5638 sizeof(tmp_dcbx_cfg))) {
5639 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5640 goto exit;
5641 }
5642
5643 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5644 &hw->local_dcbx_config);
5645
5646 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5647
5648 if (!need_reconfig)
5649 goto exit;
5650
5651 /* Enable DCB tagging only when more than one TC */
5652 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5653 pf->flags |= I40E_FLAG_DCB_ENABLED;
5654 else
5655 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5656
5657 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5658 /* Reconfiguration needed; quiesce all VSIs */
5659 i40e_pf_quiesce_all_vsi(pf);
5660
5661 /* Changes in configuration; update VEB/VSI */
5662 i40e_dcb_reconfigure(pf);
5663
5664 ret = i40e_resume_port_tx(pf);
5665
5666 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5667 /* In case of error, there is no point in resuming VSIs */
5668 if (ret)
5669 goto exit;
5670
5671 /* Wait for the PF's Tx queues to be disabled */
5672 ret = i40e_pf_wait_txq_disabled(pf);
5673 if (ret) {
5674 /* Schedule PF reset to recover */
5675 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5676 i40e_service_event_schedule(pf);
5677 } else {
5678 i40e_pf_unquiesce_all_vsi(pf);
5679 }
5680
5681 exit:
5682 return ret;
5683 }
5684 #endif /* CONFIG_I40E_DCB */
5685
5686 /**
5687 * i40e_do_reset_safe - Protected reset path for userland calls.
@pf: board private structure
5689 * @reset_flags: which reset is requested
5690 *
5691 **/
5692 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5693 {
5694 rtnl_lock();
5695 i40e_do_reset(pf, reset_flags);
5696 rtnl_unlock();
5697 }
5698
5699 /**
5700 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5701 * @pf: board private structure
5702 * @e: event info posted on ARQ
5703 *
5704 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5705 * and VF queues
5706 **/
5707 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5708 struct i40e_arq_event_info *e)
5709 {
5710 struct i40e_aqc_lan_overflow *data =
5711 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5712 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5713 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5714 struct i40e_hw *hw = &pf->hw;
5715 struct i40e_vf *vf;
5716 u16 vf_id;
5717
5718 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5719 queue, qtx_ctl);
5720
5721 /* Queue belongs to VF, find the VF and issue VF reset */
5722 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5723 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5724 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5725 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5726 vf_id -= hw->func_caps.vf_base_id;
5727 vf = &pf->vf[vf_id];
5728 i40e_vc_notify_vf_reset(vf);
5729 /* Allow VF to process pending reset notification */
5730 msleep(20);
5731 i40e_reset_vf(vf, false);
5732 }
5733 }
5734
5735 /**
5736 * i40e_service_event_complete - Finish up the service event
5737 * @pf: board private structure
5738 **/
5739 static void i40e_service_event_complete(struct i40e_pf *pf)
5740 {
5741 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5742
5743 /* flush memory to make sure state is correct before next watchdog */
5744 smp_mb__before_atomic();
5745 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5746 }
5747
5748 /**
5749 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5750 * @pf: board private structure
5751 **/
5752 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5753 {
5754 u32 val, fcnt_prog;
5755
5756 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5757 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5758 return fcnt_prog;
5759 }
5760
5761 /**
5762 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5763 * @pf: board private structure
5764 **/
5765 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5766 {
5767 u32 val, fcnt_prog;
5768
5769 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5770 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5771 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5772 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5773 return fcnt_prog;
5774 }
5775
5776 /**
5777 * i40e_get_global_fd_count - Get total FD filters programmed on device
5778 * @pf: board private structure
5779 **/
5780 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5781 {
5782 u32 val, fcnt_prog;
5783
5784 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5785 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5786 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5787 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5788 return fcnt_prog;
5789 }
5790
5791 /**
5792 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
5793 * @pf: board private structure
5794 **/
5795 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5796 {
5797 struct i40e_fdir_filter *filter;
5798 u32 fcnt_prog, fcnt_avail;
5799 struct hlist_node *node;
5800
5801 if
(test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5802 return;
5803
5804 /* Check if FD SB or ATR was auto-disabled and if there is enough room
5805 * to re-enable
5806 */
5807 fcnt_prog = i40e_get_global_fd_count(pf);
5808 fcnt_avail = pf->fdir_pf_filter_count;
5809 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5810 (pf->fd_add_err == 0) ||
5811 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5812 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5813 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5814 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5815 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5816 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5817 }
5818 }
5819 /* Wait for some more space to be available to turn on ATR */
5820 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5821 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5822 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5823 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5824 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5825 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5826 }
5827 }
5828
5829 /* if hw had a problem adding a filter, delete it */
5830 if (pf->fd_inv > 0) {
5831 hlist_for_each_entry_safe(filter, node,
5832 &pf->fdir_filter_list, fdir_node) {
5833 if (filter->fd_id == pf->fd_inv) {
5834 hlist_del(&filter->fdir_node);
5835 kfree(filter);
5836 pf->fdir_pf_active_filters--;
5837 }
5838 }
5839 }
5840 }
5841
5842 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5843 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5844 /**
5845 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5846 * @pf: board private structure
5847 **/
5848 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5849 {
5850 unsigned long min_flush_time;
5851 int flush_wait_retry = 50;
5852 bool disable_atr = false;
5853 int fd_room;
5854 int reg;
5855
5856 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5857 return;
5858
5859 if (!time_after(jiffies, pf->fd_flush_timestamp +
5860 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
5861 return;
5862
5863 /* If the flush is happening too quickly and we have mostly SB rules we
5864 * should not re-enable ATR for some time.
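* (Illustrative numbers only, taken from the defines above: a flush that
* lands within I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE (30) seconds of the
* previous one, while sideband rules leave fewer than
* I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR free slots, keeps ATR disabled until
* the table pressure eases.)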
5865 */
5866 min_flush_time = pf->fd_flush_timestamp +
5867 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5868 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5869
5870 if (!(time_after(jiffies, min_flush_time)) &&
5871 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5872 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5873 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5874 disable_atr = true;
5875 }
5876
5877 pf->fd_flush_timestamp = jiffies;
5878 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5879 /* flush all filters */
5880 wr32(&pf->hw, I40E_PFQF_CTL_1,
5881 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5882 i40e_flush(&pf->hw);
5883 pf->fd_flush_cnt++;
5884 pf->fd_add_err = 0;
5885 do {
5886 /* Check FD flush status every 5-6msec */
5887 usleep_range(5000, 6000);
5888 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5889 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5890 break;
5891 } while (flush_wait_retry--);
5892 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5893 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5894 } else {
5895 /* replay sideband filters */
5896 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5897 if (!disable_atr)
5898 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5899 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5900 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5901 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5902 }
5903
5904 }
5905
5906 /**
5907 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5908 * @pf: board private structure
5909 **/
5910 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5911 {
5912 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5913 }
5914
5915 /* We can see up to 256 filter programming descriptors in transit if the
5916 * filters are being applied really fast, before we see the first
5917 * filter miss error on Rx queue 0. Accumulating enough error messages
5918 * before reacting will make sure we don't cause a flush too often.
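* (A hedged reading of the budget this implies: with roughly one
* programming descriptor outstanding per filter add, the threshold below
* lets a full burst of 256 in-flight requests fail before a flush is
* considered, rather than reacting to a single transient miss.)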
5919 */ 5920 #define I40E_MAX_FD_PROGRAM_ERROR 256 5921 5922 /** 5923 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 5924 * @pf: board private structure 5925 **/ 5926 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 5927 { 5928 5929 /* if interface is down do nothing */ 5930 if (test_bit(__I40E_DOWN, &pf->state)) 5931 return; 5932 5933 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) 5934 return; 5935 5936 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 5937 i40e_fdir_flush_and_replay(pf); 5938 5939 i40e_fdir_check_and_reenable(pf); 5940 5941 } 5942 5943 /** 5944 * i40e_vsi_link_event - notify VSI of a link event 5945 * @vsi: vsi to be notified 5946 * @link_up: link up or down 5947 **/ 5948 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 5949 { 5950 if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) 5951 return; 5952 5953 switch (vsi->type) { 5954 case I40E_VSI_MAIN: 5955 #ifdef I40E_FCOE 5956 case I40E_VSI_FCOE: 5957 #endif 5958 if (!vsi->netdev || !vsi->netdev_registered) 5959 break; 5960 5961 if (link_up) { 5962 netif_carrier_on(vsi->netdev); 5963 netif_tx_wake_all_queues(vsi->netdev); 5964 } else { 5965 netif_carrier_off(vsi->netdev); 5966 netif_tx_stop_all_queues(vsi->netdev); 5967 } 5968 break; 5969 5970 case I40E_VSI_SRIOV: 5971 case I40E_VSI_VMDQ2: 5972 case I40E_VSI_CTRL: 5973 case I40E_VSI_MIRROR: 5974 default: 5975 /* there is no notification for other VSIs */ 5976 break; 5977 } 5978 } 5979 5980 /** 5981 * i40e_veb_link_event - notify elements on the veb of a link event 5982 * @veb: veb to be notified 5983 * @link_up: link up or down 5984 **/ 5985 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 5986 { 5987 struct i40e_pf *pf; 5988 int i; 5989 5990 if (!veb || !veb->pf) 5991 return; 5992 pf = veb->pf; 5993 5994 /* depth first... */ 5995 for (i = 0; i < I40E_MAX_VEB; i++) 5996 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 5997 i40e_veb_link_event(pf->veb[i], link_up); 5998 5999 /* ... now the local VSIs */ 6000 for (i = 0; i < pf->num_alloc_vsi; i++) 6001 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 6002 i40e_vsi_link_event(pf->vsi[i], link_up); 6003 } 6004 6005 /** 6006 * i40e_link_event - Update netif_carrier status 6007 * @pf: board private structure 6008 **/ 6009 static void i40e_link_event(struct i40e_pf *pf) 6010 { 6011 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6012 u8 new_link_speed, old_link_speed; 6013 i40e_status status; 6014 bool new_link, old_link; 6015 6016 /* set this to force the get_link_status call to refresh state */ 6017 pf->hw.phy.get_link_info = true; 6018 6019 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 6020 6021 status = i40e_get_link_status(&pf->hw, &new_link); 6022 if (status) { 6023 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", 6024 status); 6025 return; 6026 } 6027 6028 old_link_speed = pf->hw.phy.link_info_old.link_speed; 6029 new_link_speed = pf->hw.phy.link_info.link_speed; 6030 6031 if (new_link == old_link && 6032 new_link_speed == old_link_speed && 6033 (test_bit(__I40E_DOWN, &vsi->state) || 6034 new_link == netif_carrier_ok(vsi->netdev))) 6035 return; 6036 6037 if (!test_bit(__I40E_DOWN, &vsi->state)) 6038 i40e_print_link_message(vsi, new_link); 6039 6040 /* Notify the base of the switch tree connected to 6041 * the link. Floating VEBs are not notified. 
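* (A floating VEB is one without an uplink to the MAC; the dispatch just
* below starts from pf->lan_veb, so floating VEBs and the VSIs behind
* them deliberately keep their previous carrier state.)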
6042 */ 6043 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 6044 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 6045 else 6046 i40e_vsi_link_event(vsi, new_link); 6047 6048 if (pf->vf) 6049 i40e_vc_notify_link_state(pf); 6050 6051 if (pf->flags & I40E_FLAG_PTP) 6052 i40e_ptp_set_increment(pf); 6053 } 6054 6055 /** 6056 * i40e_watchdog_subtask - periodic checks not using event driven response 6057 * @pf: board private structure 6058 **/ 6059 static void i40e_watchdog_subtask(struct i40e_pf *pf) 6060 { 6061 int i; 6062 6063 /* if interface is down do nothing */ 6064 if (test_bit(__I40E_DOWN, &pf->state) || 6065 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6066 return; 6067 6068 /* make sure we don't do these things too often */ 6069 if (time_before(jiffies, (pf->service_timer_previous + 6070 pf->service_timer_period))) 6071 return; 6072 pf->service_timer_previous = jiffies; 6073 6074 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) 6075 i40e_link_event(pf); 6076 6077 /* Update the stats for active netdevs so the network stack 6078 * can look at updated numbers whenever it cares to 6079 */ 6080 for (i = 0; i < pf->num_alloc_vsi; i++) 6081 if (pf->vsi[i] && pf->vsi[i]->netdev) 6082 i40e_update_stats(pf->vsi[i]); 6083 6084 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { 6085 /* Update the stats for the active switching components */ 6086 for (i = 0; i < I40E_MAX_VEB; i++) 6087 if (pf->veb[i]) 6088 i40e_update_veb_stats(pf->veb[i]); 6089 } 6090 6091 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 6092 } 6093 6094 /** 6095 * i40e_reset_subtask - Set up for resetting the device and driver 6096 * @pf: board private structure 6097 **/ 6098 static void i40e_reset_subtask(struct i40e_pf *pf) 6099 { 6100 u32 reset_flags = 0; 6101 6102 rtnl_lock(); 6103 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 6104 reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED); 6105 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 6106 } 6107 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 6108 reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED); 6109 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 6110 } 6111 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 6112 reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED); 6113 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 6114 } 6115 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 6116 reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED); 6117 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 6118 } 6119 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { 6120 reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED); 6121 clear_bit(__I40E_DOWN_REQUESTED, &pf->state); 6122 } 6123 6124 /* If there's a recovery already waiting, it takes 6125 * precedence before starting a new reset sequence. 
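* (__I40E_RESET_INTR_RECEIVED is normally set from the interrupt path
* when the hardware itself reports a reset, so it is drained here before
* any driver-requested reset flags are acted on.)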
6126 */ 6127 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 6128 i40e_handle_reset_warning(pf); 6129 goto unlock; 6130 } 6131 6132 /* If we're already down or resetting, just bail */ 6133 if (reset_flags && 6134 !test_bit(__I40E_DOWN, &pf->state) && 6135 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6136 i40e_do_reset(pf, reset_flags); 6137 6138 unlock: 6139 rtnl_unlock(); 6140 } 6141 6142 /** 6143 * i40e_handle_link_event - Handle link event 6144 * @pf: board private structure 6145 * @e: event info posted on ARQ 6146 **/ 6147 static void i40e_handle_link_event(struct i40e_pf *pf, 6148 struct i40e_arq_event_info *e) 6149 { 6150 struct i40e_hw *hw = &pf->hw; 6151 struct i40e_aqc_get_link_status *status = 6152 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 6153 6154 /* save off old link status information */ 6155 hw->phy.link_info_old = hw->phy.link_info; 6156 6157 /* Do a new status request to re-enable LSE reporting 6158 * and load new status information into the hw struct 6159 * This completely ignores any state information 6160 * in the ARQ event info, instead choosing to always 6161 * issue the AQ update link status command. 6162 */ 6163 i40e_link_event(pf); 6164 6165 /* check for unqualified module, if link is down */ 6166 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 6167 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 6168 (!(status->link_info & I40E_AQ_LINK_UP))) 6169 dev_err(&pf->pdev->dev, 6170 "The driver failed to link because an unqualified module was detected.\n"); 6171 } 6172 6173 /** 6174 * i40e_clean_adminq_subtask - Clean the AdminQ rings 6175 * @pf: board private structure 6176 **/ 6177 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 6178 { 6179 struct i40e_arq_event_info event; 6180 struct i40e_hw *hw = &pf->hw; 6181 u16 pending, i = 0; 6182 i40e_status ret; 6183 u16 opcode; 6184 u32 oldval; 6185 u32 val; 6186 6187 /* Do not run clean AQ when PF reset fails */ 6188 if (test_bit(__I40E_RESET_FAILED, &pf->state)) 6189 return; 6190 6191 /* check for error indications */ 6192 val = rd32(&pf->hw, pf->hw.aq.arq.len); 6193 oldval = val; 6194 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 6195 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 6196 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 6197 } 6198 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 6199 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 6200 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 6201 } 6202 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 6203 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 6204 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 6205 } 6206 if (oldval != val) 6207 wr32(&pf->hw, pf->hw.aq.arq.len, val); 6208 6209 val = rd32(&pf->hw, pf->hw.aq.asq.len); 6210 oldval = val; 6211 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 6212 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 6213 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 6214 } 6215 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 6216 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 6217 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 6218 } 6219 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 6220 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 6221 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 6222 } 6223 if (oldval != val) 6224 wr32(&pf->hw, pf->hw.aq.asq.len, val); 6225 6226 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 6227 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 6228 if (!event.msg_buf) 6229 return; 6230 6231 do { 6232 ret = i40e_clean_arq_element(hw, &event, &pending); 6233 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 6234 break; 6235 else if (ret) 
{ 6236 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 6237 break; 6238 } 6239 6240 opcode = le16_to_cpu(event.desc.opcode); 6241 switch (opcode) { 6242 6243 case i40e_aqc_opc_get_link_status: 6244 i40e_handle_link_event(pf, &event); 6245 break; 6246 case i40e_aqc_opc_send_msg_to_pf: 6247 ret = i40e_vc_process_vf_msg(pf, 6248 le16_to_cpu(event.desc.retval), 6249 le32_to_cpu(event.desc.cookie_high), 6250 le32_to_cpu(event.desc.cookie_low), 6251 event.msg_buf, 6252 event.msg_len); 6253 break; 6254 case i40e_aqc_opc_lldp_update_mib: 6255 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 6256 #ifdef CONFIG_I40E_DCB 6257 rtnl_lock(); 6258 ret = i40e_handle_lldp_event(pf, &event); 6259 rtnl_unlock(); 6260 #endif /* CONFIG_I40E_DCB */ 6261 break; 6262 case i40e_aqc_opc_event_lan_overflow: 6263 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 6264 i40e_handle_lan_overflow_event(pf, &event); 6265 break; 6266 case i40e_aqc_opc_send_msg_to_peer: 6267 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 6268 break; 6269 case i40e_aqc_opc_nvm_erase: 6270 case i40e_aqc_opc_nvm_update: 6271 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); 6272 break; 6273 default: 6274 dev_info(&pf->pdev->dev, 6275 "ARQ Error: Unknown event 0x%04x received\n", 6276 opcode); 6277 break; 6278 } 6279 } while (pending && (i++ < pf->adminq_work_limit)); 6280 6281 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 6282 /* re-enable Admin queue interrupt cause */ 6283 val = rd32(hw, I40E_PFINT_ICR0_ENA); 6284 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 6285 wr32(hw, I40E_PFINT_ICR0_ENA, val); 6286 i40e_flush(hw); 6287 6288 kfree(event.msg_buf); 6289 } 6290 6291 /** 6292 * i40e_verify_eeprom - make sure eeprom is good to use 6293 * @pf: board private structure 6294 **/ 6295 static void i40e_verify_eeprom(struct i40e_pf *pf) 6296 { 6297 int err; 6298 6299 err = i40e_diag_eeprom_test(&pf->hw); 6300 if (err) { 6301 /* retry in case of garbage read */ 6302 err = i40e_diag_eeprom_test(&pf->hw); 6303 if (err) { 6304 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 6305 err); 6306 set_bit(__I40E_BAD_EEPROM, &pf->state); 6307 } 6308 } 6309 6310 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 6311 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 6312 clear_bit(__I40E_BAD_EEPROM, &pf->state); 6313 } 6314 } 6315 6316 /** 6317 * i40e_enable_pf_switch_lb 6318 * @pf: pointer to the PF structure 6319 * 6320 * enable switch loop back or die - no point in a return value 6321 **/ 6322 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 6323 { 6324 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6325 struct i40e_vsi_context ctxt; 6326 int ret; 6327 6328 ctxt.seid = pf->main_vsi_seid; 6329 ctxt.pf_num = pf->hw.pf_id; 6330 ctxt.vf_num = 0; 6331 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6332 if (ret) { 6333 dev_info(&pf->pdev->dev, 6334 "couldn't get PF vsi config, err %s aq_err %s\n", 6335 i40e_stat_str(&pf->hw, ret), 6336 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6337 return; 6338 } 6339 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6340 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6341 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6342 6343 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6344 if (ret) { 6345 dev_info(&pf->pdev->dev, 6346 "update vsi switch failed, err %s aq_err %s\n", 6347 i40e_stat_str(&pf->hw, ret), 6348 i40e_aq_str(&pf->hw, 
pf->hw.aq.asq_last_status)); 6349 } 6350 } 6351 6352 /** 6353 * i40e_disable_pf_switch_lb 6354 * @pf: pointer to the PF structure 6355 * 6356 * disable switch loop back or die - no point in a return value 6357 **/ 6358 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 6359 { 6360 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6361 struct i40e_vsi_context ctxt; 6362 int ret; 6363 6364 ctxt.seid = pf->main_vsi_seid; 6365 ctxt.pf_num = pf->hw.pf_id; 6366 ctxt.vf_num = 0; 6367 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6368 if (ret) { 6369 dev_info(&pf->pdev->dev, 6370 "couldn't get PF vsi config, err %s aq_err %s\n", 6371 i40e_stat_str(&pf->hw, ret), 6372 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6373 return; 6374 } 6375 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6376 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6377 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6378 6379 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6380 if (ret) { 6381 dev_info(&pf->pdev->dev, 6382 "update vsi switch failed, err %s aq_err %s\n", 6383 i40e_stat_str(&pf->hw, ret), 6384 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6385 } 6386 } 6387 6388 /** 6389 * i40e_config_bridge_mode - Configure the HW bridge mode 6390 * @veb: pointer to the bridge instance 6391 * 6392 * Configure the loop back mode for the LAN VSI that is downlink to the 6393 * specified HW bridge instance. It is expected this function is called 6394 * when a new HW bridge is instantiated. 6395 **/ 6396 static void i40e_config_bridge_mode(struct i40e_veb *veb) 6397 { 6398 struct i40e_pf *pf = veb->pf; 6399 6400 if (pf->hw.debug_mask & I40E_DEBUG_LAN) 6401 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 6402 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 6403 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 6404 i40e_disable_pf_switch_lb(pf); 6405 else 6406 i40e_enable_pf_switch_lb(pf); 6407 } 6408 6409 /** 6410 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 6411 * @veb: pointer to the VEB instance 6412 * 6413 * This is a recursive function that first builds the attached VSIs then 6414 * recurses in to build the next layer of VEB. We track the connections 6415 * through our own index numbers because the seid's from the HW could 6416 * change across the reset. 
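*
* Illustrative call pattern, mirrored from the rebuild path later in this
* file rather than a new API: after a reset the caller walks pf->veb[]
* and reconstitutes from the VEB attached to the MAC downward, e.g.
*
*	for (v = 0; v < I40E_MAX_VEB; v++)
*		if (pf->veb[v] && pf->veb[v]->uplink_seid == pf->mac_seid)
*			ret = i40e_reconstitute_veb(pf->veb[v]);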
6417 **/ 6418 static int i40e_reconstitute_veb(struct i40e_veb *veb) 6419 { 6420 struct i40e_vsi *ctl_vsi = NULL; 6421 struct i40e_pf *pf = veb->pf; 6422 int v, veb_idx; 6423 int ret; 6424 6425 /* build VSI that owns this VEB, temporarily attached to base VEB */ 6426 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 6427 if (pf->vsi[v] && 6428 pf->vsi[v]->veb_idx == veb->idx && 6429 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 6430 ctl_vsi = pf->vsi[v]; 6431 break; 6432 } 6433 } 6434 if (!ctl_vsi) { 6435 dev_info(&pf->pdev->dev, 6436 "missing owner VSI for veb_idx %d\n", veb->idx); 6437 ret = -ENOENT; 6438 goto end_reconstitute; 6439 } 6440 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 6441 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 6442 ret = i40e_add_vsi(ctl_vsi); 6443 if (ret) { 6444 dev_info(&pf->pdev->dev, 6445 "rebuild of veb_idx %d owner VSI failed: %d\n", 6446 veb->idx, ret); 6447 goto end_reconstitute; 6448 } 6449 i40e_vsi_reset_stats(ctl_vsi); 6450 6451 /* create the VEB in the switch and move the VSI onto the VEB */ 6452 ret = i40e_add_veb(veb, ctl_vsi); 6453 if (ret) 6454 goto end_reconstitute; 6455 6456 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 6457 veb->bridge_mode = BRIDGE_MODE_VEB; 6458 else 6459 veb->bridge_mode = BRIDGE_MODE_VEPA; 6460 i40e_config_bridge_mode(veb); 6461 6462 /* create the remaining VSIs attached to this VEB */ 6463 for (v = 0; v < pf->num_alloc_vsi; v++) { 6464 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 6465 continue; 6466 6467 if (pf->vsi[v]->veb_idx == veb->idx) { 6468 struct i40e_vsi *vsi = pf->vsi[v]; 6469 6470 vsi->uplink_seid = veb->seid; 6471 ret = i40e_add_vsi(vsi); 6472 if (ret) { 6473 dev_info(&pf->pdev->dev, 6474 "rebuild of vsi_idx %d failed: %d\n", 6475 v, ret); 6476 goto end_reconstitute; 6477 } 6478 i40e_vsi_reset_stats(vsi); 6479 } 6480 } 6481 6482 /* create any VEBs attached to this VEB - RECURSION */ 6483 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 6484 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 6485 pf->veb[veb_idx]->uplink_seid = veb->seid; 6486 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 6487 if (ret) 6488 break; 6489 } 6490 } 6491 6492 end_reconstitute: 6493 return ret; 6494 } 6495 6496 /** 6497 * i40e_get_capabilities - get info about the HW 6498 * @pf: the PF struct 6499 **/ 6500 static int i40e_get_capabilities(struct i40e_pf *pf) 6501 { 6502 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 6503 u16 data_size; 6504 int buf_len; 6505 int err; 6506 6507 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 6508 do { 6509 cap_buf = kzalloc(buf_len, GFP_KERNEL); 6510 if (!cap_buf) 6511 return -ENOMEM; 6512 6513 /* this loads the data into the hw struct for us */ 6514 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 6515 &data_size, 6516 i40e_aqc_opc_list_func_capabilities, 6517 NULL); 6518 /* data loaded, buffer no longer needed */ 6519 kfree(cap_buf); 6520 6521 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 6522 /* retry with a larger buffer */ 6523 buf_len = data_size; 6524 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 6525 dev_info(&pf->pdev->dev, 6526 "capability discovery failed, err %s aq_err %s\n", 6527 i40e_stat_str(&pf->hw, err), 6528 i40e_aq_str(&pf->hw, 6529 pf->hw.aq.asq_last_status)); 6530 return -ENODEV; 6531 } 6532 } while (err); 6533 6534 if (pf->hw.debug_mask & I40E_DEBUG_USER) 6535 dev_info(&pf->pdev->dev, 6536 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 6537 
pf->hw.pf_id, pf->hw.func_caps.num_vfs, 6538 pf->hw.func_caps.num_msix_vectors, 6539 pf->hw.func_caps.num_msix_vectors_vf, 6540 pf->hw.func_caps.fd_filters_guaranteed, 6541 pf->hw.func_caps.fd_filters_best_effort, 6542 pf->hw.func_caps.num_tx_qp, 6543 pf->hw.func_caps.num_vsis); 6544 6545 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 6546 + pf->hw.func_caps.num_vfs) 6547 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 6548 dev_info(&pf->pdev->dev, 6549 "got num_vsis %d, setting num_vsis to %d\n", 6550 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 6551 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 6552 } 6553 6554 return 0; 6555 } 6556 6557 static int i40e_vsi_clear(struct i40e_vsi *vsi); 6558 6559 /** 6560 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 6561 * @pf: board private structure 6562 **/ 6563 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 6564 { 6565 struct i40e_vsi *vsi; 6566 int i; 6567 6568 /* quick workaround for an NVM issue that leaves a critical register 6569 * uninitialized 6570 */ 6571 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 6572 static const u32 hkey[] = { 6573 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 6574 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 6575 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 6576 0x95b3a76d}; 6577 6578 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 6579 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 6580 } 6581 6582 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6583 return; 6584 6585 /* find existing VSI and see if it needs configuring */ 6586 vsi = NULL; 6587 for (i = 0; i < pf->num_alloc_vsi; i++) { 6588 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6589 vsi = pf->vsi[i]; 6590 break; 6591 } 6592 } 6593 6594 /* create a new VSI if none exists */ 6595 if (!vsi) { 6596 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 6597 pf->vsi[pf->lan_vsi]->seid, 0); 6598 if (!vsi) { 6599 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 6600 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6601 return; 6602 } 6603 } 6604 6605 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 6606 } 6607 6608 /** 6609 * i40e_fdir_teardown - release the Flow Director resources 6610 * @pf: board private structure 6611 **/ 6612 static void i40e_fdir_teardown(struct i40e_pf *pf) 6613 { 6614 int i; 6615 6616 i40e_fdir_filter_exit(pf); 6617 for (i = 0; i < pf->num_alloc_vsi; i++) { 6618 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6619 i40e_vsi_release(pf->vsi[i]); 6620 break; 6621 } 6622 } 6623 } 6624 6625 /** 6626 * i40e_prep_for_reset - prep for the core to reset 6627 * @pf: board private structure 6628 * 6629 * Close up the VFs and other things in prep for PF Reset. 
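*
* A minimal usage sketch, mirroring i40e_handle_reset_warning() further
* down in this file (shown for orientation only):
*
*	i40e_prep_for_reset(pf);
*	i40e_reset_and_rebuild(pf, false);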
6630 **/ 6631 static void i40e_prep_for_reset(struct i40e_pf *pf) 6632 { 6633 struct i40e_hw *hw = &pf->hw; 6634 i40e_status ret = 0; 6635 u32 v; 6636 6637 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 6638 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 6639 return; 6640 6641 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 6642 6643 /* quiesce the VSIs and their queues that are not already DOWN */ 6644 i40e_pf_quiesce_all_vsi(pf); 6645 6646 for (v = 0; v < pf->num_alloc_vsi; v++) { 6647 if (pf->vsi[v]) 6648 pf->vsi[v]->seid = 0; 6649 } 6650 6651 i40e_shutdown_adminq(&pf->hw); 6652 6653 /* call shutdown HMC */ 6654 if (hw->hmc.hmc_obj) { 6655 ret = i40e_shutdown_lan_hmc(hw); 6656 if (ret) 6657 dev_warn(&pf->pdev->dev, 6658 "shutdown_lan_hmc failed: %d\n", ret); 6659 } 6660 } 6661 6662 /** 6663 * i40e_send_version - update firmware with driver version 6664 * @pf: PF struct 6665 */ 6666 static void i40e_send_version(struct i40e_pf *pf) 6667 { 6668 struct i40e_driver_version dv; 6669 6670 dv.major_version = DRV_VERSION_MAJOR; 6671 dv.minor_version = DRV_VERSION_MINOR; 6672 dv.build_version = DRV_VERSION_BUILD; 6673 dv.subbuild_version = 0; 6674 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); 6675 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 6676 } 6677 6678 /** 6679 * i40e_reset_and_rebuild - reset and rebuild using a saved config 6680 * @pf: board private structure 6681 * @reinit: if the Main VSI needs to re-initialized. 6682 **/ 6683 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) 6684 { 6685 struct i40e_hw *hw = &pf->hw; 6686 u8 set_fc_aq_fail = 0; 6687 i40e_status ret; 6688 u32 v; 6689 6690 /* Now we wait for GRST to settle out. 6691 * We don't have to delete the VEBs or VSIs from the hw switch 6692 * because the reset will make them disappear. 
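* (i40e_pf_reset() below is expected to poll the hardware until any
* global reset has completed before triggering the PF-level reset, so
* the stale switch elements are already gone once it returns.)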
6693 */
6694 ret = i40e_pf_reset(hw);
6695 if (ret) {
6696 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6697 set_bit(__I40E_RESET_FAILED, &pf->state);
6698 goto clear_recovery;
6699 }
6700 pf->pfr_count++;
6701
6702 if (test_bit(__I40E_DOWN, &pf->state))
6703 goto clear_recovery;
6704 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6705
6706 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6707 ret = i40e_init_adminq(&pf->hw);
6708 if (ret) {
6709 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6710 i40e_stat_str(&pf->hw, ret),
6711 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6712 goto clear_recovery;
6713 }
6714
6715 /* re-verify the eeprom if we just had an EMP reset */
6716 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6717 i40e_verify_eeprom(pf);
6718
6719 i40e_clear_pxe_mode(hw);
6720 ret = i40e_get_capabilities(pf);
6721 if (ret)
6722 goto end_core_reset;
6723
6724 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6725 hw->func_caps.num_rx_qp,
6726 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6727 if (ret) {
6728 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6729 goto end_core_reset;
6730 }
6731 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6732 if (ret) {
6733 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6734 goto end_core_reset;
6735 }
6736
6737 #ifdef CONFIG_I40E_DCB
6738 ret = i40e_init_pf_dcb(pf);
6739 if (ret) {
6740 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6741 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6742 /* Continue without DCB enabled */
6743 }
6744 #endif /* CONFIG_I40E_DCB */
6745 #ifdef I40E_FCOE
6746 i40e_init_pf_fcoe(pf);
6747
6748 #endif
6749 /* do basic switch setup */
6750 ret = i40e_setup_pf_switch(pf, reinit);
6751 if (ret)
6752 goto end_core_reset;
6753
6754 /* driver is only interested in link up/down and module qualification
6755 * reports from firmware
6756 */
6757 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6758 I40E_AQ_EVENT_LINK_UPDOWN |
6759 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6760 if (ret)
6761 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6762 i40e_stat_str(&pf->hw, ret),
6763 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6764
6765 /* make sure our flow control settings are restored */
6766 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6767 if (ret)
6768 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6769 i40e_stat_str(&pf->hw, ret),
6770 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6771
6772 /* Rebuild the VSIs and VEBs that existed before reset.
6773 * They are still in our local switch element arrays, so only
6774 * need to rebuild the switch model in the HW.
6775 *
6776 * If there were VEBs but the reconstitution failed, we'll try
6777 * to recover minimal use by getting the basic PF VSI working.
6778 */
6779 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6780 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6781 /* find the one VEB connected to the MAC, and find orphans */
6782 for (v = 0; v < I40E_MAX_VEB; v++) {
6783 if (!pf->veb[v])
6784 continue;
6785
6786 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6787 pf->veb[v]->uplink_seid == 0) {
6788 ret = i40e_reconstitute_veb(pf->veb[v]);
6789
6790 if (!ret)
6791 continue;
6792
6793 /* If Main VEB failed, we're in deep doodoo,
6794 * so give up rebuilding the switch and set up
6795 * for minimal rebuild of PF VSI.
6796 * If orphan failed, we'll report the error 6797 * but try to keep going. 6798 */ 6799 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 6800 dev_info(&pf->pdev->dev, 6801 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 6802 ret); 6803 pf->vsi[pf->lan_vsi]->uplink_seid 6804 = pf->mac_seid; 6805 break; 6806 } else if (pf->veb[v]->uplink_seid == 0) { 6807 dev_info(&pf->pdev->dev, 6808 "rebuild of orphan VEB failed: %d\n", 6809 ret); 6810 } 6811 } 6812 } 6813 } 6814 6815 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 6816 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 6817 /* no VEB, so rebuild only the Main VSI */ 6818 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 6819 if (ret) { 6820 dev_info(&pf->pdev->dev, 6821 "rebuild of Main VSI failed: %d\n", ret); 6822 goto end_core_reset; 6823 } 6824 } 6825 6826 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 6827 (pf->hw.aq.fw_maj_ver < 4)) { 6828 msleep(75); 6829 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 6830 if (ret) 6831 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 6832 i40e_stat_str(&pf->hw, ret), 6833 i40e_aq_str(&pf->hw, 6834 pf->hw.aq.asq_last_status)); 6835 } 6836 /* reinit the misc interrupt */ 6837 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6838 ret = i40e_setup_misc_vector(pf); 6839 6840 /* Add a filter to drop all Flow control frames from any VSI from being 6841 * transmitted. By doing so we stop a malicious VF from sending out 6842 * PAUSE or PFC frames and potentially controlling traffic for other 6843 * PF/VF VSIs. 6844 * The FW can still send Flow control frames if enabled. 6845 */ 6846 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, 6847 pf->main_vsi_seid); 6848 6849 /* restart the VSIs that were rebuilt and running before the reset */ 6850 i40e_pf_unquiesce_all_vsi(pf); 6851 6852 if (pf->num_alloc_vfs) { 6853 for (v = 0; v < pf->num_alloc_vfs; v++) 6854 i40e_reset_vf(&pf->vf[v], true); 6855 } 6856 6857 /* tell the firmware that we're starting */ 6858 i40e_send_version(pf); 6859 6860 end_core_reset: 6861 clear_bit(__I40E_RESET_FAILED, &pf->state); 6862 clear_recovery: 6863 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 6864 } 6865 6866 /** 6867 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 6868 * @pf: board private structure 6869 * 6870 * Close up the VFs and other things in prep for a Core Reset, 6871 * then get ready to rebuild the world. 
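*
* Note: the reset subtask above invokes this under rtnl_lock once
* __I40E_RESET_INTR_RECEIVED is observed, so no extra locking is taken
* here.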
6872 **/ 6873 static void i40e_handle_reset_warning(struct i40e_pf *pf) 6874 { 6875 i40e_prep_for_reset(pf); 6876 i40e_reset_and_rebuild(pf, false); 6877 } 6878 6879 /** 6880 * i40e_handle_mdd_event 6881 * @pf: pointer to the PF structure 6882 * 6883 * Called from the MDD irq handler to identify possibly malicious vfs 6884 **/ 6885 static void i40e_handle_mdd_event(struct i40e_pf *pf) 6886 { 6887 struct i40e_hw *hw = &pf->hw; 6888 bool mdd_detected = false; 6889 bool pf_mdd_detected = false; 6890 struct i40e_vf *vf; 6891 u32 reg; 6892 int i; 6893 6894 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 6895 return; 6896 6897 /* find what triggered the MDD event */ 6898 reg = rd32(hw, I40E_GL_MDET_TX); 6899 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 6900 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 6901 I40E_GL_MDET_TX_PF_NUM_SHIFT; 6902 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 6903 I40E_GL_MDET_TX_VF_NUM_SHIFT; 6904 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 6905 I40E_GL_MDET_TX_EVENT_SHIFT; 6906 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6907 I40E_GL_MDET_TX_QUEUE_SHIFT) - 6908 pf->hw.func_caps.base_queue; 6909 if (netif_msg_tx_err(pf)) 6910 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 6911 event, queue, pf_num, vf_num); 6912 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 6913 mdd_detected = true; 6914 } 6915 reg = rd32(hw, I40E_GL_MDET_RX); 6916 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 6917 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 6918 I40E_GL_MDET_RX_FUNCTION_SHIFT; 6919 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 6920 I40E_GL_MDET_RX_EVENT_SHIFT; 6921 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6922 I40E_GL_MDET_RX_QUEUE_SHIFT) - 6923 pf->hw.func_caps.base_queue; 6924 if (netif_msg_rx_err(pf)) 6925 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 6926 event, queue, func); 6927 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 6928 mdd_detected = true; 6929 } 6930 6931 if (mdd_detected) { 6932 reg = rd32(hw, I40E_PF_MDET_TX); 6933 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 6934 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 6935 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 6936 pf_mdd_detected = true; 6937 } 6938 reg = rd32(hw, I40E_PF_MDET_RX); 6939 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 6940 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 6941 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 6942 pf_mdd_detected = true; 6943 } 6944 /* Queue belongs to the PF, initiate a reset */ 6945 if (pf_mdd_detected) { 6946 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 6947 i40e_service_event_schedule(pf); 6948 } 6949 } 6950 6951 /* see if one of the VFs needs its hand slapped */ 6952 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 6953 vf = &(pf->vf[i]); 6954 reg = rd32(hw, I40E_VP_MDET_TX(i)); 6955 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 6956 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 6957 vf->num_mdd_events++; 6958 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 6959 i); 6960 } 6961 6962 reg = rd32(hw, I40E_VP_MDET_RX(i)); 6963 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 6964 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 6965 vf->num_mdd_events++; 6966 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 6967 i); 6968 } 6969 6970 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 6971 dev_info(&pf->pdev->dev, 6972 "Too many MDD events on VF %d, disabled\n", i); 6973 
dev_info(&pf->pdev->dev, 6974 "Use PF Control I/F to re-enable the VF\n"); 6975 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 6976 } 6977 } 6978 6979 /* re-enable mdd interrupt cause */ 6980 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 6981 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 6982 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 6983 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 6984 i40e_flush(hw); 6985 } 6986 6987 #ifdef CONFIG_I40E_VXLAN 6988 /** 6989 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW 6990 * @pf: board private structure 6991 **/ 6992 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) 6993 { 6994 struct i40e_hw *hw = &pf->hw; 6995 i40e_status ret; 6996 __be16 port; 6997 int i; 6998 6999 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) 7000 return; 7001 7002 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; 7003 7004 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 7005 if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { 7006 pf->pending_vxlan_bitmap &= ~BIT_ULL(i); 7007 port = pf->vxlan_ports[i]; 7008 if (port) 7009 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), 7010 I40E_AQC_TUNNEL_TYPE_VXLAN, 7011 NULL, NULL); 7012 else 7013 ret = i40e_aq_del_udp_tunnel(hw, i, NULL); 7014 7015 if (ret) { 7016 dev_info(&pf->pdev->dev, 7017 "%s vxlan port %d, index %d failed, err %s aq_err %s\n", 7018 port ? "add" : "delete", 7019 ntohs(port), i, 7020 i40e_stat_str(&pf->hw, ret), 7021 i40e_aq_str(&pf->hw, 7022 pf->hw.aq.asq_last_status)); 7023 pf->vxlan_ports[i] = 0; 7024 } 7025 } 7026 } 7027 } 7028 7029 #endif 7030 /** 7031 * i40e_service_task - Run the driver's async subtasks 7032 * @work: pointer to work_struct containing our data 7033 **/ 7034 static void i40e_service_task(struct work_struct *work) 7035 { 7036 struct i40e_pf *pf = container_of(work, 7037 struct i40e_pf, 7038 service_task); 7039 unsigned long start_time = jiffies; 7040 7041 /* don't bother with service tasks if a reset is in progress */ 7042 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7043 i40e_service_event_complete(pf); 7044 return; 7045 } 7046 7047 i40e_detect_recover_hung(pf); 7048 i40e_reset_subtask(pf); 7049 i40e_handle_mdd_event(pf); 7050 i40e_vc_process_vflr_event(pf); 7051 i40e_watchdog_subtask(pf); 7052 i40e_fdir_reinit_subtask(pf); 7053 i40e_sync_filters_subtask(pf); 7054 #ifdef CONFIG_I40E_VXLAN 7055 i40e_sync_vxlan_filters_subtask(pf); 7056 #endif 7057 i40e_clean_adminq_subtask(pf); 7058 7059 i40e_service_event_complete(pf); 7060 7061 /* If the tasks have taken longer than one timer cycle or there 7062 * is more work to be done, reschedule the service task now 7063 * rather than wait for the timer to tick again. 
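* (pf->service_timer_period is on the order of one second, so this early
* reschedule mainly matters when the subtasks ran long or new AdminQ,
* MDD, or VFLR events arrived while the task was executing.)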
7064 */
7065 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7066 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
7067 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
7068 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7069 i40e_service_event_schedule(pf);
7070 }
7071
7072 /**
7073 * i40e_service_timer - timer callback
7074 * @data: pointer to PF struct
7075 **/
7076 static void i40e_service_timer(unsigned long data)
7077 {
7078 struct i40e_pf *pf = (struct i40e_pf *)data;
7079
7080 mod_timer(&pf->service_timer,
7081 round_jiffies(jiffies + pf->service_timer_period));
7082 i40e_service_event_schedule(pf);
7083 }
7084
7085 /**
7086 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7087 * @vsi: the VSI being configured
7088 **/
7089 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7090 {
7091 struct i40e_pf *pf = vsi->back;
7092
7093 switch (vsi->type) {
7094 case I40E_VSI_MAIN:
7095 vsi->alloc_queue_pairs = pf->num_lan_qps;
7096 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7097 I40E_REQ_DESCRIPTOR_MULTIPLE);
7098 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7099 vsi->num_q_vectors = pf->num_lan_msix;
7100 else
7101 vsi->num_q_vectors = 1;
7102
7103 break;
7104
7105 case I40E_VSI_FDIR:
7106 vsi->alloc_queue_pairs = 1;
7107 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7108 I40E_REQ_DESCRIPTOR_MULTIPLE);
7109 vsi->num_q_vectors = 1;
7110 break;
7111
7112 case I40E_VSI_VMDQ2:
7113 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7114 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7115 I40E_REQ_DESCRIPTOR_MULTIPLE);
7116 vsi->num_q_vectors = pf->num_vmdq_msix;
7117 break;
7118
7119 case I40E_VSI_SRIOV:
7120 vsi->alloc_queue_pairs = pf->num_vf_qps;
7121 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7122 I40E_REQ_DESCRIPTOR_MULTIPLE);
7123 break;
7124
7125 #ifdef I40E_FCOE
7126 case I40E_VSI_FCOE:
7127 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7128 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7129 I40E_REQ_DESCRIPTOR_MULTIPLE);
7130 vsi->num_q_vectors = pf->num_fcoe_msix;
7131 break;
7132
7133 #endif /* I40E_FCOE */
7134 default:
7135 WARN_ON(1);
7136 return -ENODATA;
7137 }
7138
7139 return 0;
7140 }
7141
7142 /**
7143 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
7144 * @vsi: VSI pointer
7145 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7146 *
7147 * On error: returns error code (negative)
7148 * On success: returns 0
7149 **/
7150 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7151 {
7152 int size;
7153 int ret = 0;
7154
7155 /* allocate memory for both Tx and Rx ring pointers */
7156 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7157 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7158 if (!vsi->tx_rings)
7159 return -ENOMEM;
7160 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7161
7162 if (alloc_qvectors) {
7163 /* allocate memory for q_vector pointers */
7164 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7165 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7166 if (!vsi->q_vectors) {
7167 ret = -ENOMEM;
7168 goto err_vectors;
7169 }
7170 }
7171 return ret;
7172
7173 err_vectors:
7174 kfree(vsi->tx_rings);
7175 return ret;
7176 }
7177
7178 /**
7179 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7180 * @pf: board private structure
7181 * @type: type of VSI
7182 *
7183 * On error: returns error code (negative)
7184 * On success: returns vsi index in PF (positive)
7185 **/
7186 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7187 {
7188 int ret = -ENODEV;
7189 struct i40e_vsi *vsi;
7190 int vsi_idx;
7191 int i;
7192
7193 /* Need to protect the allocation of the VSIs at the PF level */
7194 mutex_lock(&pf->switch_mutex);
7195
7196 /* VSI list may be fragmented if VSI creation/destruction has
7197 * been happening. We can afford to do a quick scan to look
7198 * for any free VSIs in the list.
7199 *
7200 * find next empty vsi slot, looping back around if necessary
7201 */
7202 i = pf->next_vsi;
7203 while (i < pf->num_alloc_vsi && pf->vsi[i])
7204 i++;
7205 if (i >= pf->num_alloc_vsi) {
7206 i = 0;
7207 while (i < pf->next_vsi && pf->vsi[i])
7208 i++;
7209 }
7210
7211 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7212 vsi_idx = i; /* Found one! */
7213 } else {
7214 ret = -ENODEV;
7215 goto unlock_pf; /* out of VSI slots! */
7216 }
7217 pf->next_vsi = ++i;
7218
7219 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7220 if (!vsi) {
7221 ret = -ENOMEM;
7222 goto unlock_pf;
7223 }
7224 vsi->type = type;
7225 vsi->back = pf;
7226 set_bit(__I40E_DOWN, &vsi->state);
7227 vsi->flags = 0;
7228 vsi->idx = vsi_idx;
7229 vsi->rx_itr_setting = pf->rx_itr_default;
7230 vsi->tx_itr_setting = pf->tx_itr_default;
7231 vsi->int_rate_limit = 0;
7232 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7233 pf->rss_table_size : 64;
7234 vsi->netdev_registered = false;
7235 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7236 INIT_LIST_HEAD(&vsi->mac_filter_list);
7237 vsi->irqs_ready = false;
7238
7239 ret = i40e_set_num_rings_in_vsi(vsi);
7240 if (ret)
7241 goto err_rings;
7242
7243 ret = i40e_vsi_alloc_arrays(vsi, true);
7244 if (ret)
7245 goto err_rings;
7246
7247 /* Setup default MSIX irq handler for VSI */
7248 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7249
7250 /* Initialize VSI lock */
7251 spin_lock_init(&vsi->mac_filter_list_lock);
7252 pf->vsi[vsi_idx] = vsi;
7253 ret = vsi_idx;
7254 goto unlock_pf;
7255
7256 err_rings:
7257 pf->next_vsi = i - 1;
7258 kfree(vsi);
7259 unlock_pf:
7260 mutex_unlock(&pf->switch_mutex);
7261 return ret;
7262 }
7263
7264 /**
7265 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7266 * @vsi: VSI pointer
7267 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7268 *
7269 * Frees the Tx/Rx ring pointer array and, optionally, the q_vector
7270 * pointer array. There is no return value.
7271 **/
7272 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7273 {
7274 /* free the ring and vector containers */
7275 if (free_qvectors) {
7276 kfree(vsi->q_vectors);
7277 vsi->q_vectors = NULL;
7278 }
7279 kfree(vsi->tx_rings);
7280 vsi->tx_rings = NULL;
7281 vsi->rx_rings = NULL;
7282 }
7283
7284 /**
7285 * i40e_vsi_clear - Deallocate the VSI provided
7286 * @vsi: the VSI being un-configured
7287 **/
7288 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7289 {
7290 struct i40e_pf *pf;
7291
7292 if (!vsi)
7293 return 0;
7294
7295 if (!vsi->back)
7296 goto free_vsi;
7297 pf = vsi->back;
7298
7299 mutex_lock(&pf->switch_mutex);
7300 if (!pf->vsi[vsi->idx]) {
7301 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7302 vsi->idx, vsi->idx, vsi, vsi->type);
7303 goto unlock_vsi;
7304 }
7305
7306 if (pf->vsi[vsi->idx] != vsi) {
7307 dev_err(&pf->pdev->dev,
7308 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7309 pf->vsi[vsi->idx]->idx,
7310 pf->vsi[vsi->idx],
7311 pf->vsi[vsi->idx]->type,
7312 vsi->idx, vsi, vsi->type);
7313 goto unlock_vsi;
7314 }
7315
7316 /* updates the PF for this cleared vsi */
7317 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7318 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7319
7320 i40e_vsi_free_arrays(vsi, true);
7321
7322 pf->vsi[vsi->idx] = NULL;
7323 if (vsi->idx < pf->next_vsi)
7324 pf->next_vsi = vsi->idx;
7325
7326 unlock_vsi:
7327 mutex_unlock(&pf->switch_mutex);
7328 free_vsi:
7329 kfree(vsi);
7330
7331 return 0;
7332 }
7333
7334 /**
7335 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7336 * @vsi: the VSI being cleaned
7337 **/
7338 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7339 {
7340 int i;
7341
7342 if (vsi->tx_rings && vsi->tx_rings[0]) {
7343 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7344 kfree_rcu(vsi->tx_rings[i], rcu);
7345 vsi->tx_rings[i] = NULL;
7346 vsi->rx_rings[i] = NULL;
7347 }
7348 }
7349 }
7350
7351 /**
7352 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7353 * @vsi: the VSI being configured
7354 **/
7355 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7356 {
7357 struct i40e_ring *tx_ring, *rx_ring;
7358 struct i40e_pf *pf = vsi->back;
7359 int i;
7360
7361 /* Set basic values in the rings to be used later during open() */
7362 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7363 /* allocate space for both Tx and Rx in one shot */
7364 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7365 if (!tx_ring)
7366 goto err_out;
7367
7368 tx_ring->queue_index = i;
7369 tx_ring->reg_idx = vsi->base_queue + i;
7370 tx_ring->ring_active = false;
7371 tx_ring->vsi = vsi;
7372 tx_ring->netdev = vsi->netdev;
7373 tx_ring->dev = &pf->pdev->dev;
7374 tx_ring->count = vsi->num_desc;
7375 tx_ring->size = 0;
7376 tx_ring->dcb_tc = 0;
7377 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7378 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7379 if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
7380 tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
7381 vsi->tx_rings[i] = tx_ring;
7382
7383 rx_ring = &tx_ring[1];
7384 rx_ring->queue_index = i;
7385 rx_ring->reg_idx = vsi->base_queue + i;
7386 rx_ring->ring_active = false;
7387 rx_ring->vsi = vsi;
7388 rx_ring->netdev = vsi->netdev;
7389 rx_ring->dev = &pf->pdev->dev;
7390 rx_ring->count = vsi->num_desc;
7391 rx_ring->size = 0;
7392 rx_ring->dcb_tc
= 0; 7393 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) 7394 set_ring_16byte_desc_enabled(rx_ring); 7395 else 7396 clear_ring_16byte_desc_enabled(rx_ring); 7397 vsi->rx_rings[i] = rx_ring; 7398 } 7399 7400 return 0; 7401 7402 err_out: 7403 i40e_vsi_clear_rings(vsi); 7404 return -ENOMEM; 7405 } 7406 7407 /** 7408 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 7409 * @pf: board private structure 7410 * @vectors: the number of MSI-X vectors to request 7411 * 7412 * Returns the number of vectors reserved, or error 7413 **/ 7414 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 7415 { 7416 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 7417 I40E_MIN_MSIX, vectors); 7418 if (vectors < 0) { 7419 dev_info(&pf->pdev->dev, 7420 "MSI-X vector reservation failed: %d\n", vectors); 7421 vectors = 0; 7422 } 7423 7424 return vectors; 7425 } 7426 7427 /** 7428 * i40e_init_msix - Setup the MSIX capability 7429 * @pf: board private structure 7430 * 7431 * Work with the OS to set up the MSIX vectors needed. 7432 * 7433 * Returns the number of vectors reserved or negative on failure 7434 **/ 7435 static int i40e_init_msix(struct i40e_pf *pf) 7436 { 7437 struct i40e_hw *hw = &pf->hw; 7438 int vectors_left; 7439 int v_budget, i; 7440 int v_actual; 7441 7442 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7443 return -ENODEV; 7444 7445 /* The number of vectors we'll request will be comprised of: 7446 * - Add 1 for "other" cause for Admin Queue events, etc. 7447 * - The number of LAN queue pairs 7448 * - Queues being used for RSS. 7449 * We don't need as many as max_rss_size vectors. 7450 * use rss_size instead in the calculation since that 7451 * is governed by number of cpus in the system. 7452 * - assumes symmetric Tx/Rx pairing 7453 * - The number of VMDq pairs 7454 #ifdef I40E_FCOE 7455 * - The number of FCOE qps. 7456 #endif 7457 * Once we count this up, try the request. 7458 * 7459 * If we can't get what we want, we'll simplify to nearly nothing 7460 * and try again. If that still fails, we punt. 7461 */ 7462 vectors_left = hw->func_caps.num_msix_vectors; 7463 v_budget = 0; 7464 7465 /* reserve one vector for miscellaneous handler */ 7466 if (vectors_left) { 7467 v_budget++; 7468 vectors_left--; 7469 } 7470 7471 /* reserve vectors for the main PF traffic queues */ 7472 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7473 vectors_left -= pf->num_lan_msix; 7474 v_budget += pf->num_lan_msix; 7475 7476 /* reserve one vector for sideband flow director */ 7477 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7478 if (vectors_left) { 7479 v_budget++; 7480 vectors_left--; 7481 } else { 7482 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7483 } 7484 } 7485 7486 #ifdef I40E_FCOE 7487 /* can we reserve enough for FCoE? */ 7488 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7489 if (!vectors_left) 7490 pf->num_fcoe_msix = 0; 7491 else if (vectors_left >= pf->num_fcoe_qps) 7492 pf->num_fcoe_msix = pf->num_fcoe_qps; 7493 else 7494 pf->num_fcoe_msix = 1; 7495 v_budget += pf->num_fcoe_msix; 7496 vectors_left -= pf->num_fcoe_msix; 7497 } 7498 7499 #endif 7500 /* any vectors left over go for VMDq support */ 7501 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7502 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7503 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 7504 7505 /* if we're short on vectors for what's desired, we limit 7506 * the queues per vmdq. 
If this is still more than are 7507 * available, the user will need to change the number of 7508 * queues/vectors used by the PF later with the ethtool 7509 * channels command 7510 */ 7511 if (vmdq_vecs < vmdq_vecs_wanted) 7512 pf->num_vmdq_qps = 1; 7513 pf->num_vmdq_msix = pf->num_vmdq_qps; 7514 7515 v_budget += vmdq_vecs; 7516 vectors_left -= vmdq_vecs; 7517 } 7518 7519 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7520 GFP_KERNEL); 7521 if (!pf->msix_entries) 7522 return -ENOMEM; 7523 7524 for (i = 0; i < v_budget; i++) 7525 pf->msix_entries[i].entry = i; 7526 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 7527 7528 if (v_actual != v_budget) { 7529 /* If we have limited resources, we will start with no vectors 7530 * for the special features and then allocate vectors to some 7531 * of these features based on the policy and at the end disable 7532 * the features that did not get any vectors. 7533 */ 7534 #ifdef I40E_FCOE 7535 pf->num_fcoe_qps = 0; 7536 pf->num_fcoe_msix = 0; 7537 #endif 7538 pf->num_vmdq_msix = 0; 7539 } 7540 7541 if (v_actual < I40E_MIN_MSIX) { 7542 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7543 kfree(pf->msix_entries); 7544 pf->msix_entries = NULL; 7545 return -ENODEV; 7546 7547 } else if (v_actual == I40E_MIN_MSIX) { 7548 /* Adjust for minimal MSIX use */ 7549 pf->num_vmdq_vsis = 0; 7550 pf->num_vmdq_qps = 0; 7551 pf->num_lan_qps = 1; 7552 pf->num_lan_msix = 1; 7553 7554 } else if (v_actual != v_budget) { 7555 int vec; 7556 7557 /* reserve the misc vector */ 7558 vec = v_actual - 1; 7559 7560 /* Scale vector usage down */ 7561 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 7562 pf->num_vmdq_vsis = 1; 7563 pf->num_vmdq_qps = 1; 7564 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7565 7566 /* partition out the remaining vectors */ 7567 switch (vec) { 7568 case 2: 7569 pf->num_lan_msix = 1; 7570 break; 7571 case 3: 7572 #ifdef I40E_FCOE 7573 /* give one vector to FCoE */ 7574 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7575 pf->num_lan_msix = 1; 7576 pf->num_fcoe_msix = 1; 7577 } 7578 #else 7579 pf->num_lan_msix = 2; 7580 #endif 7581 break; 7582 default: 7583 #ifdef I40E_FCOE 7584 /* give one vector to FCoE */ 7585 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7586 pf->num_fcoe_msix = 1; 7587 vec--; 7588 } 7589 #endif 7590 /* give the rest to the PF */ 7591 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); 7592 break; 7593 } 7594 } 7595 7596 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 7597 (pf->num_vmdq_msix == 0)) { 7598 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7599 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7600 } 7601 #ifdef I40E_FCOE 7602 7603 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7604 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 7605 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 7606 } 7607 #endif 7608 return v_actual; 7609 } 7610 7611 /** 7612 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 7613 * @vsi: the VSI being configured 7614 * @v_idx: index of the vector in the vsi struct 7615 * 7616 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
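*
* The resulting linkage, sketched for orientation with the names used in
* the body below (nothing new is assumed):
*
*	vsi->q_vectors[v_idx] ----> q_vector
*	q_vector->vsi         ----> vsi
*	q_vector->napi        ----> registered via netif_napi_add()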
7617 **/ 7618 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 7619 { 7620 struct i40e_q_vector *q_vector; 7621 7622 /* allocate q_vector */ 7623 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 7624 if (!q_vector) 7625 return -ENOMEM; 7626 7627 q_vector->vsi = vsi; 7628 q_vector->v_idx = v_idx; 7629 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 7630 if (vsi->netdev) 7631 netif_napi_add(vsi->netdev, &q_vector->napi, 7632 i40e_napi_poll, NAPI_POLL_WEIGHT); 7633 7634 q_vector->rx.latency_range = I40E_LOW_LATENCY; 7635 q_vector->tx.latency_range = I40E_LOW_LATENCY; 7636 7637 /* tie q_vector and vsi together */ 7638 vsi->q_vectors[v_idx] = q_vector; 7639 7640 return 0; 7641 } 7642 7643 /** 7644 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 7645 * @vsi: the VSI being configured 7646 * 7647 * We allocate one q_vector per queue interrupt. If allocation fails we 7648 * return -ENOMEM. 7649 **/ 7650 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 7651 { 7652 struct i40e_pf *pf = vsi->back; 7653 int v_idx, num_q_vectors; 7654 int err; 7655 7656 /* if not MSIX, give the one vector only to the LAN VSI */ 7657 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7658 num_q_vectors = vsi->num_q_vectors; 7659 else if (vsi == pf->vsi[pf->lan_vsi]) 7660 num_q_vectors = 1; 7661 else 7662 return -EINVAL; 7663 7664 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 7665 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 7666 if (err) 7667 goto err_out; 7668 } 7669 7670 return 0; 7671 7672 err_out: 7673 while (v_idx--) 7674 i40e_free_q_vector(vsi, v_idx); 7675 7676 return err; 7677 } 7678 7679 /** 7680 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 7681 * @pf: board private structure to initialize 7682 **/ 7683 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 7684 { 7685 int vectors = 0; 7686 ssize_t size; 7687 7688 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 7689 vectors = i40e_init_msix(pf); 7690 if (vectors < 0) { 7691 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 7692 #ifdef I40E_FCOE 7693 I40E_FLAG_FCOE_ENABLED | 7694 #endif 7695 I40E_FLAG_RSS_ENABLED | 7696 I40E_FLAG_DCB_CAPABLE | 7697 I40E_FLAG_SRIOV_ENABLED | 7698 I40E_FLAG_FD_SB_ENABLED | 7699 I40E_FLAG_FD_ATR_ENABLED | 7700 I40E_FLAG_VMDQ_ENABLED); 7701 7702 /* rework the queue expectations without MSIX */ 7703 i40e_determine_queue_usage(pf); 7704 } 7705 } 7706 7707 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 7708 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 7709 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 7710 vectors = pci_enable_msi(pf->pdev); 7711 if (vectors < 0) { 7712 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 7713 vectors); 7714 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 7715 } 7716 vectors = 1; /* one MSI or Legacy vector */ 7717 } 7718 7719 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 7720 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 7721 7722 /* set up vector assignment tracking */ 7723 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 7724 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7725 if (!pf->irq_pile) { 7726 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); 7727 return -ENOMEM; 7728 } 7729 pf->irq_pile->num_entries = vectors; 7730 pf->irq_pile->search_hint = 0; 7731 7732 /* track first vector for misc interrupts, ignore return */ 7733 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); 7734 7735 return 0; 7736 } 7737 7738 /** 7739 * 
i40e_setup_misc_vector - Setup the misc vector to handle non-queue events 7740 * @pf: board private structure 7741 * 7742 * This sets up the handler for MSIX 0, which is used to manage the 7743 * non-queue interrupts, e.g. AdminQ and errors. This is not used 7744 * when in MSI or Legacy interrupt mode. 7745 **/ 7746 static int i40e_setup_misc_vector(struct i40e_pf *pf) 7747 { 7748 struct i40e_hw *hw = &pf->hw; 7749 int err = 0; 7750 7751 /* Only request the irq if this is the first time through, and 7752 * not when we're rebuilding after a Reset 7753 */ 7754 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7755 err = request_irq(pf->msix_entries[0].vector, 7756 i40e_intr, 0, pf->int_name, pf); 7757 if (err) { 7758 dev_info(&pf->pdev->dev, 7759 "request_irq for %s failed: %d\n", 7760 pf->int_name, err); 7761 return -EFAULT; 7762 } 7763 } 7764 7765 i40e_enable_misc_int_causes(pf); 7766 7767 /* associate no queues to the misc vector */ 7768 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 7769 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); 7770 7771 i40e_flush(hw); 7772 7773 i40e_irq_dynamic_enable_icr0(pf); 7774 7775 return err; 7776 } 7777 7778 /** 7779 * i40e_config_rss_aq - Prepare for RSS using AQ commands 7780 * @vsi: vsi structure 7781 * @seed: RSS hash seed 7782 **/ 7783 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) 7784 { 7785 struct i40e_aqc_get_set_rss_key_data rss_key; 7786 struct i40e_pf *pf = vsi->back; 7787 struct i40e_hw *hw = &pf->hw; 7788 bool pf_lut = false; 7789 u8 *rss_lut; 7790 int ret, i; 7791 7792 memset(&rss_key, 0, sizeof(rss_key)); 7793 memcpy(&rss_key, seed, sizeof(rss_key)); 7794 7795 /* sized by the VSI's LUT size, to match the fill loop and the set_rss_lut call below, which both use vsi->rss_table_size entries */ rss_lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 7796 if (!rss_lut) 7797 return -ENOMEM; 7798 7799 /* Populate the LUT with max no.
of queues in round robin fashion */ 7800 for (i = 0; i < vsi->rss_table_size; i++) 7801 rss_lut[i] = i % vsi->rss_size; 7802 7803 ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); 7804 if (ret) { 7805 dev_info(&pf->pdev->dev, 7806 "Cannot set RSS key, err %s aq_err %s\n", 7807 i40e_stat_str(&pf->hw, ret), 7808 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 7809 goto config_rss_aq_out; 7810 } 7811 7812 if (vsi->type == I40E_VSI_MAIN) 7813 pf_lut = true; 7814 7815 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, 7816 vsi->rss_table_size); 7817 if (ret) 7818 dev_info(&pf->pdev->dev, 7819 "Cannot set RSS lut, err %s aq_err %s\n", 7820 i40e_stat_str(&pf->hw, ret), 7821 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 7822 7823 config_rss_aq_out: 7824 kfree(rss_lut); 7825 return ret; 7826 } 7827 7828 /** 7829 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used 7830 * @vsi: VSI structure 7831 **/ 7832 static int i40e_vsi_config_rss(struct i40e_vsi *vsi) 7833 { 7834 u8 seed[I40E_HKEY_ARRAY_SIZE]; 7835 struct i40e_pf *pf = vsi->back; 7836 7837 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 7838 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); 7839 7840 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) 7841 return i40e_config_rss_aq(vsi, seed); 7842 7843 return 0; 7844 } 7845 7846 /** 7847 * i40e_config_rss_reg - Prepare for RSS if used 7848 * @pf: board private structure 7849 * @seed: RSS hash seed 7850 **/ 7851 static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed) 7852 { 7853 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 7854 struct i40e_hw *hw = &pf->hw; 7855 u32 *seed_dw = (u32 *)seed; 7856 u32 current_queue = 0; 7857 u32 lut = 0; 7858 int i, j; 7859 7860 /* Fill out hash function seed */ 7861 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 7862 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); 7863 7864 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { 7865 lut = 0; 7866 for (j = 0; j < 4; j++) { 7867 if (current_queue == vsi->rss_size) 7868 current_queue = 0; 7869 lut |= ((current_queue) << (8 * j)); 7870 current_queue++; 7871 } 7872 wr32(&pf->hw, I40E_PFQF_HLUT(i), lut); 7873 } 7874 i40e_flush(hw); 7875 7876 return 0; 7877 } 7878 7879 /** 7880 * i40e_config_rss - Prepare for RSS if used 7881 * @pf: board private structure 7882 **/ 7883 static int i40e_config_rss(struct i40e_pf *pf) 7884 { 7885 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 7886 u8 seed[I40E_HKEY_ARRAY_SIZE]; 7887 struct i40e_hw *hw = &pf->hw; 7888 u32 reg_val; 7889 u64 hena; 7890 7891 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 7892 7893 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ 7894 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | 7895 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); 7896 hena |= i40e_pf_get_default_rss_hena(pf); 7897 7898 wr32(hw, I40E_PFQF_HENA(0), (u32)hena); 7899 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 7900 7901 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); 7902 7903 /* Determine the RSS table size based on the hardware capabilities */ 7904 reg_val = rd32(hw, I40E_PFQF_CTL_0); 7905 reg_val = (pf->rss_table_size == 512) ? 
7906 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : 7907 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); 7908 wr32(hw, I40E_PFQF_CTL_0, reg_val); 7909 7910 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) 7911 return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed); 7912 else 7913 return i40e_config_rss_reg(pf, seed); 7914 } 7915 7916 /** 7917 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 7918 * @pf: board private structure 7919 * @queue_count: the requested queue count for rss. 7920 * 7921 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue 7922 * count, which may differ from the requested queue count. 7923 **/ 7924 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 7925 { 7926 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 7927 int new_rss_size; 7928 7929 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 7930 return 0; 7931 7932 new_rss_size = min_t(int, queue_count, pf->rss_size_max); 7933 7934 if (queue_count != vsi->num_queue_pairs) { 7935 vsi->req_queue_pairs = queue_count; 7936 i40e_prep_for_reset(pf); 7937 7938 pf->rss_size = new_rss_size; 7939 7940 i40e_reset_and_rebuild(pf, true); 7941 i40e_config_rss(pf); 7942 } 7943 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); 7944 return pf->rss_size; 7945 } 7946 7947 /** 7948 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition 7949 * @pf: board private structure 7950 **/ 7951 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) 7952 { 7953 i40e_status status; 7954 bool min_valid, max_valid; 7955 u32 max_bw, min_bw; 7956 7957 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, 7958 &min_valid, &max_valid); 7959 7960 if (!status) { 7961 if (min_valid) 7962 pf->npar_min_bw = min_bw; 7963 if (max_valid) 7964 pf->npar_max_bw = max_bw; 7965 } 7966 7967 return status; 7968 } 7969 7970 /** 7971 * i40e_set_npar_bw_setting - Set BW settings for this PF partition 7972 * @pf: board private structure 7973 **/ 7974 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) 7975 { 7976 struct i40e_aqc_configure_partition_bw_data bw_data; 7977 i40e_status status; 7978 /* zero the struct so no uninitialized stack data is sent to the firmware in the other partitions' slots */ memset(&bw_data, 0, sizeof(bw_data)); 7979 /* Set the valid bit for this PF */ 7980 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); 7981 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; 7982 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; 7983 7984 /* Set the new bandwidths */ 7985 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); 7986 7987 return status; 7988 } 7989 7990 /** 7991 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition 7992 * @pf: board private structure 7993 **/ 7994 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) 7995 { 7996 /* Commit temporary BW setting to permanent NVM image */ 7997 enum i40e_admin_queue_err last_aq_status; 7998 i40e_status ret; 7999 u16 nvm_word; 8000 8001 if (pf->hw.partition_id != 1) { 8002 dev_info(&pf->pdev->dev, 8003 "Commit BW only works on partition 1! 
This is partition %d", 8004 pf->hw.partition_id); 8005 ret = I40E_NOT_SUPPORTED; 8006 goto bw_commit_out; 8007 } 8008 8009 /* Acquire NVM for read access */ 8010 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 8011 last_aq_status = pf->hw.aq.asq_last_status; 8012 if (ret) { 8013 dev_info(&pf->pdev->dev, 8014 "Cannot acquire NVM for read access, err %s aq_err %s\n", 8015 i40e_stat_str(&pf->hw, ret), 8016 i40e_aq_str(&pf->hw, last_aq_status)); 8017 goto bw_commit_out; 8018 } 8019 8020 /* Read word 0x10 of NVM - SW compatibility word 1 */ 8021 ret = i40e_aq_read_nvm(&pf->hw, 8022 I40E_SR_NVM_CONTROL_WORD, 8023 0x10, sizeof(nvm_word), &nvm_word, 8024 false, NULL); 8025 /* Save off last admin queue command status before releasing 8026 * the NVM 8027 */ 8028 last_aq_status = pf->hw.aq.asq_last_status; 8029 i40e_release_nvm(&pf->hw); 8030 if (ret) { 8031 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", 8032 i40e_stat_str(&pf->hw, ret), 8033 i40e_aq_str(&pf->hw, last_aq_status)); 8034 goto bw_commit_out; 8035 } 8036 8037 /* Wait a bit for NVM release to complete */ 8038 msleep(50); 8039 8040 /* Acquire NVM for write access */ 8041 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 8042 last_aq_status = pf->hw.aq.asq_last_status; 8043 if (ret) { 8044 dev_info(&pf->pdev->dev, 8045 "Cannot acquire NVM for write access, err %s aq_err %s\n", 8046 i40e_stat_str(&pf->hw, ret), 8047 i40e_aq_str(&pf->hw, last_aq_status)); 8048 goto bw_commit_out; 8049 } 8050 /* Write it back out unchanged to initiate update NVM, 8051 * which will force a write of the shadow (alt) RAM to 8052 * the NVM - thus storing the bandwidth values permanently. 8053 */ 8054 ret = i40e_aq_update_nvm(&pf->hw, 8055 I40E_SR_NVM_CONTROL_WORD, 8056 0x10, sizeof(nvm_word), 8057 &nvm_word, true, NULL); 8058 /* Save off last admin queue command status before releasing 8059 * the NVM 8060 */ 8061 last_aq_status = pf->hw.aq.asq_last_status; 8062 i40e_release_nvm(&pf->hw); 8063 if (ret) 8064 dev_info(&pf->pdev->dev, 8065 "BW settings NOT SAVED, err %s aq_err %s\n", 8066 i40e_stat_str(&pf->hw, ret), 8067 i40e_aq_str(&pf->hw, last_aq_status)); 8068 bw_commit_out: 8069 8070 return ret; 8071 } 8072 8073 /** 8074 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 8075 * @pf: board private structure to initialize 8076 * 8077 * i40e_sw_init initializes the Adapter private data structure. 8078 * Fields are initialized based on PCI device information and 8079 * OS network device settings (MTU size). 
8080 **/ 8081 static int i40e_sw_init(struct i40e_pf *pf) 8082 { 8083 int err = 0; 8084 int size; 8085 8086 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 8087 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 8088 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; 8089 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 8090 if (I40E_DEBUG_USER & debug) 8091 pf->hw.debug_mask = debug; 8092 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 8093 I40E_DEFAULT_MSG_ENABLE); 8094 } 8095 8096 /* Set default capability flags */ 8097 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 8098 I40E_FLAG_MSI_ENABLED | 8099 I40E_FLAG_LINK_POLLING_ENABLED | 8100 I40E_FLAG_MSIX_ENABLED; 8101 8102 if (iommu_present(&pci_bus_type)) 8103 pf->flags |= I40E_FLAG_RX_PS_ENABLED; 8104 else 8105 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; 8106 8107 /* Set default ITR */ 8108 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; 8109 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; 8110 8111 /* Depending on PF configurations, it is possible that the RSS 8112 * maximum might end up larger than the available queues 8113 */ 8114 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); 8115 pf->rss_size = 1; 8116 pf->rss_table_size = pf->hw.func_caps.rss_table_size; 8117 pf->rss_size_max = min_t(int, pf->rss_size_max, 8118 pf->hw.func_caps.num_tx_qp); 8119 if (pf->hw.func_caps.rss) { 8120 pf->flags |= I40E_FLAG_RSS_ENABLED; 8121 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); 8122 } 8123 8124 /* MFP mode enabled */ 8125 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { 8126 pf->flags |= I40E_FLAG_MFP_ENABLED; 8127 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 8128 if (i40e_get_npar_bw_setting(pf)) 8129 dev_warn(&pf->pdev->dev, 8130 "Could not get NPAR bw settings\n"); 8131 else 8132 dev_info(&pf->pdev->dev, 8133 "Min BW = %8.8x, Max BW = %8.8x\n", 8134 pf->npar_min_bw, pf->npar_max_bw); 8135 } 8136 8137 /* FW/NVM is not yet fixed in this regard */ 8138 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 8139 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 8140 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8141 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 8142 if (pf->flags & I40E_FLAG_MFP_ENABLED && 8143 pf->hw.num_partitions > 1) 8144 dev_info(&pf->pdev->dev, 8145 "Flow Director Sideband mode Disabled in MFP mode\n"); 8146 else 8147 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8148 pf->fdir_pf_filter_count = 8149 pf->hw.func_caps.fd_filters_guaranteed; 8150 pf->hw.fdir_shared_filter_count = 8151 pf->hw.func_caps.fd_filters_best_effort; 8152 } 8153 8154 if (pf->hw.func_caps.vmdq) { 8155 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 8156 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 8157 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); 8158 } 8159 8160 #ifdef I40E_FCOE 8161 i40e_init_pf_fcoe(pf); 8162 8163 #endif /* I40E_FCOE */ 8164 #ifdef CONFIG_PCI_IOV 8165 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { 8166 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 8167 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 8168 pf->num_req_vfs = min_t(int, 8169 pf->hw.func_caps.num_vfs, 8170 I40E_MAX_VF_COUNT); 8171 } 8172 #endif /* CONFIG_PCI_IOV */ 8173 if (pf->hw.mac.type == I40E_MAC_X722) { 8174 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | 8175 I40E_FLAG_128_QP_RSS_CAPABLE | 8176 I40E_FLAG_HW_ATR_EVICT_CAPABLE | 8177 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8178 I40E_FLAG_WB_ON_ITR_CAPABLE | 8179 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; 8180 } 8181 pf->eeprom_version = 0xDEAD; 8182 
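/* no switch elements have been discovered yet; the real SEIDs and indices are filled in later during i40e_setup_pf_switch() */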
pf->lan_veb = I40E_NO_VEB; 8183 pf->lan_vsi = I40E_NO_VSI; 8184 8185 /* By default FW has this off for performance reasons */ 8186 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; 8187 8188 /* set up queue assignment tracking */ 8189 size = sizeof(struct i40e_lump_tracking) 8190 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 8191 pf->qp_pile = kzalloc(size, GFP_KERNEL); 8192 if (!pf->qp_pile) { 8193 err = -ENOMEM; 8194 goto sw_init_done; 8195 } 8196 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 8197 pf->qp_pile->search_hint = 0; 8198 8199 pf->tx_timeout_recovery_level = 1; 8200 8201 mutex_init(&pf->switch_mutex); 8202 8203 /* If NPAR is enabled nudge the Tx scheduler */ 8204 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) 8205 i40e_set_npar_bw_setting(pf); 8206 8207 sw_init_done: 8208 return err; 8209 } 8210 8211 /** 8212 * i40e_set_ntuple - set the ntuple feature flag and take action 8213 * @pf: board private structure to initialize 8214 * @features: the feature set that the stack is suggesting 8215 * 8216 * returns a bool to indicate if reset needs to happen 8217 **/ 8218 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 8219 { 8220 bool need_reset = false; 8221 8222 /* Check if Flow Director n-tuple support was enabled or disabled. If 8223 * the state changed, we need to reset. 8224 */ 8225 if (features & NETIF_F_NTUPLE) { 8226 /* Enable filters and mark for reset */ 8227 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 8228 need_reset = true; 8229 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8230 } else { 8231 /* turn off filters, mark for reset and clear SW filter list */ 8232 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8233 need_reset = true; 8234 i40e_fdir_filter_exit(pf); 8235 } 8236 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 8237 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 8238 /* reset fd counters */ 8239 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; 8240 pf->fdir_pf_active_filters = 0; 8241 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8242 if (I40E_DEBUG_FD & pf->hw.debug_mask) 8243 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); 8244 /* if ATR was auto disabled it can be re-enabled. 
*/ 8245 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 8246 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 8247 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 8248 } 8249 return need_reset; 8250 } 8251 8252 /** 8253 * i40e_set_features - set the netdev feature flags 8254 * @netdev: ptr to the netdev being adjusted 8255 * @features: the feature set that the stack is suggesting 8256 **/ 8257 static int i40e_set_features(struct net_device *netdev, 8258 netdev_features_t features) 8259 { 8260 struct i40e_netdev_priv *np = netdev_priv(netdev); 8261 struct i40e_vsi *vsi = np->vsi; 8262 struct i40e_pf *pf = vsi->back; 8263 bool need_reset; 8264 8265 if (features & NETIF_F_HW_VLAN_CTAG_RX) 8266 i40e_vlan_stripping_enable(vsi); 8267 else 8268 i40e_vlan_stripping_disable(vsi); 8269 8270 need_reset = i40e_set_ntuple(pf, features); 8271 8272 if (need_reset) 8273 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 8274 8275 return 0; 8276 } 8277 8278 #ifdef CONFIG_I40E_VXLAN 8279 /** 8280 * i40e_get_vxlan_port_idx - Lookup a possibly offloaded UDP port for Rx 8281 * @pf: board private structure 8282 * @port: The UDP port to look up 8283 * 8284 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found 8285 **/ 8286 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) 8287 { 8288 u8 i; 8289 8290 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 8291 if (pf->vxlan_ports[i] == port) 8292 return i; 8293 } 8294 8295 return i; 8296 } 8297 8298 /** 8299 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 8300 * @netdev: This physical port's netdev 8301 * @sa_family: Socket Family that VXLAN is notifying us about 8302 * @port: New UDP port number that VXLAN started listening to 8303 **/ 8304 static void i40e_add_vxlan_port(struct net_device *netdev, 8305 sa_family_t sa_family, __be16 port) 8306 { 8307 struct i40e_netdev_priv *np = netdev_priv(netdev); 8308 struct i40e_vsi *vsi = np->vsi; 8309 struct i40e_pf *pf = vsi->back; 8310 u8 next_idx; 8311 u8 idx; 8312 8313 if (sa_family == AF_INET6) 8314 return; 8315 8316 idx = i40e_get_vxlan_port_idx(pf, port); 8317 8318 /* Check if port already exists */ 8319 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8320 netdev_info(netdev, "vxlan port %d already offloaded\n", 8321 ntohs(port)); 8322 return; 8323 } 8324 8325 /* Now check if there is space to add the new port */ 8326 next_idx = i40e_get_vxlan_port_idx(pf, 0); 8327 8328 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8329 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", 8330 ntohs(port)); 8331 return; 8332 } 8333 8334 /* New port: add it and mark its index in the bitmap */ 8335 pf->vxlan_ports[next_idx] = port; 8336 pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); 8337 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 8338 } 8339 8340 /** 8341 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away 8342 * @netdev: This physical port's netdev 8343 * @sa_family: Socket Family that VXLAN is notifying us about 8344 * @port: UDP port number that VXLAN stopped listening to 8345 **/ 8346 static void i40e_del_vxlan_port(struct net_device *netdev, 8347 sa_family_t sa_family, __be16 port) 8348 { 8349 struct i40e_netdev_priv *np = netdev_priv(netdev); 8350 struct i40e_vsi *vsi = np->vsi; 8351 struct i40e_pf *pf = vsi->back; 8352 u8 idx; 8353 8354 if (sa_family == AF_INET6) 8355 return; 8356 8357 idx = i40e_get_vxlan_port_idx(pf, port); 8358 8359 /* Check if port already exists */ 8360 if (idx <
I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8361 /* if port exists, set it to 0 (mark for deletion) 8362 * and make it pending 8363 */ 8364 pf->vxlan_ports[idx] = 0; 8365 pf->pending_vxlan_bitmap |= BIT_ULL(idx); 8366 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 8367 } else { 8368 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", 8369 ntohs(port)); 8370 } 8371 } 8372 8373 #endif 8374 static int i40e_get_phys_port_id(struct net_device *netdev, 8375 struct netdev_phys_item_id *ppid) 8376 { 8377 struct i40e_netdev_priv *np = netdev_priv(netdev); 8378 struct i40e_pf *pf = np->vsi->back; 8379 struct i40e_hw *hw = &pf->hw; 8380 8381 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) 8382 return -EOPNOTSUPP; 8383 8384 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); 8385 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); 8386 8387 return 0; 8388 } 8389 8390 /** 8391 * i40e_ndo_fdb_add - add an entry to the hardware database 8392 * @ndm: the input from the stack 8393 * @tb: pointer to array of nladdr (unused) 8394 * @dev: the net device pointer 8395 * @addr: the MAC address entry being added * @vid: VLAN id (only 0 is supported for now) 8396 * @flags: instructions from stack about fdb operation 8397 */ 8398 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 8399 struct net_device *dev, 8400 const unsigned char *addr, u16 vid, 8401 u16 flags) 8402 { 8403 struct i40e_netdev_priv *np = netdev_priv(dev); 8404 struct i40e_pf *pf = np->vsi->back; 8405 int err = 0; 8406 8407 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) 8408 return -EOPNOTSUPP; 8409 8410 if (vid) { 8411 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); 8412 return -EINVAL; 8413 } 8414 8415 /* Hardware does not support aging addresses so if a 8416 * ndm_state is given only allow permanent addresses 8417 */ 8418 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 8419 netdev_info(dev, "FDB only supports static addresses\n"); 8420 return -EINVAL; 8421 } 8422 8423 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 8424 err = dev_uc_add_excl(dev, addr); 8425 else if (is_multicast_ether_addr(addr)) 8426 err = dev_mc_add_excl(dev, addr); 8427 else 8428 err = -EINVAL; 8429 8430 /* Only return duplicate errors if NLM_F_EXCL is set */ 8431 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 8432 err = 0; 8433 8434 return err; 8435 } 8436 8437 /** 8438 * i40e_ndo_bridge_setlink - Set the hardware bridge mode 8439 * @dev: the netdev being configured 8440 * @nlh: RTNL message * @flags: bridge setlink flags, not used by this driver 8441 * 8442 * Inserts a new hardware bridge if not already created and 8443 * enables the bridging mode requested (VEB or VEPA). If the 8444 * hardware bridge has already been inserted and the request 8445 * is to change the mode, a PF reset is required to rebuild the 8446 * components with the requested hardware 8447 * bridge mode enabled.
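 *
 * For example (illustrative only): "bridge link set dev <pf-netdev>
 * hwmode veb" from iproute2 arrives here as an IFLA_BRIDGE_MODE
 * attribute and, if it differs from the current mode, triggers the
 * PF reset described above.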
**/ 8449 static int i40e_ndo_bridge_setlink(struct net_device *dev, 8450 struct nlmsghdr *nlh, 8451 u16 flags) 8452 { 8453 struct i40e_netdev_priv *np = netdev_priv(dev); 8454 struct i40e_vsi *vsi = np->vsi; 8455 struct i40e_pf *pf = vsi->back; 8456 struct i40e_veb *veb = NULL; 8457 struct nlattr *attr, *br_spec; 8458 int i, rem; 8459 8460 /* Only for PF VSI for now */ 8461 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) 8462 return -EOPNOTSUPP; 8463 8464 /* Find the HW bridge for PF VSI */ 8465 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8466 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 8467 veb = pf->veb[i]; 8468 } 8469 8470 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); /* guard against a request without bridge attributes */ if (!br_spec) return -EINVAL; 8471 8472 nla_for_each_nested(attr, br_spec, rem) { 8473 __u16 mode; 8474 8475 if (nla_type(attr) != IFLA_BRIDGE_MODE) 8476 continue; 8477 8478 mode = nla_get_u16(attr); 8479 if ((mode != BRIDGE_MODE_VEPA) && 8480 (mode != BRIDGE_MODE_VEB)) 8481 return -EINVAL; 8482 8483 /* Insert a new HW bridge */ 8484 if (!veb) { 8485 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 8486 vsi->tc_config.enabled_tc); 8487 if (veb) { 8488 veb->bridge_mode = mode; 8489 i40e_config_bridge_mode(veb); 8490 } else { 8491 /* No Bridge HW offload available */ 8492 return -ENOENT; 8493 } 8494 break; 8495 } else if (mode != veb->bridge_mode) { 8496 /* Existing HW bridge but different mode needs reset */ 8497 veb->bridge_mode = mode; 8498 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ 8499 if (mode == BRIDGE_MODE_VEB) 8500 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 8501 else 8502 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 8503 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 8504 break; 8505 } 8506 } 8507 8508 return 0; 8509 } 8510 8511 /** 8512 * i40e_ndo_bridge_getlink - Get the hardware bridge mode 8513 * @skb: skb buff 8514 * @pid: process id 8515 * @seq: RTNL message seq # 8516 * @dev: the netdev being configured 8517 * @filter_mask: unused 8518 * @nlflags: netlink flags passed in 8519 * 8520 * Return the mode in which the hardware bridge is operating, 8521 * i.e. VEB or VEPA.
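 *
 * The netlink reply itself is built by ndo_dflt_bridge_getlink() from
 * the bridge_mode found on the PF VSI's uplink VEB.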
8522 **/ 8523 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8524 struct net_device *dev, 8525 u32 __always_unused filter_mask, 8526 int nlflags) 8527 { 8528 struct i40e_netdev_priv *np = netdev_priv(dev); 8529 struct i40e_vsi *vsi = np->vsi; 8530 struct i40e_pf *pf = vsi->back; 8531 struct i40e_veb *veb = NULL; 8532 int i; 8533 8534 /* Only for PF VSI for now */ 8535 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) 8536 return -EOPNOTSUPP; 8537 8538 /* Find the HW bridge for the PF VSI */ 8539 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8540 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 8541 veb = pf->veb[i]; 8542 } 8543 8544 if (!veb) 8545 return 0; 8546 8547 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, 8548 nlflags, 0, 0, filter_mask, NULL); 8549 } 8550 8551 #define I40E_MAX_TUNNEL_HDR_LEN 80 8552 /** 8553 * i40e_features_check - Validate encapsulated packet conforms to limits 8554 * @skb: skb buff 8555 * @dev: This physical port's netdev 8556 * @features: Offload features that the stack believes apply 8557 **/ 8558 static netdev_features_t i40e_features_check(struct sk_buff *skb, 8559 struct net_device *dev, 8560 netdev_features_t features) 8561 { 8562 if (skb->encapsulation && 8563 (skb_inner_mac_header(skb) - skb_transport_header(skb) > 8564 I40E_MAX_TUNNEL_HDR_LEN)) 8565 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); 8566 8567 return features; 8568 } 8569 8570 static const struct net_device_ops i40e_netdev_ops = { 8571 .ndo_open = i40e_open, 8572 .ndo_stop = i40e_close, 8573 .ndo_start_xmit = i40e_lan_xmit_frame, 8574 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 8575 .ndo_set_rx_mode = i40e_set_rx_mode, 8576 .ndo_validate_addr = eth_validate_addr, 8577 .ndo_set_mac_address = i40e_set_mac, 8578 .ndo_change_mtu = i40e_change_mtu, 8579 .ndo_do_ioctl = i40e_ioctl, 8580 .ndo_tx_timeout = i40e_tx_timeout, 8581 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 8582 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 8583 #ifdef CONFIG_NET_POLL_CONTROLLER 8584 .ndo_poll_controller = i40e_netpoll, 8585 #endif 8586 .ndo_setup_tc = i40e_setup_tc, 8587 #ifdef I40E_FCOE 8588 .ndo_fcoe_enable = i40e_fcoe_enable, 8589 .ndo_fcoe_disable = i40e_fcoe_disable, 8590 #endif 8591 .ndo_set_features = i40e_set_features, 8592 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 8593 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 8594 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 8595 .ndo_get_vf_config = i40e_ndo_get_vf_config, 8596 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 8597 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 8598 #ifdef CONFIG_I40E_VXLAN 8599 .ndo_add_vxlan_port = i40e_add_vxlan_port, 8600 .ndo_del_vxlan_port = i40e_del_vxlan_port, 8601 #endif 8602 .ndo_get_phys_port_id = i40e_get_phys_port_id, 8603 .ndo_fdb_add = i40e_ndo_fdb_add, 8604 .ndo_features_check = i40e_features_check, 8605 .ndo_bridge_getlink = i40e_ndo_bridge_getlink, 8606 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 8607 }; 8608 8609 /** 8610 * i40e_config_netdev - Setup the netdev flags 8611 * @vsi: the VSI being configured 8612 * 8613 * Returns 0 on success, negative value on failure 8614 **/ 8615 static int i40e_config_netdev(struct i40e_vsi *vsi) 8616 { 8617 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 8618 struct i40e_pf *pf = vsi->back; 8619 struct i40e_hw *hw = &pf->hw; 8620 struct i40e_netdev_priv *np; 8621 struct net_device *netdev; 8622 u8 mac_addr[ETH_ALEN]; 8623 int etherdev_size; 8624 8625 etherdev_size = sizeof(struct i40e_netdev_priv); 8626 
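/* one netdev TX/RX queue per allocated queue pair; the private area only needs to hold the small i40e_netdev_priv back-pointer */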
netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 8627 if (!netdev) 8628 return -ENOMEM; 8629 8630 vsi->netdev = netdev; 8631 np = netdev_priv(netdev); 8632 np->vsi = vsi; 8633 8634 netdev->hw_enc_features |= NETIF_F_IP_CSUM | 8635 NETIF_F_GSO_UDP_TUNNEL | 8636 NETIF_F_GSO_GRE | 8637 NETIF_F_TSO; 8638 8639 netdev->features = NETIF_F_SG | 8640 NETIF_F_IP_CSUM | 8641 NETIF_F_SCTP_CSUM | 8642 NETIF_F_HIGHDMA | 8643 NETIF_F_GSO_UDP_TUNNEL | 8644 NETIF_F_GSO_GRE | 8645 NETIF_F_HW_VLAN_CTAG_TX | 8646 NETIF_F_HW_VLAN_CTAG_RX | 8647 NETIF_F_HW_VLAN_CTAG_FILTER | 8648 NETIF_F_IPV6_CSUM | 8649 NETIF_F_TSO | 8650 NETIF_F_TSO_ECN | 8651 NETIF_F_TSO6 | 8652 NETIF_F_RXCSUM | 8653 NETIF_F_RXHASH | 8654 0; 8655 8656 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 8657 netdev->features |= NETIF_F_NTUPLE; 8658 8659 /* copy netdev features into list of user selectable features */ 8660 netdev->hw_features |= netdev->features; 8661 8662 if (vsi->type == I40E_VSI_MAIN) { 8663 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 8664 ether_addr_copy(mac_addr, hw->mac.perm_addr); 8665 /* The following steps are necessary to prevent reception 8666 * of tagged packets - some older NVM configurations load a 8667 * default MAC-VLAN filter that accepts any tagged packet, 8668 * which must be replaced by a normal filter. 8669 */ 8670 if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { 8671 spin_lock_bh(&vsi->mac_filter_list_lock); 8672 i40e_add_filter(vsi, mac_addr, 8673 I40E_VLAN_ANY, false, true); 8674 spin_unlock_bh(&vsi->mac_filter_list_lock); 8675 } 8676 } else { 8677 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 8678 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 8679 pf->vsi[pf->lan_vsi]->netdev->name); 8680 random_ether_addr(mac_addr); 8681 8682 spin_lock_bh(&vsi->mac_filter_list_lock); 8683 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); 8684 spin_unlock_bh(&vsi->mac_filter_list_lock); 8685 } 8686 8687 spin_lock_bh(&vsi->mac_filter_list_lock); 8688 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); 8689 spin_unlock_bh(&vsi->mac_filter_list_lock); 8690 8691 ether_addr_copy(netdev->dev_addr, mac_addr); 8692 ether_addr_copy(netdev->perm_addr, mac_addr); 8693 /* vlan gets same features (except vlan offload) 8694 * after any tweaks for specific VSI types 8695 */ 8696 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | 8697 NETIF_F_HW_VLAN_CTAG_RX | 8698 NETIF_F_HW_VLAN_CTAG_FILTER); 8699 netdev->priv_flags |= IFF_UNICAST_FLT; 8700 netdev->priv_flags |= IFF_SUPP_NOFCS; 8701 /* Setup netdev TC information */ 8702 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); 8703 8704 netdev->netdev_ops = &i40e_netdev_ops; 8705 netdev->watchdog_timeo = 5 * HZ; 8706 i40e_set_ethtool_ops(netdev); 8707 #ifdef I40E_FCOE 8708 i40e_fcoe_config_netdev(netdev, vsi); 8709 #endif 8710 8711 return 0; 8712 } 8713 8714 /** 8715 * i40e_vsi_delete - Delete a VSI from the switch 8716 * @vsi: the VSI being removed 8719 **/ 8720 static void i40e_vsi_delete(struct i40e_vsi *vsi) 8721 { 8722 /* removing the default VSI is not allowed */ 8723 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) 8724 return; 8725 8726 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); 8727 } 8728 8729 /** 8730 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB 8731 * @vsi: the VSI being queried 8732 * 8733 * Returns 1 if the HW bridge mode is VEB, 0 for VEPA mode, or a negative * error (-ENOENT) if no VEB is associated with the uplink 8734 **/ 8735 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi
*vsi) 8736 { 8737 struct i40e_veb *veb; 8738 struct i40e_pf *pf = vsi->back; 8739 8740 /* Uplink is not a bridge so default to VEB */ 8741 if (vsi->veb_idx == I40E_NO_VEB) 8742 return 1; 8743 8744 veb = pf->veb[vsi->veb_idx]; 8745 if (!veb) { 8746 dev_info(&pf->pdev->dev, 8747 "There is no veb associated with the bridge\n"); 8748 return -ENOENT; 8749 } 8750 8751 /* Uplink is a bridge in VEPA mode */ 8752 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 8753 return 0; 8754 8755 /* Uplink is a bridge in VEB mode */ 8756 return 1; 8761 } 8762 8763 /** 8764 * i40e_add_vsi - Add a VSI to the switch 8765 * @vsi: the VSI being configured 8766 * 8767 * This initializes a VSI context depending on the VSI type to be added and 8768 * passes it down to the add_vsi aq command. 8769 **/ 8770 static int i40e_add_vsi(struct i40e_vsi *vsi) 8771 { 8772 int ret = -ENODEV; 8773 u8 laa_macaddr[ETH_ALEN]; 8774 bool found_laa_mac_filter = false; 8775 struct i40e_pf *pf = vsi->back; 8776 struct i40e_hw *hw = &pf->hw; 8777 struct i40e_vsi_context ctxt; 8778 struct i40e_mac_filter *f, *ftmp; 8779 8780 u8 enabled_tc = 0x1; /* TC0 enabled */ 8781 int f_count = 0; 8782 8783 memset(&ctxt, 0, sizeof(ctxt)); 8784 switch (vsi->type) { 8785 case I40E_VSI_MAIN: 8786 /* The PF's main VSI is already setup as part of the 8787 * device initialization, so we'll not bother with 8788 * the add_vsi call, but we will retrieve the current 8789 * VSI context. 8790 */ 8791 ctxt.seid = pf->main_vsi_seid; 8792 ctxt.pf_num = pf->hw.pf_id; 8793 ctxt.vf_num = 0; 8794 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 8795 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8796 if (ret) { 8797 dev_info(&pf->pdev->dev, 8798 "couldn't get PF vsi config, err %s aq_err %s\n", 8799 i40e_stat_str(&pf->hw, ret), 8800 i40e_aq_str(&pf->hw, 8801 pf->hw.aq.asq_last_status)); 8802 return -ENOENT; 8803 } 8804 vsi->info = ctxt.info; 8805 vsi->info.valid_sections = 0; 8806 8807 vsi->seid = ctxt.seid; 8808 vsi->id = ctxt.vsi_number; 8809 8810 enabled_tc = i40e_pf_get_tc_map(pf); 8811 8812 /* MFP mode setup queue map and update VSI */ 8813 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && 8814 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ 8815 memset(&ctxt, 0, sizeof(ctxt)); 8816 ctxt.seid = pf->main_vsi_seid; 8817 ctxt.pf_num = pf->hw.pf_id; 8818 ctxt.vf_num = 0; 8819 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 8820 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 8821 if (ret) { 8822 dev_info(&pf->pdev->dev, 8823 "update vsi failed, err %s aq_err %s\n", 8824 i40e_stat_str(&pf->hw, ret), 8825 i40e_aq_str(&pf->hw, 8826 pf->hw.aq.asq_last_status)); 8827 ret = -ENOENT; 8828 goto err; 8829 } 8830 /* update the local VSI info queue map */ 8831 i40e_vsi_update_queue_map(vsi, &ctxt); 8832 vsi->info.valid_sections = 0; 8833 } else { 8834 /* Default/Main VSI is only enabled for TC0; 8835 * reconfigure it to enable all TCs that are 8836 * available on the port in SFP mode. 8837 * For MFP case the iSCSI PF would use this 8838 * flow to enable LAN+iSCSI TC.
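 * (Illustrative example: enabled_tc = 0x3 would mean TC0 for LAN
 * traffic plus TC1 for iSCSI.)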
8839 */ 8840 ret = i40e_vsi_config_tc(vsi, enabled_tc); 8841 if (ret) { 8842 dev_info(&pf->pdev->dev, 8843 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", 8844 enabled_tc, 8845 i40e_stat_str(&pf->hw, ret), 8846 i40e_aq_str(&pf->hw, 8847 pf->hw.aq.asq_last_status)); 8848 ret = -ENOENT; 8849 } 8850 } 8851 break; 8852 8853 case I40E_VSI_FDIR: 8854 ctxt.pf_num = hw->pf_id; 8855 ctxt.vf_num = 0; 8856 ctxt.uplink_seid = vsi->uplink_seid; 8857 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8858 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8859 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && 8860 (i40e_is_vsi_uplink_mode_veb(vsi))) { 8861 ctxt.info.valid_sections |= 8862 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8863 ctxt.info.switch_id = 8864 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8865 } 8866 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8867 break; 8868 8869 case I40E_VSI_VMDQ2: 8870 ctxt.pf_num = hw->pf_id; 8871 ctxt.vf_num = 0; 8872 ctxt.uplink_seid = vsi->uplink_seid; 8873 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8874 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 8875 8876 /* This VSI is connected to VEB so the switch_id 8877 * should be set to zero by default. 8878 */ 8879 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 8880 ctxt.info.valid_sections |= 8881 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8882 ctxt.info.switch_id = 8883 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8884 } 8885 8886 /* Setup the VSI tx/rx queue map for TC0 only for now */ 8887 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8888 break; 8889 8890 case I40E_VSI_SRIOV: 8891 ctxt.pf_num = hw->pf_id; 8892 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 8893 ctxt.uplink_seid = vsi->uplink_seid; 8894 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8895 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 8896 8897 /* This VSI is connected to VEB so the switch_id 8898 * should be set to zero by default. 
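 * When the uplink is in VEB mode, the ALLOW_LB flag is set instead
 * so the VEB can loop traffic back locally between VSIs on the
 * same uplink.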
8899 */ 8900 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 8901 ctxt.info.valid_sections |= 8902 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8903 ctxt.info.switch_id = 8904 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8905 } 8906 8907 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 8908 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 8909 if (pf->vf[vsi->vf_id].spoofchk) { 8910 ctxt.info.valid_sections |= 8911 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 8912 ctxt.info.sec_flags |= 8913 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 8914 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 8915 } 8916 /* Setup the VSI tx/rx queue map for TC0 only for now */ 8917 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8918 break; 8919 8920 #ifdef I40E_FCOE 8921 case I40E_VSI_FCOE: 8922 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 8923 if (ret) { 8924 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 8925 return ret; 8926 } 8927 break; 8928 8929 #endif /* I40E_FCOE */ 8930 default: 8931 return -ENODEV; 8932 } 8933 8934 if (vsi->type != I40E_VSI_MAIN) { 8935 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 8936 if (ret) { 8937 dev_info(&vsi->back->pdev->dev, 8938 "add vsi failed, err %s aq_err %s\n", 8939 i40e_stat_str(&pf->hw, ret), 8940 i40e_aq_str(&pf->hw, 8941 pf->hw.aq.asq_last_status)); 8942 ret = -ENOENT; 8943 goto err; 8944 } 8945 vsi->info = ctxt.info; 8946 vsi->info.valid_sections = 0; 8947 vsi->seid = ctxt.seid; 8948 vsi->id = ctxt.vsi_number; 8949 } 8950 8951 spin_lock_bh(&vsi->mac_filter_list_lock); 8952 /* If macvlan filters already exist, force them to get loaded */ 8953 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 8954 f->changed = true; 8955 f_count++; 8956 8957 /* Expected to have only one MAC filter entry for LAA in list */ 8958 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 8959 ether_addr_copy(laa_macaddr, f->macaddr); 8960 found_laa_mac_filter = true; 8961 } 8962 } 8963 spin_unlock_bh(&vsi->mac_filter_list_lock); 8964 8965 if (found_laa_mac_filter) { 8966 struct i40e_aqc_remove_macvlan_element_data element; 8967 8968 memset(&element, 0, sizeof(element)); 8969 ether_addr_copy(element.mac_addr, laa_macaddr); 8970 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 8971 ret = i40e_aq_remove_macvlan(hw, vsi->seid, 8972 &element, 1, NULL); 8973 if (ret) { 8974 /* some older FW has a different default */ 8975 element.flags |= 8976 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 8977 i40e_aq_remove_macvlan(hw, vsi->seid, 8978 &element, 1, NULL); 8979 } 8980 8981 i40e_aq_mac_address_write(hw, 8982 I40E_AQC_WRITE_TYPE_LAA_WOL, 8983 laa_macaddr, NULL); 8984 } 8985 8986 if (f_count) { 8987 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 8988 pf->flags |= I40E_FLAG_FILTER_SYNC; 8989 } 8990 8991 /* Update VSI BW information */ 8992 ret = i40e_vsi_get_bw_info(vsi); 8993 if (ret) { 8994 dev_info(&pf->pdev->dev, 8995 "couldn't get vsi bw info, err %s aq_err %s\n", 8996 i40e_stat_str(&pf->hw, ret), 8997 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 8998 /* VSI is already added so not tearing that up */ 8999 ret = 0; 9000 } 9001 9002 err: 9003 return ret; 9004 } 9005 9006 /** 9007 * i40e_vsi_release - Delete a VSI and free its resources 9008 * @vsi: the VSI being removed 9009 * 9010 * Returns 0 on success or < 0 on error 9011 **/ 9012 int i40e_vsi_release(struct i40e_vsi *vsi) 9013 { 9014 struct i40e_mac_filter *f, *ftmp; 9015 struct i40e_veb *veb = NULL; 9016 struct i40e_pf *pf; 9017 u16 uplink_seid; 9018 int i, n; 9019 9020 pf = vsi->back; 9021 9022 /* release of a VEB-owner or 
last VSI is not allowed */ 9023 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 9024 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 9025 vsi->seid, vsi->uplink_seid); 9026 return -ENODEV; 9027 } 9028 if (vsi == pf->vsi[pf->lan_vsi] && 9029 !test_bit(__I40E_DOWN, &pf->state)) { 9030 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9031 return -ENODEV; 9032 } 9033 9034 uplink_seid = vsi->uplink_seid; 9035 if (vsi->type != I40E_VSI_SRIOV) { 9036 if (vsi->netdev_registered) { 9037 vsi->netdev_registered = false; 9038 if (vsi->netdev) { 9039 /* results in a call to i40e_close() */ 9040 unregister_netdev(vsi->netdev); 9041 } 9042 } else { 9043 i40e_vsi_close(vsi); 9044 } 9045 i40e_vsi_disable_irq(vsi); 9046 } 9047 9048 spin_lock_bh(&vsi->mac_filter_list_lock); 9049 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 9050 i40e_del_filter(vsi, f->macaddr, f->vlan, 9051 f->is_vf, f->is_netdev); 9052 spin_unlock_bh(&vsi->mac_filter_list_lock); 9053 9054 i40e_sync_vsi_filters(vsi, false); 9055 9056 i40e_vsi_delete(vsi); 9057 i40e_vsi_free_q_vectors(vsi); 9058 if (vsi->netdev) { 9059 free_netdev(vsi->netdev); 9060 vsi->netdev = NULL; 9061 } 9062 i40e_vsi_clear_rings(vsi); 9063 i40e_vsi_clear(vsi); 9064 9065 /* If this was the last thing on the VEB, except for the 9066 * controlling VSI, remove the VEB, which puts the controlling 9067 * VSI onto the next level down in the switch. 9068 * 9069 * Well, okay, there's one more exception here: don't remove 9070 * the orphan VEBs yet. We'll wait for an explicit remove request 9071 * from up the network stack. 9072 */ 9073 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 9074 if (pf->vsi[i] && 9075 pf->vsi[i]->uplink_seid == uplink_seid && 9076 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 9077 n++; /* count the VSIs */ 9078 } 9079 } 9080 for (i = 0; i < I40E_MAX_VEB; i++) { 9081 if (!pf->veb[i]) 9082 continue; 9083 if (pf->veb[i]->uplink_seid == uplink_seid) 9084 n++; /* count the VEBs */ 9085 if (pf->veb[i]->seid == uplink_seid) 9086 veb = pf->veb[i]; 9087 } 9088 if (n == 0 && veb && veb->uplink_seid != 0) 9089 i40e_veb_release(veb); 9090 9091 return 0; 9092 } 9093 9094 /** 9095 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 9096 * @vsi: ptr to the VSI 9097 * 9098 * This should only be called after i40e_vsi_mem_alloc() which allocates the 9099 * corresponding SW VSI structure and initializes num_queue_pairs for the 9100 * newly allocated VSI. 9101 * 9102 * Returns 0 on success or negative on failure 9103 **/ 9104 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 9105 { 9106 int ret = -ENOENT; 9107 struct i40e_pf *pf = vsi->back; 9108 9109 if (vsi->q_vectors[0]) { 9110 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 9111 vsi->seid); 9112 return -EEXIST; 9113 } 9114 9115 if (vsi->base_vector) { 9116 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 9117 vsi->seid, vsi->base_vector); 9118 return -EEXIST; 9119 } 9120 9121 ret = i40e_vsi_alloc_q_vectors(vsi); 9122 if (ret) { 9123 dev_info(&pf->pdev->dev, 9124 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 9125 vsi->num_q_vectors, vsi->seid, ret); 9126 vsi->num_q_vectors = 0; 9127 goto vector_setup_out; 9128 } 9129 9130 /* In Legacy mode, we do not have to get any other vector since we 9131 * piggyback on the misc/ICR0 for queue interrupts. 
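 * (i.e. we return early below, and no entry is reserved from
 * pf->irq_pile; vsi->base_vector keeps its default value)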
9132 */ 9133 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 9134 return ret; 9135 if (vsi->num_q_vectors) 9136 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 9137 vsi->num_q_vectors, vsi->idx); 9138 if (vsi->base_vector < 0) { 9139 dev_info(&pf->pdev->dev, 9140 "failed to get tracking for %d vectors for VSI %d, err=%d\n", 9141 vsi->num_q_vectors, vsi->seid, vsi->base_vector); 9142 i40e_vsi_free_q_vectors(vsi); 9143 ret = -ENOENT; 9144 goto vector_setup_out; 9145 } 9146 9147 vector_setup_out: 9148 return ret; 9149 } 9150 9151 /** 9152 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI 9153 * @vsi: pointer to the vsi. 9154 * 9155 * This re-allocates a vsi's queue resources. 9156 * 9157 * Returns pointer to the successfully allocated and configured VSI sw struct 9158 * on success, otherwise returns NULL on failure. 9159 **/ 9160 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 9161 { 9162 struct i40e_pf *pf = vsi->back; 9163 u8 enabled_tc; 9164 int ret; 9165 9166 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 9167 i40e_vsi_clear_rings(vsi); 9168 9169 i40e_vsi_free_arrays(vsi, false); 9170 i40e_set_num_rings_in_vsi(vsi); 9171 ret = i40e_vsi_alloc_arrays(vsi, false); 9172 if (ret) 9173 goto err_vsi; 9174 9175 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); 9176 if (ret < 0) { 9177 dev_info(&pf->pdev->dev, 9178 "failed to get tracking for %d queues for VSI %d err %d\n", 9179 vsi->alloc_queue_pairs, vsi->seid, ret); 9180 goto err_vsi; 9181 } 9182 vsi->base_queue = ret; 9183 9184 /* Update the FW view of the VSI. Force a reset of TC and queue 9185 * layout configurations. 9186 */ 9187 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 9188 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 9189 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 9190 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 9191 9192 /* allocate the rings */ 9193 ret = i40e_alloc_rings(vsi); 9194 if (ret) 9195 goto err_rings; 9196 9197 /* map all of the rings to the q_vectors */ 9198 i40e_vsi_map_rings_to_vectors(vsi); 9199 return vsi; 9200 9201 err_rings: 9202 i40e_vsi_free_q_vectors(vsi); 9203 if (vsi->netdev_registered) { 9204 vsi->netdev_registered = false; 9205 unregister_netdev(vsi->netdev); 9206 free_netdev(vsi->netdev); 9207 vsi->netdev = NULL; 9208 } 9209 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 9210 err_vsi: 9211 i40e_vsi_clear(vsi); 9212 return NULL; 9213 } 9214 9215 /** 9216 * i40e_vsi_setup - Set up a VSI by a given type 9217 * @pf: board private structure 9218 * @type: VSI type 9219 * @uplink_seid: the switch element to link to 9220 * @param1: usage depends upon VSI type. For VF types, indicates VF id 9221 * 9222 * This allocates the sw VSI structure and its queue resources, then adds a VSI 9223 * to the identified VEB. 9224 * 9225 * Returns pointer to the successfully allocated and configured VSI sw struct on 9226 * success, otherwise returns NULL on failure.
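 *
 * Typical call (illustrative): an SR-IOV VF VSI is created with
 * i40e_vsi_setup(pf, I40E_VSI_SRIOV, uplink_seid, vf_id), with param1
 * carrying the VF id as noted above.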
9227 **/ 9228 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, 9229 u16 uplink_seid, u32 param1) 9230 { 9231 struct i40e_vsi *vsi = NULL; 9232 struct i40e_veb *veb = NULL; 9233 int ret, i; 9234 int v_idx; 9235 9236 /* The requested uplink_seid must be either 9237 * - the PF's port seid 9238 * no VEB is needed because this is the PF 9239 * or this is a Flow Director special case VSI 9240 * - seid of an existing VEB 9241 * - seid of a VSI that owns an existing VEB 9242 * - seid of a VSI that doesn't own a VEB 9243 * a new VEB is created and the VSI becomes the owner 9244 * - seid of the PF VSI, which is what creates the first VEB 9245 * this is a special case of the previous 9246 * 9247 * Find which uplink_seid we were given and create a new VEB if needed 9248 */ 9249 for (i = 0; i < I40E_MAX_VEB; i++) { 9250 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { 9251 veb = pf->veb[i]; 9252 break; 9253 } 9254 } 9255 9256 if (!veb && uplink_seid != pf->mac_seid) { 9257 9258 for (i = 0; i < pf->num_alloc_vsi; i++) { 9259 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 9260 vsi = pf->vsi[i]; 9261 break; 9262 } 9263 } 9264 if (!vsi) { 9265 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", 9266 uplink_seid); 9267 return NULL; 9268 } 9269 9270 if (vsi->uplink_seid == pf->mac_seid) 9271 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, 9272 vsi->tc_config.enabled_tc); 9273 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) 9274 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 9275 vsi->tc_config.enabled_tc); 9276 if (veb) { 9277 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { 9278 dev_info(&vsi->back->pdev->dev, 9279 "New VSI creation error, uplink seid of LAN VSI expected.\n"); 9280 return NULL; 9281 } 9282 /* We come up by default in VEPA mode if SRIOV is not 9283 * already enabled, in which case we can't force VEPA 9284 * mode. 9285 */ 9286 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 9287 veb->bridge_mode = BRIDGE_MODE_VEPA; 9288 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 9289 } 9290 i40e_config_bridge_mode(veb); 9291 } 9292 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 9293 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 9294 veb = pf->veb[i]; 9295 } 9296 if (!veb) { 9297 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); 9298 return NULL; 9299 } 9300 9301 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 9302 uplink_seid = veb->seid; 9303 } 9304 9305 /* get vsi sw struct */ 9306 v_idx = i40e_vsi_mem_alloc(pf, type); 9307 if (v_idx < 0) 9308 goto err_alloc; 9309 vsi = pf->vsi[v_idx]; 9310 if (!vsi) 9311 goto err_alloc; 9312 vsi->type = type; 9313 vsi->veb_idx = (veb ? 
veb->idx : I40E_NO_VEB); 9314 9315 if (type == I40E_VSI_MAIN) 9316 pf->lan_vsi = v_idx; 9317 else if (type == I40E_VSI_SRIOV) 9318 vsi->vf_id = param1; 9319 /* assign it some queues */ 9320 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, 9321 vsi->idx); 9322 if (ret < 0) { 9323 dev_info(&pf->pdev->dev, 9324 "failed to get tracking for %d queues for VSI %d err=%d\n", 9325 vsi->alloc_queue_pairs, vsi->seid, ret); 9326 goto err_vsi; 9327 } 9328 vsi->base_queue = ret; 9329 9330 /* get a VSI from the hardware */ 9331 vsi->uplink_seid = uplink_seid; 9332 ret = i40e_add_vsi(vsi); 9333 if (ret) 9334 goto err_vsi; 9335 9336 switch (vsi->type) { 9337 /* setup the netdev if needed */ 9338 case I40E_VSI_MAIN: 9339 case I40E_VSI_VMDQ2: 9340 case I40E_VSI_FCOE: 9341 ret = i40e_config_netdev(vsi); 9342 if (ret) 9343 goto err_netdev; 9344 ret = register_netdev(vsi->netdev); 9345 if (ret) 9346 goto err_netdev; 9347 vsi->netdev_registered = true; 9348 netif_carrier_off(vsi->netdev); 9349 #ifdef CONFIG_I40E_DCB 9350 /* Setup DCB netlink interface */ 9351 i40e_dcbnl_setup(vsi); 9352 #endif /* CONFIG_I40E_DCB */ 9353 /* fall through */ 9354 9355 case I40E_VSI_FDIR: 9356 /* set up vectors and rings if needed */ 9357 ret = i40e_vsi_setup_vectors(vsi); 9358 if (ret) 9359 goto err_msix; 9360 9361 ret = i40e_alloc_rings(vsi); 9362 if (ret) 9363 goto err_rings; 9364 9365 /* map all of the rings to the q_vectors */ 9366 i40e_vsi_map_rings_to_vectors(vsi); 9367 9368 i40e_vsi_reset_stats(vsi); 9369 break; 9370 9371 default: 9372 /* no netdev or rings for the other VSI types */ 9373 break; 9374 } 9375 9376 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && 9377 (vsi->type == I40E_VSI_VMDQ2)) { 9378 ret = i40e_vsi_config_rss(vsi); 9379 } 9380 return vsi; 9381 9382 err_rings: 9383 i40e_vsi_free_q_vectors(vsi); 9384 err_msix: 9385 if (vsi->netdev_registered) { 9386 vsi->netdev_registered = false; 9387 unregister_netdev(vsi->netdev); 9388 free_netdev(vsi->netdev); 9389 vsi->netdev = NULL; 9390 } 9391 err_netdev: 9392 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 9393 err_vsi: 9394 i40e_vsi_clear(vsi); 9395 err_alloc: 9396 return NULL; 9397 } 9398 9399 /** 9400 * i40e_veb_get_bw_info - Query VEB BW information 9401 * @veb: the veb to query 9402 * 9403 * Query the Tx scheduler BW configuration data for given VEB 9404 **/ 9405 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 9406 { 9407 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 9408 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; 9409 struct i40e_pf *pf = veb->pf; 9410 struct i40e_hw *hw = &pf->hw; 9411 u32 tc_bw_max; 9412 int ret = 0; 9413 int i; 9414 9415 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 9416 &bw_data, NULL); 9417 if (ret) { 9418 dev_info(&pf->pdev->dev, 9419 "query veb bw config failed, err %s aq_err %s\n", 9420 i40e_stat_str(&pf->hw, ret), 9421 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 9422 goto out; 9423 } 9424 9425 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 9426 &ets_data, NULL); 9427 if (ret) { 9428 dev_info(&pf->pdev->dev, 9429 "query veb bw ets config failed, err %s aq_err %s\n", 9430 i40e_stat_str(&pf->hw, ret), 9431 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 9432 goto out; 9433 } 9434 9435 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 9436 veb->bw_max_quanta = ets_data.tc_bw_max; 9437 veb->is_abs_credits = bw_data.absolute_credits_enable; 9438 veb->enabled_tc = ets_data.tc_valid_bits; 9439 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 9440 
(le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 9441 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 9442 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; 9443 veb->bw_tc_limit_credits[i] = 9444 le16_to_cpu(bw_data.tc_bw_limits[i]); 9445 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); 9446 } 9447 9448 out: 9449 return ret; 9450 } 9451 9452 /** 9453 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF 9454 * @pf: board private structure 9455 * 9456 * On error: returns error code (negative) 9457 * On success: returns veb index in PF (positive) 9458 **/ 9459 static int i40e_veb_mem_alloc(struct i40e_pf *pf) 9460 { 9461 int ret = -ENOENT; 9462 struct i40e_veb *veb; 9463 int i; 9464 9465 /* Need to protect the allocation of switch elements at the PF level */ 9466 mutex_lock(&pf->switch_mutex); 9467 9468 /* VEB list may be fragmented if VEB creation/destruction has 9469 * been happening. We can afford to do a quick scan to look 9470 * for any free slots in the list. 9471 * 9472 * find the next empty veb slot with a single forward scan 9473 */ 9474 i = 0; 9475 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) 9476 i++; 9477 if (i >= I40E_MAX_VEB) { 9478 ret = -ENOMEM; 9479 goto err_alloc_veb; /* out of VEB slots! */ 9480 } 9481 9482 veb = kzalloc(sizeof(*veb), GFP_KERNEL); 9483 if (!veb) { 9484 ret = -ENOMEM; 9485 goto err_alloc_veb; 9486 } 9487 veb->pf = pf; 9488 veb->idx = i; 9489 veb->enabled_tc = 1; 9490 9491 pf->veb[i] = veb; 9492 ret = i; 9493 err_alloc_veb: 9494 mutex_unlock(&pf->switch_mutex); 9495 return ret; 9496 } 9497 9498 /** 9499 * i40e_switch_branch_release - Delete a branch of the switch tree 9500 * @branch: where to start deleting 9501 * 9502 * This uses recursion to find the tips of the branch to be 9503 * removed, deleting until we get back to and can delete this VEB. 9504 **/ 9505 static void i40e_switch_branch_release(struct i40e_veb *branch) 9506 { 9507 struct i40e_pf *pf = branch->pf; 9508 u16 branch_seid = branch->seid; 9509 u16 veb_idx = branch->idx; 9510 int i; 9511 9512 /* release any VEBs on this VEB - RECURSION */ 9513 for (i = 0; i < I40E_MAX_VEB; i++) { 9514 if (!pf->veb[i]) 9515 continue; 9516 if (pf->veb[i]->uplink_seid == branch->seid) 9517 i40e_switch_branch_release(pf->veb[i]); 9518 } 9519 9520 /* Release the VSIs on this VEB, but not the owner VSI. 9521 * 9522 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 9523 * the VEB itself, so don't use (*branch) after this loop. 9524 */ 9525 for (i = 0; i < pf->num_alloc_vsi; i++) { 9526 if (!pf->vsi[i]) 9527 continue; 9528 if (pf->vsi[i]->uplink_seid == branch_seid && 9529 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 9530 i40e_vsi_release(pf->vsi[i]); 9531 } 9532 } 9533 9534 /* There's one corner case where the VEB might not have been 9535 * removed, so double check it here and remove it if needed. 9536 * This case happens if the veb was created from the debugfs 9537 * commands and no VSIs were added to it.
9538 */ 9539 if (pf->veb[veb_idx]) 9540 i40e_veb_release(pf->veb[veb_idx]); 9541 } 9542 9543 /** 9544 * i40e_veb_clear - remove veb struct 9545 * @veb: the veb to remove 9546 **/ 9547 static void i40e_veb_clear(struct i40e_veb *veb) 9548 { 9549 if (!veb) 9550 return; 9551 9552 if (veb->pf) { 9553 struct i40e_pf *pf = veb->pf; 9554 9555 mutex_lock(&pf->switch_mutex); 9556 if (pf->veb[veb->idx] == veb) 9557 pf->veb[veb->idx] = NULL; 9558 mutex_unlock(&pf->switch_mutex); 9559 } 9560 9561 kfree(veb); 9562 } 9563 9564 /** 9565 * i40e_veb_release - Delete a VEB and free its resources 9566 * @veb: the VEB being removed 9567 **/ 9568 void i40e_veb_release(struct i40e_veb *veb) 9569 { 9570 struct i40e_vsi *vsi = NULL; 9571 struct i40e_pf *pf; 9572 int i, n = 0; 9573 9574 pf = veb->pf; 9575 9576 /* find the remaining VSI and check for extras */ 9577 for (i = 0; i < pf->num_alloc_vsi; i++) { 9578 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 9579 n++; 9580 vsi = pf->vsi[i]; 9581 } 9582 } 9583 if (n != 1) { 9584 dev_info(&pf->pdev->dev, 9585 "can't remove VEB %d with %d VSIs left\n", 9586 veb->seid, n); 9587 return; 9588 } 9589 9590 /* move the remaining VSI to uplink veb */ 9591 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 9592 if (veb->uplink_seid) { 9593 vsi->uplink_seid = veb->uplink_seid; 9594 if (veb->uplink_seid == pf->mac_seid) 9595 vsi->veb_idx = I40E_NO_VEB; 9596 else 9597 vsi->veb_idx = veb->veb_idx; 9598 } else { 9599 /* floating VEB */ 9600 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 9601 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 9602 } 9603 9604 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 9605 i40e_veb_clear(veb); 9606 } 9607 9608 /** 9609 * i40e_add_veb - create the VEB in the switch 9610 * @veb: the VEB to be instantiated 9611 * @vsi: the controlling VSI 9612 **/ 9613 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 9614 { 9615 struct i40e_pf *pf = veb->pf; 9616 bool is_default = veb->pf->cur_promisc; 9617 bool is_cloud = false; 9618 int ret; 9619 9620 /* get a VEB from the hardware */ 9621 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 9622 veb->enabled_tc, is_default, 9623 is_cloud, &veb->seid, NULL); 9624 if (ret) { 9625 dev_info(&pf->pdev->dev, 9626 "couldn't add VEB, err %s aq_err %s\n", 9627 i40e_stat_str(&pf->hw, ret), 9628 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9629 return -EPERM; 9630 } 9631 9632 /* get statistics counter */ 9633 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, 9634 &veb->stats_idx, NULL, NULL, NULL); 9635 if (ret) { 9636 dev_info(&pf->pdev->dev, 9637 "couldn't get VEB statistics idx, err %s aq_err %s\n", 9638 i40e_stat_str(&pf->hw, ret), 9639 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9640 return -EPERM; 9641 } 9642 ret = i40e_veb_get_bw_info(veb); 9643 if (ret) { 9644 dev_info(&pf->pdev->dev, 9645 "couldn't get VEB bw info, err %s aq_err %s\n", 9646 i40e_stat_str(&pf->hw, ret), 9647 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9648 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 9649 return -ENOENT; 9650 } 9651 9652 vsi->uplink_seid = veb->seid; 9653 vsi->veb_idx = veb->idx; 9654 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 9655 9656 return 0; 9657 } 9658 9659 /** 9660 * i40e_veb_setup - Set up a VEB 9661 * @pf: board private structure 9662 * @flags: VEB setup flags 9663 * @uplink_seid: the switch element to link to 9664 * @vsi_seid: the initial VSI seid 9665 * @enabled_tc: Enabled TC bit-map 9666 * 9667 * This allocates the sw VEB structure and links it into the 
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both, seids are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
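/* With printconfig set, i40e_setup_pf_switch_element() below logs one line
 * per reported element, e.g. (values illustrative only):
 *
 *	type=19 seid=392 uplink=2 downlink=512
 *
 * which is handy when correlating the firmware's view of the switch with
 * the driver's pf->*_seid bookkeeping.
 */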
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}

/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
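/* The do/while above relies on the admin queue's cursor semantics: each
 * i40e_aq_get_switch_config() call fills at most I40E_AQ_LARGE_BUF worth of
 * elements and updates next_seid, and the firmware signals that the whole
 * configuration has been reported by returning next_seid == 0.
 */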
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_update_link_info(&pf->hw);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	return ret;
}
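/* Worked example for the queue split below (numbers invented purely to show
 * the arithmetic): with 64 Tx queue pairs, MSI-X, and RSS/ATR/SB all enabled
 * on an 8-CPU system with rss_size_max >= 8, the LAN VSI gets
 * min(max(rss_size_max, num_online_cpus()), 64) = 8 qps, one queue is then
 * reserved for the Flow Director sideband ring, and the remaining 55 are
 * shared out to SR-IOV VFs and VMDq VSIs as requested.
 */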
9981 */ 9982 queues_left = pf->hw.func_caps.num_tx_qp; 9983 9984 if ((queues_left == 1) || 9985 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 9986 /* one qp for PF, no queues for anything else */ 9987 queues_left = 0; 9988 pf->rss_size = pf->num_lan_qps = 1; 9989 9990 /* make sure all the fancies are disabled */ 9991 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 9992 #ifdef I40E_FCOE 9993 I40E_FLAG_FCOE_ENABLED | 9994 #endif 9995 I40E_FLAG_FD_SB_ENABLED | 9996 I40E_FLAG_FD_ATR_ENABLED | 9997 I40E_FLAG_DCB_CAPABLE | 9998 I40E_FLAG_SRIOV_ENABLED | 9999 I40E_FLAG_VMDQ_ENABLED); 10000 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 10001 I40E_FLAG_FD_SB_ENABLED | 10002 I40E_FLAG_FD_ATR_ENABLED | 10003 I40E_FLAG_DCB_CAPABLE))) { 10004 /* one qp for PF */ 10005 pf->rss_size = pf->num_lan_qps = 1; 10006 queues_left -= pf->num_lan_qps; 10007 10008 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10009 #ifdef I40E_FCOE 10010 I40E_FLAG_FCOE_ENABLED | 10011 #endif 10012 I40E_FLAG_FD_SB_ENABLED | 10013 I40E_FLAG_FD_ATR_ENABLED | 10014 I40E_FLAG_DCB_ENABLED | 10015 I40E_FLAG_VMDQ_ENABLED); 10016 } else { 10017 /* Not enough queues for all TCs */ 10018 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 10019 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 10020 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10021 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 10022 } 10023 pf->num_lan_qps = max_t(int, pf->rss_size_max, 10024 num_online_cpus()); 10025 pf->num_lan_qps = min_t(int, pf->num_lan_qps, 10026 pf->hw.func_caps.num_tx_qp); 10027 10028 queues_left -= pf->num_lan_qps; 10029 } 10030 10031 #ifdef I40E_FCOE 10032 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 10033 if (I40E_DEFAULT_FCOE <= queues_left) { 10034 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; 10035 } else if (I40E_MINIMUM_FCOE <= queues_left) { 10036 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; 10037 } else { 10038 pf->num_fcoe_qps = 0; 10039 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 10040 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); 10041 } 10042 10043 queues_left -= pf->num_fcoe_qps; 10044 } 10045 10046 #endif 10047 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10048 if (queues_left > 1) { 10049 queues_left -= 1; /* save 1 queue for FD */ 10050 } else { 10051 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 10052 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
		pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
#ifdef I40E_FCOE
	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}

/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
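/* i40e_print_features() below condenses the probed configuration into one
 * summary line at driver load, along the lines of (illustrative only):
 *
 *	i40e 0000:01:00.0: Features: PF-id[0] VFs: 32 VSIs: 66 QP: 8
 *	RX: 1BUF RSS FD_ATR FD_SB NTUPLE PTP VEPA
 */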
"PS" : "1BUF"); 10135 10136 if (pf->flags & I40E_FLAG_RSS_ENABLED) 10137 buf += sprintf(buf, "RSS "); 10138 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 10139 buf += sprintf(buf, "FD_ATR "); 10140 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10141 buf += sprintf(buf, "FD_SB "); 10142 buf += sprintf(buf, "NTUPLE "); 10143 } 10144 if (pf->flags & I40E_FLAG_DCB_CAPABLE) 10145 buf += sprintf(buf, "DCB "); 10146 #if IS_ENABLED(CONFIG_VXLAN) 10147 buf += sprintf(buf, "VxLAN "); 10148 #endif 10149 if (pf->flags & I40E_FLAG_PTP) 10150 buf += sprintf(buf, "PTP "); 10151 #ifdef I40E_FCOE 10152 if (pf->flags & I40E_FLAG_FCOE_ENABLED) 10153 buf += sprintf(buf, "FCOE "); 10154 #endif 10155 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 10156 buf += sprintf(buf, "VEB "); 10157 else 10158 buf += sprintf(buf, "VEPA "); 10159 10160 BUG_ON(buf > (string + INFO_STRING_LEN)); 10161 dev_info(&pf->pdev->dev, "%s\n", string); 10162 kfree(string); 10163 } 10164 10165 /** 10166 * i40e_probe - Device initialization routine 10167 * @pdev: PCI device information struct 10168 * @ent: entry in i40e_pci_tbl 10169 * 10170 * i40e_probe initializes a PF identified by a pci_dev structure. 10171 * The OS initialization, configuring of the PF private structure, 10172 * and a hardware reset occur. 10173 * 10174 * Returns 0 on success, negative on failure 10175 **/ 10176 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 10177 { 10178 struct i40e_aq_get_phy_abilities_resp abilities; 10179 struct i40e_pf *pf; 10180 struct i40e_hw *hw; 10181 static u16 pfs_found; 10182 u16 wol_nvm_bits; 10183 u16 link_status; 10184 int err; 10185 u32 len; 10186 u32 i; 10187 u8 set_fc_aq_fail; 10188 10189 err = pci_enable_device_mem(pdev); 10190 if (err) 10191 return err; 10192 10193 /* set up for high or low dma */ 10194 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10195 if (err) { 10196 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10197 if (err) { 10198 dev_err(&pdev->dev, 10199 "DMA configuration failed: 0x%x\n", err); 10200 goto err_dma; 10201 } 10202 } 10203 10204 /* set up pci connections */ 10205 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 10206 IORESOURCE_MEM), i40e_driver_name); 10207 if (err) { 10208 dev_info(&pdev->dev, 10209 "pci_request_selected_regions failed %d\n", err); 10210 goto err_pci_reg; 10211 } 10212 10213 pci_enable_pcie_error_reporting(pdev); 10214 pci_set_master(pdev); 10215 10216 /* Now that we have a PCI connection, we need to do the 10217 * low level device setup. This is primarily setting up 10218 * the Admin Queue structures and then querying for the 10219 * device's current profile information. 
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 len;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
				IORESOURCE_MEM), i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, &pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	pf->instance = pfs_found;

	if (debug != -1)
		pf->msg_enable = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	err = i40e_init_adminq(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (err) {
		dev_info(&pdev->dev,
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		goto err_pf_reset;
	}
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	i40e_get_mac_addr(hw, hw->mac.addr);
	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->flags |= I40E_FLAG_PORT_ID_VALID;
#ifdef I40E_FCOE
	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
	if (err)
		dev_info(&pdev->dev,
			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
	if (!is_valid_ether_addr(hw->mac.san_addr)) {
		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
			 hw->mac.san_addr);
		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
	}
	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
#endif /* I40E_FCOE */

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
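	/* Example for the NVM word read below (value invented): wol_nvm_bits
	 * of 0x000c sets bits 2 and 3, which disables WoL on ports 2 and 3
	 * while leaving ports 0 and 1 eligible; WoL is also force-disabled
	 * on any partition other than the first.
	 */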
	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
	pf->vsi = kzalloc(len, GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, &pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		u32 val;

		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	pfs_found++;

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
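	/* From this point the service task fires roughly once a second
	 * (service_timer_period == HZ) to handle link changes, admin queue
	 * events and reset requests among other work, so the remaining probe
	 * steps should be safe to run concurrently with it.
	 */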
#ifdef I40E_FCOE
	/* create FCoE interface */
	i40e_fcoe_vsi_setup(pf);

#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}
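	/* Rough arithmetic behind the warning above: a Gen3 x8 link carries
	 * about 8 GT/s * 8 lanes * 128/130 encoding, i.e. ~63 Gb/s per
	 * direction, so anything slower or narrower cannot keep a dual-40G
	 * adapter busy.
	 */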
	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
	(void)i40e_shutdown_adminq(hw);
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
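/* i40e_remove() below is roughly i40e_probe() in reverse: stop the timer
 * and service task first so nothing reschedules, free VFs, release the
 * switch elements and VSIs, then tear down HMC, AdminQ, the interrupt
 * scheme and finally the PCI resources.
 */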
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	wr32(hw, I40E_PFQF_HENA(0), 0);
	wr32(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* shutdown and destroy the HMC */
	if (pf->hw.hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the channel state reported by the PCI core
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
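/* The AER recovery flow runs .error_detected (quiesce, above), then
 * .slot_reset (re-enable and sanity-check the device, below), and finally
 * .resume (rebuild driver state) once the PCI core is done.
 */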
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
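/* The repetition in i40e_shutdown() above appears deliberate: the service
 * timer/task may still have been running until del_timer_sync() and
 * cancel_work_sync() returned, so the reset prep and the wake-register
 * writes are repeated once everything has provably stopped.
 */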
#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: PM state to transition into
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);