/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#if IS_ENABLED(CONFIG_VXLAN)
#include <net/vxlan.h>
#endif
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 8
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	    __stringify(DRV_VERSION_MINOR) "." \
	    __stringify(DRV_VERSION_BUILD) DRV_KERN
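/* With the values above, DRV_VERSION expands to the string "1.4.8-k". */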
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
			      u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
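/*
 * Illustrative note: ALIGN() above rounds the request up to the next
 * multiple of @alignment, e.g. ALIGN(100, 64) == 128, so the coherent
 * allocation is always suitably sized/aligned for the hardware.
 */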
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
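/*
 * Allocator example (illustrative): on an empty pile with search_hint 0,
 * i40e_get_lump(pf, pile, 4, id) returns base index 0, marks entries 0-3
 * with (id | I40E_PILE_VALID_BIT) and advances search_hint to 4; a later
 * i40e_put_lump(pile, 0, id) clears those four entries, returns 4, and
 * pulls search_hint back to 0.
 */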
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the VSI with the given id
 * @pf: the PF structure to search
 * @id: id of the VSI it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start ? : netdev->trans_start;
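		/* "q->trans_start ? : netdev->trans_start" uses the GCC
		 * conditional-with-omitted-operand extension: it evaluates
		 * to q->trans_start when nonzero, else netdev->trans_start.
		 */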
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: structure to fill in
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
435 **/ 436 #ifdef I40E_FCOE 437 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( 438 struct net_device *netdev, 439 struct rtnl_link_stats64 *stats) 440 #else 441 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( 442 struct net_device *netdev, 443 struct rtnl_link_stats64 *stats) 444 #endif 445 { 446 struct i40e_netdev_priv *np = netdev_priv(netdev); 447 struct i40e_ring *tx_ring, *rx_ring; 448 struct i40e_vsi *vsi = np->vsi; 449 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); 450 int i; 451 452 if (test_bit(__I40E_DOWN, &vsi->state)) 453 return stats; 454 455 if (!vsi->tx_rings) 456 return stats; 457 458 rcu_read_lock(); 459 for (i = 0; i < vsi->num_queue_pairs; i++) { 460 u64 bytes, packets; 461 unsigned int start; 462 463 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); 464 if (!tx_ring) 465 continue; 466 467 do { 468 start = u64_stats_fetch_begin_irq(&tx_ring->syncp); 469 packets = tx_ring->stats.packets; 470 bytes = tx_ring->stats.bytes; 471 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 472 473 stats->tx_packets += packets; 474 stats->tx_bytes += bytes; 475 rx_ring = &tx_ring[1]; 476 477 do { 478 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 479 packets = rx_ring->stats.packets; 480 bytes = rx_ring->stats.bytes; 481 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 482 483 stats->rx_packets += packets; 484 stats->rx_bytes += bytes; 485 } 486 rcu_read_unlock(); 487 488 /* following stats updated by i40e_watchdog_subtask() */ 489 stats->multicast = vsi_stats->multicast; 490 stats->tx_errors = vsi_stats->tx_errors; 491 stats->tx_dropped = vsi_stats->tx_dropped; 492 stats->rx_errors = vsi_stats->rx_errors; 493 stats->rx_dropped = vsi_stats->rx_dropped; 494 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 495 stats->rx_length_errors = vsi_stats->rx_length_errors; 496 497 return stats; 498 } 499 500 /** 501 * i40e_vsi_reset_stats - Resets all stats of the given vsi 502 * @vsi: the VSI to have its stats reset 503 **/ 504 void i40e_vsi_reset_stats(struct i40e_vsi *vsi) 505 { 506 struct rtnl_link_stats64 *ns; 507 int i; 508 509 if (!vsi) 510 return; 511 512 ns = i40e_get_vsi_stats_struct(vsi); 513 memset(ns, 0, sizeof(*ns)); 514 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); 515 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); 516 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); 517 if (vsi->rx_rings && vsi->rx_rings[0]) { 518 for (i = 0; i < vsi->num_queue_pairs; i++) { 519 memset(&vsi->rx_rings[i]->stats, 0, 520 sizeof(vsi->rx_rings[i]->stats)); 521 memset(&vsi->rx_rings[i]->rx_stats, 0, 522 sizeof(vsi->rx_rings[i]->rx_stats)); 523 memset(&vsi->tx_rings[i]->stats, 0, 524 sizeof(vsi->tx_rings[i]->stats)); 525 memset(&vsi->tx_rings[i]->tx_stats, 0, 526 sizeof(vsi->tx_rings[i]->tx_stats)); 527 } 528 } 529 vsi->stat_offsets_loaded = false; 530 } 531 532 /** 533 * i40e_pf_reset_stats - Reset all of the stats for the given PF 534 * @pf: the PF to be reset 535 **/ 536 void i40e_pf_reset_stats(struct i40e_pf *pf) 537 { 538 int i; 539 540 memset(&pf->stats, 0, sizeof(pf->stats)); 541 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); 542 pf->stat_offsets_loaded = false; 543 544 for (i = 0; i < I40E_MAX_VEB; i++) { 545 if (pf->veb[i]) { 546 memset(&pf->veb[i]->stats, 0, 547 sizeof(pf->veb[i]->stats)); 548 memset(&pf->veb[i]->stats_offsets, 0, 549 sizeof(pf->veb[i]->stats_offsets)); 550 pf->veb[i]->stat_offsets_loaded = false; 551 } 552 } 553 } 554 555 /** 556 * i40e_stat_update48 - read 
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;	/* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

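	/* The eight iterations below cover the eight IEEE 802.1p priority
	 * levels tracked by the per-priority flow control (PFC) counters.
	 */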
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* A VLAN of -1 on every filter is the only sign the VSI is not in
	 * VLAN mode, so we have to walk the whole list to be sure.
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0 || vsi->info.pvid)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Removes a given MAC address from a VSI, regardless of VLAN
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
			  bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f = NULL;
	int changed = 0;

	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
	     "Missing mac_filter_list_lock\n");
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (is_vf == f->is_vf) &&
		    (is_netdev == f->is_netdev)) {
			f->counter--;
			f->changed = true;
			changed = 1;
		}
	}
	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		return 0;
	}
	return -ENOENT;
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
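/*
 * Reference-counting example (illustrative): adding the same MAC/VLAN
 * once with is_netdev and once with is_vf leaves a single list entry
 * with counter == 2.  i40e_del_filter() below only lets the counter
 * reach zero, and so flags the entry for removal by the sync subtask,
 * once both holders have dropped their reference.
 */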
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return i40e_sync_vsi_filters(vsi);
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSI-X
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	else
		qcount = vsi->alloc_queue_pairs;
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
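/*
 * Worked example (illustrative): with numtc = 2 and qcount = 8 queues
 * per TC, TC0 gets qoffset 0 and TC1 qoffset 8; the power-of-2 loop
 * yields pow = 3 (2^3 = 8), so each tc_mapping word above encodes
 * (qoffset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).
 */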
1653 */ 1654 vsi->tc_config.tc_info[i].qoffset = 0; 1655 vsi->tc_config.tc_info[i].qcount = 1; 1656 vsi->tc_config.tc_info[i].netdev_tc = 0; 1657 1658 qmap = 0; 1659 } 1660 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); 1661 } 1662 1663 /* Set actual Tx/Rx queue pairs */ 1664 vsi->num_queue_pairs = offset; 1665 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { 1666 if (vsi->req_queue_pairs > 0) 1667 vsi->num_queue_pairs = vsi->req_queue_pairs; 1668 else if (pf->flags & I40E_FLAG_MSIX_ENABLED) 1669 vsi->num_queue_pairs = pf->num_lan_msix; 1670 } 1671 1672 /* Scheduler section valid can only be set for ADD VSI */ 1673 if (is_add) { 1674 sections |= I40E_AQ_VSI_PROP_SCHED_VALID; 1675 1676 ctxt->info.up_enable_bits = enabled_tc; 1677 } 1678 if (vsi->type == I40E_VSI_SRIOV) { 1679 ctxt->info.mapping_flags |= 1680 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); 1681 for (i = 0; i < vsi->num_queue_pairs; i++) 1682 ctxt->info.queue_mapping[i] = 1683 cpu_to_le16(vsi->base_queue + i); 1684 } else { 1685 ctxt->info.mapping_flags |= 1686 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); 1687 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); 1688 } 1689 ctxt->info.valid_sections |= cpu_to_le16(sections); 1690 } 1691 1692 /** 1693 * i40e_set_rx_mode - NDO callback to set the netdev filters 1694 * @netdev: network interface device structure 1695 **/ 1696 #ifdef I40E_FCOE 1697 void i40e_set_rx_mode(struct net_device *netdev) 1698 #else 1699 static void i40e_set_rx_mode(struct net_device *netdev) 1700 #endif 1701 { 1702 struct i40e_netdev_priv *np = netdev_priv(netdev); 1703 struct i40e_mac_filter *f, *ftmp; 1704 struct i40e_vsi *vsi = np->vsi; 1705 struct netdev_hw_addr *uca; 1706 struct netdev_hw_addr *mca; 1707 struct netdev_hw_addr *ha; 1708 1709 spin_lock_bh(&vsi->mac_filter_list_lock); 1710 1711 /* add addr if not already in the filter list */ 1712 netdev_for_each_uc_addr(uca, netdev) { 1713 if (!i40e_find_mac(vsi, uca->addr, false, true)) { 1714 if (i40e_is_vsi_in_vlan(vsi)) 1715 i40e_put_mac_in_vlan(vsi, uca->addr, 1716 false, true); 1717 else 1718 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY, 1719 false, true); 1720 } 1721 } 1722 1723 netdev_for_each_mc_addr(mca, netdev) { 1724 if (!i40e_find_mac(vsi, mca->addr, false, true)) { 1725 if (i40e_is_vsi_in_vlan(vsi)) 1726 i40e_put_mac_in_vlan(vsi, mca->addr, 1727 false, true); 1728 else 1729 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY, 1730 false, true); 1731 } 1732 } 1733 1734 /* remove filter if not in netdev list */ 1735 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1736 1737 if (!f->is_netdev) 1738 continue; 1739 1740 netdev_for_each_mc_addr(mca, netdev) 1741 if (ether_addr_equal(mca->addr, f->macaddr)) 1742 goto bottom_of_search_loop; 1743 1744 netdev_for_each_uc_addr(uca, netdev) 1745 if (ether_addr_equal(uca->addr, f->macaddr)) 1746 goto bottom_of_search_loop; 1747 1748 for_each_dev_addr(netdev, ha) 1749 if (ether_addr_equal(ha->addr, f->macaddr)) 1750 goto bottom_of_search_loop; 1751 1752 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */ 1753 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true); 1754 1755 bottom_of_search_loop: 1756 continue; 1757 } 1758 spin_unlock_bh(&vsi->mac_filter_list_lock); 1759 1760 /* check for other flag changes */ 1761 if (vsi->current_netdev_flags != vsi->netdev->flags) { 1762 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 1763 vsi->back->flags |= I40E_FLAG_FILTER_SYNC; 1764 } 1765 } 1766 1767 /** 1768 * i40e_mac_filter_entry_clone - Clones a MAC filter entry 1769 * @src: 
 *
 * Returns the pointer to newly cloned MAC filter entry or NULL
 * in case of error
 **/
static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
					struct i40e_mac_filter *src)
{
	struct i40e_mac_filter *f;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return NULL;
	*f = *src;

	INIT_LIST_HEAD(&f->list);

	return f;
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from list were slated to be removed from device.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct list_head *from)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, from, list) {
		f->changed = true;
		/* Move the element back into MAC filter list */
		list_move_tail(&f->list, &vsi->mac_filter_list);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 *
 * MAC filter entries from list were slated to be added to device.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		if (!f->changed && f->counter)
			f->changed = true;
	}
}

/**
 * i40e_cleanup_add_list - Deletes the elements from add list and releases
 *			   the memory
 * @add_list: Pointer to list which contains MAC filter entries
 **/
static void i40e_cleanup_add_list(struct list_head *add_list)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, add_list, list) {
		list_del(&f->list);
		kfree(f);
	}
}
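/*
 * Overview of the sync below (summary of existing behavior): changed
 * filters are first moved (deletes) or cloned (adds) onto temporary
 * lists under mac_filter_list_lock, then pushed to the firmware in
 * asq_buf_size-limited batches with the lock released; the undo/cleanup
 * helpers above roll the lists back if an allocation fails midway.
 */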
1845 * 1846 * Returns 0 or error value 1847 **/ 1848 int i40e_sync_vsi_filters(struct i40e_vsi *vsi) 1849 { 1850 struct list_head tmp_del_list, tmp_add_list; 1851 struct i40e_mac_filter *f, *ftmp, *fclone; 1852 bool promisc_forced_on = false; 1853 bool add_happened = false; 1854 int filter_list_len = 0; 1855 u32 changed_flags = 0; 1856 i40e_status aq_ret = 0; 1857 bool err_cond = false; 1858 int retval = 0; 1859 struct i40e_pf *pf; 1860 int num_add = 0; 1861 int num_del = 0; 1862 int aq_err = 0; 1863 u16 cmd_flags; 1864 1865 /* empty array typed pointers, kcalloc later */ 1866 struct i40e_aqc_add_macvlan_element_data *add_list; 1867 struct i40e_aqc_remove_macvlan_element_data *del_list; 1868 1869 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) 1870 usleep_range(1000, 2000); 1871 pf = vsi->back; 1872 1873 if (vsi->netdev) { 1874 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 1875 vsi->current_netdev_flags = vsi->netdev->flags; 1876 } 1877 1878 INIT_LIST_HEAD(&tmp_del_list); 1879 INIT_LIST_HEAD(&tmp_add_list); 1880 1881 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 1882 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 1883 1884 spin_lock_bh(&vsi->mac_filter_list_lock); 1885 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1886 if (!f->changed) 1887 continue; 1888 1889 if (f->counter != 0) 1890 continue; 1891 f->changed = false; 1892 1893 /* Move the element into temporary del_list */ 1894 list_move_tail(&f->list, &tmp_del_list); 1895 } 1896 1897 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1898 if (!f->changed) 1899 continue; 1900 1901 if (f->counter == 0) 1902 continue; 1903 f->changed = false; 1904 1905 /* Clone MAC filter entry and add into temporary list */ 1906 fclone = i40e_mac_filter_entry_clone(f); 1907 if (!fclone) { 1908 err_cond = true; 1909 break; 1910 } 1911 list_add_tail(&fclone->list, &tmp_add_list); 1912 } 1913 1914 /* if failed to clone MAC filter entry - undo */ 1915 if (err_cond) { 1916 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1917 i40e_undo_add_filter_entries(vsi); 1918 } 1919 spin_unlock_bh(&vsi->mac_filter_list_lock); 1920 1921 if (err_cond) { 1922 i40e_cleanup_add_list(&tmp_add_list); 1923 retval = -ENOMEM; 1924 goto out; 1925 } 1926 } 1927 1928 /* Now process 'del_list' outside the lock */ 1929 if (!list_empty(&tmp_del_list)) { 1930 int del_list_size; 1931 1932 filter_list_len = pf->hw.aq.asq_buf_size / 1933 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1934 del_list_size = filter_list_len * 1935 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1936 del_list = kzalloc(del_list_size, GFP_KERNEL); 1937 if (!del_list) { 1938 i40e_cleanup_add_list(&tmp_add_list); 1939 1940 /* Undo VSI's MAC filter entry element updates */ 1941 spin_lock_bh(&vsi->mac_filter_list_lock); 1942 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1943 i40e_undo_add_filter_entries(vsi); 1944 spin_unlock_bh(&vsi->mac_filter_list_lock); 1945 retval = -ENOMEM; 1946 goto out; 1947 } 1948 1949 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { 1950 cmd_flags = 0; 1951 1952 /* add to delete list */ 1953 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); 1954 del_list[num_del].vlan_tag = 1955 cpu_to_le16((u16)(f->vlan == 1956 I40E_VLAN_ANY ? 
0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
								vsi->seid,
								del_list,
								num_del,
								NULL);
				aq_err = pf->hw.aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, del_list_size);

				if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
					retval = -EIO;
					dev_err(&pf->pdev->dev,
						"ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
						i40e_stat_str(&pf->hw, aq_ret),
						i40e_aq_str(&pf->hw, aq_err));
				}
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			list_del(&f->list);
			kfree(f);
		}

		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			aq_err = pf->hw.aq.asq_last_status;
			num_del = 0;

			if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, aq_ret),
					 i40e_aq_str(&pf->hw, aq_err));
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!list_empty(&tmp_add_list)) {
		int add_list_size;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			 sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list_size = filter_list_len *
			 sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(add_list_size, GFP_KERNEL);
		if (!add_list) {
			/* Purge elements from the temporary list */
			i40e_cleanup_add_list(&tmp_add_list);

			/* Undo add filter entries from VSI MAC filter list */
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_undo_add_filter_entries(vsi);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			retval = -ENOMEM;
			goto out;
		}

		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {

			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
					(u16)(f->vlan == I40E_VLAN_ANY ?
0 : f->vlan)); 2037 add_list[num_add].queue_number = 0; 2038 2039 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 2040 add_list[num_add].flags = cpu_to_le16(cmd_flags); 2041 num_add++; 2042 2043 /* flush a full buffer */ 2044 if (num_add == filter_list_len) { 2045 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2046 add_list, num_add, 2047 NULL); 2048 aq_err = pf->hw.aq.asq_last_status; 2049 num_add = 0; 2050 2051 if (aq_ret) 2052 break; 2053 memset(add_list, 0, add_list_size); 2054 } 2055 /* Entries from tmp_add_list were cloned from MAC 2056 * filter list, hence clean those cloned entries 2057 */ 2058 list_del(&f->list); 2059 kfree(f); 2060 } 2061 2062 if (num_add) { 2063 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2064 add_list, num_add, NULL); 2065 aq_err = pf->hw.aq.asq_last_status; 2066 num_add = 0; 2067 } 2068 kfree(add_list); 2069 add_list = NULL; 2070 2071 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { 2072 retval = i40e_aq_rc_to_posix(aq_ret, aq_err); 2073 dev_info(&pf->pdev->dev, 2074 "add filter failed, err %s aq_err %s\n", 2075 i40e_stat_str(&pf->hw, aq_ret), 2076 i40e_aq_str(&pf->hw, aq_err)); 2077 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 2078 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2079 &vsi->state)) { 2080 promisc_forced_on = true; 2081 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2082 &vsi->state); 2083 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); 2084 } 2085 } 2086 } 2087 2088 /* check for changes in promiscuous modes */ 2089 if (changed_flags & IFF_ALLMULTI) { 2090 bool cur_multipromisc; 2091 2092 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 2093 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 2094 vsi->seid, 2095 cur_multipromisc, 2096 NULL); 2097 if (aq_ret) { 2098 retval = i40e_aq_rc_to_posix(aq_ret, 2099 pf->hw.aq.asq_last_status); 2100 dev_info(&pf->pdev->dev, 2101 "set multi promisc failed, err %s aq_err %s\n", 2102 i40e_stat_str(&pf->hw, aq_ret), 2103 i40e_aq_str(&pf->hw, 2104 pf->hw.aq.asq_last_status)); 2105 } 2106 } 2107 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 2108 bool cur_promisc; 2109 2110 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 2111 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2112 &vsi->state)); 2113 if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { 2114 /* set defport ON for Main VSI instead of true promisc 2115 * this way we will get all unicast/multicast and VLAN 2116 * promisc behavior but will not get VF or VMDq traffic 2117 * replicated on the Main VSI. 
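			 * Requesting a PF reset here is assumed to let the
			 * rebuild path re-apply the default-port setting
			 * once cur_promisc has been updated.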
2118 */ 2119 if (pf->cur_promisc != cur_promisc) { 2120 pf->cur_promisc = cur_promisc; 2121 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2122 } 2123 } else { 2124 aq_ret = i40e_aq_set_vsi_unicast_promiscuous( 2125 &vsi->back->hw, 2126 vsi->seid, 2127 cur_promisc, NULL); 2128 if (aq_ret) { 2129 retval = 2130 i40e_aq_rc_to_posix(aq_ret, 2131 pf->hw.aq.asq_last_status); 2132 dev_info(&pf->pdev->dev, 2133 "set unicast promisc failed, err %d, aq_err %d\n", 2134 aq_ret, pf->hw.aq.asq_last_status); 2135 } 2136 aq_ret = i40e_aq_set_vsi_multicast_promiscuous( 2137 &vsi->back->hw, 2138 vsi->seid, 2139 cur_promisc, NULL); 2140 if (aq_ret) { 2141 retval = 2142 i40e_aq_rc_to_posix(aq_ret, 2143 pf->hw.aq.asq_last_status); 2144 dev_info(&pf->pdev->dev, 2145 "set multicast promisc failed, err %d, aq_err %d\n", 2146 aq_ret, pf->hw.aq.asq_last_status); 2147 } 2148 } 2149 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 2150 vsi->seid, 2151 cur_promisc, NULL); 2152 if (aq_ret) { 2153 retval = i40e_aq_rc_to_posix(aq_ret, 2154 pf->hw.aq.asq_last_status); 2155 dev_info(&pf->pdev->dev, 2156 "set brdcast promisc failed, err %s, aq_err %s\n", 2157 i40e_stat_str(&pf->hw, aq_ret), 2158 i40e_aq_str(&pf->hw, 2159 pf->hw.aq.asq_last_status)); 2160 } 2161 } 2162 out: 2163 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 2164 return retval; 2165 } 2166 2167 /** 2168 * i40e_sync_filters_subtask - Sync the VSI filter list with HW 2169 * @pf: board private structure 2170 **/ 2171 static void i40e_sync_filters_subtask(struct i40e_pf *pf) 2172 { 2173 int v; 2174 2175 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) 2176 return; 2177 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 2178 2179 for (v = 0; v < pf->num_alloc_vsi; v++) { 2180 if (pf->vsi[v] && 2181 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { 2182 int ret = i40e_sync_vsi_filters(pf->vsi[v]); 2183 2184 if (ret) { 2185 /* come back and try again later */ 2186 pf->flags |= I40E_FLAG_FILTER_SYNC; 2187 break; 2188 } 2189 } 2190 } 2191 } 2192 2193 /** 2194 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit 2195 * @netdev: network interface device structure 2196 * @new_mtu: new value for maximum frame size 2197 * 2198 * Returns 0 on success, negative on failure 2199 **/ 2200 static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 2201 { 2202 struct i40e_netdev_priv *np = netdev_priv(netdev); 2203 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2204 struct i40e_vsi *vsi = np->vsi; 2205 2206 /* MTU < 68 is an error and causes problems on some kernels */ 2207 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) 2208 return -EINVAL; 2209 2210 netdev_info(netdev, "changing MTU from %d to %d\n", 2211 netdev->mtu, new_mtu); 2212 netdev->mtu = new_mtu; 2213 if (netif_running(netdev)) 2214 i40e_vsi_reinit_locked(vsi); 2215 2216 return 0; 2217 } 2218 2219 /** 2220 * i40e_ioctl - Access the hwtstamp interface 2221 * @netdev: network interface device structure 2222 * @ifr: interface request data 2223 * @cmd: ioctl command 2224 **/ 2225 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2226 { 2227 struct i40e_netdev_priv *np = netdev_priv(netdev); 2228 struct i40e_pf *pf = np->vsi->back; 2229 2230 switch (cmd) { 2231 case SIOCGHWTSTAMP: 2232 return i40e_ptp_get_ts_config(pf, ifr); 2233 case SIOCSHWTSTAMP: 2234 return i40e_ptp_set_ts_config(pf, ifr); 2235 default: 2236 return -EOPNOTSUPP; 2237 } 2238 } 2239 2240 /** 2241 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 2242 * @vsi: the vsi being adjusted 
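 *
 * This updates the VSI context via the AdminQ so that received VLAN
 * tags are stripped into the Rx descriptor instead of being left in
 * the packet data.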
2243 **/ 2244 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 2245 { 2246 struct i40e_vsi_context ctxt; 2247 i40e_status ret; 2248 2249 if ((vsi->info.valid_sections & 2250 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2251 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 2252 return; /* already enabled */ 2253 2254 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2255 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2256 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 2257 2258 ctxt.seid = vsi->seid; 2259 ctxt.info = vsi->info; 2260 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2261 if (ret) { 2262 dev_info(&vsi->back->pdev->dev, 2263 "update vlan stripping failed, err %s aq_err %s\n", 2264 i40e_stat_str(&vsi->back->hw, ret), 2265 i40e_aq_str(&vsi->back->hw, 2266 vsi->back->hw.aq.asq_last_status)); 2267 } 2268 } 2269 2270 /** 2271 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI 2272 * @vsi: the vsi being adjusted 2273 **/ 2274 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) 2275 { 2276 struct i40e_vsi_context ctxt; 2277 i40e_status ret; 2278 2279 if ((vsi->info.valid_sections & 2280 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2281 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == 2282 I40E_AQ_VSI_PVLAN_EMOD_MASK)) 2283 return; /* already disabled */ 2284 2285 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2286 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2287 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2288 2289 ctxt.seid = vsi->seid; 2290 ctxt.info = vsi->info; 2291 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2292 if (ret) { 2293 dev_info(&vsi->back->pdev->dev, 2294 "update vlan stripping failed, err %s aq_err %s\n", 2295 i40e_stat_str(&vsi->back->hw, ret), 2296 i40e_aq_str(&vsi->back->hw, 2297 vsi->back->hw.aq.asq_last_status)); 2298 } 2299 } 2300 2301 /** 2302 * i40e_vlan_rx_register - Setup or shutdown vlan offload 2303 * @netdev: network interface to be adjusted 2304 * @features: netdev features to test if VLAN offload is enabled or not 2305 **/ 2306 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) 2307 { 2308 struct i40e_netdev_priv *np = netdev_priv(netdev); 2309 struct i40e_vsi *vsi = np->vsi; 2310 2311 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2312 i40e_vlan_stripping_enable(vsi); 2313 else 2314 i40e_vlan_stripping_disable(vsi); 2315 } 2316 2317 /** 2318 * i40e_vsi_add_vlan - Add vsi membership for given vlan 2319 * @vsi: the vsi being configured 2320 * @vid: vlan id to be added (0 = untagged only , -1 = any) 2321 **/ 2322 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) 2323 { 2324 struct i40e_mac_filter *f, *add_f; 2325 bool is_netdev, is_vf; 2326 2327 is_vf = (vsi->type == I40E_VSI_SRIOV); 2328 is_netdev = !!(vsi->netdev); 2329 2330 /* Locked once because all functions invoked below iterates list*/ 2331 spin_lock_bh(&vsi->mac_filter_list_lock); 2332 2333 if (is_netdev) { 2334 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, 2335 is_vf, is_netdev); 2336 if (!add_f) { 2337 dev_info(&vsi->back->pdev->dev, 2338 "Could not add vlan filter %d for %pM\n", 2339 vid, vsi->netdev->dev_addr); 2340 spin_unlock_bh(&vsi->mac_filter_list_lock); 2341 return -ENOMEM; 2342 } 2343 } 2344 2345 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2346 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); 2347 if (!add_f) { 2348 dev_info(&vsi->back->pdev->dev, 2349 "Could not add vlan filter %d for %pM\n", 2350 vid, 
f->macaddr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* If we are adding a vlan tag, check whether this is the first one
	 * (i.e. a -1 "any" filter still exists) and if so replace the -1
	 * "tag" with 0, so that we accept untagged plus the specified
	 * tagged traffic (and not any tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					      is_vf, is_netdev))
				continue;
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr,
						0, is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1.
This signifies that we should from now 2437 * on accept any traffic (with any tag present, or untagged) 2438 */ 2439 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2440 if (is_netdev) { 2441 if (f->vlan && 2442 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2443 filter_count++; 2444 } 2445 2446 if (f->vlan) 2447 filter_count++; 2448 } 2449 2450 if (!filter_count && is_netdev) { 2451 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 2452 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 2453 is_vf, is_netdev); 2454 if (!f) { 2455 dev_info(&vsi->back->pdev->dev, 2456 "Could not add filter %d for %pM\n", 2457 I40E_VLAN_ANY, netdev->dev_addr); 2458 spin_unlock_bh(&vsi->mac_filter_list_lock); 2459 return -ENOMEM; 2460 } 2461 } 2462 2463 if (!filter_count) { 2464 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2465 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 2466 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2467 is_vf, is_netdev); 2468 if (!add_f) { 2469 dev_info(&vsi->back->pdev->dev, 2470 "Could not add filter %d for %pM\n", 2471 I40E_VLAN_ANY, f->macaddr); 2472 spin_unlock_bh(&vsi->mac_filter_list_lock); 2473 return -ENOMEM; 2474 } 2475 } 2476 } 2477 2478 spin_unlock_bh(&vsi->mac_filter_list_lock); 2479 2480 /* schedule our worker thread which will take care of 2481 * applying the new filter changes 2482 */ 2483 i40e_service_event_schedule(vsi->back); 2484 return 0; 2485 } 2486 2487 /** 2488 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2489 * @netdev: network interface to be adjusted 2490 * @vid: vlan id to be added 2491 * 2492 * net_device_ops implementation for adding vlan ids 2493 **/ 2494 #ifdef I40E_FCOE 2495 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2496 __always_unused __be16 proto, u16 vid) 2497 #else 2498 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2499 __always_unused __be16 proto, u16 vid) 2500 #endif 2501 { 2502 struct i40e_netdev_priv *np = netdev_priv(netdev); 2503 struct i40e_vsi *vsi = np->vsi; 2504 int ret = 0; 2505 2506 if (vid > 4095) 2507 return -EINVAL; 2508 2509 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 2510 2511 /* If the network stack called us with vid = 0 then 2512 * it is asking to receive priority tagged packets with 2513 * vlan id 0. Our HW receives them by default when configured 2514 * to receive untagged packets so there is no need to add an 2515 * extra filter for vlan 0 tagged packets. 
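	 * (802.1p priority-tagged frames carry VID 0, so they are covered
	 * by the filters that are already present.)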
2516 */ 2517 if (vid) 2518 ret = i40e_vsi_add_vlan(vsi, vid); 2519 2520 if (!ret && (vid < VLAN_N_VID)) 2521 set_bit(vid, vsi->active_vlans); 2522 2523 return ret; 2524 } 2525 2526 /** 2527 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 2528 * @netdev: network interface to be adjusted 2529 * @vid: vlan id to be removed 2530 * 2531 * net_device_ops implementation for removing vlan ids 2532 **/ 2533 #ifdef I40E_FCOE 2534 int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2535 __always_unused __be16 proto, u16 vid) 2536 #else 2537 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2538 __always_unused __be16 proto, u16 vid) 2539 #endif 2540 { 2541 struct i40e_netdev_priv *np = netdev_priv(netdev); 2542 struct i40e_vsi *vsi = np->vsi; 2543 2544 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); 2545 2546 /* return code is ignored as there is nothing a user 2547 * can do about failure to remove and a log message was 2548 * already printed from the other function 2549 */ 2550 i40e_vsi_kill_vlan(vsi, vid); 2551 2552 clear_bit(vid, vsi->active_vlans); 2553 2554 return 0; 2555 } 2556 2557 /** 2558 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 2559 * @vsi: the vsi being brought back up 2560 **/ 2561 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2562 { 2563 u16 vid; 2564 2565 if (!vsi->netdev) 2566 return; 2567 2568 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2569 2570 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 2571 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2572 vid); 2573 } 2574 2575 /** 2576 * i40e_vsi_add_pvid - Add pvid for the VSI 2577 * @vsi: the vsi being adjusted 2578 * @vid: the vlan id to set as a PVID 2579 **/ 2580 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2581 { 2582 struct i40e_vsi_context ctxt; 2583 i40e_status ret; 2584 2585 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2586 vsi->info.pvid = cpu_to_le16(vid); 2587 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2588 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2589 I40E_AQ_VSI_PVLAN_EMOD_STR; 2590 2591 ctxt.seid = vsi->seid; 2592 ctxt.info = vsi->info; 2593 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2594 if (ret) { 2595 dev_info(&vsi->back->pdev->dev, 2596 "add pvid failed, err %s aq_err %s\n", 2597 i40e_stat_str(&vsi->back->hw, ret), 2598 i40e_aq_str(&vsi->back->hw, 2599 vsi->back->hw.aq.asq_last_status)); 2600 return -ENOENT; 2601 } 2602 2603 return 0; 2604 } 2605 2606 /** 2607 * i40e_vsi_remove_pvid - Remove the pvid from the VSI 2608 * @vsi: the vsi being adjusted 2609 * 2610 * Just use the vlan_rx_register() service to put it back to normal 2611 **/ 2612 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2613 { 2614 i40e_vlan_stripping_disable(vsi); 2615 2616 vsi->info.pvid = 0; 2617 } 2618 2619 /** 2620 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources 2621 * @vsi: ptr to the VSI 2622 * 2623 * If this function returns with an error, then it's possible one or 2624 * more of the rings is populated (while the rest are not). It is the 2625 * callers duty to clean those orphaned rings. 
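 * (i40e_vsi_free_tx_resources() below copes with that, since it checks
 * each ring's descriptor pointer before freeing.)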
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			i40e_free_tx_resources(vsi->tx_rings[i]);
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_setup_ddp_resources(vsi);
#endif
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_free_ddp_resources(vsi);
#endif
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	if (!ring->q_vector || !ring->netdev)
		return;

	/* In single TC mode, enable XPS */
	if (vsi->tc_config.numtc <= 1) {
		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		/* Disable XPS to allow selection based on TC */
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
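 * Any stale context is cleared from the HMC first; the new context is
 * then written and the queue is associated with this PF via QTX_CTL.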
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache the tail offset for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
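 * Buffer sizes and header-split settings come from the owning VSI, and
 * the ring tail register is cleared before initial buffers are posted.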
2834 **/ 2835 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2836 { 2837 struct i40e_vsi *vsi = ring->vsi; 2838 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2839 u16 pf_q = vsi->base_queue + ring->queue_index; 2840 struct i40e_hw *hw = &vsi->back->hw; 2841 struct i40e_hmc_obj_rxq rx_ctx; 2842 i40e_status err = 0; 2843 2844 ring->state = 0; 2845 2846 /* clear the context structure first */ 2847 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2848 2849 ring->rx_buf_len = vsi->rx_buf_len; 2850 ring->rx_hdr_len = vsi->rx_hdr_len; 2851 2852 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2853 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2854 2855 rx_ctx.base = (ring->dma / 128); 2856 rx_ctx.qlen = ring->count; 2857 2858 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2859 set_ring_16byte_desc_enabled(ring); 2860 rx_ctx.dsize = 0; 2861 } else { 2862 rx_ctx.dsize = 1; 2863 } 2864 2865 rx_ctx.dtype = vsi->dtype; 2866 if (vsi->dtype) { 2867 set_ring_ps_enabled(ring); 2868 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2869 I40E_RX_SPLIT_IP | 2870 I40E_RX_SPLIT_TCP_UDP | 2871 I40E_RX_SPLIT_SCTP; 2872 } else { 2873 rx_ctx.hsplit_0 = 0; 2874 } 2875 2876 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2877 (chain_len * ring->rx_buf_len)); 2878 if (hw->revision_id == 0) 2879 rx_ctx.lrxqthresh = 0; 2880 else 2881 rx_ctx.lrxqthresh = 2; 2882 rx_ctx.crcstrip = 1; 2883 rx_ctx.l2tsel = 1; 2884 /* this controls whether VLAN is stripped from inner headers */ 2885 rx_ctx.showiv = 0; 2886 #ifdef I40E_FCOE 2887 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2888 #endif 2889 /* set the prefena field to 1 because the manual says to */ 2890 rx_ctx.prefena = 1; 2891 2892 /* clear the context in the HMC */ 2893 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2894 if (err) { 2895 dev_info(&vsi->back->pdev->dev, 2896 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2897 ring->queue_index, pf_q, err); 2898 return -ENOMEM; 2899 } 2900 2901 /* set the context in the HMC */ 2902 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2903 if (err) { 2904 dev_info(&vsi->back->pdev->dev, 2905 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2906 ring->queue_index, pf_q, err); 2907 return -ENOMEM; 2908 } 2909 2910 /* cache tail for quicker writes, and clear the reg before use */ 2911 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2912 writel(0, ring->tail); 2913 2914 if (ring_is_ps_enabled(ring)) { 2915 i40e_alloc_rx_headers(ring); 2916 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); 2917 } else { 2918 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); 2919 } 2920 2921 return 0; 2922 } 2923 2924 /** 2925 * i40e_vsi_configure_tx - Configure the VSI for Tx 2926 * @vsi: VSI structure describing this set of rings and resources 2927 * 2928 * Configure the Tx VSI for operation. 2929 **/ 2930 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2931 { 2932 int err = 0; 2933 u16 i; 2934 2935 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2936 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2937 2938 return err; 2939 } 2940 2941 /** 2942 * i40e_vsi_configure_rx - Configure the VSI for Rx 2943 * @vsi: the VSI being configured 2944 * 2945 * Configure the Rx VSI for operation. 
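 *
 * Frame and buffer sizes are derived from the netdev MTU and the
 * 1BUF/PS flags once for the VSI, then applied to every Rx ring.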
2946 **/ 2947 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2948 { 2949 int err = 0; 2950 u16 i; 2951 2952 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2953 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2954 + ETH_FCS_LEN + VLAN_HLEN; 2955 else 2956 vsi->max_frame = I40E_RXBUFFER_2048; 2957 2958 /* figure out correct receive buffer length */ 2959 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2960 I40E_FLAG_RX_PS_ENABLED)) { 2961 case I40E_FLAG_RX_1BUF_ENABLED: 2962 vsi->rx_hdr_len = 0; 2963 vsi->rx_buf_len = vsi->max_frame; 2964 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2965 break; 2966 case I40E_FLAG_RX_PS_ENABLED: 2967 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2968 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2969 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2970 break; 2971 default: 2972 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2973 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2974 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2975 break; 2976 } 2977 2978 #ifdef I40E_FCOE 2979 /* setup rx buffer for FCoE */ 2980 if ((vsi->type == I40E_VSI_FCOE) && 2981 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 2982 vsi->rx_hdr_len = 0; 2983 vsi->rx_buf_len = I40E_RXBUFFER_3072; 2984 vsi->max_frame = I40E_RXBUFFER_3072; 2985 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2986 } 2987 2988 #endif /* I40E_FCOE */ 2989 /* round up for the chip's needs */ 2990 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2991 BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); 2992 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2993 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); 2994 2995 /* set up individual rings */ 2996 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2997 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2998 2999 return err; 3000 } 3001 3002 /** 3003 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 3004 * @vsi: ptr to the VSI 3005 **/ 3006 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 3007 { 3008 struct i40e_ring *tx_ring, *rx_ring; 3009 u16 qoffset, qcount; 3010 int i, n; 3011 3012 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 3013 /* Reset the TC information */ 3014 for (i = 0; i < vsi->num_queue_pairs; i++) { 3015 rx_ring = vsi->rx_rings[i]; 3016 tx_ring = vsi->tx_rings[i]; 3017 rx_ring->dcb_tc = 0; 3018 tx_ring->dcb_tc = 0; 3019 } 3020 } 3021 3022 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 3023 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) 3024 continue; 3025 3026 qoffset = vsi->tc_config.tc_info[n].qoffset; 3027 qcount = vsi->tc_config.tc_info[n].qcount; 3028 for (i = qoffset; i < (qoffset + qcount); i++) { 3029 rx_ring = vsi->rx_rings[i]; 3030 tx_ring = vsi->tx_rings[i]; 3031 rx_ring->dcb_tc = n; 3032 tx_ring->dcb_tc = n; 3033 } 3034 } 3035 } 3036 3037 /** 3038 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 3039 * @vsi: ptr to the VSI 3040 **/ 3041 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 3042 { 3043 if (vsi->netdev) 3044 i40e_set_rx_mode(vsi->netdev); 3045 } 3046 3047 /** 3048 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 3049 * @vsi: Pointer to the targeted VSI 3050 * 3051 * This function replays the hlist on the hw where all the SB Flow Director 3052 * filters were saved. 
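 * Each saved filter is re-added through the normal i40e_add_del_fdir()
 * programming path.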
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     INTRL_USEC_TO_REG(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX
			       << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX
			       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					<< I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);	/* disable all */
	rd32(hw, I40E_PFINT_ICR0);		/* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
| 3169 I40E_PFINT_ICR0_ENA_GRST_MASK | 3170 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 3171 I40E_PFINT_ICR0_ENA_GPIO_MASK | 3172 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 3173 I40E_PFINT_ICR0_ENA_VFLR_MASK | 3174 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3175 3176 if (pf->flags & I40E_FLAG_IWARP_ENABLED) 3177 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3178 3179 if (pf->flags & I40E_FLAG_PTP) 3180 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3181 3182 wr32(hw, I40E_PFINT_ICR0_ENA, val); 3183 3184 /* SW_ITR_IDX = 0, but don't change INTENA */ 3185 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 3186 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 3187 3188 /* OTHER_ITR_IDX = 0 */ 3189 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 3190 } 3191 3192 /** 3193 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 3194 * @vsi: the VSI being configured 3195 **/ 3196 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 3197 { 3198 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3199 struct i40e_pf *pf = vsi->back; 3200 struct i40e_hw *hw = &pf->hw; 3201 u32 val; 3202 3203 /* set the ITR configuration */ 3204 q_vector->itr_countdown = ITR_COUNTDOWN_START; 3205 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 3206 q_vector->rx.latency_range = I40E_LOW_LATENCY; 3207 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 3208 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 3209 q_vector->tx.latency_range = I40E_LOW_LATENCY; 3210 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 3211 3212 i40e_enable_misc_int_causes(pf); 3213 3214 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 3215 wr32(hw, I40E_PFINT_LNKLST0, 0); 3216 3217 /* Associate the queue pair to the vector and enable the queue int */ 3218 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3219 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3220 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3221 3222 wr32(hw, I40E_QINT_RQCTL(0), val); 3223 3224 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3225 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3226 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3227 3228 wr32(hw, I40E_QINT_TQCTL(0), val); 3229 i40e_flush(hw); 3230 } 3231 3232 /** 3233 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 3234 * @pf: board private structure 3235 **/ 3236 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 3237 { 3238 struct i40e_hw *hw = &pf->hw; 3239 3240 wr32(hw, I40E_PFINT_DYN_CTL0, 3241 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3242 i40e_flush(hw); 3243 } 3244 3245 /** 3246 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 3247 * @pf: board private structure 3248 **/ 3249 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 3250 { 3251 struct i40e_hw *hw = &pf->hw; 3252 u32 val; 3253 3254 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 3255 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 3256 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 3257 3258 wr32(hw, I40E_PFINT_DYN_CTL0, val); 3259 i40e_flush(hw); 3260 } 3261 3262 /** 3263 * i40e_irq_dynamic_disable - Disable default interrupt generation settings 3264 * @vsi: pointer to a vsi 3265 * @vector: disable a particular Hw Interrupt vector 3266 **/ 3267 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) 3268 { 3269 struct i40e_pf *pf = vsi->back; 3270 struct i40e_hw *hw = &pf->hw; 3271 u32 val; 3272 3273 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 3274 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 3275 i40e_flush(hw); 3276 } 3277 3278 
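/* A note on register indexing (illustrative): MSI-X vector 0 is the
 * "other causes" vector and is controlled through PFINT_DYN_CTL0 above,
 * while queue vectors use the PFINT_DYN_CTLN array at index
 * vector - 1. For example:
 *
 *	i40e_irq_dynamic_disable(vsi, 1);	// writes PFINT_DYN_CTLN(0)
 */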
/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(pf->msix_entries[base + vector].vector,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* assign the mask for this irq */
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      NULL);
		/* free with the same cookie that was passed to request_irq()
		 * (the q_vector pointer itself, not its address)
		 */
		free_irq(pf->msix_entries[base + vector].vector,
			 vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{ 3396 struct i40e_pf *pf = vsi->back; 3397 int i; 3398 3399 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3400 for (i = 0; i < vsi->num_q_vectors; i++) 3401 i40e_irq_dynamic_enable(vsi, i); 3402 } else { 3403 i40e_irq_dynamic_enable_icr0(pf); 3404 } 3405 3406 i40e_flush(&pf->hw); 3407 return 0; 3408 } 3409 3410 /** 3411 * i40e_stop_misc_vector - Stop the vector that handles non-queue events 3412 * @pf: board private structure 3413 **/ 3414 static void i40e_stop_misc_vector(struct i40e_pf *pf) 3415 { 3416 /* Disable ICR 0 */ 3417 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 3418 i40e_flush(&pf->hw); 3419 } 3420 3421 /** 3422 * i40e_intr - MSI/Legacy and non-queue interrupt handler 3423 * @irq: interrupt number 3424 * @data: pointer to a q_vector 3425 * 3426 * This is the handler used for all MSI/Legacy interrupts, and deals 3427 * with both queue and non-queue interrupts. This is also used in 3428 * MSIX mode to handle the non-queue interrupts. 3429 **/ 3430 static irqreturn_t i40e_intr(int irq, void *data) 3431 { 3432 struct i40e_pf *pf = (struct i40e_pf *)data; 3433 struct i40e_hw *hw = &pf->hw; 3434 irqreturn_t ret = IRQ_NONE; 3435 u32 icr0, icr0_remaining; 3436 u32 val, ena_mask; 3437 3438 icr0 = rd32(hw, I40E_PFINT_ICR0); 3439 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 3440 3441 /* if sharing a legacy IRQ, we might get called w/o an intr pending */ 3442 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 3443 goto enable_intr; 3444 3445 /* if interrupt but no bits showing, must be SWINT */ 3446 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || 3447 (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) 3448 pf->sw_int_count++; 3449 3450 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 3451 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { 3452 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3453 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3454 dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n"); 3455 } 3456 3457 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 3458 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 3459 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 3460 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3461 3462 /* temporarily disable queue cause for NAPI processing */ 3463 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); 3464 3465 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; 3466 wr32(hw, I40E_QINT_RQCTL(0), qval); 3467 3468 qval = rd32(hw, I40E_QINT_TQCTL(0)); 3469 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 3470 wr32(hw, I40E_QINT_TQCTL(0), qval); 3471 3472 if (!test_bit(__I40E_DOWN, &pf->state)) 3473 napi_schedule_irqoff(&q_vector->napi); 3474 } 3475 3476 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 3477 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3478 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 3479 } 3480 3481 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 3482 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 3483 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 3484 } 3485 3486 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 3487 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 3488 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 3489 } 3490 3491 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 3492 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 3493 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 3494 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 3495 val = rd32(hw, I40E_GLGEN_RSTAT); 3496 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3497 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3498 if (val == I40E_RESET_CORER) { 3499 pf->corer_count++; 3500 } else if (val == I40E_RESET_GLOBR) { 3501 
pf->globr_count++; 3502 } else if (val == I40E_RESET_EMPR) { 3503 pf->empr_count++; 3504 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); 3505 } 3506 } 3507 3508 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3509 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3510 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3511 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", 3512 rd32(hw, I40E_PFHMC_ERRORINFO), 3513 rd32(hw, I40E_PFHMC_ERRORDATA)); 3514 } 3515 3516 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3517 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 3518 3519 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 3520 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3521 i40e_ptp_tx_hwtstamp(pf); 3522 } 3523 } 3524 3525 /* If a critical error is pending we have no choice but to reset the 3526 * device. 3527 * Report and mask out any remaining unexpected interrupts. 3528 */ 3529 icr0_remaining = icr0 & ena_mask; 3530 if (icr0_remaining) { 3531 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 3532 icr0_remaining); 3533 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 3534 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 3535 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 3536 dev_info(&pf->pdev->dev, "device will be reset\n"); 3537 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 3538 i40e_service_event_schedule(pf); 3539 } 3540 ena_mask &= ~icr0_remaining; 3541 } 3542 ret = IRQ_HANDLED; 3543 3544 enable_intr: 3545 /* re-enable interrupt causes */ 3546 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3547 if (!test_bit(__I40E_DOWN, &pf->state)) { 3548 i40e_service_event_schedule(pf); 3549 i40e_irq_dynamic_enable_icr0(pf); 3550 } 3551 3552 return ret; 3553 } 3554 3555 /** 3556 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 3557 * @tx_ring: tx ring to clean 3558 * @budget: how many cleans we're allowed 3559 * 3560 * Returns true if there's any budget left (e.g. 
the clean is finished) 3561 **/ 3562 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3563 { 3564 struct i40e_vsi *vsi = tx_ring->vsi; 3565 u16 i = tx_ring->next_to_clean; 3566 struct i40e_tx_buffer *tx_buf; 3567 struct i40e_tx_desc *tx_desc; 3568 3569 tx_buf = &tx_ring->tx_bi[i]; 3570 tx_desc = I40E_TX_DESC(tx_ring, i); 3571 i -= tx_ring->count; 3572 3573 do { 3574 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3575 3576 /* if next_to_watch is not set then there is no work pending */ 3577 if (!eop_desc) 3578 break; 3579 3580 /* prevent any other reads prior to eop_desc */ 3581 read_barrier_depends(); 3582 3583 /* if the descriptor isn't done, no work yet to do */ 3584 if (!(eop_desc->cmd_type_offset_bsz & 3585 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3586 break; 3587 3588 /* clear next_to_watch to prevent false hangs */ 3589 tx_buf->next_to_watch = NULL; 3590 3591 tx_desc->buffer_addr = 0; 3592 tx_desc->cmd_type_offset_bsz = 0; 3593 /* move past filter desc */ 3594 tx_buf++; 3595 tx_desc++; 3596 i++; 3597 if (unlikely(!i)) { 3598 i -= tx_ring->count; 3599 tx_buf = tx_ring->tx_bi; 3600 tx_desc = I40E_TX_DESC(tx_ring, 0); 3601 } 3602 /* unmap skb header data */ 3603 dma_unmap_single(tx_ring->dev, 3604 dma_unmap_addr(tx_buf, dma), 3605 dma_unmap_len(tx_buf, len), 3606 DMA_TO_DEVICE); 3607 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3608 kfree(tx_buf->raw_buf); 3609 3610 tx_buf->raw_buf = NULL; 3611 tx_buf->tx_flags = 0; 3612 tx_buf->next_to_watch = NULL; 3613 dma_unmap_len_set(tx_buf, len, 0); 3614 tx_desc->buffer_addr = 0; 3615 tx_desc->cmd_type_offset_bsz = 0; 3616 3617 /* move us past the eop_desc for start of next FD desc */ 3618 tx_buf++; 3619 tx_desc++; 3620 i++; 3621 if (unlikely(!i)) { 3622 i -= tx_ring->count; 3623 tx_buf = tx_ring->tx_bi; 3624 tx_desc = I40E_TX_DESC(tx_ring, 0); 3625 } 3626 3627 /* update budget accounting */ 3628 budget--; 3629 } while (likely(budget)); 3630 3631 i += tx_ring->count; 3632 tx_ring->next_to_clean = i; 3633 3634 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) 3635 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); 3636 3637 return budget > 0; 3638 } 3639 3640 /** 3641 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3642 * @irq: interrupt number 3643 * @data: pointer to a q_vector 3644 **/ 3645 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3646 { 3647 struct i40e_q_vector *q_vector = data; 3648 struct i40e_vsi *vsi; 3649 3650 if (!q_vector->tx.ring) 3651 return IRQ_HANDLED; 3652 3653 vsi = q_vector->tx.ring->vsi; 3654 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3655 3656 return IRQ_HANDLED; 3657 } 3658 3659 /** 3660 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3661 * @vsi: the VSI being configured 3662 * @v_idx: vector index 3663 * @qp_idx: queue pair index 3664 **/ 3665 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3666 { 3667 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3668 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3669 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3670 3671 tx_ring->q_vector = q_vector; 3672 tx_ring->next = q_vector->tx.ring; 3673 q_vector->tx.ring = tx_ring; 3674 q_vector->tx.count++; 3675 3676 rx_ring->q_vector = q_vector; 3677 rx_ring->next = q_vector->rx.ring; 3678 q_vector->rx.ring = rx_ring; 3679 q_vector->rx.count++; 3680 } 3681 3682 /** 3683 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3684 * @vsi: the VSI being configured 3685 * 3686 * This 
function maps descriptor rings to the queue-specific vectors 3687 * we were allotted through the MSI-X enabling code. Ideally, we'd have 3688 * one vector per queue pair, but on a constrained vector budget, we 3689 * group the queue pairs as "efficiently" as possible. 3690 **/ 3691 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) 3692 { 3693 int qp_remaining = vsi->num_queue_pairs; 3694 int q_vectors = vsi->num_q_vectors; 3695 int num_ringpairs; 3696 int v_start = 0; 3697 int qp_idx = 0; 3698 3699 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to 3700 * group them so there are multiple queues per vector. 3701 * It is also important to go through all the vectors available to be 3702 * sure that if we don't use all the vectors, that the remaining vectors 3703 * are cleared. This is especially important when decreasing the 3704 * number of queues in use. 3705 */ 3706 for (; v_start < q_vectors; v_start++) { 3707 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; 3708 3709 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 3710 3711 q_vector->num_ringpairs = num_ringpairs; 3712 3713 q_vector->rx.count = 0; 3714 q_vector->tx.count = 0; 3715 q_vector->rx.ring = NULL; 3716 q_vector->tx.ring = NULL; 3717 3718 while (num_ringpairs--) { 3719 i40e_map_vector_to_qp(vsi, v_start, qp_idx); 3720 qp_idx++; 3721 qp_remaining--; 3722 } 3723 } 3724 } 3725 3726 /** 3727 * i40e_vsi_request_irq - Request IRQ from the OS 3728 * @vsi: the VSI being configured 3729 * @basename: name for the vector 3730 **/ 3731 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) 3732 { 3733 struct i40e_pf *pf = vsi->back; 3734 int err; 3735 3736 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 3737 err = i40e_vsi_request_irq_msix(vsi, basename); 3738 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 3739 err = request_irq(pf->pdev->irq, i40e_intr, 0, 3740 pf->int_name, pf); 3741 else 3742 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 3743 pf->int_name, pf); 3744 3745 if (err) 3746 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 3747 3748 return err; 3749 } 3750 3751 #ifdef CONFIG_NET_POLL_CONTROLLER 3752 /** 3753 * i40e_netpoll - A Polling 'interrupt'handler 3754 * @netdev: network interface device structure 3755 * 3756 * This is used by netconsole to send skbs without having to re-enable 3757 * interrupts. It's not called while the normal interrupt routine is executing. 3758 **/ 3759 #ifdef I40E_FCOE 3760 void i40e_netpoll(struct net_device *netdev) 3761 #else 3762 static void i40e_netpoll(struct net_device *netdev) 3763 #endif 3764 { 3765 struct i40e_netdev_priv *np = netdev_priv(netdev); 3766 struct i40e_vsi *vsi = np->vsi; 3767 struct i40e_pf *pf = vsi->back; 3768 int i; 3769 3770 /* if interface is down do nothing */ 3771 if (test_bit(__I40E_DOWN, &vsi->state)) 3772 return; 3773 3774 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3775 for (i = 0; i < vsi->num_q_vectors; i++) 3776 i40e_msix_clean_rings(0, vsi->q_vectors[i]); 3777 } else { 3778 i40e_intr(pf->pdev->irq, netdev); 3779 } 3780 } 3781 #endif 3782 3783 /** 3784 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled 3785 * @pf: the PF being configured 3786 * @pf_q: the PF queue 3787 * @enable: enable or disable state of the queue 3788 * 3789 * This routine will wait for the given Tx queue of the PF to reach the 3790 * enabled or disabled state. 
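 * The enable status bit is polled every 10-20 us, up to
 * I40E_QUEUE_WAIT_RETRY_LIMIT attempts.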
3791 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3792 * multiple retries; else will return 0 in case of success. 3793 **/ 3794 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3795 { 3796 int i; 3797 u32 tx_reg; 3798 3799 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3800 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3801 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3802 break; 3803 3804 usleep_range(10, 20); 3805 } 3806 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3807 return -ETIMEDOUT; 3808 3809 return 0; 3810 } 3811 3812 /** 3813 * i40e_vsi_control_tx - Start or stop a VSI's rings 3814 * @vsi: the VSI being configured 3815 * @enable: start or stop the rings 3816 **/ 3817 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3818 { 3819 struct i40e_pf *pf = vsi->back; 3820 struct i40e_hw *hw = &pf->hw; 3821 int i, j, pf_q, ret = 0; 3822 u32 tx_reg; 3823 3824 pf_q = vsi->base_queue; 3825 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3826 3827 /* warn the TX unit of coming changes */ 3828 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 3829 if (!enable) 3830 usleep_range(10, 20); 3831 3832 for (j = 0; j < 50; j++) { 3833 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3834 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3835 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3836 break; 3837 usleep_range(1000, 2000); 3838 } 3839 /* Skip if the queue is already in the requested state */ 3840 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3841 continue; 3842 3843 /* turn on/off the queue */ 3844 if (enable) { 3845 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3846 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3847 } else { 3848 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3849 } 3850 3851 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3852 /* No waiting for the Tx queue to disable */ 3853 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) 3854 continue; 3855 3856 /* wait for the change to finish */ 3857 ret = i40e_pf_txq_wait(pf, pf_q, enable); 3858 if (ret) { 3859 dev_info(&pf->pdev->dev, 3860 "VSI seid %d Tx ring %d %sable timeout\n", 3861 vsi->seid, pf_q, (enable ? "en" : "dis")); 3862 break; 3863 } 3864 } 3865 3866 if (hw->revision_id == 0) 3867 mdelay(50); 3868 return ret; 3869 } 3870 3871 /** 3872 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 3873 * @pf: the PF being configured 3874 * @pf_q: the PF queue 3875 * @enable: enable or disable state of the queue 3876 * 3877 * This routine will wait for the given Rx queue of the PF to reach the 3878 * enabled or disabled state. 3879 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3880 * multiple retries; else will return 0 in case of success. 
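 *
 * A minimal caller sketch (mirroring how i40e_vsi_control_rx() below uses
 * it; the exact message shown here is illustrative, not the driver's):
 *
 *	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 *	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
 *	if (ret)
 *		dev_info(&pf->pdev->dev, "Rx ring %d timeout\n", pf_q);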
3881  **/
3882 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3883 {
3884 	int i;
3885 	u32 rx_reg;
3886 
3887 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3888 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3889 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3890 			break;
3891 
3892 		usleep_range(10, 20);
3893 	}
3894 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3895 		return -ETIMEDOUT;
3896 
3897 	return 0;
3898 }
3899 
3900 /**
3901  * i40e_vsi_control_rx - Start or stop a VSI's rings
3902  * @vsi: the VSI being configured
3903  * @enable: start or stop the rings
3904  **/
3905 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3906 {
3907 	struct i40e_pf *pf = vsi->back;
3908 	struct i40e_hw *hw = &pf->hw;
3909 	int i, j, pf_q, ret = 0;
3910 	u32 rx_reg;
3911 
3912 	pf_q = vsi->base_queue;
3913 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3914 		for (j = 0; j < 50; j++) {
3915 			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3916 			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3917 			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3918 				break;
3919 			usleep_range(1000, 2000);
3920 		}
3921 
3922 		/* Skip if the queue is already in the requested state */
3923 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3924 			continue;
3925 
3926 		/* turn on/off the queue */
3927 		if (enable)
3928 			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3929 		else
3930 			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3931 		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3932 
3933 		/* wait for the change to finish */
3934 		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3935 		if (ret) {
3936 			dev_info(&pf->pdev->dev,
3937 				 "VSI seid %d Rx ring %d %sable timeout\n",
3938 				 vsi->seid, pf_q, (enable ? "en" : "dis"));
3939 			break;
3940 		}
3941 	}
3942 
3943 	return ret;
3944 }
3945 
3946 /**
3947  * i40e_vsi_control_rings - Start or stop a VSI's rings
3948  * @vsi: the VSI being configured
3949  * @request: start or stop the rings
3950  **/
3951 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3952 {
3953 	int ret = 0;
3954 
3955 	/* do rx first for enable and last for disable */
3956 	if (request) {
3957 		ret = i40e_vsi_control_rx(vsi, request);
3958 		if (ret)
3959 			return ret;
3960 		ret = i40e_vsi_control_tx(vsi, request);
3961 	} else {
3962 		/* Ignore return value, we need to shutdown whatever we can */
3963 		i40e_vsi_control_tx(vsi, request);
3964 		i40e_vsi_control_rx(vsi, request);
3965 	}
3966 
3967 	return ret;
3968 }
3969 
3970 /**
3971  * i40e_vsi_free_irq - Free the irq association with the OS
3972  * @vsi: the VSI being configured
3973  **/
3974 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3975 {
3976 	struct i40e_pf *pf = vsi->back;
3977 	struct i40e_hw *hw = &pf->hw;
3978 	int base = vsi->base_vector;
3979 	u32 val, qp;
3980 	int i;
3981 
3982 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3983 		if (!vsi->q_vectors)
3984 			return;
3985 
3986 		if (!vsi->irqs_ready)
3987 			return;
3988 
3989 		vsi->irqs_ready = false;
3990 		for (i = 0; i < vsi->num_q_vectors; i++) {
3991 			u16 vector = i + base;
3992 
3993 			/* free only the irqs that were actually requested */
3994 			if (!vsi->q_vectors[i] ||
3995 			    !vsi->q_vectors[i]->num_ringpairs)
3996 				continue;
3997 
3998 			/* clear the affinity_mask in the IRQ descriptor */
3999 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
4000 					      NULL);
4001 			free_irq(pf->msix_entries[vector].vector,
4002 				 vsi->q_vectors[i]);
4003 
4004 			/* Tear down the interrupt queue link list
4005 			 *
4006 			 * We know that they come in pairs and always
4007 			 * the Rx first, then the Tx.
To clear the 4008 * link list, stick the EOL value into the 4009 * next_q field of the registers. 4010 */ 4011 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 4012 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4013 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4014 val |= I40E_QUEUE_END_OF_LIST 4015 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4016 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 4017 4018 while (qp != I40E_QUEUE_END_OF_LIST) { 4019 u32 next; 4020 4021 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4022 4023 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4024 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4025 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4026 I40E_QINT_RQCTL_INTEVENT_MASK); 4027 4028 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4029 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4030 4031 wr32(hw, I40E_QINT_RQCTL(qp), val); 4032 4033 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4034 4035 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 4036 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 4037 4038 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4039 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4040 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4041 I40E_QINT_TQCTL_INTEVENT_MASK); 4042 4043 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4044 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4045 4046 wr32(hw, I40E_QINT_TQCTL(qp), val); 4047 qp = next; 4048 } 4049 } 4050 } else { 4051 free_irq(pf->pdev->irq, pf); 4052 4053 val = rd32(hw, I40E_PFINT_LNKLST0); 4054 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4055 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4056 val |= I40E_QUEUE_END_OF_LIST 4057 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 4058 wr32(hw, I40E_PFINT_LNKLST0, val); 4059 4060 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4061 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4062 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4063 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4064 I40E_QINT_RQCTL_INTEVENT_MASK); 4065 4066 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4067 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4068 4069 wr32(hw, I40E_QINT_RQCTL(qp), val); 4070 4071 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4072 4073 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4074 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4075 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4076 I40E_QINT_TQCTL_INTEVENT_MASK); 4077 4078 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4079 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4080 4081 wr32(hw, I40E_QINT_TQCTL(qp), val); 4082 } 4083 } 4084 4085 /** 4086 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 4087 * @vsi: the VSI being configured 4088 * @v_idx: Index of vector to be freed 4089 * 4090 * This function frees the memory allocated to the q_vector. In addition if 4091 * NAPI is enabled it will delete any references to the NAPI struct prior 4092 * to freeing the q_vector. 4093 **/ 4094 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 4095 { 4096 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 4097 struct i40e_ring *ring; 4098 4099 if (!q_vector) 4100 return; 4101 4102 /* disassociate q_vector from rings */ 4103 i40e_for_each_ring(ring, q_vector->tx) 4104 ring->q_vector = NULL; 4105 4106 i40e_for_each_ring(ring, q_vector->rx) 4107 ring->q_vector = NULL; 4108 4109 /* only VSI w/ an associated netdev is set up w/ NAPI */ 4110 if (vsi->netdev) 4111 netif_napi_del(&q_vector->napi); 4112 4113 vsi->q_vectors[v_idx] = NULL; 4114 4115 kfree_rcu(q_vector, rcu); 4116 } 4117 4118 /** 4119 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 4120 * @vsi: the VSI being un-configured 4121 * 4122 * This frees the memory allocated to the q_vectors and 4123 * deletes references to the NAPI struct. 
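 *
 * A typical caller walks every VSI, as i40e_clear_interrupt_scheme()
 * below does; reproduced here only as a usage sketch:
 *
 *	for (i = 0; i < pf->num_alloc_vsi; i++)
 *		if (pf->vsi[i])
 *			i40e_vsi_free_q_vectors(pf->vsi[i]);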
4124 **/ 4125 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 4126 { 4127 int v_idx; 4128 4129 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 4130 i40e_free_q_vector(vsi, v_idx); 4131 } 4132 4133 /** 4134 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 4135 * @pf: board private structure 4136 **/ 4137 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 4138 { 4139 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 4140 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4141 pci_disable_msix(pf->pdev); 4142 kfree(pf->msix_entries); 4143 pf->msix_entries = NULL; 4144 kfree(pf->irq_pile); 4145 pf->irq_pile = NULL; 4146 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 4147 pci_disable_msi(pf->pdev); 4148 } 4149 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 4150 } 4151 4152 /** 4153 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 4154 * @pf: board private structure 4155 * 4156 * We go through and clear interrupt specific resources and reset the structure 4157 * to pre-load conditions 4158 **/ 4159 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 4160 { 4161 int i; 4162 4163 i40e_stop_misc_vector(pf); 4164 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4165 synchronize_irq(pf->msix_entries[0].vector); 4166 free_irq(pf->msix_entries[0].vector, pf); 4167 } 4168 4169 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 4170 for (i = 0; i < pf->num_alloc_vsi; i++) 4171 if (pf->vsi[i]) 4172 i40e_vsi_free_q_vectors(pf->vsi[i]); 4173 i40e_reset_interrupt_capability(pf); 4174 } 4175 4176 /** 4177 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 4178 * @vsi: the VSI being configured 4179 **/ 4180 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 4181 { 4182 int q_idx; 4183 4184 if (!vsi->netdev) 4185 return; 4186 4187 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4188 napi_enable(&vsi->q_vectors[q_idx]->napi); 4189 } 4190 4191 /** 4192 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4193 * @vsi: the VSI being configured 4194 **/ 4195 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 4196 { 4197 int q_idx; 4198 4199 if (!vsi->netdev) 4200 return; 4201 4202 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4203 napi_disable(&vsi->q_vectors[q_idx]->napi); 4204 } 4205 4206 /** 4207 * i40e_vsi_close - Shut down a VSI 4208 * @vsi: the vsi to be quelled 4209 **/ 4210 static void i40e_vsi_close(struct i40e_vsi *vsi) 4211 { 4212 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 4213 i40e_down(vsi); 4214 i40e_vsi_free_irq(vsi); 4215 i40e_vsi_free_tx_resources(vsi); 4216 i40e_vsi_free_rx_resources(vsi); 4217 vsi->current_netdev_flags = 0; 4218 } 4219 4220 /** 4221 * i40e_quiesce_vsi - Pause a given VSI 4222 * @vsi: the VSI being paused 4223 **/ 4224 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 4225 { 4226 if (test_bit(__I40E_DOWN, &vsi->state)) 4227 return; 4228 4229 /* No need to disable FCoE VSI when Tx suspended */ 4230 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && 4231 vsi->type == I40E_VSI_FCOE) { 4232 dev_dbg(&vsi->back->pdev->dev, 4233 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); 4234 return; 4235 } 4236 4237 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 4238 if (vsi->netdev && netif_running(vsi->netdev)) 4239 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 4240 else 4241 i40e_vsi_close(vsi); 4242 } 4243 4244 /** 4245 * i40e_unquiesce_vsi - Resume a given VSI 4246 * @vsi: the VSI being resumed 4247 **/ 4248 static 
void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4249 {
4250 	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4251 		return;
4252 
4253 	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4254 	if (vsi->netdev && netif_running(vsi->netdev))
4255 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4256 	else
4257 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
4258 }
4259 
4260 /**
4261  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4262  * @pf: the PF
4263  **/
4264 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4265 {
4266 	int v;
4267 
4268 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4269 		if (pf->vsi[v])
4270 			i40e_quiesce_vsi(pf->vsi[v]);
4271 	}
4272 }
4273 
4274 /**
4275  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4276  * @pf: the PF
4277  **/
4278 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4279 {
4280 	int v;
4281 
4282 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4283 		if (pf->vsi[v])
4284 			i40e_unquiesce_vsi(pf->vsi[v]);
4285 	}
4286 }
4287 
4288 #ifdef CONFIG_I40E_DCB
4289 /**
4290  * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
4291  * @vsi: the VSI being configured
4292  *
4293  * This function waits for the given VSI's Tx queues to be disabled.
4294  **/
4295 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4296 {
4297 	struct i40e_pf *pf = vsi->back;
4298 	int i, pf_q, ret;
4299 
4300 	pf_q = vsi->base_queue;
4301 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4302 		/* Check and wait for the disable status of the queue */
4303 		ret = i40e_pf_txq_wait(pf, pf_q, false);
4304 		if (ret) {
4305 			dev_info(&pf->pdev->dev,
4306 				 "VSI seid %d Tx ring %d disable timeout\n",
4307 				 vsi->seid, pf_q);
4308 			return ret;
4309 		}
4310 	}
4311 
4312 	return 0;
4313 }
4314 
4315 /**
4316  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4317  * @pf: the PF
4318  *
4319  * This function waits for the Tx queues to be in disabled state for all the
4320  * VSIs that are managed by this PF.
4321  **/
4322 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4323 {
4324 	int v, ret = 0;
4325 
4326 	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4327 		/* No need to wait for FCoE VSI queues */
4328 		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4329 			ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4330 			if (ret)
4331 				break;
4332 		}
4333 	}
4334 
4335 	return ret;
4336 }
4337 
4338 #endif
4339 
4340 /**
4341  * i40e_detect_recover_hung_queue - Detect and recover a hung queue
4342  * @q_idx: TX queue number
4343  * @vsi: Pointer to VSI struct
4344  *
4345  * This function checks the specified queue of the given VSI for a hung
4346  * condition. Detection is a two-step process: the first pass only sets the
4347  * 'hung' bit. If napi_poll runs before the next run of the service task, it
4348  * resets the 'hung' bit for the respective q_vector; if not, the hung
4349  * condition remains and this function issues a SW interrupt to recover.
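 *
 * A worked two-pass timeline (the cadence of the service task is assumed
 * here, not specified):
 *   pass 1: tx_pending != 0, hung bit clear -> set the hung bit, no action
 *   ...napi_poll runs in between            -> hung bit is cleared again
 *   pass 2: tx_pending != 0, hung bit still set -> i40e_force_wb() raises
 *   a SW interrupt so the ring is cleaned before dev_watchdog declares a
 *   Tx timeout.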
4350  **/
4351 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4352 {
4353 	struct i40e_ring *tx_ring = NULL;
4354 	struct i40e_pf *pf;
4355 	u32 head, val, tx_pending;
4356 	int i;
4357 
4358 	pf = vsi->back;
4359 
4360 	/* now that we have an index, find the tx_ring struct */
4361 	for (i = 0; i < vsi->num_queue_pairs; i++) {
4362 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4363 			if (q_idx == vsi->tx_rings[i]->queue_index) {
4364 				tx_ring = vsi->tx_rings[i];
4365 				break;
4366 			}
4367 		}
4368 	}
4369 
4370 	if (!tx_ring)
4371 		return;
4372 
4373 	/* Read interrupt register */
4374 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4375 		val = rd32(&pf->hw,
4376 			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4377 					       tx_ring->vsi->base_vector - 1));
4378 	else
4379 		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4380 
4381 	/* Bail out if interrupts are disabled because napi_poll
4382 	 * execution is in progress or will get scheduled soon.
4383 	 * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
4384 	 */
4385 	if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
4386 		return;
4387 
4388 	head = i40e_get_head(tx_ring);
4389 
4390 	tx_pending = i40e_get_tx_pending(tx_ring);
4391 
4392 	/* HW is done executing descriptors and has updated the HEAD
4393 	 * write-back, but SW hasn't processed those descriptors yet. If no
4394 	 * interrupt is generated from this point on, dev_watchdog could
4395 	 * detect a timeout on those netdev_queues, hence proactively
4396 	 * trigger a SW interrupt here.
4397 	 */
4398 	if (tx_pending) {
4399 		/* NAPI poll didn't run and clear the bit since it was set */
4400 		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
4401 				       &tx_ring->q_vector->hung_detected)) {
4402 			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
4403 				    vsi->seid, q_idx, tx_pending,
4404 				    tx_ring->next_to_clean, head,
4405 				    tx_ring->next_to_use,
4406 				    readl(tx_ring->tail));
4407 			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
4408 				    vsi->seid, q_idx, val);
4409 			i40e_force_wb(vsi, tx_ring->q_vector);
4410 		} else {
4411 			/* First Chance - detected possible hung */
4412 			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
4413 				&tx_ring->q_vector->hung_detected);
4414 		}
4415 	}
4416 }
4417 
4418 /**
4419  * i40e_detect_recover_hung - Detect and recover hung queues
4420  * @pf: pointer to PF struct
4421  *
4422  * The LAN VSI has a netdev and the netdev has TX queues. This function
4423  * checks each of those TX queues for a hung condition and, if one is
4424  * found, triggers recovery by issuing a SW interrupt.
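 *
 * Each call here is assumed to be one pass of the two-step detection
 * scheme described at i40e_detect_recover_hung_queue() above.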
4425  **/
4426 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4427 {
4428 	struct net_device *netdev;
4429 	struct i40e_vsi *vsi;
4430 	int i;
4431 
4432 	/* Only for LAN VSI */
4433 	vsi = pf->vsi[pf->lan_vsi];
4434 
4435 	if (!vsi)
4436 		return;
4437 
4438 	/* Make sure VSI state is not DOWN or RECOVERY_PENDING */
4439 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4440 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4441 		return;
4442 
4443 	/* Make sure type is MAIN VSI */
4444 	if (vsi->type != I40E_VSI_MAIN)
4445 		return;
4446 
4447 	netdev = vsi->netdev;
4448 	if (!netdev)
4449 		return;
4450 
4451 	/* Bail out if netif_carrier is not OK */
4452 	if (!netif_carrier_ok(netdev))
4453 		return;
4454 
4455 	/* Go through the netdev's TX queues */
4456 	for (i = 0; i < netdev->num_tx_queues; i++) {
4457 		struct netdev_queue *q;
4458 
4459 		q = netdev_get_tx_queue(netdev, i);
4460 		if (q)
4461 			i40e_detect_recover_hung_queue(i, vsi);
4462 	}
4463 }
4464 
4465 /**
4466  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4467  * @pf: pointer to PF
4468  *
4469  * Get TC map for iSCSI PF type that will include iSCSI TC
4470  * and LAN TC.
4471  **/
4472 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4473 {
4474 	struct i40e_dcb_app_priority_table app;
4475 	struct i40e_hw *hw = &pf->hw;
4476 	u8 enabled_tc = 1; /* TC0 is always enabled */
4477 	u8 tc, i;
4478 	/* Get the iSCSI APP TLV */
4479 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4480 
4481 	for (i = 0; i < dcbcfg->numapps; i++) {
4482 		app = dcbcfg->app[i];
4483 		if (app.selector == I40E_APP_SEL_TCPIP &&
4484 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
4485 			tc = dcbcfg->etscfg.prioritytable[app.priority];
4486 			enabled_tc |= BIT(tc);
4487 			break;
4488 		}
4489 	}
4490 
4491 	return enabled_tc;
4492 }
4493 
4494 /**
4495  * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4496  * @dcbcfg: the corresponding DCBx configuration structure
4497  *
4498  * Return the number of TCs from given DCBx configuration
4499  **/
4500 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4501 {
4502 	u8 num_tc = 0;
4503 	int i;
4504 
4505 	/* Scan the ETS Config Priority Table to find
4506 	 * traffic class enabled for a given priority
4507 	 * and use the traffic class index to get the
4508 	 * number of traffic classes enabled
4509 	 */
4510 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4511 		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4512 			num_tc = dcbcfg->etscfg.prioritytable[i];
4513 	}
4514 
4515 	/* Traffic class index starts from zero so
4516 	 * increment to return the actual count
4517 	 */
4518 	return num_tc + 1;
4519 }
4520 
4521 /**
4522  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4523  * @dcbcfg: the corresponding DCBx configuration structure
4524  *
4525  * Query the given DCBX configuration and return a bitmap of the
4526  * traffic classes it enables
4527  **/
4528 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4529 {
4530 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4531 	u8 enabled_tc = 1;
4532 	u8 i;
4533 
4534 	for (i = 0; i < num_tc; i++)
4535 		enabled_tc |= BIT(i);
4536 
4537 	return enabled_tc;
4538 }
4539 
4540 /**
4541  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4542  * @pf: PF being queried
4543  *
4544  * Return number of traffic classes enabled for the given PF
4545  **/
4546 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4547 {
4548 	struct i40e_hw *hw = &pf->hw;
4549 	u8 i, enabled_tc;
4550 	u8 num_tc = 0;
4551 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4552 
4553 	/* If DCB is not
enabled then we are always in a single TC */
4554 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4555 		return 1;
4556 
4557 	/* SFP mode will be enabled for all TCs on port */
4558 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4559 		return i40e_dcb_get_num_tc(dcbcfg);
4560 
4561 	/* MFP mode: return the count of enabled TCs for this PF */
4562 	if (pf->hw.func_caps.iscsi)
4563 		enabled_tc = i40e_get_iscsi_tc_map(pf);
4564 	else
4565 		return 1; /* Only TC0 */
4566 
4567 	/* At least have TC0 */
4568 	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4569 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4570 		if (enabled_tc & BIT(i))
4571 			num_tc++;
4572 	}
4573 	return num_tc;
4574 }
4575 
4576 /**
4577  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4578  * @pf: PF being queried
4579  *
4580  * Return a bitmap for first enabled traffic class for this PF.
4581  **/
4582 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4583 {
4584 	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4585 	u8 i = 0;
4586 
4587 	if (!enabled_tc)
4588 		return 0x1; /* TC0 */
4589 
4590 	/* Find the first enabled TC */
4591 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4592 		if (enabled_tc & BIT(i))
4593 			break;
4594 	}
4595 
4596 	return BIT(i);
4597 }
4598 
4599 /**
4600  * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4601  * @pf: PF being queried
4602  *
4603  * Return a bitmap for enabled traffic classes for this PF.
4604  **/
4605 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4606 {
4607 	/* If DCB is not enabled for this PF then just return default TC */
4608 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4609 		return i40e_pf_get_default_tc(pf);
4610 
4611 	/* In SFP mode we want the PF to be enabled for all TCs */
4612 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4613 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4614 
4615 	/* MFP enabled and iSCSI PF type */
4616 	if (pf->hw.func_caps.iscsi)
4617 		return i40e_get_iscsi_tc_map(pf);
4618 	else
4619 		return i40e_pf_get_default_tc(pf);
4620 }
4621 
4622 /**
4623  * i40e_vsi_get_bw_info - Query VSI BW Information
4624  * @vsi: the VSI being queried
4625  *
4626  * Returns 0 on success, negative value on failure
4627  **/
4628 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4629 {
4630 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4631 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4632 	struct i40e_pf *pf = vsi->back;
4633 	struct i40e_hw *hw = &pf->hw;
4634 	i40e_status ret;
4635 	u32 tc_bw_max;
4636 	int i;
4637 
4638 	/* Get the VSI level BW configuration */
4639 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4640 	if (ret) {
4641 		dev_info(&pf->pdev->dev,
4642 			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4643 			 i40e_stat_str(&pf->hw, ret),
4644 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4645 		return -EINVAL;
4646 	}
4647 
4648 	/* Get the VSI level BW configuration per TC */
4649 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4650 					       NULL);
4651 	if (ret) {
4652 		dev_info(&pf->pdev->dev,
4653 			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4654 			 i40e_stat_str(&pf->hw, ret),
4655 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4656 		return -EINVAL;
4657 	}
4658 
4659 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4660 		dev_info(&pf->pdev->dev,
4661 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4662 			 bw_config.tc_valid_bits,
4663 			 bw_ets_config.tc_valid_bits);
4664 		/* Still continuing */
4665 	}
4666 
4667 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4668 	vsi->bw_max_quanta = bw_config.max_bw;
4669 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4670 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4671 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4672 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4673 		vsi->bw_ets_limit_credits[i] =
4674 			le16_to_cpu(bw_ets_config.credits[i]);
4675 		/* 3 bits out of 4 for each TC */
4676 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4677 	}
4678 
4679 	return 0;
4680 }
4681 
4682 /**
4683  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4684  * @vsi: the VSI being configured
4685  * @enabled_tc: TC bitmap
4686  * @bw_share: BW shared credits per TC
4687  *
4688  * Returns 0 on success, negative value on failure
4689  **/
4690 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4691 				       u8 *bw_share)
4692 {
4693 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4694 	i40e_status ret;
4695 	int i;
4696 
4697 	bw_data.tc_valid_bits = enabled_tc;
4698 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4699 		bw_data.tc_bw_credits[i] = bw_share[i];
4700 
4701 	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4702 				       NULL);
4703 	if (ret) {
4704 		dev_info(&vsi->back->pdev->dev,
4705 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
4706 			 vsi->back->hw.aq.asq_last_status);
4707 		return -EINVAL;
4708 	}
4709 
4710 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4711 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4712 
4713 	return 0;
4714 }
4715 
4716 /**
4717  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4718  * @vsi: the VSI being configured
4719  * @enabled_tc: TC map to be enabled
4720  *
4721  **/
4722 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4723 {
4724 	struct net_device *netdev = vsi->netdev;
4725 	struct i40e_pf *pf = vsi->back;
4726 	struct i40e_hw *hw = &pf->hw;
4727 	u8 netdev_tc = 0;
4728 	int i;
4729 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4730 
4731 	if (!netdev)
4732 		return;
4733 
4734 	if (!enabled_tc) {
4735 		netdev_reset_tc(netdev);
4736 		return;
4737 	}
4738 
4739 	/* Set up actual enabled TCs on the VSI */
4740 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4741 		return;
4742 
4743 	/* set per TC queues for the VSI */
4744 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4745 		/* Only set TC queues for enabled tcs
4746 		 *
4747 		 * e.g. For a VSI that has TC0 and TC3 enabled the
4748 		 * enabled_tc bitmap would be 0b1001 (0x9); the driver
4749 		 * will set the numtc for netdev as 2, which will be
4750 		 * referenced by the netdev layer as TC 0 and 1.
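		 * In that example the resulting mapping (queue counts and
		 * offsets are illustrative) would be HW TC0 -> netdev TC 0
		 * and HW TC3 -> netdev TC 1, programmed through the
		 * netdev_set_tc_queue() call below.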
4751 		 */
4752 		if (vsi->tc_config.enabled_tc & BIT(i))
4753 			netdev_set_tc_queue(netdev,
4754 					    vsi->tc_config.tc_info[i].netdev_tc,
4755 					    vsi->tc_config.tc_info[i].qcount,
4756 					    vsi->tc_config.tc_info[i].qoffset);
4757 	}
4758 
4759 	/* Assign UP2TC map for the VSI */
4760 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4761 		/* Get the actual TC# for the UP */
4762 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4763 		/* Get the mapped netdev TC# for the UP */
4764 		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4765 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
4766 	}
4767 }
4768 
4769 /**
4770  * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4771  * @vsi: the VSI being configured
4772  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4773  **/
4774 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4775 				      struct i40e_vsi_context *ctxt)
4776 {
4777 	/* copy just the sections touched, not the entire info,
4778 	 * since not all sections are valid as returned by
4779 	 * update vsi params
4780 	 */
4781 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
4782 	memcpy(&vsi->info.queue_mapping,
4783 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4784 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4785 	       sizeof(vsi->info.tc_mapping));
4786 }
4787 
4788 /**
4789  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4790  * @vsi: VSI to be configured
4791  * @enabled_tc: TC bitmap
4792  *
4793  * This configures a particular VSI for TCs that are mapped to the
4794  * given TC bitmap. It uses default bandwidth share for TCs across
4795  * VSIs to configure TC for a particular VSI.
4796  *
4797  * NOTE:
4798  * It is expected that the VSI queues have been quiesced before calling
4799  * this function.
4800  **/
4801 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4802 {
4803 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4804 	struct i40e_vsi_context ctxt;
4805 	int ret = 0;
4806 	int i;
4807 
4808 	/* Check if enabled_tc is same as existing or new TCs */
4809 	if (vsi->tc_config.enabled_tc == enabled_tc)
4810 		return ret;
4811 
4812 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
4813 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4814 		if (enabled_tc & BIT(i))
4815 			bw_share[i] = 1;
4816 	}
4817 
4818 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4819 	if (ret) {
4820 		dev_info(&vsi->back->pdev->dev,
4821 			 "Failed configuring TC map %d for VSI %d\n",
4822 			 enabled_tc, vsi->seid);
4823 		goto out;
4824 	}
4825 
4826 	/* Update Queue Pairs Mapping for currently enabled UPs */
4827 	ctxt.seid = vsi->seid;
4828 	ctxt.pf_num = vsi->back->hw.pf_id;
4829 	ctxt.vf_num = 0;
4830 	ctxt.uplink_seid = vsi->uplink_seid;
4831 	ctxt.info = vsi->info;
4832 	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4833 
4834 	/* Update the VSI after updating the VSI queue-mapping information */
4835 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4836 	if (ret) {
4837 		dev_info(&vsi->back->pdev->dev,
4838 			 "Update vsi tc config failed, err %s aq_err %s\n",
4839 			 i40e_stat_str(&vsi->back->hw, ret),
4840 			 i40e_aq_str(&vsi->back->hw,
4841 				     vsi->back->hw.aq.asq_last_status));
4842 		goto out;
4843 	}
4844 	/* update the local VSI info with updated queue map */
4845 	i40e_vsi_update_queue_map(vsi, &ctxt);
4846 	vsi->info.valid_sections = 0;
4847 
4848 	/* Update current VSI BW information */
4849 	ret = i40e_vsi_get_bw_info(vsi);
4850 	if (ret) {
4851 		dev_info(&vsi->back->pdev->dev,
4852 			 "Failed updating vsi bw info, err %s aq_err %s\n",
4853 			 i40e_stat_str(&vsi->back->hw, ret),
4854 			 i40e_aq_str(&vsi->back->hw,
4855 				     vsi->back->hw.aq.asq_last_status));
4856 		goto out;
4857 	}
4858 
4859 	/* Update the netdev TC setup */
4860 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4861 out:
4862 	return ret;
4863 }
4864 
4865 /**
4866  * i40e_veb_config_tc - Configure TCs for given VEB
4867  * @veb: given VEB
4868  * @enabled_tc: TC bitmap
4869  *
4870  * Configures given TC bitmap for VEB (switching) element
4871  **/
4872 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4873 {
4874 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4875 	struct i40e_pf *pf = veb->pf;
4876 	int ret = 0;
4877 	int i;
4878 
4879 	/* Nothing to do if no TCs were given or they are already enabled */
4880 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
4881 		return ret;
4882 
4883 	bw_data.tc_valid_bits = enabled_tc;
4884 	/* bw_data.absolute_credits is not set (relative) */
4885 
4886 	/* Enable ETS TCs with equal BW Share for now */
4887 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4888 		if (enabled_tc & BIT(i))
4889 			bw_data.tc_bw_share_credits[i] = 1;
4890 	}
4891 
4892 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4893 						   &bw_data, NULL);
4894 	if (ret) {
4895 		dev_info(&pf->pdev->dev,
4896 			 "VEB bw config failed, err %s aq_err %s\n",
4897 			 i40e_stat_str(&pf->hw, ret),
4898 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4899 		goto out;
4900 	}
4901 
4902 	/* Update the BW information */
4903 	ret = i40e_veb_get_bw_info(veb);
4904 	if (ret) {
4905 		dev_info(&pf->pdev->dev,
4906 			 "Failed getting veb bw config, err %s aq_err %s\n",
4907 			 i40e_stat_str(&pf->hw, ret),
4908 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4909 	}
4910 
4911 out:
4912 	return ret;
4913 }
4914 
4915 #ifdef CONFIG_I40E_DCB
4916 /**
4917  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4918  * @pf: PF struct
4919  *
4920  * Reconfigure VEB/VSIs on a given PF; it is assumed that
4921  * the caller has quiesced all the VSIs before calling
4922  * this function
4923  **/
4924 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4925 {
4926 	u8 tc_map = 0;
4927 	int ret;
4928 	u8 v;
4929 
4930 	/* Enable the TCs available on PF to all VEBs */
4931 	tc_map = i40e_pf_get_tc_map(pf);
4932 	for (v = 0; v < I40E_MAX_VEB; v++) {
4933 		if (!pf->veb[v])
4934 			continue;
4935 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4936 		if (ret) {
4937 			dev_info(&pf->pdev->dev,
4938 				 "Failed configuring TC for VEB seid=%d\n",
4939 				 pf->veb[v]->seid);
4940 			/* Will try to configure as many components as possible */
4941 		}
4942 	}
4943 
4944 	/* Update each VSI */
4945 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4946 		if (!pf->vsi[v])
4947 			continue;
4948 
4949 		/* - Enable all TCs for the LAN VSI
4950 #ifdef I40E_FCOE
4951 		 * - For FCoE VSI only enable the TC configured
4952 		 *   as per the APP TLV
4953 #endif
4954 		 * - For all others keep them at TC0 for now
4955 		 */
4956 		if (v == pf->lan_vsi)
4957 			tc_map = i40e_pf_get_tc_map(pf);
4958 		else
4959 			tc_map = i40e_pf_get_default_tc(pf);
4960 #ifdef I40E_FCOE
4961 		if (pf->vsi[v]->type == I40E_VSI_FCOE)
4962 			tc_map = i40e_get_fcoe_tc_map(pf);
4963 #endif /* #ifdef I40E_FCOE */
4964 
4965 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4966 		if (ret) {
4967 			dev_info(&pf->pdev->dev,
4968 				 "Failed configuring TC for VSI seid=%d\n",
4969 				 pf->vsi[v]->seid);
4970 			/* Will try to configure as many components as possible */
4971 		} else {
4972 			/* Re-configure VSI vectors based on updated TC map */
4973 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4974 			if (pf->vsi[v]->netdev)
4975 				i40e_dcbnl_set_all(pf->vsi[v]);
4976 		}
4977 	}
4978 }
4979 
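/* A condensed sketch of where i40e_dcb_reconfigure() sits in the LLDP/DCB
 * change sequence (see i40e_handle_lldp_event() later in this file for the
 * real flow; error handling is elided here):
 *
 *	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
 *	i40e_pf_quiesce_all_vsi(pf);
 *	i40e_dcb_reconfigure(pf);
 *	ret = i40e_resume_port_tx(pf);
 *	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
 *	if (!ret && !i40e_pf_wait_txq_disabled(pf))
 *		i40e_pf_unquiesce_all_vsi(pf);
 */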
4980 /**
4981  * i40e_resume_port_tx - Resume port Tx
4982  * @pf: PF struct
4983  *
4984  * Resume a port's Tx and issue a PF reset in case of failure to
4985  * resume.
4986  **/
4987 static int i40e_resume_port_tx(struct i40e_pf *pf)
4988 {
4989 	struct i40e_hw *hw = &pf->hw;
4990 	int ret;
4991 
4992 	ret = i40e_aq_resume_port_tx(hw, NULL);
4993 	if (ret) {
4994 		dev_info(&pf->pdev->dev,
4995 			 "Resume Port Tx failed, err %s aq_err %s\n",
4996 			 i40e_stat_str(&pf->hw, ret),
4997 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4998 		/* Schedule PF reset to recover */
4999 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5000 		i40e_service_event_schedule(pf);
5001 	}
5002 
5003 	return ret;
5004 }
5005 
5006 /**
5007  * i40e_init_pf_dcb - Initialize DCB configuration
5008  * @pf: PF being configured
5009  *
5010  * Query the current DCB configuration and cache it
5011  * in the hardware structure
5012  **/
5013 static int i40e_init_pf_dcb(struct i40e_pf *pf)
5014 {
5015 	struct i40e_hw *hw = &pf->hw;
5016 	int err = 0;
5017 
5018 	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5019 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
5020 	    (pf->hw.aq.fw_maj_ver < 4))
5021 		goto out;
5022 
5023 	/* Get the initial DCB configuration */
5024 	err = i40e_init_dcb(hw);
5025 	if (!err) {
5026 		/* Device/Function is not DCBX capable */
5027 		if ((!hw->func_caps.dcb) ||
5028 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5029 			dev_info(&pf->pdev->dev,
5030 				 "DCBX offload is not supported or is disabled for this PF.\n");
5031 
5032 			if (pf->flags & I40E_FLAG_MFP_ENABLED)
5033 				goto out;
5034 
5035 		} else {
5036 			/* When status is not DISABLED then DCBX in FW */
5037 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5038 				       DCB_CAP_DCBX_VER_IEEE;
5039 
5040 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
5041 			/* Enable DCB tagging only when more than one TC */
5042 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5043 				pf->flags |= I40E_FLAG_DCB_ENABLED;
5044 			dev_dbg(&pf->pdev->dev,
5045 				"DCBX offload is supported for this PF.\n");
5046 		}
5047 	} else {
5048 		dev_info(&pf->pdev->dev,
5049 			 "Query for DCB configuration failed, err %s aq_err %s\n",
5050 			 i40e_stat_str(&pf->hw, err),
5051 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5052 	}
5053 
5054 out:
5055 	return err;
5056 }
5057 #endif /* CONFIG_I40E_DCB */
5058 #define SPEED_SIZE 14
5059 #define FC_SIZE 8
5060 /**
5061  * i40e_print_link_message - print link up or down
5062  * @vsi: the VSI for which link needs a message
5063  * @isup: true if the link is up, false otherwise
 **/
5064 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5065 {
5066 	char *speed = "Unknown";
5067 	char *fc = "Unknown";
5068 
5069 	if (vsi->current_isup == isup)
5070 		return;
5071 	vsi->current_isup = isup;
5072 	if (!isup) {
5073 		netdev_info(vsi->netdev, "NIC Link is Down\n");
5074 		return;
5075 	}
5076 
5077 	/* Warn user if link speed on NPAR enabled partition is not at
5078 	 * least 10Gbps
5079 	 */
5080 	if (vsi->back->hw.func_caps.npar_enable &&
5081 	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5082 	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5083 		netdev_warn(vsi->netdev,
5084 			    "The partition detected link speed that is less than 10Gbps\n");
5085 
5086 	switch (vsi->back->hw.phy.link_info.link_speed) {
5087 	case I40E_LINK_SPEED_40GB:
5088 		speed = "40 G";
5089 		break;
5090 	case I40E_LINK_SPEED_20GB:
5091 		speed = "20 G";
5092 		break;
5093 	case I40E_LINK_SPEED_10GB:
5094 		speed = "10 G";
5095 		break;
5096 	case I40E_LINK_SPEED_1GB:
5097 		speed = "1000 M";
5098 		break;
5099 	case I40E_LINK_SPEED_100MB:
5100 		speed = "100 M";
"100 M"; 5101 break; 5102 default: 5103 break; 5104 } 5105 5106 switch (vsi->back->hw.fc.current_mode) { 5107 case I40E_FC_FULL: 5108 fc = "RX/TX"; 5109 break; 5110 case I40E_FC_TX_PAUSE: 5111 fc = "TX"; 5112 break; 5113 case I40E_FC_RX_PAUSE: 5114 fc = "RX"; 5115 break; 5116 default: 5117 fc = "None"; 5118 break; 5119 } 5120 5121 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", 5122 speed, fc); 5123 } 5124 5125 /** 5126 * i40e_up_complete - Finish the last steps of bringing up a connection 5127 * @vsi: the VSI being configured 5128 **/ 5129 static int i40e_up_complete(struct i40e_vsi *vsi) 5130 { 5131 struct i40e_pf *pf = vsi->back; 5132 int err; 5133 5134 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 5135 i40e_vsi_configure_msix(vsi); 5136 else 5137 i40e_configure_msi_and_legacy(vsi); 5138 5139 /* start rings */ 5140 err = i40e_vsi_control_rings(vsi, true); 5141 if (err) 5142 return err; 5143 5144 clear_bit(__I40E_DOWN, &vsi->state); 5145 i40e_napi_enable_all(vsi); 5146 i40e_vsi_enable_irq(vsi); 5147 5148 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && 5149 (vsi->netdev)) { 5150 i40e_print_link_message(vsi, true); 5151 netif_tx_start_all_queues(vsi->netdev); 5152 netif_carrier_on(vsi->netdev); 5153 } else if (vsi->netdev) { 5154 i40e_print_link_message(vsi, false); 5155 /* need to check for qualified module here*/ 5156 if ((pf->hw.phy.link_info.link_info & 5157 I40E_AQ_MEDIA_AVAILABLE) && 5158 (!(pf->hw.phy.link_info.an_info & 5159 I40E_AQ_QUALIFIED_MODULE))) 5160 netdev_err(vsi->netdev, 5161 "the driver failed to link because an unqualified module was detected."); 5162 } 5163 5164 /* replay FDIR SB filters */ 5165 if (vsi->type == I40E_VSI_FDIR) { 5166 /* reset fd counters */ 5167 pf->fd_add_err = pf->fd_atr_cnt = 0; 5168 if (pf->fd_tcp_rule > 0) { 5169 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 5170 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5171 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); 5172 pf->fd_tcp_rule = 0; 5173 } 5174 i40e_fdir_filter_restore(vsi); 5175 } 5176 i40e_service_event_schedule(pf); 5177 5178 return 0; 5179 } 5180 5181 /** 5182 * i40e_vsi_reinit_locked - Reset the VSI 5183 * @vsi: the VSI being configured 5184 * 5185 * Rebuild the ring structs after some configuration 5186 * has changed, e.g. MTU size. 5187 **/ 5188 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) 5189 { 5190 struct i40e_pf *pf = vsi->back; 5191 5192 WARN_ON(in_interrupt()); 5193 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 5194 usleep_range(1000, 2000); 5195 i40e_down(vsi); 5196 5197 /* Give a VF some time to respond to the reset. The 5198 * two second wait is based upon the watchdog cycle in 5199 * the VF driver. 5200 */ 5201 if (vsi->type == I40E_VSI_SRIOV) 5202 msleep(2000); 5203 i40e_up(vsi); 5204 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 5205 } 5206 5207 /** 5208 * i40e_up - Bring the connection back up after being down 5209 * @vsi: the VSI being configured 5210 **/ 5211 int i40e_up(struct i40e_vsi *vsi) 5212 { 5213 int err; 5214 5215 err = i40e_vsi_configure(vsi); 5216 if (!err) 5217 err = i40e_up_complete(vsi); 5218 5219 return err; 5220 } 5221 5222 /** 5223 * i40e_down - Shutdown the connection processing 5224 * @vsi: the VSI being stopped 5225 **/ 5226 void i40e_down(struct i40e_vsi *vsi) 5227 { 5228 int i; 5229 5230 /* It is assumed that the caller of this function 5231 * sets the vsi->state __I40E_DOWN bit. 
5232 */ 5233 if (vsi->netdev) { 5234 netif_carrier_off(vsi->netdev); 5235 netif_tx_disable(vsi->netdev); 5236 } 5237 i40e_vsi_disable_irq(vsi); 5238 i40e_vsi_control_rings(vsi, false); 5239 i40e_napi_disable_all(vsi); 5240 5241 for (i = 0; i < vsi->num_queue_pairs; i++) { 5242 i40e_clean_tx_ring(vsi->tx_rings[i]); 5243 i40e_clean_rx_ring(vsi->rx_rings[i]); 5244 } 5245 } 5246 5247 /** 5248 * i40e_setup_tc - configure multiple traffic classes 5249 * @netdev: net device to configure 5250 * @tc: number of traffic classes to enable 5251 **/ 5252 #ifdef I40E_FCOE 5253 int i40e_setup_tc(struct net_device *netdev, u8 tc) 5254 #else 5255 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 5256 #endif 5257 { 5258 struct i40e_netdev_priv *np = netdev_priv(netdev); 5259 struct i40e_vsi *vsi = np->vsi; 5260 struct i40e_pf *pf = vsi->back; 5261 u8 enabled_tc = 0; 5262 int ret = -EINVAL; 5263 int i; 5264 5265 /* Check if DCB enabled to continue */ 5266 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 5267 netdev_info(netdev, "DCB is not enabled for adapter\n"); 5268 goto exit; 5269 } 5270 5271 /* Check if MFP enabled */ 5272 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 5273 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 5274 goto exit; 5275 } 5276 5277 /* Check whether tc count is within enabled limit */ 5278 if (tc > i40e_pf_get_num_tc(pf)) { 5279 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 5280 goto exit; 5281 } 5282 5283 /* Generate TC map for number of tc requested */ 5284 for (i = 0; i < tc; i++) 5285 enabled_tc |= BIT(i); 5286 5287 /* Requesting same TC configuration as already enabled */ 5288 if (enabled_tc == vsi->tc_config.enabled_tc) 5289 return 0; 5290 5291 /* Quiesce VSI queues */ 5292 i40e_quiesce_vsi(vsi); 5293 5294 /* Configure VSI for enabled TCs */ 5295 ret = i40e_vsi_config_tc(vsi, enabled_tc); 5296 if (ret) { 5297 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 5298 vsi->seid); 5299 goto exit; 5300 } 5301 5302 /* Unquiesce VSI */ 5303 i40e_unquiesce_vsi(vsi); 5304 5305 exit: 5306 return ret; 5307 } 5308 5309 /** 5310 * i40e_open - Called when a network interface is made active 5311 * @netdev: network interface device structure 5312 * 5313 * The open entry point is called when a network interface is made 5314 * active by the system (IFF_UP). At this point all resources needed 5315 * for transmit and receive operations are allocated, the interrupt 5316 * handler is registered with the OS, the netdev watchdog subtask is 5317 * enabled, and the stack is notified that the interface is ready. 
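 *
 * Note that beyond i40e_vsi_open() this also programs the global TSO mask
 * registers and, when the options are compiled in, replays the known
 * VXLAN/GENEVE UDP ports to the hardware via vxlan_get_rx_port() and
 * geneve_get_rx_port(); see the body below.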
5318  *
5319  * Returns 0 on success, negative value on failure
5320  **/
5321 int i40e_open(struct net_device *netdev)
5322 {
5323 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5324 	struct i40e_vsi *vsi = np->vsi;
5325 	struct i40e_pf *pf = vsi->back;
5326 	int err;
5327 
5328 	/* disallow open during test or if eeprom is broken */
5329 	if (test_bit(__I40E_TESTING, &pf->state) ||
5330 	    test_bit(__I40E_BAD_EEPROM, &pf->state))
5331 		return -EBUSY;
5332 
5333 	netif_carrier_off(netdev);
5334 
5335 	err = i40e_vsi_open(vsi);
5336 	if (err)
5337 		return err;
5338 
5339 	/* configure global TSO hardware offload settings */
5340 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5341 						       TCP_FLAG_FIN) >> 16);
5342 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5343 						       TCP_FLAG_FIN |
5344 						       TCP_FLAG_CWR) >> 16);
5345 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5346 
5347 #ifdef CONFIG_I40E_VXLAN
5348 	vxlan_get_rx_port(netdev);
5349 #endif
5350 #ifdef CONFIG_I40E_GENEVE
5351 	geneve_get_rx_port(netdev);
5352 #endif
5353 
5354 	return 0;
5355 }
5356 
5357 /**
5358  * i40e_vsi_open - Bring up a VSI
5359  * @vsi: the VSI to open
5360  *
5361  * Finish initialization of the VSI.
5362  *
5363  * Returns 0 on success, negative value on failure
5364  **/
5365 int i40e_vsi_open(struct i40e_vsi *vsi)
5366 {
5367 	struct i40e_pf *pf = vsi->back;
5368 	char int_name[I40E_INT_NAME_STR_LEN];
5369 	int err;
5370 
5371 	/* allocate descriptors */
5372 	err = i40e_vsi_setup_tx_resources(vsi);
5373 	if (err)
5374 		goto err_setup_tx;
5375 	err = i40e_vsi_setup_rx_resources(vsi);
5376 	if (err)
5377 		goto err_setup_rx;
5378 
5379 	err = i40e_vsi_configure(vsi);
5380 	if (err)
5381 		goto err_setup_rx;
5382 
5383 	if (vsi->netdev) {
5384 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5385 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5386 		err = i40e_vsi_request_irq(vsi, int_name);
5387 		if (err)
5388 			goto err_setup_rx;
5389 
5390 		/* Notify the stack of the actual queue counts. */
5391 		err = netif_set_real_num_tx_queues(vsi->netdev,
5392 						   vsi->num_queue_pairs);
5393 		if (err)
5394 			goto err_set_queues;
5395 
5396 		err = netif_set_real_num_rx_queues(vsi->netdev,
5397 						   vsi->num_queue_pairs);
5398 		if (err)
5399 			goto err_set_queues;
5400 
5401 	} else if (vsi->type == I40E_VSI_FDIR) {
5402 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5403 			 dev_driver_string(&pf->pdev->dev),
5404 			 dev_name(&pf->pdev->dev));
5405 		err = i40e_vsi_request_irq(vsi, int_name);
5406 
5407 	} else {
5408 		err = -EINVAL;
5409 		goto err_setup_rx;
5410 	}
5411 
5412 	err = i40e_up_complete(vsi);
5413 	if (err)
5414 		goto err_up_complete;
5415 
5416 	return 0;
5417 
5418 err_up_complete:
5419 	i40e_down(vsi);
5420 err_set_queues:
5421 	i40e_vsi_free_irq(vsi);
5422 err_setup_rx:
5423 	i40e_vsi_free_rx_resources(vsi);
5424 err_setup_tx:
5425 	i40e_vsi_free_tx_resources(vsi);
5426 	if (vsi == pf->vsi[pf->lan_vsi])
5427 		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5428 
5429 	return err;
5430 }
5431 
5432 /**
5433  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5434  * @pf: Pointer to PF
5435  *
5436  * This function destroys the hlist where all the Flow Director
5437  * filters were saved.
5438 **/ 5439 static void i40e_fdir_filter_exit(struct i40e_pf *pf) 5440 { 5441 struct i40e_fdir_filter *filter; 5442 struct hlist_node *node2; 5443 5444 hlist_for_each_entry_safe(filter, node2, 5445 &pf->fdir_filter_list, fdir_node) { 5446 hlist_del(&filter->fdir_node); 5447 kfree(filter); 5448 } 5449 pf->fdir_pf_active_filters = 0; 5450 } 5451 5452 /** 5453 * i40e_close - Disables a network interface 5454 * @netdev: network interface device structure 5455 * 5456 * The close entry point is called when an interface is de-activated 5457 * by the OS. The hardware is still under the driver's control, but 5458 * this netdev interface is disabled. 5459 * 5460 * Returns 0, this is not allowed to fail 5461 **/ 5462 #ifdef I40E_FCOE 5463 int i40e_close(struct net_device *netdev) 5464 #else 5465 static int i40e_close(struct net_device *netdev) 5466 #endif 5467 { 5468 struct i40e_netdev_priv *np = netdev_priv(netdev); 5469 struct i40e_vsi *vsi = np->vsi; 5470 5471 i40e_vsi_close(vsi); 5472 5473 return 0; 5474 } 5475 5476 /** 5477 * i40e_do_reset - Start a PF or Core Reset sequence 5478 * @pf: board private structure 5479 * @reset_flags: which reset is requested 5480 * 5481 * The essential difference in resets is that the PF Reset 5482 * doesn't clear the packet buffers, doesn't reset the PE 5483 * firmware, and doesn't bother the other PFs on the chip. 5484 **/ 5485 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 5486 { 5487 u32 val; 5488 5489 WARN_ON(in_interrupt()); 5490 5491 if (i40e_check_asq_alive(&pf->hw)) 5492 i40e_vc_notify_reset(pf); 5493 5494 /* do the biggest reset indicated */ 5495 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { 5496 5497 /* Request a Global Reset 5498 * 5499 * This will start the chip's countdown to the actual full 5500 * chip reset event, and a warning interrupt to be sent 5501 * to all PFs, including the requestor. Our handler 5502 * for the warning interrupt will deal with the shutdown 5503 * and recovery of the switch setup. 5504 */ 5505 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 5506 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5507 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 5508 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5509 5510 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { 5511 5512 /* Request a Core Reset 5513 * 5514 * Same as Global Reset, except does *not* include the MAC/PHY 5515 */ 5516 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 5517 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5518 val |= I40E_GLGEN_RTRIG_CORER_MASK; 5519 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5520 i40e_flush(&pf->hw); 5521 5522 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { 5523 5524 /* Request a PF Reset 5525 * 5526 * Resets only the PF-specific registers 5527 * 5528 * This goes directly to the tear-down and rebuild of 5529 * the switch, since we need to do all the recovery as 5530 * for the Core Reset. 
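		 *
		 * As a reading aid, the reset scopes used by this function,
		 * from widest to narrowest:
		 *	GLOBR - whole chip, all PFs, MAC/PHY included
		 *	CORER - like GLOBR but without the MAC/PHY
		 *	PFR   - only this PF's registers; packet buffers,
		 *		PE firmware and other PFs are untouched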
5531 		 */
5532 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
5533 		i40e_handle_reset_warning(pf);
5534 
5535 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5536 		int v;
5537 
5538 		/* Find the VSI(s) that requested a re-init */
5539 		dev_info(&pf->pdev->dev,
5540 			 "VSI reinit requested\n");
5541 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5542 			struct i40e_vsi *vsi = pf->vsi[v];
5543 
5544 			if (vsi != NULL &&
5545 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5546 				i40e_vsi_reinit_locked(pf->vsi[v]);
5547 				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5548 			}
5549 		}
5550 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5551 		int v;
5552 
5553 		/* Find the VSI(s) that need to be brought down */
5554 		dev_info(&pf->pdev->dev, "VSI down requested\n");
5555 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5556 			struct i40e_vsi *vsi = pf->vsi[v];
5557 
5558 			if (vsi != NULL &&
5559 			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5560 				set_bit(__I40E_DOWN, &vsi->state);
5561 				i40e_down(vsi);
5562 				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5563 			}
5564 		}
5565 	} else {
5566 		dev_info(&pf->pdev->dev,
5567 			 "bad reset request 0x%08x\n", reset_flags);
5568 	}
5569 }
5570 
5571 #ifdef CONFIG_I40E_DCB
5572 /**
5573  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5574  * @pf: board private structure
5575  * @old_cfg: current DCB config
5576  * @new_cfg: new DCB config
5577  **/
5578 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5579 			    struct i40e_dcbx_config *old_cfg,
5580 			    struct i40e_dcbx_config *new_cfg)
5581 {
5582 	bool need_reconfig = false;
5583 
5584 	/* Check if ETS configuration has changed */
5585 	if (memcmp(&new_cfg->etscfg,
5586 		   &old_cfg->etscfg,
5587 		   sizeof(new_cfg->etscfg))) {
5588 		/* If Priority Table has changed reconfig is needed */
5589 		if (memcmp(&new_cfg->etscfg.prioritytable,
5590 			   &old_cfg->etscfg.prioritytable,
5591 			   sizeof(new_cfg->etscfg.prioritytable))) {
5592 			need_reconfig = true;
5593 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5594 		}
5595 
5596 		if (memcmp(&new_cfg->etscfg.tcbwtable,
5597 			   &old_cfg->etscfg.tcbwtable,
5598 			   sizeof(new_cfg->etscfg.tcbwtable)))
5599 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5600 
5601 		if (memcmp(&new_cfg->etscfg.tsatable,
5602 			   &old_cfg->etscfg.tsatable,
5603 			   sizeof(new_cfg->etscfg.tsatable)))
5604 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5605 	}
5606 
5607 	/* Check if PFC configuration has changed */
5608 	if (memcmp(&new_cfg->pfc,
5609 		   &old_cfg->pfc,
5610 		   sizeof(new_cfg->pfc))) {
5611 		need_reconfig = true;
5612 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5613 	}
5614 
5615 	/* Check if APP Table has changed */
5616 	if (memcmp(&new_cfg->app,
5617 		   &old_cfg->app,
5618 		   sizeof(new_cfg->app))) {
5619 		need_reconfig = true;
5620 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5621 	}
5622 
5623 	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5624 	return need_reconfig;
5625 }
5626 
5627 /**
5628  * i40e_handle_lldp_event - Handle LLDP Change MIB event
5629  * @pf: board private structure
5630  * @e: event info posted on ARQ
5631  **/
5632 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5633 				  struct i40e_arq_event_info *e)
5634 {
5635 	struct i40e_aqc_lldp_get_mib *mib =
5636 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5637 	struct i40e_hw *hw = &pf->hw;
5638 	struct i40e_dcbx_config tmp_dcbx_cfg;
5639 	bool need_reconfig = false;
5640 	int ret = 0;
5641 	u8 type;
5642 
5643 	/* Not DCB capable or capability disabled */
5644 	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5645 		return ret;
5646 
5647 	/*
Ignore if event is not for the Nearest Bridge */
5648 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5649 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5650 	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5651 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5652 		return ret;
5653 
5654 	/* Check MIB Type and return if event for Remote MIB update */
5655 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5656 	dev_dbg(&pf->pdev->dev,
5657 		"LLDP event mib type %s\n", type ? "remote" : "local");
5658 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5659 		/* Update the remote cached instance and return */
5660 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5661 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5662 				&hw->remote_dcbx_config);
5663 		goto exit;
5664 	}
5665 
5666 	/* Store the old configuration */
5667 	tmp_dcbx_cfg = hw->local_dcbx_config;
5668 
5669 	/* Reset the old DCBx configuration data */
5670 	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5671 	/* Get updated DCBX data from firmware */
5672 	ret = i40e_get_dcb_config(&pf->hw);
5673 	if (ret) {
5674 		dev_info(&pf->pdev->dev,
5675 			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5676 			 i40e_stat_str(&pf->hw, ret),
5677 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5678 		goto exit;
5679 	}
5680 
5681 	/* No change detected in DCBX configs */
5682 	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5683 		    sizeof(tmp_dcbx_cfg))) {
5684 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5685 		goto exit;
5686 	}
5687 
5688 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5689 					       &hw->local_dcbx_config);
5690 
5691 	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5692 
5693 	if (!need_reconfig)
5694 		goto exit;
5695 
5696 	/* Enable DCB tagging only when more than one TC */
5697 	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5698 		pf->flags |= I40E_FLAG_DCB_ENABLED;
5699 	else
5700 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5701 
5702 	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5703 	/* Reconfiguration is needed, so quiesce all VSIs */
5704 	i40e_pf_quiesce_all_vsi(pf);
5705 
5706 	/* Configuration changed, so update the VEBs/VSIs */
5707 	i40e_dcb_reconfigure(pf);
5708 
5709 	ret = i40e_resume_port_tx(pf);
5710 
5711 	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5712 	/* In case of error no point in resuming VSIs */
5713 	if (ret)
5714 		goto exit;
5715 
5716 	/* Wait for the PF's Tx queues to be disabled */
5717 	ret = i40e_pf_wait_txq_disabled(pf);
5718 	if (ret) {
5719 		/* Schedule PF reset to recover */
5720 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5721 		i40e_service_event_schedule(pf);
5722 	} else {
5723 		i40e_pf_unquiesce_all_vsi(pf);
5724 	}
5725 
5726 exit:
5727 	return ret;
5728 }
5729 #endif /* CONFIG_I40E_DCB */
5730 
5731 /**
5732  * i40e_do_reset_safe - Protected reset path for userland calls.
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}
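
/* The two counters below aggregate both halves of the filter stats: a
 * guaranteed-filter count and a best-effort count. Roughly, sideband/ntuple
 * rules consume the guaranteed pool while ATR filters land in best-effort
 * space; PFQF_FDSTAT is per PF while GLQF_FDCNT_0 is device-wide.
 */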
/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if FD SB or ATR was auto disabled and if there is enough room
	 * to re-enable them
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}
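
/* Flush pacing: at least I40E_MIN_FD_FLUSH_INTERVAL seconds must pass between
 * two FD table flushes, and if flushes keep happening while the table is
 * mostly occupied by sideband rules, ATR stays off until the longer
 * I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE window has elapsed.
 */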
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quickly and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}

}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first
 * filter miss error on Rx queue 0. Accumulating enough error messages
 * before reacting will make sure we don't cause a flush too often.
5964 */ 5965 #define I40E_MAX_FD_PROGRAM_ERROR 256 5966 5967 /** 5968 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 5969 * @pf: board private structure 5970 **/ 5971 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 5972 { 5973 5974 /* if interface is down do nothing */ 5975 if (test_bit(__I40E_DOWN, &pf->state)) 5976 return; 5977 5978 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) 5979 return; 5980 5981 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 5982 i40e_fdir_flush_and_replay(pf); 5983 5984 i40e_fdir_check_and_reenable(pf); 5985 5986 } 5987 5988 /** 5989 * i40e_vsi_link_event - notify VSI of a link event 5990 * @vsi: vsi to be notified 5991 * @link_up: link up or down 5992 **/ 5993 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 5994 { 5995 if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) 5996 return; 5997 5998 switch (vsi->type) { 5999 case I40E_VSI_MAIN: 6000 #ifdef I40E_FCOE 6001 case I40E_VSI_FCOE: 6002 #endif 6003 if (!vsi->netdev || !vsi->netdev_registered) 6004 break; 6005 6006 if (link_up) { 6007 netif_carrier_on(vsi->netdev); 6008 netif_tx_wake_all_queues(vsi->netdev); 6009 } else { 6010 netif_carrier_off(vsi->netdev); 6011 netif_tx_stop_all_queues(vsi->netdev); 6012 } 6013 break; 6014 6015 case I40E_VSI_SRIOV: 6016 case I40E_VSI_VMDQ2: 6017 case I40E_VSI_CTRL: 6018 case I40E_VSI_MIRROR: 6019 default: 6020 /* there is no notification for other VSIs */ 6021 break; 6022 } 6023 } 6024 6025 /** 6026 * i40e_veb_link_event - notify elements on the veb of a link event 6027 * @veb: veb to be notified 6028 * @link_up: link up or down 6029 **/ 6030 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 6031 { 6032 struct i40e_pf *pf; 6033 int i; 6034 6035 if (!veb || !veb->pf) 6036 return; 6037 pf = veb->pf; 6038 6039 /* depth first... */ 6040 for (i = 0; i < I40E_MAX_VEB; i++) 6041 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 6042 i40e_veb_link_event(pf->veb[i], link_up); 6043 6044 /* ... now the local VSIs */ 6045 for (i = 0; i < pf->num_alloc_vsi; i++) 6046 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 6047 i40e_vsi_link_event(pf->vsi[i], link_up); 6048 } 6049 6050 /** 6051 * i40e_link_event - Update netif_carrier status 6052 * @pf: board private structure 6053 **/ 6054 static void i40e_link_event(struct i40e_pf *pf) 6055 { 6056 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6057 u8 new_link_speed, old_link_speed; 6058 i40e_status status; 6059 bool new_link, old_link; 6060 6061 /* save off old link status information */ 6062 pf->hw.phy.link_info_old = pf->hw.phy.link_info; 6063 6064 /* set this to force the get_link_status call to refresh state */ 6065 pf->hw.phy.get_link_info = true; 6066 6067 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 6068 6069 status = i40e_get_link_status(&pf->hw, &new_link); 6070 if (status) { 6071 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", 6072 status); 6073 return; 6074 } 6075 6076 old_link_speed = pf->hw.phy.link_info_old.link_speed; 6077 new_link_speed = pf->hw.phy.link_info.link_speed; 6078 6079 if (new_link == old_link && 6080 new_link_speed == old_link_speed && 6081 (test_bit(__I40E_DOWN, &vsi->state) || 6082 new_link == netif_carrier_ok(vsi->netdev))) 6083 return; 6084 6085 if (!test_bit(__I40E_DOWN, &vsi->state)) 6086 i40e_print_link_message(vsi, new_link); 6087 6088 /* Notify the base of the switch tree connected to 6089 * the link. Floating VEBs are not notified. 
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}

/**
 * i40e_watchdog_subtask - periodic checks not using event-driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence over any newly requested reset sequence.
6174 */ 6175 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 6176 i40e_handle_reset_warning(pf); 6177 goto unlock; 6178 } 6179 6180 /* If we're already down or resetting, just bail */ 6181 if (reset_flags && 6182 !test_bit(__I40E_DOWN, &pf->state) && 6183 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6184 i40e_do_reset(pf, reset_flags); 6185 6186 unlock: 6187 rtnl_unlock(); 6188 } 6189 6190 /** 6191 * i40e_handle_link_event - Handle link event 6192 * @pf: board private structure 6193 * @e: event info posted on ARQ 6194 **/ 6195 static void i40e_handle_link_event(struct i40e_pf *pf, 6196 struct i40e_arq_event_info *e) 6197 { 6198 struct i40e_aqc_get_link_status *status = 6199 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 6200 6201 /* Do a new status request to re-enable LSE reporting 6202 * and load new status information into the hw struct 6203 * This completely ignores any state information 6204 * in the ARQ event info, instead choosing to always 6205 * issue the AQ update link status command. 6206 */ 6207 i40e_link_event(pf); 6208 6209 /* check for unqualified module, if link is down */ 6210 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 6211 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 6212 (!(status->link_info & I40E_AQ_LINK_UP))) 6213 dev_err(&pf->pdev->dev, 6214 "The driver failed to link because an unqualified module was detected.\n"); 6215 } 6216 6217 /** 6218 * i40e_clean_adminq_subtask - Clean the AdminQ rings 6219 * @pf: board private structure 6220 **/ 6221 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 6222 { 6223 struct i40e_arq_event_info event; 6224 struct i40e_hw *hw = &pf->hw; 6225 u16 pending, i = 0; 6226 i40e_status ret; 6227 u16 opcode; 6228 u32 oldval; 6229 u32 val; 6230 6231 /* Do not run clean AQ when PF reset fails */ 6232 if (test_bit(__I40E_RESET_FAILED, &pf->state)) 6233 return; 6234 6235 /* check for error indications */ 6236 val = rd32(&pf->hw, pf->hw.aq.arq.len); 6237 oldval = val; 6238 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 6239 if (hw->debug_mask & I40E_DEBUG_AQ) 6240 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 6241 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 6242 } 6243 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 6244 if (hw->debug_mask & I40E_DEBUG_AQ) 6245 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 6246 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 6247 } 6248 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 6249 if (hw->debug_mask & I40E_DEBUG_AQ) 6250 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 6251 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 6252 } 6253 if (oldval != val) 6254 wr32(&pf->hw, pf->hw.aq.arq.len, val); 6255 6256 val = rd32(&pf->hw, pf->hw.aq.asq.len); 6257 oldval = val; 6258 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 6259 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6260 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 6261 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 6262 } 6263 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 6264 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6265 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 6266 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 6267 } 6268 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 6269 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6270 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 6271 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 6272 } 6273 if (oldval != val) 6274 wr32(&pf->hw, pf->hw.aq.asq.len, val); 6275 6276 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 6277 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 6278 if (!event.msg_buf) 6279 return; 6280 6281 do { 6282 ret = 
i40e_clean_arq_element(hw, &event, &pending); 6283 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 6284 break; 6285 else if (ret) { 6286 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 6287 break; 6288 } 6289 6290 opcode = le16_to_cpu(event.desc.opcode); 6291 switch (opcode) { 6292 6293 case i40e_aqc_opc_get_link_status: 6294 i40e_handle_link_event(pf, &event); 6295 break; 6296 case i40e_aqc_opc_send_msg_to_pf: 6297 ret = i40e_vc_process_vf_msg(pf, 6298 le16_to_cpu(event.desc.retval), 6299 le32_to_cpu(event.desc.cookie_high), 6300 le32_to_cpu(event.desc.cookie_low), 6301 event.msg_buf, 6302 event.msg_len); 6303 break; 6304 case i40e_aqc_opc_lldp_update_mib: 6305 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 6306 #ifdef CONFIG_I40E_DCB 6307 rtnl_lock(); 6308 ret = i40e_handle_lldp_event(pf, &event); 6309 rtnl_unlock(); 6310 #endif /* CONFIG_I40E_DCB */ 6311 break; 6312 case i40e_aqc_opc_event_lan_overflow: 6313 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 6314 i40e_handle_lan_overflow_event(pf, &event); 6315 break; 6316 case i40e_aqc_opc_send_msg_to_peer: 6317 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 6318 break; 6319 case i40e_aqc_opc_nvm_erase: 6320 case i40e_aqc_opc_nvm_update: 6321 case i40e_aqc_opc_oem_post_update: 6322 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); 6323 break; 6324 default: 6325 dev_info(&pf->pdev->dev, 6326 "ARQ Error: Unknown event 0x%04x received\n", 6327 opcode); 6328 break; 6329 } 6330 } while (pending && (i++ < pf->adminq_work_limit)); 6331 6332 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 6333 /* re-enable Admin queue interrupt cause */ 6334 val = rd32(hw, I40E_PFINT_ICR0_ENA); 6335 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 6336 wr32(hw, I40E_PFINT_ICR0_ENA, val); 6337 i40e_flush(hw); 6338 6339 kfree(event.msg_buf); 6340 } 6341 6342 /** 6343 * i40e_verify_eeprom - make sure eeprom is good to use 6344 * @pf: board private structure 6345 **/ 6346 static void i40e_verify_eeprom(struct i40e_pf *pf) 6347 { 6348 int err; 6349 6350 err = i40e_diag_eeprom_test(&pf->hw); 6351 if (err) { 6352 /* retry in case of garbage read */ 6353 err = i40e_diag_eeprom_test(&pf->hw); 6354 if (err) { 6355 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 6356 err); 6357 set_bit(__I40E_BAD_EEPROM, &pf->state); 6358 } 6359 } 6360 6361 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 6362 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 6363 clear_bit(__I40E_BAD_EEPROM, &pf->state); 6364 } 6365 } 6366 6367 /** 6368 * i40e_enable_pf_switch_lb 6369 * @pf: pointer to the PF structure 6370 * 6371 * enable switch loop back or die - no point in a return value 6372 **/ 6373 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 6374 { 6375 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6376 struct i40e_vsi_context ctxt; 6377 int ret; 6378 6379 ctxt.seid = pf->main_vsi_seid; 6380 ctxt.pf_num = pf->hw.pf_id; 6381 ctxt.vf_num = 0; 6382 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6383 if (ret) { 6384 dev_info(&pf->pdev->dev, 6385 "couldn't get PF vsi config, err %s aq_err %s\n", 6386 i40e_stat_str(&pf->hw, ret), 6387 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6388 return; 6389 } 6390 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6391 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6392 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6393 6394 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6395 if 
(ret) { 6396 dev_info(&pf->pdev->dev, 6397 "update vsi switch failed, err %s aq_err %s\n", 6398 i40e_stat_str(&pf->hw, ret), 6399 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6400 } 6401 } 6402 6403 /** 6404 * i40e_disable_pf_switch_lb 6405 * @pf: pointer to the PF structure 6406 * 6407 * disable switch loop back or die - no point in a return value 6408 **/ 6409 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 6410 { 6411 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6412 struct i40e_vsi_context ctxt; 6413 int ret; 6414 6415 ctxt.seid = pf->main_vsi_seid; 6416 ctxt.pf_num = pf->hw.pf_id; 6417 ctxt.vf_num = 0; 6418 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6419 if (ret) { 6420 dev_info(&pf->pdev->dev, 6421 "couldn't get PF vsi config, err %s aq_err %s\n", 6422 i40e_stat_str(&pf->hw, ret), 6423 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6424 return; 6425 } 6426 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6427 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6428 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6429 6430 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6431 if (ret) { 6432 dev_info(&pf->pdev->dev, 6433 "update vsi switch failed, err %s aq_err %s\n", 6434 i40e_stat_str(&pf->hw, ret), 6435 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6436 } 6437 } 6438 6439 /** 6440 * i40e_config_bridge_mode - Configure the HW bridge mode 6441 * @veb: pointer to the bridge instance 6442 * 6443 * Configure the loop back mode for the LAN VSI that is downlink to the 6444 * specified HW bridge instance. It is expected this function is called 6445 * when a new HW bridge is instantiated. 6446 **/ 6447 static void i40e_config_bridge_mode(struct i40e_veb *veb) 6448 { 6449 struct i40e_pf *pf = veb->pf; 6450 6451 if (pf->hw.debug_mask & I40E_DEBUG_LAN) 6452 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 6453 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 6454 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 6455 i40e_disable_pf_switch_lb(pf); 6456 else 6457 i40e_enable_pf_switch_lb(pf); 6458 } 6459 6460 /** 6461 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 6462 * @veb: pointer to the VEB instance 6463 * 6464 * This is a recursive function that first builds the attached VSIs then 6465 * recurses in to build the next layer of VEB. We track the connections 6466 * through our own index numbers because the seid's from the HW could 6467 * change across the reset. 
6468 **/ 6469 static int i40e_reconstitute_veb(struct i40e_veb *veb) 6470 { 6471 struct i40e_vsi *ctl_vsi = NULL; 6472 struct i40e_pf *pf = veb->pf; 6473 int v, veb_idx; 6474 int ret; 6475 6476 /* build VSI that owns this VEB, temporarily attached to base VEB */ 6477 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 6478 if (pf->vsi[v] && 6479 pf->vsi[v]->veb_idx == veb->idx && 6480 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 6481 ctl_vsi = pf->vsi[v]; 6482 break; 6483 } 6484 } 6485 if (!ctl_vsi) { 6486 dev_info(&pf->pdev->dev, 6487 "missing owner VSI for veb_idx %d\n", veb->idx); 6488 ret = -ENOENT; 6489 goto end_reconstitute; 6490 } 6491 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 6492 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 6493 ret = i40e_add_vsi(ctl_vsi); 6494 if (ret) { 6495 dev_info(&pf->pdev->dev, 6496 "rebuild of veb_idx %d owner VSI failed: %d\n", 6497 veb->idx, ret); 6498 goto end_reconstitute; 6499 } 6500 i40e_vsi_reset_stats(ctl_vsi); 6501 6502 /* create the VEB in the switch and move the VSI onto the VEB */ 6503 ret = i40e_add_veb(veb, ctl_vsi); 6504 if (ret) 6505 goto end_reconstitute; 6506 6507 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 6508 veb->bridge_mode = BRIDGE_MODE_VEB; 6509 else 6510 veb->bridge_mode = BRIDGE_MODE_VEPA; 6511 i40e_config_bridge_mode(veb); 6512 6513 /* create the remaining VSIs attached to this VEB */ 6514 for (v = 0; v < pf->num_alloc_vsi; v++) { 6515 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 6516 continue; 6517 6518 if (pf->vsi[v]->veb_idx == veb->idx) { 6519 struct i40e_vsi *vsi = pf->vsi[v]; 6520 6521 vsi->uplink_seid = veb->seid; 6522 ret = i40e_add_vsi(vsi); 6523 if (ret) { 6524 dev_info(&pf->pdev->dev, 6525 "rebuild of vsi_idx %d failed: %d\n", 6526 v, ret); 6527 goto end_reconstitute; 6528 } 6529 i40e_vsi_reset_stats(vsi); 6530 } 6531 } 6532 6533 /* create any VEBs attached to this VEB - RECURSION */ 6534 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 6535 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 6536 pf->veb[veb_idx]->uplink_seid = veb->seid; 6537 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 6538 if (ret) 6539 break; 6540 } 6541 } 6542 6543 end_reconstitute: 6544 return ret; 6545 } 6546 6547 /** 6548 * i40e_get_capabilities - get info about the HW 6549 * @pf: the PF struct 6550 **/ 6551 static int i40e_get_capabilities(struct i40e_pf *pf) 6552 { 6553 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 6554 u16 data_size; 6555 int buf_len; 6556 int err; 6557 6558 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 6559 do { 6560 cap_buf = kzalloc(buf_len, GFP_KERNEL); 6561 if (!cap_buf) 6562 return -ENOMEM; 6563 6564 /* this loads the data into the hw struct for us */ 6565 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 6566 &data_size, 6567 i40e_aqc_opc_list_func_capabilities, 6568 NULL); 6569 /* data loaded, buffer no longer needed */ 6570 kfree(cap_buf); 6571 6572 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 6573 /* retry with a larger buffer */ 6574 buf_len = data_size; 6575 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 6576 dev_info(&pf->pdev->dev, 6577 "capability discovery failed, err %s aq_err %s\n", 6578 i40e_stat_str(&pf->hw, err), 6579 i40e_aq_str(&pf->hw, 6580 pf->hw.aq.asq_last_status)); 6581 return -ENODEV; 6582 } 6583 } while (err); 6584 6585 if (pf->hw.debug_mask & I40E_DEBUG_USER) 6586 dev_info(&pf->pdev->dev, 6587 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 6588 
pf->hw.pf_id, pf->hw.func_caps.num_vfs, 6589 pf->hw.func_caps.num_msix_vectors, 6590 pf->hw.func_caps.num_msix_vectors_vf, 6591 pf->hw.func_caps.fd_filters_guaranteed, 6592 pf->hw.func_caps.fd_filters_best_effort, 6593 pf->hw.func_caps.num_tx_qp, 6594 pf->hw.func_caps.num_vsis); 6595 6596 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 6597 + pf->hw.func_caps.num_vfs) 6598 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 6599 dev_info(&pf->pdev->dev, 6600 "got num_vsis %d, setting num_vsis to %d\n", 6601 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 6602 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 6603 } 6604 6605 return 0; 6606 } 6607 6608 static int i40e_vsi_clear(struct i40e_vsi *vsi); 6609 6610 /** 6611 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 6612 * @pf: board private structure 6613 **/ 6614 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 6615 { 6616 struct i40e_vsi *vsi; 6617 int i; 6618 6619 /* quick workaround for an NVM issue that leaves a critical register 6620 * uninitialized 6621 */ 6622 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 6623 static const u32 hkey[] = { 6624 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 6625 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 6626 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 6627 0x95b3a76d}; 6628 6629 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 6630 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 6631 } 6632 6633 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6634 return; 6635 6636 /* find existing VSI and see if it needs configuring */ 6637 vsi = NULL; 6638 for (i = 0; i < pf->num_alloc_vsi; i++) { 6639 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6640 vsi = pf->vsi[i]; 6641 break; 6642 } 6643 } 6644 6645 /* create a new VSI if none exists */ 6646 if (!vsi) { 6647 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 6648 pf->vsi[pf->lan_vsi]->seid, 0); 6649 if (!vsi) { 6650 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 6651 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6652 return; 6653 } 6654 } 6655 6656 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 6657 } 6658 6659 /** 6660 * i40e_fdir_teardown - release the Flow Director resources 6661 * @pf: board private structure 6662 **/ 6663 static void i40e_fdir_teardown(struct i40e_pf *pf) 6664 { 6665 int i; 6666 6667 i40e_fdir_filter_exit(pf); 6668 for (i = 0; i < pf->num_alloc_vsi; i++) { 6669 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6670 i40e_vsi_release(pf->vsi[i]); 6671 break; 6672 } 6673 } 6674 } 6675 6676 /** 6677 * i40e_prep_for_reset - prep for the core to reset 6678 * @pf: board private structure 6679 * 6680 * Close up the VFs and other things in prep for PF Reset. 
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
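
/* The rebuild below retraces the probe-time bring-up order: reset the PF,
 * re-init the AdminQ, re-read capabilities, re-create the HMC context, then
 * redo the basic switch setup and reconstitute the VEB/VSI tree from the
 * driver's local switch element arrays.
 */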
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
6848 * If orphan failed, we'll report the error 6849 * but try to keep going. 6850 */ 6851 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 6852 dev_info(&pf->pdev->dev, 6853 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 6854 ret); 6855 pf->vsi[pf->lan_vsi]->uplink_seid 6856 = pf->mac_seid; 6857 break; 6858 } else if (pf->veb[v]->uplink_seid == 0) { 6859 dev_info(&pf->pdev->dev, 6860 "rebuild of orphan VEB failed: %d\n", 6861 ret); 6862 } 6863 } 6864 } 6865 } 6866 6867 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 6868 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 6869 /* no VEB, so rebuild only the Main VSI */ 6870 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 6871 if (ret) { 6872 dev_info(&pf->pdev->dev, 6873 "rebuild of Main VSI failed: %d\n", ret); 6874 goto end_core_reset; 6875 } 6876 } 6877 6878 /* Reconfigure hardware for allowing smaller MSS in the case 6879 * of TSO, so that we avoid the MDD being fired and causing 6880 * a reset in the case of small MSS+TSO. 6881 */ 6882 #define I40E_REG_MSS 0x000E64DC 6883 #define I40E_REG_MSS_MIN_MASK 0x3FF0000 6884 #define I40E_64BYTE_MSS 0x400000 6885 val = rd32(hw, I40E_REG_MSS); 6886 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 6887 val &= ~I40E_REG_MSS_MIN_MASK; 6888 val |= I40E_64BYTE_MSS; 6889 wr32(hw, I40E_REG_MSS, val); 6890 } 6891 6892 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 6893 (pf->hw.aq.fw_maj_ver < 4)) { 6894 msleep(75); 6895 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 6896 if (ret) 6897 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 6898 i40e_stat_str(&pf->hw, ret), 6899 i40e_aq_str(&pf->hw, 6900 pf->hw.aq.asq_last_status)); 6901 } 6902 /* reinit the misc interrupt */ 6903 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6904 ret = i40e_setup_misc_vector(pf); 6905 6906 /* Add a filter to drop all Flow control frames from any VSI from being 6907 * transmitted. By doing so we stop a malicious VF from sending out 6908 * PAUSE or PFC frames and potentially controlling traffic for other 6909 * PF/VF VSIs. 6910 * The FW can still send Flow control frames if enabled. 6911 */ 6912 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, 6913 pf->main_vsi_seid); 6914 6915 /* restart the VSIs that were rebuilt and running before the reset */ 6916 i40e_pf_unquiesce_all_vsi(pf); 6917 6918 if (pf->num_alloc_vfs) { 6919 for (v = 0; v < pf->num_alloc_vfs; v++) 6920 i40e_reset_vf(&pf->vf[v], true); 6921 } 6922 6923 /* tell the firmware that we're starting */ 6924 i40e_send_version(pf); 6925 6926 end_core_reset: 6927 clear_bit(__I40E_RESET_FAILED, &pf->state); 6928 clear_recovery: 6929 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 6930 } 6931 6932 /** 6933 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 6934 * @pf: board private structure 6935 * 6936 * Close up the VFs and other things in prep for a Core Reset, 6937 * then get ready to rebuild the world. 
6938 **/ 6939 static void i40e_handle_reset_warning(struct i40e_pf *pf) 6940 { 6941 i40e_prep_for_reset(pf); 6942 i40e_reset_and_rebuild(pf, false); 6943 } 6944 6945 /** 6946 * i40e_handle_mdd_event 6947 * @pf: pointer to the PF structure 6948 * 6949 * Called from the MDD irq handler to identify possibly malicious vfs 6950 **/ 6951 static void i40e_handle_mdd_event(struct i40e_pf *pf) 6952 { 6953 struct i40e_hw *hw = &pf->hw; 6954 bool mdd_detected = false; 6955 bool pf_mdd_detected = false; 6956 struct i40e_vf *vf; 6957 u32 reg; 6958 int i; 6959 6960 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 6961 return; 6962 6963 /* find what triggered the MDD event */ 6964 reg = rd32(hw, I40E_GL_MDET_TX); 6965 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 6966 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 6967 I40E_GL_MDET_TX_PF_NUM_SHIFT; 6968 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 6969 I40E_GL_MDET_TX_VF_NUM_SHIFT; 6970 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 6971 I40E_GL_MDET_TX_EVENT_SHIFT; 6972 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6973 I40E_GL_MDET_TX_QUEUE_SHIFT) - 6974 pf->hw.func_caps.base_queue; 6975 if (netif_msg_tx_err(pf)) 6976 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 6977 event, queue, pf_num, vf_num); 6978 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 6979 mdd_detected = true; 6980 } 6981 reg = rd32(hw, I40E_GL_MDET_RX); 6982 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 6983 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 6984 I40E_GL_MDET_RX_FUNCTION_SHIFT; 6985 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 6986 I40E_GL_MDET_RX_EVENT_SHIFT; 6987 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6988 I40E_GL_MDET_RX_QUEUE_SHIFT) - 6989 pf->hw.func_caps.base_queue; 6990 if (netif_msg_rx_err(pf)) 6991 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 6992 event, queue, func); 6993 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 6994 mdd_detected = true; 6995 } 6996 6997 if (mdd_detected) { 6998 reg = rd32(hw, I40E_PF_MDET_TX); 6999 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 7000 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 7001 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 7002 pf_mdd_detected = true; 7003 } 7004 reg = rd32(hw, I40E_PF_MDET_RX); 7005 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 7006 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 7007 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 7008 pf_mdd_detected = true; 7009 } 7010 /* Queue belongs to the PF, initiate a reset */ 7011 if (pf_mdd_detected) { 7012 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 7013 i40e_service_event_schedule(pf); 7014 } 7015 } 7016 7017 /* see if one of the VFs needs its hand slapped */ 7018 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 7019 vf = &(pf->vf[i]); 7020 reg = rd32(hw, I40E_VP_MDET_TX(i)); 7021 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 7022 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 7023 vf->num_mdd_events++; 7024 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 7025 i); 7026 } 7027 7028 reg = rd32(hw, I40E_VP_MDET_RX(i)); 7029 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 7030 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 7031 vf->num_mdd_events++; 7032 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 7033 i); 7034 } 7035 7036 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 7037 dev_info(&pf->pdev->dev, 7038 "Too many MDD events on VF %d, disabled\n", i); 7039 
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	__be16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].index;
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
						     pf->udp_ports[i].type,
						     NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s udp port %d, index %d failed, err %s aq_err %s\n",
					 port ? "add" : "delete",
					 ntohs(port), i,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				pf->udp_ports[i].index = 0;
			}
		}
	}
#endif
}
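
/* All of the driver's deferred housekeeping funnels through the single
 * service_task work item below; each subtask checks its own state/flag bits
 * and returns quickly when it has nothing to do.
 */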
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	i40e_detect_recover_hung(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
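
/* Per-VSI-type ring and vector sizing follows; descriptor counts are rounded
 * with ALIGN() to I40E_REQ_DESCRIPTOR_MULTIPLE, the ring-length granularity
 * the hardware expects.
 */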
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	/* rx_rings points into the second half of the same allocation, so
	 * freeing tx_rings releases both arrays
	 */
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
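
/* Note that i40e_vsi_mem_alloc() below only reserves the software slot and
 * default bookkeeping for a VSI; the corresponding switch element is created
 * later through i40e_add_vsi().
 */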
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}

/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 */
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
		if (!tx_ring)
			goto err_out;

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;
		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		if (vsi->back->flags &
I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) 7462 tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; 7463 vsi->tx_rings[i] = tx_ring; 7464 7465 rx_ring = &tx_ring[1]; 7466 rx_ring->queue_index = i; 7467 rx_ring->reg_idx = vsi->base_queue + i; 7468 rx_ring->ring_active = false; 7469 rx_ring->vsi = vsi; 7470 rx_ring->netdev = vsi->netdev; 7471 rx_ring->dev = &pf->pdev->dev; 7472 rx_ring->count = vsi->num_desc; 7473 rx_ring->size = 0; 7474 rx_ring->dcb_tc = 0; 7475 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) 7476 set_ring_16byte_desc_enabled(rx_ring); 7477 else 7478 clear_ring_16byte_desc_enabled(rx_ring); 7479 vsi->rx_rings[i] = rx_ring; 7480 } 7481 7482 return 0; 7483 7484 err_out: 7485 i40e_vsi_clear_rings(vsi); 7486 return -ENOMEM; 7487 } 7488 7489 /** 7490 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 7491 * @pf: board private structure 7492 * @vectors: the number of MSI-X vectors to request 7493 * 7494 * Returns the number of vectors reserved, or error 7495 **/ 7496 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 7497 { 7498 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 7499 I40E_MIN_MSIX, vectors); 7500 if (vectors < 0) { 7501 dev_info(&pf->pdev->dev, 7502 "MSI-X vector reservation failed: %d\n", vectors); 7503 vectors = 0; 7504 } 7505 7506 return vectors; 7507 } 7508 7509 /** 7510 * i40e_init_msix - Setup the MSIX capability 7511 * @pf: board private structure 7512 * 7513 * Work with the OS to set up the MSIX vectors needed. 7514 * 7515 * Returns the number of vectors reserved or negative on failure 7516 **/ 7517 static int i40e_init_msix(struct i40e_pf *pf) 7518 { 7519 struct i40e_hw *hw = &pf->hw; 7520 int vectors_left; 7521 int v_budget, i; 7522 int v_actual; 7523 7524 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7525 return -ENODEV; 7526 7527 /* The number of vectors we'll request will be comprised of: 7528 * - Add 1 for "other" cause for Admin Queue events, etc. 7529 * - The number of LAN queue pairs 7530 * - Queues being used for RSS. 7531 * We don't need as many as max_rss_size vectors. 7532 * use rss_size instead in the calculation since that 7533 * is governed by number of cpus in the system. 7534 * - assumes symmetric Tx/Rx pairing 7535 * - The number of VMDq pairs 7536 #ifdef I40E_FCOE 7537 * - The number of FCOE qps. 7538 #endif 7539 * Once we count this up, try the request. 7540 * 7541 * If we can't get what we want, we'll simplify to nearly nothing 7542 * and try again. If that still fails, we punt. 7543 */ 7544 vectors_left = hw->func_caps.num_msix_vectors; 7545 v_budget = 0; 7546 7547 /* reserve one vector for miscellaneous handler */ 7548 if (vectors_left) { 7549 v_budget++; 7550 vectors_left--; 7551 } 7552 7553 /* reserve vectors for the main PF traffic queues */ 7554 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7555 vectors_left -= pf->num_lan_msix; 7556 v_budget += pf->num_lan_msix; 7557 7558 /* reserve one vector for sideband flow director */ 7559 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7560 if (vectors_left) { 7561 v_budget++; 7562 vectors_left--; 7563 } else { 7564 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7565 } 7566 } 7567 7568 #ifdef I40E_FCOE 7569 /* can we reserve enough for FCoE? 
*/ 7570 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7571 if (!vectors_left) 7572 pf->num_fcoe_msix = 0; 7573 else if (vectors_left >= pf->num_fcoe_qps) 7574 pf->num_fcoe_msix = pf->num_fcoe_qps; 7575 else 7576 pf->num_fcoe_msix = 1; 7577 v_budget += pf->num_fcoe_msix; 7578 vectors_left -= pf->num_fcoe_msix; 7579 } 7580 7581 #endif 7582 /* any vectors left over go for VMDq support */ 7583 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7584 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7585 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 7586 7587 /* if we're short on vectors for what's desired, we limit 7588 * the queues per vmdq. If this is still more than are 7589 * available, the user will need to change the number of 7590 * queues/vectors used by the PF later with the ethtool 7591 * channels command 7592 */ 7593 if (vmdq_vecs < vmdq_vecs_wanted) 7594 pf->num_vmdq_qps = 1; 7595 pf->num_vmdq_msix = pf->num_vmdq_qps; 7596 7597 v_budget += vmdq_vecs; 7598 vectors_left -= vmdq_vecs; 7599 } 7600 7601 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7602 GFP_KERNEL); 7603 if (!pf->msix_entries) 7604 return -ENOMEM; 7605 7606 for (i = 0; i < v_budget; i++) 7607 pf->msix_entries[i].entry = i; 7608 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 7609 7610 if (v_actual != v_budget) { 7611 /* If we have limited resources, we will start with no vectors 7612 * for the special features and then allocate vectors to some 7613 * of these features based on the policy and at the end disable 7614 * the features that did not get any vectors. 7615 */ 7616 #ifdef I40E_FCOE 7617 pf->num_fcoe_qps = 0; 7618 pf->num_fcoe_msix = 0; 7619 #endif 7620 pf->num_vmdq_msix = 0; 7621 } 7622 7623 if (v_actual < I40E_MIN_MSIX) { 7624 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7625 kfree(pf->msix_entries); 7626 pf->msix_entries = NULL; 7627 return -ENODEV; 7628 7629 } else if (v_actual == I40E_MIN_MSIX) { 7630 /* Adjust for minimal MSIX use */ 7631 pf->num_vmdq_vsis = 0; 7632 pf->num_vmdq_qps = 0; 7633 pf->num_lan_qps = 1; 7634 pf->num_lan_msix = 1; 7635 7636 } else if (v_actual != v_budget) { 7637 int vec; 7638 7639 /* reserve the misc vector */ 7640 vec = v_actual - 1; 7641 7642 /* Scale vector usage down */ 7643 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 7644 pf->num_vmdq_vsis = 1; 7645 pf->num_vmdq_qps = 1; 7646 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7647 7648 /* partition out the remaining vectors */ 7649 switch (vec) { 7650 case 2: 7651 pf->num_lan_msix = 1; 7652 break; 7653 case 3: 7654 #ifdef I40E_FCOE 7655 /* give one vector to FCoE */ 7656 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7657 pf->num_lan_msix = 1; 7658 pf->num_fcoe_msix = 1; 7659 } 7660 #else 7661 pf->num_lan_msix = 2; 7662 #endif 7663 break; 7664 default: 7665 #ifdef I40E_FCOE 7666 /* give one vector to FCoE */ 7667 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7668 pf->num_fcoe_msix = 1; 7669 vec--; 7670 } 7671 #endif 7672 /* give the rest to the PF */ 7673 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); 7674 break; 7675 } 7676 } 7677 7678 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 7679 (pf->num_vmdq_msix == 0)) { 7680 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7681 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7682 } 7683 #ifdef I40E_FCOE 7684 7685 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7686 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 7687 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 7688 } 7689 #endif 7690 return v_actual; 7691 
} 7692 7693 /** 7694 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 7695 * @vsi: the VSI being configured 7696 * @v_idx: index of the vector in the vsi struct 7697 * 7698 * We allocate one q_vector. If allocation fails we return -ENOMEM. 7699 **/ 7700 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 7701 { 7702 struct i40e_q_vector *q_vector; 7703 7704 /* allocate q_vector */ 7705 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 7706 if (!q_vector) 7707 return -ENOMEM; 7708 7709 q_vector->vsi = vsi; 7710 q_vector->v_idx = v_idx; 7711 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 7712 if (vsi->netdev) 7713 netif_napi_add(vsi->netdev, &q_vector->napi, 7714 i40e_napi_poll, NAPI_POLL_WEIGHT); 7715 7716 q_vector->rx.latency_range = I40E_LOW_LATENCY; 7717 q_vector->tx.latency_range = I40E_LOW_LATENCY; 7718 7719 /* tie q_vector and vsi together */ 7720 vsi->q_vectors[v_idx] = q_vector; 7721 7722 return 0; 7723 } 7724 7725 /** 7726 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 7727 * @vsi: the VSI being configured 7728 * 7729 * We allocate one q_vector per queue interrupt. If allocation fails we 7730 * return -ENOMEM. 7731 **/ 7732 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 7733 { 7734 struct i40e_pf *pf = vsi->back; 7735 int v_idx, num_q_vectors; 7736 int err; 7737 7738 /* if not MSIX, give the one vector only to the LAN VSI */ 7739 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7740 num_q_vectors = vsi->num_q_vectors; 7741 else if (vsi == pf->vsi[pf->lan_vsi]) 7742 num_q_vectors = 1; 7743 else 7744 return -EINVAL; 7745 7746 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 7747 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 7748 if (err) 7749 goto err_out; 7750 } 7751 7752 return 0; 7753 7754 err_out: 7755 while (v_idx--) 7756 i40e_free_q_vector(vsi, v_idx); 7757 7758 return err; 7759 } 7760 7761 /** 7762 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 7763 * @pf: board private structure to initialize 7764 **/ 7765 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 7766 { 7767 int vectors = 0; 7768 ssize_t size; 7769 7770 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 7771 vectors = i40e_init_msix(pf); 7772 if (vectors < 0) { 7773 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 7774 #ifdef I40E_FCOE 7775 I40E_FLAG_FCOE_ENABLED | 7776 #endif 7777 I40E_FLAG_RSS_ENABLED | 7778 I40E_FLAG_DCB_CAPABLE | 7779 I40E_FLAG_SRIOV_ENABLED | 7780 I40E_FLAG_FD_SB_ENABLED | 7781 I40E_FLAG_FD_ATR_ENABLED | 7782 I40E_FLAG_VMDQ_ENABLED); 7783 7784 /* rework the queue expectations without MSIX */ 7785 i40e_determine_queue_usage(pf); 7786 } 7787 } 7788 7789 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 7790 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 7791 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 7792 vectors = pci_enable_msi(pf->pdev); 7793 if (vectors < 0) { 7794 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 7795 vectors); 7796 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 7797 } 7798 vectors = 1; /* one MSI or Legacy vector */ 7799 } 7800 7801 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 7802 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 7803 7804 /* set up vector assignment tracking */ 7805 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 7806 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7807 if (!pf->irq_pile) { 7808 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); 7809 return -ENOMEM; 7810 } 7811 
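	/* The irq_pile is a small allocator over the reserved vectors:
	 * num_entries is the pool size and each entry tracks which VSI
	 * index owns that vector. As an illustration, on an 8-CPU system
	 * with ample MSI-X this typically ends up as 1 misc + 8 LAN
	 * vectors (the exact budget is computed in i40e_init_msix above);
	 * vector 0 is claimed just below for the misc handler.
	 */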
pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: lookup table buffer (currently not consumed; a round-robin
 *       table is built locally instead)
 * @lut_size: size of the buffer at @lut
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_aqc_get_set_rss_key_data rss_key;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	bool pf_lut = false;
	u8 *rss_lut;
	int ret, i;

	memset(&rss_key, 0, sizeof(rss_key));
	memcpy(&rss_key, seed, sizeof(rss_key));

	rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
	if (!rss_lut)
		return -ENOMEM;

	/* Populate the LUT with max no.
of queues in round robin fashion */ 7883 for (i = 0; i < vsi->rss_table_size; i++) 7884 rss_lut[i] = i % vsi->rss_size; 7885 7886 ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); 7887 if (ret) { 7888 dev_info(&pf->pdev->dev, 7889 "Cannot set RSS key, err %s aq_err %s\n", 7890 i40e_stat_str(&pf->hw, ret), 7891 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 7892 goto config_rss_aq_out; 7893 } 7894 7895 if (vsi->type == I40E_VSI_MAIN) 7896 pf_lut = true; 7897 7898 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, 7899 vsi->rss_table_size); 7900 if (ret) 7901 dev_info(&pf->pdev->dev, 7902 "Cannot set RSS lut, err %s aq_err %s\n", 7903 i40e_stat_str(&pf->hw, ret), 7904 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 7905 7906 config_rss_aq_out: 7907 kfree(rss_lut); 7908 return ret; 7909 } 7910 7911 /** 7912 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used 7913 * @vsi: VSI structure 7914 **/ 7915 static int i40e_vsi_config_rss(struct i40e_vsi *vsi) 7916 { 7917 u8 seed[I40E_HKEY_ARRAY_SIZE]; 7918 struct i40e_pf *pf = vsi->back; 7919 u8 *lut; 7920 int ret; 7921 7922 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) 7923 return 0; 7924 7925 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 7926 if (!lut) 7927 return -ENOMEM; 7928 7929 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); 7930 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 7931 vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); 7932 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); 7933 kfree(lut); 7934 7935 return ret; 7936 } 7937 7938 /** 7939 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers 7940 * @vsi: Pointer to vsi structure 7941 * @seed: RSS hash seed 7942 * @lut: Lookup table 7943 * @lut_size: Lookup table size 7944 * 7945 * Returns 0 on success, negative on failure 7946 **/ 7947 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, 7948 const u8 *lut, u16 lut_size) 7949 { 7950 struct i40e_pf *pf = vsi->back; 7951 struct i40e_hw *hw = &pf->hw; 7952 u8 i; 7953 7954 /* Fill out hash function seed */ 7955 if (seed) { 7956 u32 *seed_dw = (u32 *)seed; 7957 7958 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 7959 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); 7960 } 7961 7962 if (lut) { 7963 u32 *lut_dw = (u32 *)lut; 7964 7965 if (lut_size != I40E_HLUT_ARRAY_SIZE) 7966 return -EINVAL; 7967 7968 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) 7969 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); 7970 } 7971 i40e_flush(hw); 7972 7973 return 0; 7974 } 7975 7976 /** 7977 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers 7978 * @vsi: Pointer to VSI structure 7979 * @seed: Buffer to store the keys 7980 * @lut: Buffer to store the lookup table entries 7981 * @lut_size: Size of buffer to store the lookup table entries 7982 * 7983 * Returns 0 on success, negative on failure 7984 */ 7985 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, 7986 u8 *lut, u16 lut_size) 7987 { 7988 struct i40e_pf *pf = vsi->back; 7989 struct i40e_hw *hw = &pf->hw; 7990 u16 i; 7991 7992 if (seed) { 7993 u32 *seed_dw = (u32 *)seed; 7994 7995 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 7996 seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); 7997 } 7998 if (lut) { 7999 u32 *lut_dw = (u32 *)lut; 8000 8001 if (lut_size != I40E_HLUT_ARRAY_SIZE) 8002 return -EINVAL; 8003 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) 8004 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); 8005 } 8006 8007 return 0; 8008 } 8009 8010 /** 8011 * i40e_config_rss - Configure RSS keys and lut 
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: number of queues to spread the hash across
 */
static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
			      u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
	       ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = rd32(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
		  (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
		  (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	wr32(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
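 *               (the final count is clamped to pf->rss_size_max below)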
8120 * 8121 * returns 0 if rss is not enabled, if enabled returns the final rss queue 8122 * count which may be different from the requested queue count. 8123 **/ 8124 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 8125 { 8126 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 8127 int new_rss_size; 8128 8129 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 8130 return 0; 8131 8132 new_rss_size = min_t(int, queue_count, pf->rss_size_max); 8133 8134 if (queue_count != vsi->num_queue_pairs) { 8135 vsi->req_queue_pairs = queue_count; 8136 i40e_prep_for_reset(pf); 8137 8138 pf->alloc_rss_size = new_rss_size; 8139 8140 i40e_reset_and_rebuild(pf, true); 8141 8142 /* Discard the user configured hash keys and lut, if less 8143 * queues are enabled. 8144 */ 8145 if (queue_count < vsi->rss_size) { 8146 i40e_clear_rss_config_user(vsi); 8147 dev_dbg(&pf->pdev->dev, 8148 "discard user configured hash keys and lut\n"); 8149 } 8150 8151 /* Reset vsi->rss_size, as number of enabled queues changed */ 8152 vsi->rss_size = min_t(int, pf->alloc_rss_size, 8153 vsi->num_queue_pairs); 8154 8155 i40e_pf_config_rss(pf); 8156 } 8157 dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n", 8158 pf->alloc_rss_size, pf->rss_size_max); 8159 return pf->alloc_rss_size; 8160 } 8161 8162 /** 8163 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition 8164 * @pf: board private structure 8165 **/ 8166 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) 8167 { 8168 i40e_status status; 8169 bool min_valid, max_valid; 8170 u32 max_bw, min_bw; 8171 8172 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, 8173 &min_valid, &max_valid); 8174 8175 if (!status) { 8176 if (min_valid) 8177 pf->npar_min_bw = min_bw; 8178 if (max_valid) 8179 pf->npar_max_bw = max_bw; 8180 } 8181 8182 return status; 8183 } 8184 8185 /** 8186 * i40e_set_npar_bw_setting - Set BW settings for this PF partition 8187 * @pf: board private structure 8188 **/ 8189 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) 8190 { 8191 struct i40e_aqc_configure_partition_bw_data bw_data; 8192 i40e_status status; 8193 8194 /* Set the valid bit for this PF */ 8195 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); 8196 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; 8197 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; 8198 8199 /* Set the new bandwidths */ 8200 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); 8201 8202 return status; 8203 } 8204 8205 /** 8206 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition 8207 * @pf: board private structure 8208 **/ 8209 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) 8210 { 8211 /* Commit temporary BW setting to permanent NVM image */ 8212 enum i40e_admin_queue_err last_aq_status; 8213 i40e_status ret; 8214 u16 nvm_word; 8215 8216 if (pf->hw.partition_id != 1) { 8217 dev_info(&pf->pdev->dev, 8218 "Commit BW only works on partition 1! 
This is partition %d", 8219 pf->hw.partition_id); 8220 ret = I40E_NOT_SUPPORTED; 8221 goto bw_commit_out; 8222 } 8223 8224 /* Acquire NVM for read access */ 8225 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 8226 last_aq_status = pf->hw.aq.asq_last_status; 8227 if (ret) { 8228 dev_info(&pf->pdev->dev, 8229 "Cannot acquire NVM for read access, err %s aq_err %s\n", 8230 i40e_stat_str(&pf->hw, ret), 8231 i40e_aq_str(&pf->hw, last_aq_status)); 8232 goto bw_commit_out; 8233 } 8234 8235 /* Read word 0x10 of NVM - SW compatibility word 1 */ 8236 ret = i40e_aq_read_nvm(&pf->hw, 8237 I40E_SR_NVM_CONTROL_WORD, 8238 0x10, sizeof(nvm_word), &nvm_word, 8239 false, NULL); 8240 /* Save off last admin queue command status before releasing 8241 * the NVM 8242 */ 8243 last_aq_status = pf->hw.aq.asq_last_status; 8244 i40e_release_nvm(&pf->hw); 8245 if (ret) { 8246 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", 8247 i40e_stat_str(&pf->hw, ret), 8248 i40e_aq_str(&pf->hw, last_aq_status)); 8249 goto bw_commit_out; 8250 } 8251 8252 /* Wait a bit for NVM release to complete */ 8253 msleep(50); 8254 8255 /* Acquire NVM for write access */ 8256 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 8257 last_aq_status = pf->hw.aq.asq_last_status; 8258 if (ret) { 8259 dev_info(&pf->pdev->dev, 8260 "Cannot acquire NVM for write access, err %s aq_err %s\n", 8261 i40e_stat_str(&pf->hw, ret), 8262 i40e_aq_str(&pf->hw, last_aq_status)); 8263 goto bw_commit_out; 8264 } 8265 /* Write it back out unchanged to initiate update NVM, 8266 * which will force a write of the shadow (alt) RAM to 8267 * the NVM - thus storing the bandwidth values permanently. 8268 */ 8269 ret = i40e_aq_update_nvm(&pf->hw, 8270 I40E_SR_NVM_CONTROL_WORD, 8271 0x10, sizeof(nvm_word), 8272 &nvm_word, true, NULL); 8273 /* Save off last admin queue command status before releasing 8274 * the NVM 8275 */ 8276 last_aq_status = pf->hw.aq.asq_last_status; 8277 i40e_release_nvm(&pf->hw); 8278 if (ret) 8279 dev_info(&pf->pdev->dev, 8280 "BW settings NOT SAVED, err %s aq_err %s\n", 8281 i40e_stat_str(&pf->hw, ret), 8282 i40e_aq_str(&pf->hw, last_aq_status)); 8283 bw_commit_out: 8284 8285 return ret; 8286 } 8287 8288 /** 8289 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 8290 * @pf: board private structure to initialize 8291 * 8292 * i40e_sw_init initializes the Adapter private data structure. 8293 * Fields are initialized based on PCI device information and 8294 * OS network device settings (MTU size). 
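 *
 * Returns 0 on success, -ENOMEM if the queue tracking structure
 * cannot be allocated.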
8295 **/ 8296 static int i40e_sw_init(struct i40e_pf *pf) 8297 { 8298 int err = 0; 8299 int size; 8300 8301 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 8302 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 8303 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; 8304 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 8305 if (I40E_DEBUG_USER & debug) 8306 pf->hw.debug_mask = debug; 8307 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 8308 I40E_DEFAULT_MSG_ENABLE); 8309 } 8310 8311 /* Set default capability flags */ 8312 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 8313 I40E_FLAG_MSI_ENABLED | 8314 I40E_FLAG_LINK_POLLING_ENABLED | 8315 I40E_FLAG_MSIX_ENABLED; 8316 8317 if (iommu_present(&pci_bus_type)) 8318 pf->flags |= I40E_FLAG_RX_PS_ENABLED; 8319 else 8320 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; 8321 8322 /* Set default ITR */ 8323 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; 8324 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; 8325 8326 /* Depending on PF configurations, it is possible that the RSS 8327 * maximum might end up larger than the available queues 8328 */ 8329 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); 8330 pf->alloc_rss_size = 1; 8331 pf->rss_table_size = pf->hw.func_caps.rss_table_size; 8332 pf->rss_size_max = min_t(int, pf->rss_size_max, 8333 pf->hw.func_caps.num_tx_qp); 8334 if (pf->hw.func_caps.rss) { 8335 pf->flags |= I40E_FLAG_RSS_ENABLED; 8336 pf->alloc_rss_size = min_t(int, pf->rss_size_max, 8337 num_online_cpus()); 8338 } 8339 8340 /* MFP mode enabled */ 8341 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { 8342 pf->flags |= I40E_FLAG_MFP_ENABLED; 8343 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 8344 if (i40e_get_npar_bw_setting(pf)) 8345 dev_warn(&pf->pdev->dev, 8346 "Could not get NPAR bw settings\n"); 8347 else 8348 dev_info(&pf->pdev->dev, 8349 "Min BW = %8.8x, Max BW = %8.8x\n", 8350 pf->npar_min_bw, pf->npar_max_bw); 8351 } 8352 8353 /* FW/NVM is not yet fixed in this regard */ 8354 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 8355 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 8356 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8357 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 8358 if (pf->flags & I40E_FLAG_MFP_ENABLED && 8359 pf->hw.num_partitions > 1) 8360 dev_info(&pf->pdev->dev, 8361 "Flow Director Sideband mode Disabled in MFP mode\n"); 8362 else 8363 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8364 pf->fdir_pf_filter_count = 8365 pf->hw.func_caps.fd_filters_guaranteed; 8366 pf->hw.fdir_shared_filter_count = 8367 pf->hw.func_caps.fd_filters_best_effort; 8368 } 8369 8370 if (pf->hw.func_caps.vmdq) { 8371 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 8372 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 8373 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); 8374 } 8375 8376 #ifdef I40E_FCOE 8377 i40e_init_pf_fcoe(pf); 8378 8379 #endif /* I40E_FCOE */ 8380 #ifdef CONFIG_PCI_IOV 8381 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { 8382 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 8383 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 8384 pf->num_req_vfs = min_t(int, 8385 pf->hw.func_caps.num_vfs, 8386 I40E_MAX_VF_COUNT); 8387 } 8388 #endif /* CONFIG_PCI_IOV */ 8389 if (pf->hw.mac.type == I40E_MAC_X722) { 8390 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | 8391 I40E_FLAG_128_QP_RSS_CAPABLE | 8392 I40E_FLAG_HW_ATR_EVICT_CAPABLE | 8393 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8394 I40E_FLAG_WB_ON_ITR_CAPABLE | 8395 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8396 
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8397 } 8398 pf->eeprom_version = 0xDEAD; 8399 pf->lan_veb = I40E_NO_VEB; 8400 pf->lan_vsi = I40E_NO_VSI; 8401 8402 /* By default FW has this off for performance reasons */ 8403 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; 8404 8405 /* set up queue assignment tracking */ 8406 size = sizeof(struct i40e_lump_tracking) 8407 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 8408 pf->qp_pile = kzalloc(size, GFP_KERNEL); 8409 if (!pf->qp_pile) { 8410 err = -ENOMEM; 8411 goto sw_init_done; 8412 } 8413 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 8414 pf->qp_pile->search_hint = 0; 8415 8416 pf->tx_timeout_recovery_level = 1; 8417 8418 mutex_init(&pf->switch_mutex); 8419 8420 /* If NPAR is enabled nudge the Tx scheduler */ 8421 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) 8422 i40e_set_npar_bw_setting(pf); 8423 8424 sw_init_done: 8425 return err; 8426 } 8427 8428 /** 8429 * i40e_set_ntuple - set the ntuple feature flag and take action 8430 * @pf: board private structure to initialize 8431 * @features: the feature set that the stack is suggesting 8432 * 8433 * returns a bool to indicate if reset needs to happen 8434 **/ 8435 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 8436 { 8437 bool need_reset = false; 8438 8439 /* Check if Flow Director n-tuple support was enabled or disabled. If 8440 * the state changed, we need to reset. 8441 */ 8442 if (features & NETIF_F_NTUPLE) { 8443 /* Enable filters and mark for reset */ 8444 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 8445 need_reset = true; 8446 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8447 } else { 8448 /* turn off filters, mark for reset and clear SW filter list */ 8449 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8450 need_reset = true; 8451 i40e_fdir_filter_exit(pf); 8452 } 8453 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 8454 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 8455 /* reset fd counters */ 8456 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; 8457 pf->fdir_pf_active_filters = 0; 8458 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8459 if (I40E_DEBUG_FD & pf->hw.debug_mask) 8460 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); 8461 /* if ATR was auto disabled it can be re-enabled. 
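		 * Clearing the auto-disable latch below lets the driver
		 * resume adding ATR filters once sideband filtering is
		 * turned off.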
		 */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	}
	return need_reset;
}

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return 0;
}

#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
/**
 * i40e_get_udp_port_idx - Look up a UDP port possibly offloaded for Rx
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].index == port)
			return i;
	}

	return i;
}

#endif

#if IS_ENABLED(CONFIG_VXLAN)
/**
 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 **/
static void i40e_add_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 next_idx;
	u8 idx;

	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "vxlan port %d already offloaded\n",
			    ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].index = port;
	pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}

/**
 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: UDP port number that VXLAN stopped listening to
 **/
static void i40e_del_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 idx;

	if (sa_family ==
AF_INET6) 8576 return; 8577 8578 idx = i40e_get_udp_port_idx(pf, port); 8579 8580 /* Check if port already exists */ 8581 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8582 /* if port exists, set it to 0 (mark for deletion) 8583 * and make it pending 8584 */ 8585 pf->udp_ports[idx].index = 0; 8586 pf->pending_udp_bitmap |= BIT_ULL(idx); 8587 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8588 } else { 8589 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", 8590 ntohs(port)); 8591 } 8592 } 8593 #endif 8594 8595 #if IS_ENABLED(CONFIG_GENEVE) 8596 /** 8597 * i40e_add_geneve_port - Get notifications about GENEVE ports that come up 8598 * @netdev: This physical port's netdev 8599 * @sa_family: Socket Family that GENEVE is notifying us about 8600 * @port: New UDP port number that GENEVE started listening to 8601 **/ 8602 static void i40e_add_geneve_port(struct net_device *netdev, 8603 sa_family_t sa_family, __be16 port) 8604 { 8605 struct i40e_netdev_priv *np = netdev_priv(netdev); 8606 struct i40e_vsi *vsi = np->vsi; 8607 struct i40e_pf *pf = vsi->back; 8608 u8 next_idx; 8609 u8 idx; 8610 8611 if (sa_family == AF_INET6) 8612 return; 8613 8614 idx = i40e_get_udp_port_idx(pf, port); 8615 8616 /* Check if port already exists */ 8617 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8618 netdev_info(netdev, "udp port %d already offloaded\n", 8619 ntohs(port)); 8620 return; 8621 } 8622 8623 /* Now check if there is space to add the new port */ 8624 next_idx = i40e_get_udp_port_idx(pf, 0); 8625 8626 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8627 netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", 8628 ntohs(port)); 8629 return; 8630 } 8631 8632 /* New port: add it and mark its index in the bitmap */ 8633 pf->udp_ports[next_idx].index = port; 8634 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; 8635 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 8636 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8637 8638 dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); 8639 } 8640 8641 /** 8642 * i40e_del_geneve_port - Get notifications about GENEVE ports that go away 8643 * @netdev: This physical port's netdev 8644 * @sa_family: Socket Family that GENEVE is notifying us about 8645 * @port: UDP port number that GENEVE stopped listening to 8646 **/ 8647 static void i40e_del_geneve_port(struct net_device *netdev, 8648 sa_family_t sa_family, __be16 port) 8649 { 8650 struct i40e_netdev_priv *np = netdev_priv(netdev); 8651 struct i40e_vsi *vsi = np->vsi; 8652 struct i40e_pf *pf = vsi->back; 8653 u8 idx; 8654 8655 if (sa_family == AF_INET6) 8656 return; 8657 8658 idx = i40e_get_udp_port_idx(pf, port); 8659 8660 /* Check if port already exists */ 8661 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8662 /* if port exists, set it to 0 (mark for deletion) 8663 * and make it pending 8664 */ 8665 pf->udp_ports[idx].index = 0; 8666 pf->pending_udp_bitmap |= BIT_ULL(idx); 8667 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8668 8669 dev_info(&pf->pdev->dev, "deleting geneve port %d\n", 8670 ntohs(port)); 8671 } else { 8672 netdev_warn(netdev, "geneve port %d was not found, not deleting\n", 8673 ntohs(port)); 8674 } 8675 } 8676 #endif 8677 8678 static int i40e_get_phys_port_id(struct net_device *netdev, 8679 struct netdev_phys_item_id *ppid) 8680 { 8681 struct i40e_netdev_priv *np = netdev_priv(netdev); 8682 struct i40e_pf *pf = np->vsi->back; 8683 struct i40e_hw *hw = &pf->hw; 8684 8685 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) 8686 return -EOPNOTSUPP; 8687 8688 ppid->id_len = min_t(int, 
sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}

/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID (only 0 is supported; see the check below)
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses, so if an
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags (unused here)
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
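 * Returns 0 on success, negative on failure.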
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	/* guard against a request without an IFLA_AF_SPEC attribute;
	 * nla_for_each_nested() would otherwise dereference NULL
	 */
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
			break;
		}
	}

	return 0;
}

/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Returns the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       nlflags, 0, 0, filter_mask, NULL);
}

/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
 * inner mac plus all inner ethertypes.
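 * Longer encapsulation headers are handled in i40e_features_check
 * below by dropping the HW checksum and GSO features for that skb.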
8857 */ 8858 #define I40E_MAX_TUNNEL_HDR_LEN 128 8859 /** 8860 * i40e_features_check - Validate encapsulated packet conforms to limits 8861 * @skb: skb buff 8862 * @dev: This physical port's netdev 8863 * @features: Offload features that the stack believes apply 8864 **/ 8865 static netdev_features_t i40e_features_check(struct sk_buff *skb, 8866 struct net_device *dev, 8867 netdev_features_t features) 8868 { 8869 if (skb->encapsulation && 8870 ((skb_inner_network_header(skb) - skb_transport_header(skb)) > 8871 I40E_MAX_TUNNEL_HDR_LEN)) 8872 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 8873 8874 return features; 8875 } 8876 8877 static const struct net_device_ops i40e_netdev_ops = { 8878 .ndo_open = i40e_open, 8879 .ndo_stop = i40e_close, 8880 .ndo_start_xmit = i40e_lan_xmit_frame, 8881 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 8882 .ndo_set_rx_mode = i40e_set_rx_mode, 8883 .ndo_validate_addr = eth_validate_addr, 8884 .ndo_set_mac_address = i40e_set_mac, 8885 .ndo_change_mtu = i40e_change_mtu, 8886 .ndo_do_ioctl = i40e_ioctl, 8887 .ndo_tx_timeout = i40e_tx_timeout, 8888 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 8889 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 8890 #ifdef CONFIG_NET_POLL_CONTROLLER 8891 .ndo_poll_controller = i40e_netpoll, 8892 #endif 8893 .ndo_setup_tc = i40e_setup_tc, 8894 #ifdef I40E_FCOE 8895 .ndo_fcoe_enable = i40e_fcoe_enable, 8896 .ndo_fcoe_disable = i40e_fcoe_disable, 8897 #endif 8898 .ndo_set_features = i40e_set_features, 8899 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 8900 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 8901 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 8902 .ndo_get_vf_config = i40e_ndo_get_vf_config, 8903 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 8904 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 8905 #if IS_ENABLED(CONFIG_VXLAN) 8906 .ndo_add_vxlan_port = i40e_add_vxlan_port, 8907 .ndo_del_vxlan_port = i40e_del_vxlan_port, 8908 #endif 8909 #if IS_ENABLED(CONFIG_GENEVE) 8910 .ndo_add_geneve_port = i40e_add_geneve_port, 8911 .ndo_del_geneve_port = i40e_del_geneve_port, 8912 #endif 8913 .ndo_get_phys_port_id = i40e_get_phys_port_id, 8914 .ndo_fdb_add = i40e_ndo_fdb_add, 8915 .ndo_features_check = i40e_features_check, 8916 .ndo_bridge_getlink = i40e_ndo_bridge_getlink, 8917 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 8918 }; 8919 8920 /** 8921 * i40e_config_netdev - Setup the netdev flags 8922 * @vsi: the VSI being configured 8923 * 8924 * Returns 0 on success, negative value on failure 8925 **/ 8926 static int i40e_config_netdev(struct i40e_vsi *vsi) 8927 { 8928 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 8929 struct i40e_pf *pf = vsi->back; 8930 struct i40e_hw *hw = &pf->hw; 8931 struct i40e_netdev_priv *np; 8932 struct net_device *netdev; 8933 u8 mac_addr[ETH_ALEN]; 8934 int etherdev_size; 8935 8936 etherdev_size = sizeof(struct i40e_netdev_priv); 8937 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 8938 if (!netdev) 8939 return -ENOMEM; 8940 8941 vsi->netdev = netdev; 8942 np = netdev_priv(netdev); 8943 np->vsi = vsi; 8944 8945 netdev->hw_enc_features |= NETIF_F_IP_CSUM | 8946 NETIF_F_RXCSUM | 8947 NETIF_F_GSO_UDP_TUNNEL | 8948 NETIF_F_GSO_GRE | 8949 NETIF_F_TSO; 8950 8951 netdev->features = NETIF_F_SG | 8952 NETIF_F_IP_CSUM | 8953 NETIF_F_SCTP_CRC | 8954 NETIF_F_HIGHDMA | 8955 NETIF_F_GSO_UDP_TUNNEL | 8956 NETIF_F_GSO_GRE | 8957 NETIF_F_HW_VLAN_CTAG_TX | 8958 NETIF_F_HW_VLAN_CTAG_RX | 8959 NETIF_F_HW_VLAN_CTAG_FILTER | 8960 NETIF_F_IPV6_CSUM | 8961 NETIF_F_TSO | 8962 
NETIF_F_TSO_ECN |
			   NETIF_F_TSO6 |
			   NETIF_F_RXCSUM |
			   NETIF_F_RXHASH |
			   0;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
		}
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						     NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * Does nothing for the PF's main/default VSI, which may not be removed.
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB, 0 for VEPA mode, and a
 * negative error code if the uplink VEB cannot be found.
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}

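/* A note on the add flow below: the MAIN VSI already exists in FW
 * (it is created during device initialization), so it is only queried
 * and, where needed, updated in place; FDIR/VMDQ2/SRIOV VSIs are
 * built from scratch and submitted via the add_vsi AQ command near
 * the end of i40e_add_vsi().
 */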
9075 /** 9076 * i40e_add_vsi - Add a VSI to the switch 9077 * @vsi: the VSI being configured 9078 * 9079 * This initializes a VSI context depending on the VSI type to be added and 9080 * passes it down to the add_vsi aq command. 9081 **/ 9082 static int i40e_add_vsi(struct i40e_vsi *vsi) 9083 { 9084 int ret = -ENODEV; 9085 u8 laa_macaddr[ETH_ALEN]; 9086 bool found_laa_mac_filter = false; 9087 struct i40e_pf *pf = vsi->back; 9088 struct i40e_hw *hw = &pf->hw; 9089 struct i40e_vsi_context ctxt; 9090 struct i40e_mac_filter *f, *ftmp; 9091 9092 u8 enabled_tc = 0x1; /* TC0 enabled */ 9093 int f_count = 0; 9094 9095 memset(&ctxt, 0, sizeof(ctxt)); 9096 switch (vsi->type) { 9097 case I40E_VSI_MAIN: 9098 /* The PF's main VSI is already setup as part of the 9099 * device initialization, so we'll not bother with 9100 * the add_vsi call, but we will retrieve the current 9101 * VSI context. 9102 */ 9103 ctxt.seid = pf->main_vsi_seid; 9104 ctxt.pf_num = pf->hw.pf_id; 9105 ctxt.vf_num = 0; 9106 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 9107 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 9108 if (ret) { 9109 dev_info(&pf->pdev->dev, 9110 "couldn't get PF vsi config, err %s aq_err %s\n", 9111 i40e_stat_str(&pf->hw, ret), 9112 i40e_aq_str(&pf->hw, 9113 pf->hw.aq.asq_last_status)); 9114 return -ENOENT; 9115 } 9116 vsi->info = ctxt.info; 9117 vsi->info.valid_sections = 0; 9118 9119 vsi->seid = ctxt.seid; 9120 vsi->id = ctxt.vsi_number; 9121 9122 enabled_tc = i40e_pf_get_tc_map(pf); 9123 9124 /* MFP mode setup queue map and update VSI */ 9125 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && 9126 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ 9127 memset(&ctxt, 0, sizeof(ctxt)); 9128 ctxt.seid = pf->main_vsi_seid; 9129 ctxt.pf_num = pf->hw.pf_id; 9130 ctxt.vf_num = 0; 9131 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 9132 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 9133 if (ret) { 9134 dev_info(&pf->pdev->dev, 9135 "update vsi failed, err %s aq_err %s\n", 9136 i40e_stat_str(&pf->hw, ret), 9137 i40e_aq_str(&pf->hw, 9138 pf->hw.aq.asq_last_status)); 9139 ret = -ENOENT; 9140 goto err; 9141 } 9142 /* update the local VSI info queue map */ 9143 i40e_vsi_update_queue_map(vsi, &ctxt); 9144 vsi->info.valid_sections = 0; 9145 } else { 9146 /* Default/Main VSI is only enabled for TC0 9147 * reconfigure it to enable all TCs that are 9148 * available on the port in SFP mode. 9149 * For MFP case the iSCSI PF would use this 9150 * flow to enable LAN+iSCSI TC. 
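			 * (the TC map comes from i40e_pf_get_tc_map() above
			 * and is applied by i40e_vsi_config_tc() just below)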
9151 */ 9152 ret = i40e_vsi_config_tc(vsi, enabled_tc); 9153 if (ret) { 9154 dev_info(&pf->pdev->dev, 9155 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", 9156 enabled_tc, 9157 i40e_stat_str(&pf->hw, ret), 9158 i40e_aq_str(&pf->hw, 9159 pf->hw.aq.asq_last_status)); 9160 ret = -ENOENT; 9161 } 9162 } 9163 break; 9164 9165 case I40E_VSI_FDIR: 9166 ctxt.pf_num = hw->pf_id; 9167 ctxt.vf_num = 0; 9168 ctxt.uplink_seid = vsi->uplink_seid; 9169 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9170 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 9171 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && 9172 (i40e_is_vsi_uplink_mode_veb(vsi))) { 9173 ctxt.info.valid_sections |= 9174 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9175 ctxt.info.switch_id = 9176 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9177 } 9178 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9179 break; 9180 9181 case I40E_VSI_VMDQ2: 9182 ctxt.pf_num = hw->pf_id; 9183 ctxt.vf_num = 0; 9184 ctxt.uplink_seid = vsi->uplink_seid; 9185 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9186 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 9187 9188 /* This VSI is connected to VEB so the switch_id 9189 * should be set to zero by default. 9190 */ 9191 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9192 ctxt.info.valid_sections |= 9193 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9194 ctxt.info.switch_id = 9195 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9196 } 9197 9198 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9199 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9200 break; 9201 9202 case I40E_VSI_SRIOV: 9203 ctxt.pf_num = hw->pf_id; 9204 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 9205 ctxt.uplink_seid = vsi->uplink_seid; 9206 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9207 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 9208 9209 /* This VSI is connected to VEB so the switch_id 9210 * should be set to zero by default. 
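		 * ALLOW_LB additionally lets the VEB loop traffic back
		 * locally, which VF-to-VF traffic on the same VEB relies on.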
9211 */ 9212 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9213 ctxt.info.valid_sections |= 9214 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9215 ctxt.info.switch_id = 9216 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9217 } 9218 9219 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 9220 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 9221 if (pf->vf[vsi->vf_id].spoofchk) { 9222 ctxt.info.valid_sections |= 9223 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 9224 ctxt.info.sec_flags |= 9225 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 9226 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 9227 } 9228 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9229 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9230 break; 9231 9232 #ifdef I40E_FCOE 9233 case I40E_VSI_FCOE: 9234 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 9235 if (ret) { 9236 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 9237 return ret; 9238 } 9239 break; 9240 9241 #endif /* I40E_FCOE */ 9242 default: 9243 return -ENODEV; 9244 } 9245 9246 if (vsi->type != I40E_VSI_MAIN) { 9247 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 9248 if (ret) { 9249 dev_info(&vsi->back->pdev->dev, 9250 "add vsi failed, err %s aq_err %s\n", 9251 i40e_stat_str(&pf->hw, ret), 9252 i40e_aq_str(&pf->hw, 9253 pf->hw.aq.asq_last_status)); 9254 ret = -ENOENT; 9255 goto err; 9256 } 9257 vsi->info = ctxt.info; 9258 vsi->info.valid_sections = 0; 9259 vsi->seid = ctxt.seid; 9260 vsi->id = ctxt.vsi_number; 9261 } 9262 9263 spin_lock_bh(&vsi->mac_filter_list_lock); 9264 /* If macvlan filters already exist, force them to get loaded */ 9265 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 9266 f->changed = true; 9267 f_count++; 9268 9269 /* Expected to have only one MAC filter entry for LAA in list */ 9270 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 9271 ether_addr_copy(laa_macaddr, f->macaddr); 9272 found_laa_mac_filter = true; 9273 } 9274 } 9275 spin_unlock_bh(&vsi->mac_filter_list_lock); 9276 9277 if (found_laa_mac_filter) { 9278 struct i40e_aqc_remove_macvlan_element_data element; 9279 9280 memset(&element, 0, sizeof(element)); 9281 ether_addr_copy(element.mac_addr, laa_macaddr); 9282 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 9283 ret = i40e_aq_remove_macvlan(hw, vsi->seid, 9284 &element, 1, NULL); 9285 if (ret) { 9286 /* some older FW has a different default */ 9287 element.flags |= 9288 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 9289 i40e_aq_remove_macvlan(hw, vsi->seid, 9290 &element, 1, NULL); 9291 } 9292 9293 i40e_aq_mac_address_write(hw, 9294 I40E_AQC_WRITE_TYPE_LAA_WOL, 9295 laa_macaddr, NULL); 9296 } 9297 9298 if (f_count) { 9299 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 9300 pf->flags |= I40E_FLAG_FILTER_SYNC; 9301 } 9302 9303 /* Update VSI BW information */ 9304 ret = i40e_vsi_get_bw_info(vsi); 9305 if (ret) { 9306 dev_info(&pf->pdev->dev, 9307 "couldn't get vsi bw info, err %s aq_err %s\n", 9308 i40e_stat_str(&pf->hw, ret), 9309 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9310 /* VSI is already added so not tearing that up */ 9311 ret = 0; 9312 } 9313 9314 err: 9315 return ret; 9316 } 9317 9318 /** 9319 * i40e_vsi_release - Delete a VSI and free its resources 9320 * @vsi: the VSI being removed 9321 * 9322 * Returns 0 on success or < 0 on error 9323 **/ 9324 int i40e_vsi_release(struct i40e_vsi *vsi) 9325 { 9326 struct i40e_mac_filter *f, *ftmp; 9327 struct i40e_veb *veb = NULL; 9328 struct i40e_pf *pf; 9329 u16 uplink_seid; 9330 int i, n; 9331 9332 pf = vsi->back; 9333 9334 /* release of a VEB-owner or 
last VSI is not allowed */ 9335 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 9336 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 9337 vsi->seid, vsi->uplink_seid); 9338 return -ENODEV; 9339 } 9340 if (vsi == pf->vsi[pf->lan_vsi] && 9341 !test_bit(__I40E_DOWN, &pf->state)) { 9342 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9343 return -ENODEV; 9344 } 9345 9346 uplink_seid = vsi->uplink_seid; 9347 if (vsi->type != I40E_VSI_SRIOV) { 9348 if (vsi->netdev_registered) { 9349 vsi->netdev_registered = false; 9350 if (vsi->netdev) { 9351 /* results in a call to i40e_close() */ 9352 unregister_netdev(vsi->netdev); 9353 } 9354 } else { 9355 i40e_vsi_close(vsi); 9356 } 9357 i40e_vsi_disable_irq(vsi); 9358 } 9359 9360 spin_lock_bh(&vsi->mac_filter_list_lock); 9361 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 9362 i40e_del_filter(vsi, f->macaddr, f->vlan, 9363 f->is_vf, f->is_netdev); 9364 spin_unlock_bh(&vsi->mac_filter_list_lock); 9365 9366 i40e_sync_vsi_filters(vsi); 9367 9368 i40e_vsi_delete(vsi); 9369 i40e_vsi_free_q_vectors(vsi); 9370 if (vsi->netdev) { 9371 free_netdev(vsi->netdev); 9372 vsi->netdev = NULL; 9373 } 9374 i40e_vsi_clear_rings(vsi); 9375 i40e_vsi_clear(vsi); 9376 9377 /* If this was the last thing on the VEB, except for the 9378 * controlling VSI, remove the VEB, which puts the controlling 9379 * VSI onto the next level down in the switch. 9380 * 9381 * Well, okay, there's one more exception here: don't remove 9382 * the orphan VEBs yet. We'll wait for an explicit remove request 9383 * from up the network stack. 9384 */ 9385 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 9386 if (pf->vsi[i] && 9387 pf->vsi[i]->uplink_seid == uplink_seid && 9388 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 9389 n++; /* count the VSIs */ 9390 } 9391 } 9392 for (i = 0; i < I40E_MAX_VEB; i++) { 9393 if (!pf->veb[i]) 9394 continue; 9395 if (pf->veb[i]->uplink_seid == uplink_seid) 9396 n++; /* count the VEBs */ 9397 if (pf->veb[i]->seid == uplink_seid) 9398 veb = pf->veb[i]; 9399 } 9400 if (n == 0 && veb && veb->uplink_seid != 0) 9401 i40e_veb_release(veb); 9402 9403 return 0; 9404 } 9405 9406 /** 9407 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 9408 * @vsi: ptr to the VSI 9409 * 9410 * This should only be called after i40e_vsi_mem_alloc() which allocates the 9411 * corresponding SW VSI structure and initializes num_queue_pairs for the 9412 * newly allocated VSI. 9413 * 9414 * Returns 0 on success or negative on failure 9415 **/ 9416 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 9417 { 9418 int ret = -ENOENT; 9419 struct i40e_pf *pf = vsi->back; 9420 9421 if (vsi->q_vectors[0]) { 9422 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 9423 vsi->seid); 9424 return -EEXIST; 9425 } 9426 9427 if (vsi->base_vector) { 9428 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 9429 vsi->seid, vsi->base_vector); 9430 return -EEXIST; 9431 } 9432 9433 ret = i40e_vsi_alloc_q_vectors(vsi); 9434 if (ret) { 9435 dev_info(&pf->pdev->dev, 9436 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 9437 vsi->num_q_vectors, vsi->seid, ret); 9438 vsi->num_q_vectors = 0; 9439 goto vector_setup_out; 9440 } 9441 9442 /* In Legacy mode, we do not have to get any other vector since we 9443 * piggyback on the misc/ICR0 for queue interrupts. 
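	 * base_vector is therefore only reserved from the irq_pile when
	 * MSI-X is enabled.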

/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
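
/* Note: i40e_get_lump()/i40e_put_lump() implement the simple tracking
 * allocator used above for IRQ vectors (and for queue pairs via
 * pf->qp_pile elsewhere in this file). A rough sketch of the contract:
 *
 *	base = i40e_get_lump(pf, pf->irq_pile, n, vsi->idx);
 *	if (base < 0)
 *		...no run of n free entries, fail...
 *	...
 *	i40e_put_lump(pf->irq_pile, base, vsi->idx);
 */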

/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}

/**
 * i40e_macaddr_init - explicitly write the mac address filters.
 *
 * @vsi: pointer to the vsi.
 * @macaddr: the MAC address
 *
 * This is needed when the macaddr has been obtained by other
 * means than the default, e.g., from Open Firmware or IDPROM.
 * Returns 0 on success, negative on failure
 **/
static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
{
	int ret;
	struct i40e_aqc_add_macvlan_element_data element;

	ret = i40e_aq_mac_address_write(&vsi->back->hw,
					I40E_AQC_WRITE_TYPE_LAA_WOL,
					macaddr, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Addr change for VSI failed: %d\n", ret);
		return -EADDRNOTAVAIL;
	}

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add filter failed err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
	return ret;
}
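
/* Illustrative call site (see i40e_vsi_setup() below): when a
 * platform-supplied MAC was flagged, the MAIN VSI applies it as both
 * the LAA and a perfect-match filter:
 *
 *	if (pf->flags & I40E_FLAG_PF_MAC)
 *		ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
 */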

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds
 * a VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
			    vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
		/* Apply relevant filters if a platform-specific mac
		 * address was selected.
		 */
		if (pf->flags & I40E_FLAG_PF_MAC) {
			ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
			if (ret) {
				dev_warn(&pf->pdev->dev,
					 "could not set up macaddr; err %d\n",
					 ret);
			}
		}
		/* fall through */
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
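
/* Usage sketch: i40e_fdir_sb_setup() elsewhere in this file creates its
 * special-case VSI hanging off the same uplink as the main LAN VSI
 * roughly like so:
 *
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vsi)
 *		...switch config or allocation failed...
 */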

/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
			le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
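
/* Worked example for the tc_bw_max unpacking above: the two 16-bit
 * words form eight 4-bit "max quanta" fields with TC0 in the lowest
 * nibble, so tc_bw_max = 0x321 yields, via (tc_bw_max >> (i*4)) & 0x7,
 * quanta of 1, 2 and 3 for TC0..TC2 and 0 for the remaining TCs.
 */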

/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;	/* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}

/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool is_default = veb->pf->cur_promisc;
	bool is_cloud = false;
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default,
			      is_cloud, &veb->seid, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB. It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "either both or neither seid may be 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
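
/* Usage sketch: i40e_vsi_setup() above inserts a VEB between the MAC
 * uplink and an existing VSI with
 *
 *	veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *
 * while passing 0 for both uplink_seid and vsi_seid would instead
 * create a floating relay.
 */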

/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}

/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
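
/* Note on the loop above: i40e_aq_get_switch_config() pages through the
 * switch tree one I40E_AQ_LARGE_BUF chunk at a time; firmware returns a
 * continuation seid in next_seid and hands back 0 once the last chunk
 * has been reported, which terminates the do/while.
 */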

/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_update_link_info(&pf->hw);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	return ret;
}

/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;

	pf->num_lan_qps = 0;
#ifdef I40E_FCOE
	pf->num_fcoe_qps = 0;
#endif

	/* Find the max queues to be put into basic use. We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED |
#endif
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED |
#endif
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}
		pf->num_lan_qps = max_t(int, pf->rss_size_max,
					num_online_cpus());
		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
					pf->hw.func_caps.num_tx_qp);

		queues_left -= pf->num_lan_qps;
	}

#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (I40E_DEFAULT_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
		} else if (I40E_MINIMUM_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
		} else {
			pf->num_fcoe_qps = 0;
			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
		}

		queues_left -= pf->num_fcoe_qps;
	}

#endif
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
#ifdef I40E_FCOE
	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
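
/* Worked example for i40e_determine_queue_usage() above (illustrative
 * numbers only): with num_tx_qp = 64, 8 online CPUs, rss_size_max = 8,
 * FD_SB enabled, 2 requested VFs with num_vf_qps = 4, and 8 VMDq VSIs
 * with num_vmdq_qps = 1, the budget resolves to num_lan_qps = 8, one
 * queue reserved for Flow Director, 8 queues for VFs and 8 for VMDq,
 * leaving pf->queues_left = 64 - 8 - 1 - 8 - 8 = 39.
 */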

/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs,
		      pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");

	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
#if IS_ENABLED(CONFIG_VXLAN)
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
#endif
#if IS_ENABLED(CONFIG_GENEVE)
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
#endif
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FCOE");
#endif
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 *
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address in Open Firmware on systems that support it,
 * and use IDPROM on SPARC if no OF address is found. On return, the
 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
 * has been selected.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	u8 *mac_addr = pf->hw.mac.addr;

	pf->flags &= ~I40E_FLAG_PF_MAC;
	addr = of_get_mac_address(dp);
	if (addr) {
		ether_addr_copy(mac_addr, addr);
		pf->flags |= I40E_FLAG_PF_MAC;
#ifdef CONFIG_SPARC
	} else {
		ether_addr_copy(mac_addr, idprom->id_ethaddr);
		pf->flags |= I40E_FLAG_PF_MAC;
#endif /* CONFIG_SPARC */
	}
}
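
/* For reference: of_get_mac_address() checks the "mac-address",
 * "local-mac-address" and "address" properties of the device's OF
 * node, so a device tree fragment like the following (illustrative)
 * would be picked up here:
 *
 *	ethernet@0 {
 *		local-mac-address = [00 11 22 33 44 55];
 *	};
 */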

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 len;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, &pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	pf->instance = pfs_found;

	if (debug != -1)
		pf->msg_enable = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	i40e_get_mac_addr(hw, hw->mac.addr);
	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);
	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->flags |= I40E_FLAG_PORT_ID_VALID;
#ifdef I40E_FCOE
	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
	if (err)
		dev_info(&pdev->dev,
			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
	if (!is_valid_ether_addr(hw->mac.san_addr)) {
		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
			 hw->mac.san_addr);
		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
	}
	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
#endif /* I40E_FCOE */

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
	pf->vsi = kzalloc(len, GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, &pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		u32 val;

		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	pfs_found++;

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

#ifdef I40E_FCOE
	/* create FCoE interface */
	i40e_fcoe_vsi_setup(pf);

#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}
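
	/* Back-of-the-envelope behind the warning above (illustrative):
	 * a Gen3 x8 link carries 8.0 GT/s * 8 lanes * 128b/130b encoding
	 * ~= 63 Gb/s, comfortably above one 40GbE port, whereas Gen3 x4
	 * or Gen2 x8 (~32 Gb/s) can bottleneck 40G or dual-port use.
	 */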

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
	(void)i40e_shutdown_adminq(hw);
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	wr32(hw, I40E_PFQF_HENA(0), 0);
	wr32(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* shutdown and destroy the HMC */
	if (pf->hw.hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find out whether the driver can work with the device now
 * that the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* repeat the quiesce and the wake-up register programming now that
	 * the service timer and service task can no longer run
	 */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: the PM state we are transitioning to
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
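/* i40e_suspend() above and i40e_resume() below are the legacy PCI power
 * management hooks, and they mirror each other: suspend quiesces the
 * driver, arms the wake-up (WoL) registers and drops the device to
 * D3hot, while resume brings it back to D0 and rebuilds driver state
 * through a reset.  The resume-side ordering matters; a sketch of the
 * steps the function below performs (illustrative only, not additional
 * driver code):
 *
 *	pci_set_power_state(pdev, PCI_D0);   // power the function back up first
 *	pci_restore_state(pdev);             // then restore saved config space
 *	pci_save_state(pdev);                // re-save: restore clears state_saved
 *	err = pci_enable_device_mem(pdev);   // re-enable memory-space access
 *	pci_set_master(pdev);                // re-allow bus mastering (DMA)
 */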
/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif /* CONFIG_PM */

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);
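/* With the driver registered above, the PCI core matches i40e_pci_tbl
 * entries against functions on the bus and calls i40e_probe() once per
 * matching device.  A typical way to exercise the module lifecycle from
 * userspace (illustrative shell commands, not part of the driver):
 *
 *	# modprobe i40e				<- runs i40e_init_module()
 *	# lspci -d 8086: -nn | grep -i ether	<- list Intel ethernet functions
 *	# rmmod i40e				<- runs i40e_exit_module()
 */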