/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);


/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

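/* Illustrative example of the scheme above (values invented for this
 * sketch, not taken from hardware): with num_entries = 8, a list of
 *   [ A A . . B B B . ]   (A/B = owned, '.' = free)
 * and search_hint = 2, i40e_get_lump(pf, pile, 2, C) starts scanning at
 * index 2, finds entries 2..3 free, marks them C | I40E_PILE_VALID_BIT,
 * returns base index 2 and leaves search_hint = 4.  A request for 3
 * entries would instead skip past B and fail with -ENOMEM, since only
 * entry 7 remains free.
 */
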
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

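/* Summary of the recovery ladder above: successive timeouts escalate
 * from a PF reset (level 1) to a CORE reset (level 2) to a GLOBAL
 * reset (level 3); beyond level 3 the driver only logs that recovery
 * was unsuccessful.  A timeout landing within one watchdog interval of
 * the last recovery is ignored, while one landing more than 20 seconds
 * after it starts over at level 1.
 */
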
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: the statistics structure to fill in
 *
 * Fills in the given statistics structure for the device.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = READ_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

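/* Worked example of the roll-over handling above (illustrative values):
 * if the first read latched *offset = 0xFFFFFFFFFFF0 and the 48-bit
 * counter has since wrapped to new_data = 0x10, then new_data < *offset
 * and the stat becomes (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20,
 * i.e. 32 events since the offset was taken; the final mask keeps the
 * result within 48 bits.
 */
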
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ANY.  We could check the table each
	 * time, incurring search cost repeatedly.  However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filter which we
	 * will set to true when we add a VLAN filter in i40e_add_filter.
	 * Then we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search filters
	 * here and can perform the check at the same time.  This results in
	 * avoiding embedding a loop for VLAN mode inside another loop over
	 * all the filters, and should maintain correctness as noted above.
	 */
	return vsi->has_vlan_filter;
}

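/* Convention used by the filter code (as implied by the checks above and
 * by i40e_correct_mac_vlan_filters() below): a filter with vlan == 0
 * matches only untagged frames, vlan == I40E_VLAN_ANY (-1) matches both
 * tagged and untagged frames, and vlan >= 1 matches that VLAN ID only.
 * Adding any filter with vlan >= 0 sets has_vlan_filter in
 * i40e_add_filter(), and i40e_sync_filters_subtask() re-evaluates it
 * once filters have been deleted.
 */
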
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected.  If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic.  If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic.  In this way,
 * we ensure that we correctly receive the desired traffic.  This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs.  If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM.  Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY.  Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists.  If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

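/* Illustrative example: with no PVID assigned and existing filters on
 * VLANs 0, 10 and 20, i40e_add_mac_filter(vsi, mac) creates mac/0,
 * mac/10 and mac/20.  With PVID 5 assigned it creates only mac/5, and
 * on a VSI with no VLAN filters at all it creates a single mac filter
 * with I40E_VLAN_ANY.
 */
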
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().  If we copy after changing the address in the filter
	 * list, we might open ourselves to a narrow race window where
	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
	 * from passing.
	 */
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}

/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}

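/* Example of the qmap encoding built above (illustrative numbers): with
 * qopt.count[0] = 6, ilog2(6) = 2 and 6 is not a power of two, so pow
 * becomes 3 and TC0 is sized for 8 queues; qmap then carries offset 0
 * in the I40E_AQ_VSI_TC_QUE_OFFSET field and 3 in the
 * I40E_AQ_VSI_TC_QUE_NUMBER field, i.e. "2^3 queues starting at 0".
 */
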
1718 */ 1719 vsi->tc_config.tc_info[i].qoffset = 0; 1720 vsi->tc_config.tc_info[i].qcount = 1; 1721 vsi->tc_config.tc_info[i].netdev_tc = 0; 1722 } 1723 } 1724 1725 /* Set actual Tx/Rx queue pairs */ 1726 vsi->num_queue_pairs = offset + qcount; 1727 1728 /* Setup queue TC[0].qmap for given VSI context */ 1729 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 1730 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); 1731 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); 1732 ctxt->info.valid_sections |= cpu_to_le16(sections); 1733 1734 /* Reconfigure RSS for main VSI with max queue count */ 1735 vsi->rss_size = max_qcount; 1736 ret = i40e_vsi_config_rss(vsi); 1737 if (ret) { 1738 dev_info(&vsi->back->pdev->dev, 1739 "Failed to reconfig rss for num_queues (%u)\n", 1740 max_qcount); 1741 return ret; 1742 } 1743 vsi->reconfig_rss = true; 1744 dev_dbg(&vsi->back->pdev->dev, 1745 "Reconfigured rss with num_queues (%u)\n", max_qcount); 1746 1747 /* Find queue count available for channel VSIs and starting offset 1748 * for channel VSIs 1749 */ 1750 override_q = vsi->mqprio_qopt.qopt.count[0]; 1751 if (override_q && override_q < vsi->num_queue_pairs) { 1752 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; 1753 vsi->next_base_queue = override_q; 1754 } 1755 return 0; 1756 } 1757 1758 /** 1759 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc 1760 * @vsi: the VSI being setup 1761 * @ctxt: VSI context structure 1762 * @enabled_tc: Enabled TCs bitmap 1763 * @is_add: True if called before Add VSI 1764 * 1765 * Setup VSI queue mapping for enabled traffic classes. 1766 **/ 1767 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, 1768 struct i40e_vsi_context *ctxt, 1769 u8 enabled_tc, 1770 bool is_add) 1771 { 1772 struct i40e_pf *pf = vsi->back; 1773 u16 sections = 0; 1774 u8 netdev_tc = 0; 1775 u16 numtc = 1; 1776 u16 qcount; 1777 u8 offset; 1778 u16 qmap; 1779 int i; 1780 u16 num_tc_qps = 0; 1781 1782 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; 1783 offset = 0; 1784 1785 /* Number of queues per enabled TC */ 1786 num_tc_qps = vsi->alloc_queue_pairs; 1787 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 1788 /* Find numtc from enabled TC bitmap */ 1789 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 1790 if (enabled_tc & BIT(i)) /* TC is enabled */ 1791 numtc++; 1792 } 1793 if (!numtc) { 1794 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); 1795 numtc = 1; 1796 } 1797 num_tc_qps = num_tc_qps / numtc; 1798 num_tc_qps = min_t(int, num_tc_qps, 1799 i40e_pf_get_max_q_per_tc(pf)); 1800 } 1801 1802 vsi->tc_config.numtc = numtc; 1803 vsi->tc_config.enabled_tc = enabled_tc ? 
enabled_tc : 1;
1804
1805 /* Do not allow using more TC queue pairs than there are MSI-X vectors */
1806 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1807 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1808
1809 /* Setup queue offset/count for all TCs for given VSI */
1810 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1811 /* See if the given TC is enabled for the given VSI */
1812 if (vsi->tc_config.enabled_tc & BIT(i)) {
1813 /* TC is enabled */
1814 int pow, num_qps;
1815
1816 switch (vsi->type) {
1817 case I40E_VSI_MAIN:
1818 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1819 I40E_FLAG_FD_ATR_ENABLED)) ||
1820 vsi->tc_config.enabled_tc != 1) {
1821 qcount = min_t(int, pf->alloc_rss_size,
1822 num_tc_qps);
1823 break;
1824 } /* fall through */
1825 case I40E_VSI_FDIR:
1826 case I40E_VSI_SRIOV:
1827 case I40E_VSI_VMDQ2:
1828 default:
1829 qcount = num_tc_qps;
1830 WARN_ON(i != 0);
1831 break;
1832 }
1833 vsi->tc_config.tc_info[i].qoffset = offset;
1834 vsi->tc_config.tc_info[i].qcount = qcount;
1835
1836 /* find the next higher power-of-2 of num queue pairs */
1837 num_qps = qcount;
1838 pow = 0;
1839 while (num_qps && (BIT_ULL(pow) < qcount)) {
1840 pow++;
1841 num_qps >>= 1;
1842 }
1843
1844 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1845 qmap =
1846 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1847 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1848
1849 offset += qcount;
1850 } else {
1851 /* TC is not enabled so set the offset to
1852 * default queue and allocate one queue
1853 * for the given TC.
1854 */
1855 vsi->tc_config.tc_info[i].qoffset = 0;
1856 vsi->tc_config.tc_info[i].qcount = 1;
1857 vsi->tc_config.tc_info[i].netdev_tc = 0;
1858
1859 qmap = 0;
1860 }
1861 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1862 }
1863
1864 /* Set actual Tx/Rx queue pairs */
1865 vsi->num_queue_pairs = offset;
1866 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1867 if (vsi->req_queue_pairs > 0)
1868 vsi->num_queue_pairs = vsi->req_queue_pairs;
1869 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1870 vsi->num_queue_pairs = pf->num_lan_msix;
1871 }
1872
1873 /* Scheduler section valid can only be set for ADD VSI */
1874 if (is_add) {
1875 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1876
1877 ctxt->info.up_enable_bits = enabled_tc;
1878 }
1879 if (vsi->type == I40E_VSI_SRIOV) {
1880 ctxt->info.mapping_flags |=
1881 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1882 for (i = 0; i < vsi->num_queue_pairs; i++)
1883 ctxt->info.queue_mapping[i] =
1884 cpu_to_le16(vsi->base_queue + i);
1885 } else {
1886 ctxt->info.mapping_flags |=
1887 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1888 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1889 }
1890 ctxt->info.valid_sections |= cpu_to_le16(sections);
1891 }
1892
1893 /**
1894 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1895 * @netdev: the netdevice
1896 * @addr: address to add
1897 *
1898 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1899 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1900 */
1901 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1902 {
1903 struct i40e_netdev_priv *np = netdev_priv(netdev);
1904 struct i40e_vsi *vsi = np->vsi;
1905
1906 if (i40e_add_mac_filter(vsi, addr))
1907 return 0;
1908 else
1909 return -ENOMEM;
1910 }
1911
1912 /**
1913 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1914 * @netdev: the netdevice
1915 * @addr: address to remove
1916 *
1917 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1918 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1919 */
1920 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1921 {
1922 struct i40e_netdev_priv *np = netdev_priv(netdev);
1923 struct i40e_vsi *vsi = np->vsi;
1924
1925 /* Under some circumstances, we might receive a request to delete
1926 * our own device address from our uc list. Because we store the
1927 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1928 * such requests and not delete our device address from this list.
1929 */
1930 if (ether_addr_equal(addr, netdev->dev_addr))
1931 return 0;
1932
1933 i40e_del_mac_filter(vsi, addr);
1934
1935 return 0;
1936 }
1937
1938 /**
1939 * i40e_set_rx_mode - NDO callback to set the netdev filters
1940 * @netdev: network interface device structure
1941 **/
1942 static void i40e_set_rx_mode(struct net_device *netdev)
1943 {
1944 struct i40e_netdev_priv *np = netdev_priv(netdev);
1945 struct i40e_vsi *vsi = np->vsi;
1946
1947 spin_lock_bh(&vsi->mac_filter_hash_lock);
1948
1949 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1950 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1951
1952 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1953
1954 /* check for other flag changes */
1955 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1956 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1957 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1958 }
1959 }
1960
1961 /**
1962 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1963 * @vsi: Pointer to VSI struct
1964 * @from: Pointer to list which contains MAC filter entries - changes to
1965 * those entries need to be undone.
1966 *
1967 * MAC filter entries from this list were slated for deletion.
1968 **/
1969 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1970 struct hlist_head *from)
1971 {
1972 struct i40e_mac_filter *f;
1973 struct hlist_node *h;
1974
1975 hlist_for_each_entry_safe(f, h, from, hlist) {
1976 u64 key = i40e_addr_to_hkey(f->macaddr);
1977
1978 /* Move the element back into MAC filter list */
1979 hlist_del(&f->hlist);
1980 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1981 }
1982 }
1983
1984 /**
1985 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1986 * @vsi: Pointer to vsi struct
1987 * @from: Pointer to list which contains MAC filter entries - changes to
1988 * those entries need to be undone.
1989 *
1990 * MAC filter entries from this list were slated for addition.
1991 **/
1992 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1993 struct hlist_head *from)
1994 {
1995 struct i40e_new_mac_filter *new;
1996 struct hlist_node *h;
1997
1998 hlist_for_each_entry_safe(new, h, from, hlist) {
1999 /* We can simply free the wrapper structure */
2000 hlist_del(&new->hlist);
2001 kfree(new);
2002 }
2003 }
2004
2005 /**
2006 * i40e_next_filter - Get the next non-broadcast filter from a list
2007 * @next: pointer to filter in list
2008 *
2009 * Returns the next non-broadcast filter in the list. Required so that we
2010 * ignore broadcast filters within the list, since these are not handled via
2011 * the normal firmware update path.
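 *
 * Illustrative use, as in i40e_update_filter_state() below: start from the
 * head of a batch and advance with
 *
 *	add_head = i40e_next_filter(add_head);
 *	if (!add_head)
 *		break;
 *
 * so that firmware results line up only with non-broadcast entries.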
2012 */
2013 static
2014 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2015 {
2016 hlist_for_each_entry_continue(next, hlist) {
2017 if (!is_broadcast_ether_addr(next->f->macaddr))
2018 return next;
2019 }
2020
2021 return NULL;
2022 }
2023
2024 /**
2025 * i40e_update_filter_state - Update filter state based on return data
2026 * from firmware
2027 * @count: Number of filters added
2028 * @add_list: return data from fw
2029 * @add_head: pointer to first filter in current batch
2030 *
2031 * MAC filter entries from the list were slated to be added to the device. Returns
2032 * number of successful filters. Note that 0 does NOT mean success!
2033 **/
2034 static int
2035 i40e_update_filter_state(int count,
2036 struct i40e_aqc_add_macvlan_element_data *add_list,
2037 struct i40e_new_mac_filter *add_head)
2038 {
2039 int retval = 0;
2040 int i;
2041
2042 for (i = 0; i < count; i++) {
2043 /* Always check status of each filter. We don't need to check
2044 * the firmware return status because we pre-set the filter
2045 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2046 * request to the adminq. Thus, if it no longer matches then
2047 * we know the filter is active.
2048 */
2049 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2050 add_head->state = I40E_FILTER_FAILED;
2051 } else {
2052 add_head->state = I40E_FILTER_ACTIVE;
2053 retval++;
2054 }
2055
2056 add_head = i40e_next_filter(add_head);
2057 if (!add_head)
2058 break;
2059 }
2060
2061 return retval;
2062 }
2063
2064 /**
2065 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2066 * @vsi: ptr to the VSI
2067 * @vsi_name: name to display in messages
2068 * @list: the list of filters to send to firmware
2069 * @num_del: the number of filters to delete
2070 * @retval: Set to -EIO on failure to delete
2071 *
2072 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2073 * *retval instead of a return value so that success does not force ret_val to
2074 * be set to 0. This ensures that a sequence of calls to this function
2075 * preserves the previous value of *retval on successful delete.
2076 */
2077 static
2078 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2079 struct i40e_aqc_remove_macvlan_element_data *list,
2080 int num_del, int *retval)
2081 {
2082 struct i40e_hw *hw = &vsi->back->hw;
2083 i40e_status aq_ret;
2084 int aq_err;
2085
2086 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2087 aq_err = hw->aq.asq_last_status;
2088
2089 /* Explicitly ignore and do not report when firmware returns ENOENT */
2090 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2091 *retval = -EIO;
2092 dev_info(&vsi->back->pdev->dev,
2093 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2094 vsi_name, i40e_stat_str(hw, aq_ret),
2095 i40e_aq_str(hw, aq_err));
2096 }
2097 }
2098
2099 /**
2100 * i40e_aqc_add_filters - Request firmware to add a set of filters
2101 * @vsi: ptr to the VSI
2102 * @vsi_name: name to display in messages
2103 * @list: the list of filters to send to firmware
2104 * @add_head: Position in the add hlist
2105 * @num_add: the number of filters to add
2106 *
2107 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2108 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2109 * space for more filters.
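 *
 * Callers size each batch to what fits in one AdminQ buffer and flush as
 * the buffer fills; a sketch of the pattern used by i40e_sync_vsi_filters():
 *
 *	filter_list_len = hw->aq.asq_buf_size /
 *			  sizeof(struct i40e_aqc_add_macvlan_element_data);
 *	if (num_add == filter_list_len) {
 *		i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, num_add);
 *		memset(add_list, 0, list_size);
 *		num_add = 0;
 *	}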
2110 */
2111 static
2112 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2113 struct i40e_aqc_add_macvlan_element_data *list,
2114 struct i40e_new_mac_filter *add_head,
2115 int num_add)
2116 {
2117 struct i40e_hw *hw = &vsi->back->hw;
2118 int aq_err, fcnt;
2119
2120 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2121 aq_err = hw->aq.asq_last_status;
2122 fcnt = i40e_update_filter_state(num_add, list, add_head);
2123
2124 if (fcnt != num_add) {
2125 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2126 dev_warn(&vsi->back->pdev->dev,
2127 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2128 i40e_aq_str(hw, aq_err),
2129 vsi_name);
2130 }
2131 }
2132
2133 /**
2134 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2135 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
2136 * @f: filter data
2137 *
2138 * This function sets or clears the promiscuous broadcast flags for VLAN
2139 * filters in order to properly receive broadcast frames. Assumes that only
2140 * broadcast filters are passed.
2141 *
2142 * Returns status indicating success or failure.
2143 **/
2144 static i40e_status
2145 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2146 struct i40e_mac_filter *f)
2147 {
2148 bool enable = f->state == I40E_FILTER_NEW;
2149 struct i40e_hw *hw = &vsi->back->hw;
2150 i40e_status aq_ret;
2151
2152 if (f->vlan == I40E_VLAN_ANY) {
2153 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2154 vsi->seid,
2155 enable,
2156 NULL);
2157 } else {
2158 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2159 vsi->seid,
2160 enable,
2161 f->vlan,
2162 NULL);
2163 }
2164
2165 if (aq_ret) {
2166 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2167 dev_warn(&vsi->back->pdev->dev,
2168 "Error %s, forcing overflow promiscuous on %s\n",
2169 i40e_aq_str(hw, hw->aq.asq_last_status),
2170 vsi_name);
2171 }
2172
2173 return aq_ret;
2174 }
2175
2176 /**
2177 * i40e_set_promiscuous - set promiscuous mode
2178 * @pf: board private structure
2179 * @promisc: promisc on or off
2180 *
2181 * There are different ways of setting promiscuous mode on a PF depending on
2182 * what state/environment we're in. This identifies and sets it appropriately.
2183 * Returns 0 on success.
2184 **/
2185 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2186 {
2187 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2188 struct i40e_hw *hw = &pf->hw;
2189 i40e_status aq_ret;
2190
2191 if (vsi->type == I40E_VSI_MAIN &&
2192 pf->lan_veb != I40E_NO_VEB &&
2193 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2194 /* set defport ON for Main VSI instead of true promisc
2195 * this way we will get all unicast/multicast and VLAN
2196 * promisc behavior but will not get VF or VMDq traffic
2197 * replicated on the Main VSI.
2198 */ 2199 if (promisc) 2200 aq_ret = i40e_aq_set_default_vsi(hw, 2201 vsi->seid, 2202 NULL); 2203 else 2204 aq_ret = i40e_aq_clear_default_vsi(hw, 2205 vsi->seid, 2206 NULL); 2207 if (aq_ret) { 2208 dev_info(&pf->pdev->dev, 2209 "Set default VSI failed, err %s, aq_err %s\n", 2210 i40e_stat_str(hw, aq_ret), 2211 i40e_aq_str(hw, hw->aq.asq_last_status)); 2212 } 2213 } else { 2214 aq_ret = i40e_aq_set_vsi_unicast_promiscuous( 2215 hw, 2216 vsi->seid, 2217 promisc, NULL, 2218 true); 2219 if (aq_ret) { 2220 dev_info(&pf->pdev->dev, 2221 "set unicast promisc failed, err %s, aq_err %s\n", 2222 i40e_stat_str(hw, aq_ret), 2223 i40e_aq_str(hw, hw->aq.asq_last_status)); 2224 } 2225 aq_ret = i40e_aq_set_vsi_multicast_promiscuous( 2226 hw, 2227 vsi->seid, 2228 promisc, NULL); 2229 if (aq_ret) { 2230 dev_info(&pf->pdev->dev, 2231 "set multicast promisc failed, err %s, aq_err %s\n", 2232 i40e_stat_str(hw, aq_ret), 2233 i40e_aq_str(hw, hw->aq.asq_last_status)); 2234 } 2235 } 2236 2237 if (!aq_ret) 2238 pf->cur_promisc = promisc; 2239 2240 return aq_ret; 2241 } 2242 2243 /** 2244 * i40e_sync_vsi_filters - Update the VSI filter list to the HW 2245 * @vsi: ptr to the VSI 2246 * 2247 * Push any outstanding VSI filter changes through the AdminQ. 2248 * 2249 * Returns 0 or error value 2250 **/ 2251 int i40e_sync_vsi_filters(struct i40e_vsi *vsi) 2252 { 2253 struct hlist_head tmp_add_list, tmp_del_list; 2254 struct i40e_mac_filter *f; 2255 struct i40e_new_mac_filter *new, *add_head = NULL; 2256 struct i40e_hw *hw = &vsi->back->hw; 2257 bool old_overflow, new_overflow; 2258 unsigned int failed_filters = 0; 2259 unsigned int vlan_filters = 0; 2260 char vsi_name[16] = "PF"; 2261 int filter_list_len = 0; 2262 i40e_status aq_ret = 0; 2263 u32 changed_flags = 0; 2264 struct hlist_node *h; 2265 struct i40e_pf *pf; 2266 int num_add = 0; 2267 int num_del = 0; 2268 int retval = 0; 2269 u16 cmd_flags; 2270 int list_size; 2271 int bkt; 2272 2273 /* empty array typed pointers, kcalloc later */ 2274 struct i40e_aqc_add_macvlan_element_data *add_list; 2275 struct i40e_aqc_remove_macvlan_element_data *del_list; 2276 2277 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) 2278 usleep_range(1000, 2000); 2279 pf = vsi->back; 2280 2281 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); 2282 2283 if (vsi->netdev) { 2284 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 2285 vsi->current_netdev_flags = vsi->netdev->flags; 2286 } 2287 2288 INIT_HLIST_HEAD(&tmp_add_list); 2289 INIT_HLIST_HEAD(&tmp_del_list); 2290 2291 if (vsi->type == I40E_VSI_SRIOV) 2292 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); 2293 else if (vsi->type != I40E_VSI_MAIN) 2294 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); 2295 2296 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 2297 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 2298 2299 spin_lock_bh(&vsi->mac_filter_hash_lock); 2300 /* Create a list of filters to delete. 
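 * Entries marked I40E_FILTER_REMOVE move off the hash onto
 * tmp_del_list; entries marked I40E_FILTER_NEW get a temporary
 * i40e_new_mac_filter wrapper on tmp_add_list, so the originals
 * stay in the hash while the firmware update is in flight.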
*/ 2301 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2302 if (f->state == I40E_FILTER_REMOVE) { 2303 /* Move the element into temporary del_list */ 2304 hash_del(&f->hlist); 2305 hlist_add_head(&f->hlist, &tmp_del_list); 2306 2307 /* Avoid counting removed filters */ 2308 continue; 2309 } 2310 if (f->state == I40E_FILTER_NEW) { 2311 /* Create a temporary i40e_new_mac_filter */ 2312 new = kzalloc(sizeof(*new), GFP_ATOMIC); 2313 if (!new) 2314 goto err_no_memory_locked; 2315 2316 /* Store pointer to the real filter */ 2317 new->f = f; 2318 new->state = f->state; 2319 2320 /* Add it to the hash list */ 2321 hlist_add_head(&new->hlist, &tmp_add_list); 2322 } 2323 2324 /* Count the number of active (current and new) VLAN 2325 * filters we have now. Does not count filters which 2326 * are marked for deletion. 2327 */ 2328 if (f->vlan > 0) 2329 vlan_filters++; 2330 } 2331 2332 retval = i40e_correct_mac_vlan_filters(vsi, 2333 &tmp_add_list, 2334 &tmp_del_list, 2335 vlan_filters); 2336 if (retval) 2337 goto err_no_memory_locked; 2338 2339 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2340 } 2341 2342 /* Now process 'del_list' outside the lock */ 2343 if (!hlist_empty(&tmp_del_list)) { 2344 filter_list_len = hw->aq.asq_buf_size / 2345 sizeof(struct i40e_aqc_remove_macvlan_element_data); 2346 list_size = filter_list_len * 2347 sizeof(struct i40e_aqc_remove_macvlan_element_data); 2348 del_list = kzalloc(list_size, GFP_ATOMIC); 2349 if (!del_list) 2350 goto err_no_memory; 2351 2352 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) { 2353 cmd_flags = 0; 2354 2355 /* handle broadcast filters by updating the broadcast 2356 * promiscuous flag and release filter list. 2357 */ 2358 if (is_broadcast_ether_addr(f->macaddr)) { 2359 i40e_aqc_broadcast_filter(vsi, vsi_name, f); 2360 2361 hlist_del(&f->hlist); 2362 kfree(f); 2363 continue; 2364 } 2365 2366 /* add to delete list */ 2367 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); 2368 if (f->vlan == I40E_VLAN_ANY) { 2369 del_list[num_del].vlan_tag = 0; 2370 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 2371 } else { 2372 del_list[num_del].vlan_tag = 2373 cpu_to_le16((u16)(f->vlan)); 2374 } 2375 2376 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 2377 del_list[num_del].flags = cmd_flags; 2378 num_del++; 2379 2380 /* flush a full buffer */ 2381 if (num_del == filter_list_len) { 2382 i40e_aqc_del_filters(vsi, vsi_name, del_list, 2383 num_del, &retval); 2384 memset(del_list, 0, list_size); 2385 num_del = 0; 2386 } 2387 /* Release memory for MAC filter entries which were 2388 * synced up with HW. 2389 */ 2390 hlist_del(&f->hlist); 2391 kfree(f); 2392 } 2393 2394 if (num_del) { 2395 i40e_aqc_del_filters(vsi, vsi_name, del_list, 2396 num_del, &retval); 2397 } 2398 2399 kfree(del_list); 2400 del_list = NULL; 2401 } 2402 2403 if (!hlist_empty(&tmp_add_list)) { 2404 /* Do all the adds now. */ 2405 filter_list_len = hw->aq.asq_buf_size / 2406 sizeof(struct i40e_aqc_add_macvlan_element_data); 2407 list_size = filter_list_len * 2408 sizeof(struct i40e_aqc_add_macvlan_element_data); 2409 add_list = kzalloc(list_size, GFP_ATOMIC); 2410 if (!add_list) 2411 goto err_no_memory; 2412 2413 num_add = 0; 2414 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { 2415 /* handle broadcast filters by updating the broadcast 2416 * promiscuous flag instead of adding a MAC filter. 
2417 */ 2418 if (is_broadcast_ether_addr(new->f->macaddr)) { 2419 if (i40e_aqc_broadcast_filter(vsi, vsi_name, 2420 new->f)) 2421 new->state = I40E_FILTER_FAILED; 2422 else 2423 new->state = I40E_FILTER_ACTIVE; 2424 continue; 2425 } 2426 2427 /* add to add array */ 2428 if (num_add == 0) 2429 add_head = new; 2430 cmd_flags = 0; 2431 ether_addr_copy(add_list[num_add].mac_addr, 2432 new->f->macaddr); 2433 if (new->f->vlan == I40E_VLAN_ANY) { 2434 add_list[num_add].vlan_tag = 0; 2435 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; 2436 } else { 2437 add_list[num_add].vlan_tag = 2438 cpu_to_le16((u16)(new->f->vlan)); 2439 } 2440 add_list[num_add].queue_number = 0; 2441 /* set invalid match method for later detection */ 2442 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES; 2443 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 2444 add_list[num_add].flags = cpu_to_le16(cmd_flags); 2445 num_add++; 2446 2447 /* flush a full buffer */ 2448 if (num_add == filter_list_len) { 2449 i40e_aqc_add_filters(vsi, vsi_name, add_list, 2450 add_head, num_add); 2451 memset(add_list, 0, list_size); 2452 num_add = 0; 2453 } 2454 } 2455 if (num_add) { 2456 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, 2457 num_add); 2458 } 2459 /* Now move all of the filters from the temp add list back to 2460 * the VSI's list. 2461 */ 2462 spin_lock_bh(&vsi->mac_filter_hash_lock); 2463 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { 2464 /* Only update the state if we're still NEW */ 2465 if (new->f->state == I40E_FILTER_NEW) 2466 new->f->state = new->state; 2467 hlist_del(&new->hlist); 2468 kfree(new); 2469 } 2470 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2471 kfree(add_list); 2472 add_list = NULL; 2473 } 2474 2475 /* Determine the number of active and failed filters. */ 2476 spin_lock_bh(&vsi->mac_filter_hash_lock); 2477 vsi->active_filters = 0; 2478 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 2479 if (f->state == I40E_FILTER_ACTIVE) 2480 vsi->active_filters++; 2481 else if (f->state == I40E_FILTER_FAILED) 2482 failed_filters++; 2483 } 2484 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2485 2486 /* Check if we are able to exit overflow promiscuous mode. We can 2487 * safely exit if we didn't just enter, we no longer have any failed 2488 * filters, and we have reduced filters below the threshold value. 
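 * The threshold is 3/4 of the active filter count at the time we
 * entered overflow (see the calculation below); e.g. entering with
 * 40 active filters sets promisc_threshold to 30, so promiscuous
 * mode persists until the count drops below 30.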
2489 */ 2490 if (old_overflow && !failed_filters && 2491 vsi->active_filters < vsi->promisc_threshold) { 2492 dev_info(&pf->pdev->dev, 2493 "filter logjam cleared on %s, leaving overflow promiscuous mode\n", 2494 vsi_name); 2495 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); 2496 vsi->promisc_threshold = 0; 2497 } 2498 2499 /* if the VF is not trusted do not do promisc */ 2500 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { 2501 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); 2502 goto out; 2503 } 2504 2505 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); 2506 2507 /* If we are entering overflow promiscuous, we need to calculate a new 2508 * threshold for when we are safe to exit 2509 */ 2510 if (!old_overflow && new_overflow) 2511 vsi->promisc_threshold = (vsi->active_filters * 3) / 4; 2512 2513 /* check for changes in promiscuous modes */ 2514 if (changed_flags & IFF_ALLMULTI) { 2515 bool cur_multipromisc; 2516 2517 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 2518 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 2519 vsi->seid, 2520 cur_multipromisc, 2521 NULL); 2522 if (aq_ret) { 2523 retval = i40e_aq_rc_to_posix(aq_ret, 2524 hw->aq.asq_last_status); 2525 dev_info(&pf->pdev->dev, 2526 "set multi promisc failed on %s, err %s aq_err %s\n", 2527 vsi_name, 2528 i40e_stat_str(hw, aq_ret), 2529 i40e_aq_str(hw, hw->aq.asq_last_status)); 2530 } 2531 } 2532 2533 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) { 2534 bool cur_promisc; 2535 2536 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 2537 new_overflow); 2538 aq_ret = i40e_set_promiscuous(pf, cur_promisc); 2539 if (aq_ret) { 2540 retval = i40e_aq_rc_to_posix(aq_ret, 2541 hw->aq.asq_last_status); 2542 dev_info(&pf->pdev->dev, 2543 "Setting promiscuous %s failed on %s, err %s aq_err %s\n", 2544 cur_promisc ? 
"on" : "off", 2545 vsi_name, 2546 i40e_stat_str(hw, aq_ret), 2547 i40e_aq_str(hw, hw->aq.asq_last_status)); 2548 } 2549 } 2550 out: 2551 /* if something went wrong then set the changed flag so we try again */ 2552 if (retval) 2553 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 2554 2555 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); 2556 return retval; 2557 2558 err_no_memory: 2559 /* Restore elements on the temporary add and delete lists */ 2560 spin_lock_bh(&vsi->mac_filter_hash_lock); 2561 err_no_memory_locked: 2562 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 2563 i40e_undo_add_filter_entries(vsi, &tmp_add_list); 2564 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2565 2566 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 2567 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); 2568 return -ENOMEM; 2569 } 2570 2571 /** 2572 * i40e_sync_filters_subtask - Sync the VSI filter list with HW 2573 * @pf: board private structure 2574 **/ 2575 static void i40e_sync_filters_subtask(struct i40e_pf *pf) 2576 { 2577 int v; 2578 2579 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) 2580 return; 2581 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 2582 2583 for (v = 0; v < pf->num_alloc_vsi; v++) { 2584 if (pf->vsi[v] && 2585 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { 2586 int ret = i40e_sync_vsi_filters(pf->vsi[v]); 2587 2588 if (ret) { 2589 /* come back and try again later */ 2590 pf->flags |= I40E_FLAG_FILTER_SYNC; 2591 break; 2592 } 2593 } 2594 } 2595 } 2596 2597 /** 2598 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP 2599 * @vsi: the vsi 2600 **/ 2601 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi) 2602 { 2603 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) 2604 return I40E_RXBUFFER_2048; 2605 else 2606 return I40E_RXBUFFER_3072; 2607 } 2608 2609 /** 2610 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit 2611 * @netdev: network interface device structure 2612 * @new_mtu: new value for maximum frame size 2613 * 2614 * Returns 0 on success, negative on failure 2615 **/ 2616 static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 2617 { 2618 struct i40e_netdev_priv *np = netdev_priv(netdev); 2619 struct i40e_vsi *vsi = np->vsi; 2620 struct i40e_pf *pf = vsi->back; 2621 2622 if (i40e_enabled_xdp_vsi(vsi)) { 2623 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2624 2625 if (frame_size > i40e_max_xdp_frame_size(vsi)) 2626 return -EINVAL; 2627 } 2628 2629 netdev_info(netdev, "changing MTU from %d to %d\n", 2630 netdev->mtu, new_mtu); 2631 netdev->mtu = new_mtu; 2632 if (netif_running(netdev)) 2633 i40e_vsi_reinit_locked(vsi); 2634 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | 2635 I40E_FLAG_CLIENT_L2_CHANGE); 2636 return 0; 2637 } 2638 2639 /** 2640 * i40e_ioctl - Access the hwtstamp interface 2641 * @netdev: network interface device structure 2642 * @ifr: interface request data 2643 * @cmd: ioctl command 2644 **/ 2645 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2646 { 2647 struct i40e_netdev_priv *np = netdev_priv(netdev); 2648 struct i40e_pf *pf = np->vsi->back; 2649 2650 switch (cmd) { 2651 case SIOCGHWTSTAMP: 2652 return i40e_ptp_get_ts_config(pf, ifr); 2653 case SIOCSHWTSTAMP: 2654 return i40e_ptp_set_ts_config(pf, ifr); 2655 default: 2656 return -EOPNOTSUPP; 2657 } 2658 } 2659 2660 /** 2661 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 2662 * @vsi: the vsi being adjusted 2663 **/ 2664 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 2665 { 2666 
struct i40e_vsi_context ctxt; 2667 i40e_status ret; 2668 2669 if ((vsi->info.valid_sections & 2670 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2671 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 2672 return; /* already enabled */ 2673 2674 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2675 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2676 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 2677 2678 ctxt.seid = vsi->seid; 2679 ctxt.info = vsi->info; 2680 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2681 if (ret) { 2682 dev_info(&vsi->back->pdev->dev, 2683 "update vlan stripping failed, err %s aq_err %s\n", 2684 i40e_stat_str(&vsi->back->hw, ret), 2685 i40e_aq_str(&vsi->back->hw, 2686 vsi->back->hw.aq.asq_last_status)); 2687 } 2688 } 2689 2690 /** 2691 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI 2692 * @vsi: the vsi being adjusted 2693 **/ 2694 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) 2695 { 2696 struct i40e_vsi_context ctxt; 2697 i40e_status ret; 2698 2699 if ((vsi->info.valid_sections & 2700 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2701 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == 2702 I40E_AQ_VSI_PVLAN_EMOD_MASK)) 2703 return; /* already disabled */ 2704 2705 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2706 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2707 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2708 2709 ctxt.seid = vsi->seid; 2710 ctxt.info = vsi->info; 2711 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2712 if (ret) { 2713 dev_info(&vsi->back->pdev->dev, 2714 "update vlan stripping failed, err %s aq_err %s\n", 2715 i40e_stat_str(&vsi->back->hw, ret), 2716 i40e_aq_str(&vsi->back->hw, 2717 vsi->back->hw.aq.asq_last_status)); 2718 } 2719 } 2720 2721 /** 2722 * i40e_vlan_rx_register - Setup or shutdown vlan offload 2723 * @netdev: network interface to be adjusted 2724 * @features: netdev features to test if VLAN offload is enabled or not 2725 **/ 2726 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) 2727 { 2728 struct i40e_netdev_priv *np = netdev_priv(netdev); 2729 struct i40e_vsi *vsi = np->vsi; 2730 2731 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2732 i40e_vlan_stripping_enable(vsi); 2733 else 2734 i40e_vlan_stripping_disable(vsi); 2735 } 2736 2737 /** 2738 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address 2739 * @vsi: the vsi being configured 2740 * @vid: vlan id to be added (0 = untagged only , -1 = any) 2741 * 2742 * This is a helper function for adding a new MAC/VLAN filter with the 2743 * specified VLAN for each existing MAC address already in the hash table. 2744 * This function does *not* perform any accounting to update filters based on 2745 * VLAN mode. 
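 *
 * For example (illustrative), with MACs A and B already present,
 * i40e_add_vlan_all_mac(vsi, 5) leaves A/5 and B/5 entries queued
 * for the next filter sync.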
2746 * 2747 * NOTE: this function expects to be called while under the 2748 * mac_filter_hash_lock 2749 **/ 2750 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) 2751 { 2752 struct i40e_mac_filter *f, *add_f; 2753 struct hlist_node *h; 2754 int bkt; 2755 2756 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2757 if (f->state == I40E_FILTER_REMOVE) 2758 continue; 2759 add_f = i40e_add_filter(vsi, f->macaddr, vid); 2760 if (!add_f) { 2761 dev_info(&vsi->back->pdev->dev, 2762 "Could not add vlan filter %d for %pM\n", 2763 vid, f->macaddr); 2764 return -ENOMEM; 2765 } 2766 } 2767 2768 return 0; 2769 } 2770 2771 /** 2772 * i40e_vsi_add_vlan - Add VSI membership for given VLAN 2773 * @vsi: the VSI being configured 2774 * @vid: VLAN id to be added 2775 **/ 2776 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) 2777 { 2778 int err; 2779 2780 if (vsi->info.pvid) 2781 return -EINVAL; 2782 2783 /* The network stack will attempt to add VID=0, with the intention to 2784 * receive priority tagged packets with a VLAN of 0. Our HW receives 2785 * these packets by default when configured to receive untagged 2786 * packets, so we don't need to add a filter for this case. 2787 * Additionally, HW interprets adding a VID=0 filter as meaning to 2788 * receive *only* tagged traffic and stops receiving untagged traffic. 2789 * Thus, we do not want to actually add a filter for VID=0 2790 */ 2791 if (!vid) 2792 return 0; 2793 2794 /* Locked once because all functions invoked below iterates list*/ 2795 spin_lock_bh(&vsi->mac_filter_hash_lock); 2796 err = i40e_add_vlan_all_mac(vsi, vid); 2797 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2798 if (err) 2799 return err; 2800 2801 /* schedule our worker thread which will take care of 2802 * applying the new filter changes 2803 */ 2804 i40e_service_event_schedule(vsi->back); 2805 return 0; 2806 } 2807 2808 /** 2809 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN 2810 * @vsi: the vsi being configured 2811 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 2812 * 2813 * This function should be used to remove all VLAN filters which match the 2814 * given VID. It does not schedule the service event and does not take the 2815 * mac_filter_hash_lock so it may be combined with other operations under 2816 * a single invocation of the mac_filter_hash_lock. 
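 *
 * Typical call pattern, as in i40e_vsi_kill_vlan() below:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	i40e_rm_vlan_all_mac(vsi, vid);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);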
2817 * 2818 * NOTE: this function expects to be called while under the 2819 * mac_filter_hash_lock 2820 */ 2821 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) 2822 { 2823 struct i40e_mac_filter *f; 2824 struct hlist_node *h; 2825 int bkt; 2826 2827 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2828 if (f->vlan == vid) 2829 __i40e_del_filter(vsi, f); 2830 } 2831 } 2832 2833 /** 2834 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN 2835 * @vsi: the VSI being configured 2836 * @vid: VLAN id to be removed 2837 **/ 2838 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) 2839 { 2840 if (!vid || vsi->info.pvid) 2841 return; 2842 2843 spin_lock_bh(&vsi->mac_filter_hash_lock); 2844 i40e_rm_vlan_all_mac(vsi, vid); 2845 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2846 2847 /* schedule our worker thread which will take care of 2848 * applying the new filter changes 2849 */ 2850 i40e_service_event_schedule(vsi->back); 2851 } 2852 2853 /** 2854 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2855 * @netdev: network interface to be adjusted 2856 * @vid: vlan id to be added 2857 * 2858 * net_device_ops implementation for adding vlan ids 2859 **/ 2860 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2861 __always_unused __be16 proto, u16 vid) 2862 { 2863 struct i40e_netdev_priv *np = netdev_priv(netdev); 2864 struct i40e_vsi *vsi = np->vsi; 2865 int ret = 0; 2866 2867 if (vid >= VLAN_N_VID) 2868 return -EINVAL; 2869 2870 ret = i40e_vsi_add_vlan(vsi, vid); 2871 if (!ret) 2872 set_bit(vid, vsi->active_vlans); 2873 2874 return ret; 2875 } 2876 2877 /** 2878 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 2879 * @netdev: network interface to be adjusted 2880 * @vid: vlan id to be removed 2881 * 2882 * net_device_ops implementation for removing vlan ids 2883 **/ 2884 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2885 __always_unused __be16 proto, u16 vid) 2886 { 2887 struct i40e_netdev_priv *np = netdev_priv(netdev); 2888 struct i40e_vsi *vsi = np->vsi; 2889 2890 /* return code is ignored as there is nothing a user 2891 * can do about failure to remove and a log message was 2892 * already printed from the other function 2893 */ 2894 i40e_vsi_kill_vlan(vsi, vid); 2895 2896 clear_bit(vid, vsi->active_vlans); 2897 2898 return 0; 2899 } 2900 2901 /** 2902 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 2903 * @vsi: the vsi being brought back up 2904 **/ 2905 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2906 { 2907 u16 vid; 2908 2909 if (!vsi->netdev) 2910 return; 2911 2912 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2913 2914 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 2915 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2916 vid); 2917 } 2918 2919 /** 2920 * i40e_vsi_add_pvid - Add pvid for the VSI 2921 * @vsi: the vsi being adjusted 2922 * @vid: the vlan id to set as a PVID 2923 **/ 2924 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2925 { 2926 struct i40e_vsi_context ctxt; 2927 i40e_status ret; 2928 2929 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2930 vsi->info.pvid = cpu_to_le16(vid); 2931 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2932 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2933 I40E_AQ_VSI_PVLAN_EMOD_STR; 2934 2935 ctxt.seid = vsi->seid; 2936 ctxt.info = vsi->info; 2937 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2938 if (ret) { 2939 dev_info(&vsi->back->pdev->dev, 2940 "add pvid failed, err %s aq_err 
%s\n",
2941 i40e_stat_str(&vsi->back->hw, ret),
2942 i40e_aq_str(&vsi->back->hw,
2943 vsi->back->hw.aq.asq_last_status));
2944 return -ENOENT;
2945 }
2946
2947 return 0;
2948 }
2949
2950 /**
2951 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2952 * @vsi: the vsi being adjusted
2953 *
2954 * Just use the vlan_rx_register() service to put it back to normal.
2955 **/
2956 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2957 {
2958 i40e_vlan_stripping_disable(vsi);
2959
2960 vsi->info.pvid = 0;
2961 }
2962
2963 /**
2964 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2965 * @vsi: ptr to the VSI
2966 *
2967 * If this function returns with an error, then it's possible one or
2968 * more of the rings is populated (while the rest are not). It is the
2969 * caller's duty to clean those orphaned rings.
2970 *
2971 * Return 0 on success, negative on failure
2972 **/
2973 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2974 {
2975 int i, err = 0;
2976
2977 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2978 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2979
2980 if (!i40e_enabled_xdp_vsi(vsi))
2981 return err;
2982
2983 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2984 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2985
2986 return err;
2987 }
2988
2989 /**
2990 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2991 * @vsi: ptr to the VSI
2992 *
2993 * Free VSI's transmit software resources
2994 **/
2995 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2996 {
2997 int i;
2998
2999 if (vsi->tx_rings) {
3000 for (i = 0; i < vsi->num_queue_pairs; i++)
3001 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3002 i40e_free_tx_resources(vsi->tx_rings[i]);
3003 }
3004
3005 if (vsi->xdp_rings) {
3006 for (i = 0; i < vsi->num_queue_pairs; i++)
3007 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3008 i40e_free_tx_resources(vsi->xdp_rings[i]);
3009 }
3010 }
3011
3012 /**
3013 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
3014 * @vsi: ptr to the VSI
3015 *
3016 * If this function returns with an error, then it's possible one or
3017 * more of the rings is populated (while the rest are not). It is the
3018 * caller's duty to clean those orphaned rings.
3019 *
3020 * Return 0 on success, negative on failure
3021 **/
3022 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3023 {
3024 int i, err = 0;
3025
3026 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3027 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3028 return err;
3029 }
3030
3031 /**
3032 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
3033 * @vsi: ptr to the VSI
3034 *
3035 * Free all receive software resources
3036 **/
3037 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3038 {
3039 int i;
3040
3041 if (!vsi->rx_rings)
3042 return;
3043
3044 for (i = 0; i < vsi->num_queue_pairs; i++)
3045 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3046 i40e_free_rx_resources(vsi->rx_rings[i]);
3047 }
3048
3049 /**
3050 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3051 * @ring: The Tx ring to configure
3052 *
3053 * This enables/disables XPS for a given Tx descriptor ring
3054 * based on the TCs enabled for the VSI that ring belongs to.
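 *
 * XPS is initialized only once per ring (guarded by the
 * __I40E_TX_XPS_INIT_DONE bit) so that user-configured mappings are not
 * overwritten on later reconfigurations.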
3055 **/ 3056 static void i40e_config_xps_tx_ring(struct i40e_ring *ring) 3057 { 3058 int cpu; 3059 3060 if (!ring->q_vector || !ring->netdev || ring->ch) 3061 return; 3062 3063 /* We only initialize XPS once, so as not to overwrite user settings */ 3064 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) 3065 return; 3066 3067 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); 3068 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), 3069 ring->queue_index); 3070 } 3071 3072 /** 3073 * i40e_configure_tx_ring - Configure a transmit ring context and rest 3074 * @ring: The Tx ring to configure 3075 * 3076 * Configure the Tx descriptor ring in the HMC context. 3077 **/ 3078 static int i40e_configure_tx_ring(struct i40e_ring *ring) 3079 { 3080 struct i40e_vsi *vsi = ring->vsi; 3081 u16 pf_q = vsi->base_queue + ring->queue_index; 3082 struct i40e_hw *hw = &vsi->back->hw; 3083 struct i40e_hmc_obj_txq tx_ctx; 3084 i40e_status err = 0; 3085 u32 qtx_ctl = 0; 3086 3087 /* some ATR related tx ring init */ 3088 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { 3089 ring->atr_sample_rate = vsi->back->atr_sample_rate; 3090 ring->atr_count = 0; 3091 } else { 3092 ring->atr_sample_rate = 0; 3093 } 3094 3095 /* configure XPS */ 3096 i40e_config_xps_tx_ring(ring); 3097 3098 /* clear the context structure first */ 3099 memset(&tx_ctx, 0, sizeof(tx_ctx)); 3100 3101 tx_ctx.new_context = 1; 3102 tx_ctx.base = (ring->dma / 128); 3103 tx_ctx.qlen = ring->count; 3104 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 3105 I40E_FLAG_FD_ATR_ENABLED)); 3106 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); 3107 /* FDIR VSI tx ring can still use RS bit and writebacks */ 3108 if (vsi->type != I40E_VSI_FDIR) 3109 tx_ctx.head_wb_ena = 1; 3110 tx_ctx.head_wb_addr = ring->dma + 3111 (ring->count * sizeof(struct i40e_tx_desc)); 3112 3113 /* As part of VSI creation/update, FW allocates certain 3114 * Tx arbitration queue sets for each TC enabled for 3115 * the VSI. The FW returns the handles to these queue 3116 * sets as part of the response buffer to Add VSI, 3117 * Update VSI, etc. AQ commands. It is expected that 3118 * these queue set handles be associated with the Tx 3119 * queues by the driver as part of the TX queue context 3120 * initialization. This has to be done regardless of 3121 * DCB as by default everything is mapped to TC0. 
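 * Concretely, rdylist below is loaded with the queue-set handle for
 * this ring's TC (ring->dcb_tc), taken either from the channel's
 * VSI info or from the main VSI info.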
3122 */
3123
3124 if (ring->ch)
3125 tx_ctx.rdylist =
3126 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3127
3128 else
3129 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3130
3131 tx_ctx.rdylist_act = 0;
3132
3133 /* clear the context in the HMC */
3134 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3135 if (err) {
3136 dev_info(&vsi->back->pdev->dev,
3137 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3138 ring->queue_index, pf_q, err);
3139 return -ENOMEM;
3140 }
3141
3142 /* set the context in the HMC */
3143 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3144 if (err) {
3145 dev_info(&vsi->back->pdev->dev,
3146 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3147 ring->queue_index, pf_q, err);
3148 return -ENOMEM;
3149 }
3150
3151 /* Now associate this queue with this PCI function */
3152 if (ring->ch) {
3153 if (ring->ch->type == I40E_VSI_VMDQ2)
3154 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3155 else
3156 return -EINVAL;
3157
3158 qtx_ctl |= (ring->ch->vsi_number <<
3159 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3160 I40E_QTX_CTL_VFVM_INDX_MASK;
3161 } else {
3162 if (vsi->type == I40E_VSI_VMDQ2) {
3163 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3164 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3165 I40E_QTX_CTL_VFVM_INDX_MASK;
3166 } else {
3167 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3168 }
3169 }
3170
3171 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3172 I40E_QTX_CTL_PF_INDX_MASK);
3173 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3174 i40e_flush(hw);
3175
3176 /* cache the tail offset for easier writes later */
3177 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3178
3179 return 0;
3180 }
3181
3182 /**
3183 * i40e_configure_rx_ring - Configure a receive ring context
3184 * @ring: The Rx ring to configure
3185 *
3186 * Configure the Rx descriptor ring in the HMC context.
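 *
 * Among other fields this programs the buffer size (dbuff) and the maximum
 * frame size; e.g. (illustrative) with rx_buf_len = 2048 and a buffer chain
 * length of 5, rxmax = min(vsi->max_frame, 5 * 2048).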
3187 **/ 3188 static int i40e_configure_rx_ring(struct i40e_ring *ring) 3189 { 3190 struct i40e_vsi *vsi = ring->vsi; 3191 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 3192 u16 pf_q = vsi->base_queue + ring->queue_index; 3193 struct i40e_hw *hw = &vsi->back->hw; 3194 struct i40e_hmc_obj_rxq rx_ctx; 3195 i40e_status err = 0; 3196 3197 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); 3198 3199 /* clear the context structure first */ 3200 memset(&rx_ctx, 0, sizeof(rx_ctx)); 3201 3202 ring->rx_buf_len = vsi->rx_buf_len; 3203 3204 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, 3205 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); 3206 3207 rx_ctx.base = (ring->dma / 128); 3208 rx_ctx.qlen = ring->count; 3209 3210 /* use 32 byte descriptors */ 3211 rx_ctx.dsize = 1; 3212 3213 /* descriptor type is always zero 3214 * rx_ctx.dtype = 0; 3215 */ 3216 rx_ctx.hsplit_0 = 0; 3217 3218 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); 3219 if (hw->revision_id == 0) 3220 rx_ctx.lrxqthresh = 0; 3221 else 3222 rx_ctx.lrxqthresh = 1; 3223 rx_ctx.crcstrip = 1; 3224 rx_ctx.l2tsel = 1; 3225 /* this controls whether VLAN is stripped from inner headers */ 3226 rx_ctx.showiv = 0; 3227 /* set the prefena field to 1 because the manual says to */ 3228 rx_ctx.prefena = 1; 3229 3230 /* clear the context in the HMC */ 3231 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 3232 if (err) { 3233 dev_info(&vsi->back->pdev->dev, 3234 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 3235 ring->queue_index, pf_q, err); 3236 return -ENOMEM; 3237 } 3238 3239 /* set the context in the HMC */ 3240 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 3241 if (err) { 3242 dev_info(&vsi->back->pdev->dev, 3243 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 3244 ring->queue_index, pf_q, err); 3245 return -ENOMEM; 3246 } 3247 3248 /* configure Rx buffer alignment */ 3249 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) 3250 clear_ring_build_skb_enabled(ring); 3251 else 3252 set_ring_build_skb_enabled(ring); 3253 3254 /* cache tail for quicker writes, and clear the reg before use */ 3255 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 3256 writel(0, ring->tail); 3257 3258 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 3259 3260 return 0; 3261 } 3262 3263 /** 3264 * i40e_vsi_configure_tx - Configure the VSI for Tx 3265 * @vsi: VSI structure describing this set of rings and resources 3266 * 3267 * Configure the Tx VSI for operation. 3268 **/ 3269 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 3270 { 3271 int err = 0; 3272 u16 i; 3273 3274 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 3275 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 3276 3277 if (!i40e_enabled_xdp_vsi(vsi)) 3278 return err; 3279 3280 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 3281 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); 3282 3283 return err; 3284 } 3285 3286 /** 3287 * i40e_vsi_configure_rx - Configure the VSI for Rx 3288 * @vsi: the VSI being configured 3289 * 3290 * Configure the Rx VSI for operation. 
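 *
 * Buffer sizing, as implemented below: legacy-rx (or no netdev) uses
 * 2048-byte buffers; otherwise, on 4K pages with a standard MTU, 1536-byte
 * buffers (minus NET_IP_ALIGN) suffice; all remaining cases use 3072-byte
 * buffers on 4K pages and 2048-byte buffers on larger pages.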
3291 **/ 3292 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 3293 { 3294 int err = 0; 3295 u16 i; 3296 3297 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { 3298 vsi->max_frame = I40E_MAX_RXBUFFER; 3299 vsi->rx_buf_len = I40E_RXBUFFER_2048; 3300 #if (PAGE_SIZE < 8192) 3301 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING && 3302 (vsi->netdev->mtu <= ETH_DATA_LEN)) { 3303 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; 3304 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; 3305 #endif 3306 } else { 3307 vsi->max_frame = I40E_MAX_RXBUFFER; 3308 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 : 3309 I40E_RXBUFFER_2048; 3310 } 3311 3312 /* set up individual rings */ 3313 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 3314 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 3315 3316 return err; 3317 } 3318 3319 /** 3320 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 3321 * @vsi: ptr to the VSI 3322 **/ 3323 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 3324 { 3325 struct i40e_ring *tx_ring, *rx_ring; 3326 u16 qoffset, qcount; 3327 int i, n; 3328 3329 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 3330 /* Reset the TC information */ 3331 for (i = 0; i < vsi->num_queue_pairs; i++) { 3332 rx_ring = vsi->rx_rings[i]; 3333 tx_ring = vsi->tx_rings[i]; 3334 rx_ring->dcb_tc = 0; 3335 tx_ring->dcb_tc = 0; 3336 } 3337 return; 3338 } 3339 3340 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 3341 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) 3342 continue; 3343 3344 qoffset = vsi->tc_config.tc_info[n].qoffset; 3345 qcount = vsi->tc_config.tc_info[n].qcount; 3346 for (i = qoffset; i < (qoffset + qcount); i++) { 3347 rx_ring = vsi->rx_rings[i]; 3348 tx_ring = vsi->tx_rings[i]; 3349 rx_ring->dcb_tc = n; 3350 tx_ring->dcb_tc = n; 3351 } 3352 } 3353 } 3354 3355 /** 3356 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 3357 * @vsi: ptr to the VSI 3358 **/ 3359 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 3360 { 3361 if (vsi->netdev) 3362 i40e_set_rx_mode(vsi->netdev); 3363 } 3364 3365 /** 3366 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 3367 * @vsi: Pointer to the targeted VSI 3368 * 3369 * This function replays the hlist on the hw where all the SB Flow Director 3370 * filters were saved. 
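 *
 * The per-protocol filter counters are zeroed first and are recounted as
 * each entry on pf->fdir_filter_list is re-added via i40e_add_del_fdir().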
3371 **/ 3372 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) 3373 { 3374 struct i40e_fdir_filter *filter; 3375 struct i40e_pf *pf = vsi->back; 3376 struct hlist_node *node; 3377 3378 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 3379 return; 3380 3381 /* Reset FDir counters as we're replaying all existing filters */ 3382 pf->fd_tcp4_filter_cnt = 0; 3383 pf->fd_udp4_filter_cnt = 0; 3384 pf->fd_sctp4_filter_cnt = 0; 3385 pf->fd_ip4_filter_cnt = 0; 3386 3387 hlist_for_each_entry_safe(filter, node, 3388 &pf->fdir_filter_list, fdir_node) { 3389 i40e_add_del_fdir(vsi, filter, true); 3390 } 3391 } 3392 3393 /** 3394 * i40e_vsi_configure - Set up the VSI for action 3395 * @vsi: the VSI being configured 3396 **/ 3397 static int i40e_vsi_configure(struct i40e_vsi *vsi) 3398 { 3399 int err; 3400 3401 i40e_set_vsi_rx_mode(vsi); 3402 i40e_restore_vlan(vsi); 3403 i40e_vsi_config_dcb_rings(vsi); 3404 err = i40e_vsi_configure_tx(vsi); 3405 if (!err) 3406 err = i40e_vsi_configure_rx(vsi); 3407 3408 return err; 3409 } 3410 3411 /** 3412 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW 3413 * @vsi: the VSI being configured 3414 **/ 3415 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) 3416 { 3417 bool has_xdp = i40e_enabled_xdp_vsi(vsi); 3418 struct i40e_pf *pf = vsi->back; 3419 struct i40e_hw *hw = &pf->hw; 3420 u16 vector; 3421 int i, q; 3422 u32 qp; 3423 3424 /* The interrupt indexing is offset by 1 in the PFINT_ITRn 3425 * and PFINT_LNKLSTn registers, e.g.: 3426 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) 3427 */ 3428 qp = vsi->base_queue; 3429 vector = vsi->base_vector; 3430 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 3431 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; 3432 3433 q_vector->rx.next_update = jiffies + 1; 3434 q_vector->rx.target_itr = 3435 ITR_TO_REG(vsi->rx_rings[i]->itr_setting); 3436 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 3437 q_vector->rx.target_itr); 3438 q_vector->rx.current_itr = q_vector->rx.target_itr; 3439 3440 q_vector->tx.next_update = jiffies + 1; 3441 q_vector->tx.target_itr = 3442 ITR_TO_REG(vsi->tx_rings[i]->itr_setting); 3443 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), 3444 q_vector->tx.target_itr); 3445 q_vector->tx.current_itr = q_vector->tx.target_itr; 3446 3447 wr32(hw, I40E_PFINT_RATEN(vector - 1), 3448 i40e_intrl_usec_to_reg(vsi->int_rate_limit)); 3449 3450 /* Linked list for the queuepairs assigned to this vector */ 3451 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); 3452 for (q = 0; q < q_vector->num_ringpairs; q++) { 3453 u32 nextqp = has_xdp ? 
qp + vsi->alloc_queue_pairs : qp;
3454 u32 val;
3455
3456 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3457 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3458 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3459 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3460 (I40E_QUEUE_TYPE_TX <<
3461 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3462
3463 wr32(hw, I40E_QINT_RQCTL(qp), val);
3464
3465 if (has_xdp) {
3466 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3467 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3468 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3469 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3470 (I40E_QUEUE_TYPE_TX <<
3471 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3472
3473 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3474 }
3475
3476 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3477 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3478 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3479 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3480 (I40E_QUEUE_TYPE_RX <<
3481 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3482
3483 /* Terminate the linked list */
3484 if (q == (q_vector->num_ringpairs - 1))
3485 val |= (I40E_QUEUE_END_OF_LIST <<
3486 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3487
3488 wr32(hw, I40E_QINT_TQCTL(qp), val);
3489 qp++;
3490 }
3491 }
3492
3493 i40e_flush(hw);
3494 }
3495
3496 /**
3497 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3498 * @pf: board private structure
3499 **/
3500 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3501 {
3502 struct i40e_hw *hw = &pf->hw;
3503 u32 val;
3504
3505 /* clear things first */
3506 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3507 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3508
3509 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3510 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3511 I40E_PFINT_ICR0_ENA_GRST_MASK |
3512 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3513 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3514 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3515 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3516 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3517
3518 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3519 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3520
3521 if (pf->flags & I40E_FLAG_PTP)
3522 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3523
3524 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3525
3526 /* SW_ITR_IDX = 0, but don't change INTENA */
3527 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3528 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3529
3530 /* OTHER_ITR_IDX = 0 */
3531 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3532 }
3533
3534 /**
3535 * i40e_configure_msi_and_legacy - MSI and legacy mode interrupt config in the HW
3536 * @vsi: the VSI being configured
3537 **/
3538 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3539 {
3540 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ?
vsi->alloc_queue_pairs : 0; 3541 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3542 struct i40e_pf *pf = vsi->back; 3543 struct i40e_hw *hw = &pf->hw; 3544 u32 val; 3545 3546 /* set the ITR configuration */ 3547 q_vector->rx.next_update = jiffies + 1; 3548 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); 3549 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr); 3550 q_vector->rx.current_itr = q_vector->rx.target_itr; 3551 q_vector->tx.next_update = jiffies + 1; 3552 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); 3553 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr); 3554 q_vector->tx.current_itr = q_vector->tx.target_itr; 3555 3556 i40e_enable_misc_int_causes(pf); 3557 3558 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 3559 wr32(hw, I40E_PFINT_LNKLST0, 0); 3560 3561 /* Associate the queue pair to the vector and enable the queue int */ 3562 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3563 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3564 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| 3565 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3566 3567 wr32(hw, I40E_QINT_RQCTL(0), val); 3568 3569 if (i40e_enabled_xdp_vsi(vsi)) { 3570 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3571 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)| 3572 (I40E_QUEUE_TYPE_TX 3573 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3574 3575 wr32(hw, I40E_QINT_TQCTL(nextqp), val); 3576 } 3577 3578 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3579 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3580 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3581 3582 wr32(hw, I40E_QINT_TQCTL(0), val); 3583 i40e_flush(hw); 3584 } 3585 3586 /** 3587 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 3588 * @pf: board private structure 3589 **/ 3590 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 3591 { 3592 struct i40e_hw *hw = &pf->hw; 3593 3594 wr32(hw, I40E_PFINT_DYN_CTL0, 3595 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3596 i40e_flush(hw); 3597 } 3598 3599 /** 3600 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 3601 * @pf: board private structure 3602 **/ 3603 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 3604 { 3605 struct i40e_hw *hw = &pf->hw; 3606 u32 val; 3607 3608 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 3609 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 3610 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 3611 3612 wr32(hw, I40E_PFINT_DYN_CTL0, val); 3613 i40e_flush(hw); 3614 } 3615 3616 /** 3617 * i40e_msix_clean_rings - MSIX mode Interrupt Handler 3618 * @irq: interrupt number 3619 * @data: pointer to a q_vector 3620 **/ 3621 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 3622 { 3623 struct i40e_q_vector *q_vector = data; 3624 3625 if (!q_vector->tx.ring && !q_vector->rx.ring) 3626 return IRQ_HANDLED; 3627 3628 napi_schedule_irqoff(&q_vector->napi); 3629 3630 return IRQ_HANDLED; 3631 } 3632 3633 /** 3634 * i40e_irq_affinity_notify - Callback for affinity changes 3635 * @notify: context as to what irq was changed 3636 * @mask: the new affinity mask 3637 * 3638 * This is a callback function used by the irq_set_affinity_notifier function 3639 * so that we may register to receive changes to the irq affinity masks. 
3640 **/ 3641 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, 3642 const cpumask_t *mask) 3643 { 3644 struct i40e_q_vector *q_vector = 3645 container_of(notify, struct i40e_q_vector, affinity_notify); 3646 3647 cpumask_copy(&q_vector->affinity_mask, mask); 3648 } 3649 3650 /** 3651 * i40e_irq_affinity_release - Callback for affinity notifier release 3652 * @ref: internal core kernel usage 3653 * 3654 * This is a callback function used by the irq_set_affinity_notifier function 3655 * to inform the current notification subscriber that they will no longer 3656 * receive notifications. 3657 **/ 3658 static void i40e_irq_affinity_release(struct kref *ref) {} 3659 3660 /** 3661 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts 3662 * @vsi: the VSI being configured 3663 * @basename: name for the vector 3664 * 3665 * Allocates MSI-X vectors and requests interrupts from the kernel. 3666 **/ 3667 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) 3668 { 3669 int q_vectors = vsi->num_q_vectors; 3670 struct i40e_pf *pf = vsi->back; 3671 int base = vsi->base_vector; 3672 int rx_int_idx = 0; 3673 int tx_int_idx = 0; 3674 int vector, err; 3675 int irq_num; 3676 int cpu; 3677 3678 for (vector = 0; vector < q_vectors; vector++) { 3679 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; 3680 3681 irq_num = pf->msix_entries[base + vector].vector; 3682 3683 if (q_vector->tx.ring && q_vector->rx.ring) { 3684 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3685 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 3686 tx_int_idx++; 3687 } else if (q_vector->rx.ring) { 3688 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3689 "%s-%s-%d", basename, "rx", rx_int_idx++); 3690 } else if (q_vector->tx.ring) { 3691 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3692 "%s-%s-%d", basename, "tx", tx_int_idx++); 3693 } else { 3694 /* skip this unused q_vector */ 3695 continue; 3696 } 3697 err = request_irq(irq_num, 3698 vsi->irq_handler, 3699 0, 3700 q_vector->name, 3701 q_vector); 3702 if (err) { 3703 dev_info(&pf->pdev->dev, 3704 "MSIX request_irq failed, error: %d\n", err); 3705 goto free_queue_irqs; 3706 } 3707 3708 /* register for affinity change notifications */ 3709 q_vector->affinity_notify.notify = i40e_irq_affinity_notify; 3710 q_vector->affinity_notify.release = i40e_irq_affinity_release; 3711 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 3712 /* Spread affinity hints out across online CPUs. 3713 * 3714 * get_cpu_mask returns a static constant mask with 3715 * a permanent lifetime so it's ok to pass to 3716 * irq_set_affinity_hint without making a copy. 
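 *
 * With the -1 (no preferred node) argument used below,
 * cpumask_local_spread() effectively picks the v_idx'th online CPU,
 * wrapping around, so the hints rotate evenly across all CPUs.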
3717 */ 3718 cpu = cpumask_local_spread(q_vector->v_idx, -1); 3719 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 3720 } 3721 3722 vsi->irqs_ready = true; 3723 return 0; 3724 3725 free_queue_irqs: 3726 while (vector) { 3727 vector--; 3728 irq_num = pf->msix_entries[base + vector].vector; 3729 irq_set_affinity_notifier(irq_num, NULL); 3730 irq_set_affinity_hint(irq_num, NULL); 3731 free_irq(irq_num, vsi->q_vectors[vector]); 3732 } 3733 return err; 3734 } 3735 3736 /** 3737 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI 3738 * @vsi: the VSI being un-configured 3739 **/ 3740 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) 3741 { 3742 struct i40e_pf *pf = vsi->back; 3743 struct i40e_hw *hw = &pf->hw; 3744 int base = vsi->base_vector; 3745 int i; 3746 3747 /* disable interrupt causation from each queue */ 3748 for (i = 0; i < vsi->num_queue_pairs; i++) { 3749 u32 val; 3750 3751 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); 3752 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 3753 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); 3754 3755 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); 3756 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; 3757 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); 3758 3759 if (!i40e_enabled_xdp_vsi(vsi)) 3760 continue; 3761 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); 3762 } 3763 3764 /* disable each interrupt */ 3765 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3766 for (i = vsi->base_vector; 3767 i < (vsi->num_q_vectors + vsi->base_vector); i++) 3768 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); 3769 3770 i40e_flush(hw); 3771 for (i = 0; i < vsi->num_q_vectors; i++) 3772 synchronize_irq(pf->msix_entries[i + base].vector); 3773 } else { 3774 /* Legacy and MSI mode - this stops all interrupt handling */ 3775 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 3776 wr32(hw, I40E_PFINT_DYN_CTL0, 0); 3777 i40e_flush(hw); 3778 synchronize_irq(pf->pdev->irq); 3779 } 3780 } 3781 3782 /** 3783 * i40e_vsi_enable_irq - Enable IRQ for the given VSI 3784 * @vsi: the VSI being configured 3785 **/ 3786 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) 3787 { 3788 struct i40e_pf *pf = vsi->back; 3789 int i; 3790 3791 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3792 for (i = 0; i < vsi->num_q_vectors; i++) 3793 i40e_irq_dynamic_enable(vsi, i); 3794 } else { 3795 i40e_irq_dynamic_enable_icr0(pf); 3796 } 3797 3798 i40e_flush(&pf->hw); 3799 return 0; 3800 } 3801 3802 /** 3803 * i40e_free_misc_vector - Free the vector that handles non-queue events 3804 * @pf: board private structure 3805 **/ 3806 static void i40e_free_misc_vector(struct i40e_pf *pf) 3807 { 3808 /* Disable ICR 0 */ 3809 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 3810 i40e_flush(&pf->hw); 3811 3812 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { 3813 synchronize_irq(pf->msix_entries[0].vector); 3814 free_irq(pf->msix_entries[0].vector, pf); 3815 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); 3816 } 3817 } 3818 3819 /** 3820 * i40e_intr - MSI/Legacy and non-queue interrupt handler 3821 * @irq: interrupt number 3822 * @data: pointer to the PF structure 3823 * 3824 * This is the handler used for all MSI/Legacy interrupts, and deals 3825 * with both queue and non-queue interrupts. This is also used in 3826 * MSIX mode to handle the non-queue interrupts.
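 *
 * Returns IRQ_NONE when no cause bit is set (possible when sharing a
 * legacy line) so the core can try the other handlers on the line;
 * otherwise the pending causes are dispatched and IRQ_HANDLED is
 * returned.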
3827 **/ 3828 static irqreturn_t i40e_intr(int irq, void *data) 3829 { 3830 struct i40e_pf *pf = (struct i40e_pf *)data; 3831 struct i40e_hw *hw = &pf->hw; 3832 irqreturn_t ret = IRQ_NONE; 3833 u32 icr0, icr0_remaining; 3834 u32 val, ena_mask; 3835 3836 icr0 = rd32(hw, I40E_PFINT_ICR0); 3837 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 3838 3839 /* if sharing a legacy IRQ, we might get called w/o an intr pending */ 3840 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 3841 goto enable_intr; 3842 3843 /* if interrupt but no bits showing, must be SWINT */ 3844 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || 3845 (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) 3846 pf->sw_int_count++; 3847 3848 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 3849 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { 3850 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3851 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); 3852 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); 3853 } 3854 3855 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 3856 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 3857 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 3858 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3859 3860 /* We do not have a way to disarm Queue causes while leaving 3861 * interrupt enabled for all other causes, ideally 3862 * interrupt should be disabled while we are in NAPI but 3863 * this is not a performance path and napi_schedule() 3864 * can deal with rescheduling. 3865 */ 3866 if (!test_bit(__I40E_DOWN, pf->state)) 3867 napi_schedule_irqoff(&q_vector->napi); 3868 } 3869 3870 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 3871 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3872 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); 3873 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); 3874 } 3875 3876 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 3877 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 3878 set_bit(__I40E_MDD_EVENT_PENDING, pf->state); 3879 } 3880 3881 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 3882 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 3883 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); 3884 } 3885 3886 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 3887 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 3888 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); 3889 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 3890 val = rd32(hw, I40E_GLGEN_RSTAT); 3891 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3892 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3893 if (val == I40E_RESET_CORER) { 3894 pf->corer_count++; 3895 } else if (val == I40E_RESET_GLOBR) { 3896 pf->globr_count++; 3897 } else if (val == I40E_RESET_EMPR) { 3898 pf->empr_count++; 3899 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); 3900 } 3901 } 3902 3903 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3904 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3905 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3906 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", 3907 rd32(hw, I40E_PFHMC_ERRORINFO), 3908 rd32(hw, I40E_PFHMC_ERRORDATA)); 3909 } 3910 3911 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3912 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 3913 3914 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 3915 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3916 i40e_ptp_tx_hwtstamp(pf); 3917 } 3918 } 3919 3920 /* If a critical error is pending we have no choice but to reset the 3921 * device. 3922 * Report and mask out any remaining unexpected interrupts. 
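 * Only the PE_CRITERR, PCI_EXCEPTION and ECC_ERR causes are treated
 * as critical here; everything else is merely reported and left
 * masked so it cannot fire again until the cause is understood.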
3923 */ 3924 icr0_remaining = icr0 & ena_mask; 3925 if (icr0_remaining) { 3926 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 3927 icr0_remaining); 3928 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 3929 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 3930 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 3931 dev_info(&pf->pdev->dev, "device will be reset\n"); 3932 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); 3933 i40e_service_event_schedule(pf); 3934 } 3935 ena_mask &= ~icr0_remaining; 3936 } 3937 ret = IRQ_HANDLED; 3938 3939 enable_intr: 3940 /* re-enable interrupt causes */ 3941 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3942 if (!test_bit(__I40E_DOWN, pf->state)) { 3943 i40e_service_event_schedule(pf); 3944 i40e_irq_dynamic_enable_icr0(pf); 3945 } 3946 3947 return ret; 3948 } 3949 3950 /** 3951 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 3952 * @tx_ring: tx ring to clean 3953 * @budget: how many cleans we're allowed 3954 * 3955 * Returns true if there's any budget left (e.g. the clean is finished) 3956 **/ 3957 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3958 { 3959 struct i40e_vsi *vsi = tx_ring->vsi; 3960 u16 i = tx_ring->next_to_clean; 3961 struct i40e_tx_buffer *tx_buf; 3962 struct i40e_tx_desc *tx_desc; 3963 3964 tx_buf = &tx_ring->tx_bi[i]; 3965 tx_desc = I40E_TX_DESC(tx_ring, i); 3966 i -= tx_ring->count; 3967 3968 do { 3969 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3970 3971 /* if next_to_watch is not set then there is no work pending */ 3972 if (!eop_desc) 3973 break; 3974 3975 /* prevent any other reads prior to eop_desc */ 3976 smp_rmb(); 3977 3978 /* if the descriptor isn't done, no work yet to do */ 3979 if (!(eop_desc->cmd_type_offset_bsz & 3980 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3981 break; 3982 3983 /* clear next_to_watch to prevent false hangs */ 3984 tx_buf->next_to_watch = NULL; 3985 3986 tx_desc->buffer_addr = 0; 3987 tx_desc->cmd_type_offset_bsz = 0; 3988 /* move past filter desc */ 3989 tx_buf++; 3990 tx_desc++; 3991 i++; 3992 if (unlikely(!i)) { 3993 i -= tx_ring->count; 3994 tx_buf = tx_ring->tx_bi; 3995 tx_desc = I40E_TX_DESC(tx_ring, 0); 3996 } 3997 /* unmap skb header data */ 3998 dma_unmap_single(tx_ring->dev, 3999 dma_unmap_addr(tx_buf, dma), 4000 dma_unmap_len(tx_buf, len), 4001 DMA_TO_DEVICE); 4002 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 4003 kfree(tx_buf->raw_buf); 4004 4005 tx_buf->raw_buf = NULL; 4006 tx_buf->tx_flags = 0; 4007 tx_buf->next_to_watch = NULL; 4008 dma_unmap_len_set(tx_buf, len, 0); 4009 tx_desc->buffer_addr = 0; 4010 tx_desc->cmd_type_offset_bsz = 0; 4011 4012 /* move us past the eop_desc for start of next FD desc */ 4013 tx_buf++; 4014 tx_desc++; 4015 i++; 4016 if (unlikely(!i)) { 4017 i -= tx_ring->count; 4018 tx_buf = tx_ring->tx_bi; 4019 tx_desc = I40E_TX_DESC(tx_ring, 0); 4020 } 4021 4022 /* update budget accounting */ 4023 budget--; 4024 } while (likely(budget)); 4025 4026 i += tx_ring->count; 4027 tx_ring->next_to_clean = i; 4028 4029 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) 4030 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); 4031 4032 return budget > 0; 4033 } 4034 4035 /** 4036 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 4037 * @irq: interrupt number 4038 * @data: pointer to a q_vector 4039 **/ 4040 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 4041 { 4042 struct i40e_q_vector *q_vector = data; 4043 struct i40e_vsi *vsi; 4044 4045 if (!q_vector->tx.ring) 4046 return 
IRQ_HANDLED; 4047 4048 vsi = q_vector->tx.ring->vsi; 4049 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 4050 4051 return IRQ_HANDLED; 4052 } 4053 4054 /** 4055 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 4056 * @vsi: the VSI being configured 4057 * @v_idx: vector index 4058 * @qp_idx: queue pair index 4059 **/ 4060 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 4061 { 4062 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 4063 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 4064 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 4065 4066 tx_ring->q_vector = q_vector; 4067 tx_ring->next = q_vector->tx.ring; 4068 q_vector->tx.ring = tx_ring; 4069 q_vector->tx.count++; 4070 4071 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */ 4072 if (i40e_enabled_xdp_vsi(vsi)) { 4073 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; 4074 4075 xdp_ring->q_vector = q_vector; 4076 xdp_ring->next = q_vector->tx.ring; 4077 q_vector->tx.ring = xdp_ring; 4078 q_vector->tx.count++; 4079 } 4080 4081 rx_ring->q_vector = q_vector; 4082 rx_ring->next = q_vector->rx.ring; 4083 q_vector->rx.ring = rx_ring; 4084 q_vector->rx.count++; 4085 } 4086 4087 /** 4088 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 4089 * @vsi: the VSI being configured 4090 * 4091 * This function maps descriptor rings to the queue-specific vectors 4092 * we were allotted through the MSI-X enabling code. Ideally, we'd have 4093 * one vector per queue pair, but on a constrained vector budget, we 4094 * group the queue pairs as "efficiently" as possible. 4095 **/ 4096 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) 4097 { 4098 int qp_remaining = vsi->num_queue_pairs; 4099 int q_vectors = vsi->num_q_vectors; 4100 int num_ringpairs; 4101 int v_start = 0; 4102 int qp_idx = 0; 4103 4104 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to 4105 * group them so there are multiple queues per vector. 4106 * It is also important to go through all the vectors available to be 4107 * sure that if we don't use all the vectors, that the remaining vectors 4108 * are cleared. This is especially important when decreasing the 4109 * number of queues in use. 
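 *
 * For example, 10 queue pairs on 4 vectors come out of the
 * DIV_ROUND_UP() below as 3-3-2-2:
 *   v0: qp 0-2, v1: qp 3-5, v2: qp 6-7, v3: qp 8-9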
4110 */ 4111 for (; v_start < q_vectors; v_start++) { 4112 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; 4113 4114 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 4115 4116 q_vector->num_ringpairs = num_ringpairs; 4117 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; 4118 4119 q_vector->rx.count = 0; 4120 q_vector->tx.count = 0; 4121 q_vector->rx.ring = NULL; 4122 q_vector->tx.ring = NULL; 4123 4124 while (num_ringpairs--) { 4125 i40e_map_vector_to_qp(vsi, v_start, qp_idx); 4126 qp_idx++; 4127 qp_remaining--; 4128 } 4129 } 4130 } 4131 4132 /** 4133 * i40e_vsi_request_irq - Request IRQ from the OS 4134 * @vsi: the VSI being configured 4135 * @basename: name for the vector 4136 **/ 4137 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) 4138 { 4139 struct i40e_pf *pf = vsi->back; 4140 int err; 4141 4142 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 4143 err = i40e_vsi_request_irq_msix(vsi, basename); 4144 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 4145 err = request_irq(pf->pdev->irq, i40e_intr, 0, 4146 pf->int_name, pf); 4147 else 4148 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 4149 pf->int_name, pf); 4150 4151 if (err) 4152 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 4153 4154 return err; 4155 } 4156 4157 #ifdef CONFIG_NET_POLL_CONTROLLER 4158 /** 4159 * i40e_netpoll - A Polling 'interrupt' handler 4160 * @netdev: network interface device structure 4161 * 4162 * This is used by netconsole to send skbs without having to re-enable 4163 * interrupts. It's not called while the normal interrupt routine is executing. 4164 **/ 4165 static void i40e_netpoll(struct net_device *netdev) 4166 { 4167 struct i40e_netdev_priv *np = netdev_priv(netdev); 4168 struct i40e_vsi *vsi = np->vsi; 4169 struct i40e_pf *pf = vsi->back; 4170 int i; 4171 4172 /* if interface is down do nothing */ 4173 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 4174 return; 4175 4176 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4177 for (i = 0; i < vsi->num_q_vectors; i++) 4178 i40e_msix_clean_rings(0, vsi->q_vectors[i]); 4179 } else { 4180 i40e_intr(pf->pdev->irq, netdev); 4181 } 4182 } 4183 #endif 4184 4185 #define I40E_QTX_ENA_WAIT_COUNT 50 4186 4187 /** 4188 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled 4189 * @pf: the PF being configured 4190 * @pf_q: the PF queue 4191 * @enable: enable or disable state of the queue 4192 * 4193 * This routine will wait for the given Tx queue of the PF to reach the 4194 * enabled or disabled state. 4195 * Returns -ETIMEDOUT in case of failing to reach the requested state after 4196 * multiple retries; else will return 0 in case of success. 4197 **/ 4198 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 4199 { 4200 int i; 4201 u32 tx_reg; 4202 4203 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 4204 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 4205 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 4206 break; 4207 4208 usleep_range(10, 20); 4209 } 4210 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 4211 return -ETIMEDOUT; 4212 4213 return 0; 4214 } 4215 4216 /** 4217 * i40e_control_tx_q - Start or stop a particular Tx queue 4218 * @pf: the PF structure 4219 * @pf_q: the PF queue to configure 4220 * @enable: start or stop the queue 4221 * 4222 * This function enables or disables a single queue. Note that any delay 4223 * required after the operation is expected to be handled by the caller of 4224 * this function. 
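 *
 * The wait loop below (1-2 ms per try) lets a previously issued
 * QENA_REQ settle into QENA_STAT before the request bit is flipped,
 * so back-to-back enable/disable requests do not trample each other.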
4225 **/ 4226 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) 4227 { 4228 struct i40e_hw *hw = &pf->hw; 4229 u32 tx_reg; 4230 int i; 4231 4232 /* warn the TX unit of coming changes */ 4233 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 4234 if (!enable) 4235 usleep_range(10, 20); 4236 4237 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { 4238 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 4239 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 4240 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 4241 break; 4242 usleep_range(1000, 2000); 4243 } 4244 4245 /* Skip if the queue is already in the requested state */ 4246 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 4247 return; 4248 4249 /* turn on/off the queue */ 4250 if (enable) { 4251 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 4252 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 4253 } else { 4254 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 4255 } 4256 4257 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 4258 } 4259 4260 /** 4261 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion 4262 * @seid: VSI SEID 4263 * @pf: the PF structure 4264 * @pf_q: the PF queue to configure 4265 * @is_xdp: true if the queue is used for XDP 4266 * @enable: start or stop the queue 4267 **/ 4268 static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, 4269 bool is_xdp, bool enable) 4270 { 4271 int ret; 4272 4273 i40e_control_tx_q(pf, pf_q, enable); 4274 4275 /* wait for the change to finish */ 4276 ret = i40e_pf_txq_wait(pf, pf_q, enable); 4277 if (ret) { 4278 dev_info(&pf->pdev->dev, 4279 "VSI seid %d %sTx ring %d %sable timeout\n", 4280 seid, (is_xdp ? "XDP " : ""), pf_q, 4281 (enable ? "en" : "dis")); 4282 } 4283 4284 return ret; 4285 } 4286 4287 /** 4288 * i40e_vsi_control_tx - Start or stop a VSI's rings 4289 * @vsi: the VSI being configured 4290 * @enable: start or stop the rings 4291 **/ 4292 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 4293 { 4294 struct i40e_pf *pf = vsi->back; 4295 int i, pf_q, ret = 0; 4296 4297 pf_q = vsi->base_queue; 4298 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4299 ret = i40e_control_wait_tx_q(vsi->seid, pf, 4300 pf_q, 4301 false /*is xdp*/, enable); 4302 if (ret) 4303 break; 4304 4305 if (!i40e_enabled_xdp_vsi(vsi)) 4306 continue; 4307 4308 ret = i40e_control_wait_tx_q(vsi->seid, pf, 4309 pf_q + vsi->alloc_queue_pairs, 4310 true /*is xdp*/, enable); 4311 if (ret) 4312 break; 4313 } 4314 4315 return ret; 4316 } 4317 4318 /** 4319 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 4320 * @pf: the PF being configured 4321 * @pf_q: the PF queue 4322 * @enable: enable or disable state of the queue 4323 * 4324 * This routine will wait for the given Rx queue of the PF to reach the 4325 * enabled or disabled state. 4326 * Returns -ETIMEDOUT in case of failing to reach the requested state after 4327 * multiple retries; else will return 0 in case of success. 
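 *
 * Each retry sleeps 10-20 us, so the total wait is bounded by
 * roughly I40E_QUEUE_WAIT_RETRY_LIMIT * 20 us.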
4328 **/ 4329 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) 4330 { 4331 int i; 4332 u32 rx_reg; 4333 4334 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 4335 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); 4336 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 4337 break; 4338 4339 usleep_range(10, 20); 4340 } 4341 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 4342 return -ETIMEDOUT; 4343 4344 return 0; 4345 } 4346 4347 /** 4348 * i40e_control_rx_q - Start or stop a particular Rx queue 4349 * @pf: the PF structure 4350 * @pf_q: the PF queue to configure 4351 * @enable: start or stop the queue 4352 * 4353 * This function enables or disables a single queue. Note that any delay 4354 * required after the operation is expected to be handled by the caller of 4355 * this function. 4356 **/ 4357 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) 4358 { 4359 struct i40e_hw *hw = &pf->hw; 4360 u32 rx_reg; 4361 int i; 4362 4363 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { 4364 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 4365 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == 4366 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) 4367 break; 4368 usleep_range(1000, 2000); 4369 } 4370 4371 /* Skip if the queue is already in the requested state */ 4372 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 4373 return; 4374 4375 /* turn on/off the queue */ 4376 if (enable) 4377 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; 4378 else 4379 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; 4380 4381 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); 4382 } 4383 4384 /** 4385 * i40e_vsi_control_rx - Start or stop a VSI's rings 4386 * @vsi: the VSI being configured 4387 * @enable: start or stop the rings 4388 **/ 4389 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) 4390 { 4391 struct i40e_pf *pf = vsi->back; 4392 int i, pf_q, ret = 0; 4393 4394 pf_q = vsi->base_queue; 4395 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4396 i40e_control_rx_q(pf, pf_q, enable); 4397 4398 /* wait for the change to finish */ 4399 ret = i40e_pf_rxq_wait(pf, pf_q, enable); 4400 if (ret) { 4401 dev_info(&pf->pdev->dev, 4402 "VSI seid %d Rx ring %d %sable timeout\n", 4403 vsi->seid, pf_q, (enable ? "en" : "dis")); 4404 break; 4405 } 4406 } 4407 4408 /* Due to HW errata, on Rx disable only, the register can indicate done 4409 * before it really is. 
Needs 50ms to be sure 4410 */ 4411 if (!enable) 4412 mdelay(50); 4413 4414 return ret; 4415 } 4416 4417 /** 4418 * i40e_vsi_start_rings - Start a VSI's rings 4419 * @vsi: the VSI being configured 4420 **/ 4421 int i40e_vsi_start_rings(struct i40e_vsi *vsi) 4422 { 4423 int ret = 0; 4424 4425 /* do rx first for enable and last for disable */ 4426 ret = i40e_vsi_control_rx(vsi, true); 4427 if (ret) 4428 return ret; 4429 ret = i40e_vsi_control_tx(vsi, true); 4430 4431 return ret; 4432 } 4433 4434 /** 4435 * i40e_vsi_stop_rings - Stop a VSI's rings 4436 * @vsi: the VSI being configured 4437 **/ 4438 void i40e_vsi_stop_rings(struct i40e_vsi *vsi) 4439 { 4440 /* When port TX is suspended, don't wait */ 4441 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) 4442 return i40e_vsi_stop_rings_no_wait(vsi); 4443 4444 /* do rx first for enable and last for disable 4445 * Ignore return value, we need to shutdown whatever we can 4446 */ 4447 i40e_vsi_control_tx(vsi, false); 4448 i40e_vsi_control_rx(vsi, false); 4449 } 4450 4451 /** 4452 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay 4453 * @vsi: the VSI being shutdown 4454 * 4455 * This function stops all the rings for a VSI but does not delay to verify 4456 * that rings have been disabled. It is expected that the caller is shutting 4457 * down multiple VSIs at once and will delay together for all the VSIs after 4458 * initiating the shutdown. This is particularly useful for shutting down lots 4459 * of VFs together. Otherwise, a large delay can be incurred while configuring 4460 * each VSI in serial. 4461 **/ 4462 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi) 4463 { 4464 struct i40e_pf *pf = vsi->back; 4465 int i, pf_q; 4466 4467 pf_q = vsi->base_queue; 4468 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4469 i40e_control_tx_q(pf, pf_q, false); 4470 i40e_control_rx_q(pf, pf_q, false); 4471 } 4472 } 4473 4474 /** 4475 * i40e_vsi_free_irq - Free the irq association with the OS 4476 * @vsi: the VSI being configured 4477 **/ 4478 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) 4479 { 4480 struct i40e_pf *pf = vsi->back; 4481 struct i40e_hw *hw = &pf->hw; 4482 int base = vsi->base_vector; 4483 u32 val, qp; 4484 int i; 4485 4486 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4487 if (!vsi->q_vectors) 4488 return; 4489 4490 if (!vsi->irqs_ready) 4491 return; 4492 4493 vsi->irqs_ready = false; 4494 for (i = 0; i < vsi->num_q_vectors; i++) { 4495 int irq_num; 4496 u16 vector; 4497 4498 vector = i + base; 4499 irq_num = pf->msix_entries[vector].vector; 4500 4501 /* free only the irqs that were actually requested */ 4502 if (!vsi->q_vectors[i] || 4503 !vsi->q_vectors[i]->num_ringpairs) 4504 continue; 4505 4506 /* clear the affinity notifier in the IRQ descriptor */ 4507 irq_set_affinity_notifier(irq_num, NULL); 4508 /* remove our suggested affinity mask for this IRQ */ 4509 irq_set_affinity_hint(irq_num, NULL); 4510 synchronize_irq(irq_num); 4511 free_irq(irq_num, vsi->q_vectors[i]); 4512 4513 /* Tear down the interrupt queue link list 4514 * 4515 * We know that they come in pairs and always 4516 * the Rx first, then the Tx. To clear the 4517 * link list, stick the EOL value into the 4518 * next_q field of the registers. 
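 *
 * Without XDP, a two-pair vector's chain looks like
 *   LNKLSTN -> Rx0 -> Tx0 -> Rx1 -> Tx1 -> EOL
 * and the loop below walks it pair by pair, clearing each
 * queue's cause-enable along the way.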
4519 */ 4520 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 4521 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4522 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4523 val |= I40E_QUEUE_END_OF_LIST 4524 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4525 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 4526 4527 while (qp != I40E_QUEUE_END_OF_LIST) { 4528 u32 next; 4529 4530 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4531 4532 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4533 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4534 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4535 I40E_QINT_RQCTL_INTEVENT_MASK); 4536 4537 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4538 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4539 4540 wr32(hw, I40E_QINT_RQCTL(qp), val); 4541 4542 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4543 4544 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 4545 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 4546 4547 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4548 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4549 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4550 I40E_QINT_TQCTL_INTEVENT_MASK); 4551 4552 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4553 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4554 4555 wr32(hw, I40E_QINT_TQCTL(qp), val); 4556 qp = next; 4557 } 4558 } 4559 } else { 4560 free_irq(pf->pdev->irq, pf); 4561 4562 val = rd32(hw, I40E_PFINT_LNKLST0); 4563 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4564 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4565 val |= I40E_QUEUE_END_OF_LIST 4566 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 4567 wr32(hw, I40E_PFINT_LNKLST0, val); 4568 4569 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4570 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4571 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4572 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4573 I40E_QINT_RQCTL_INTEVENT_MASK); 4574 4575 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4576 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4577 4578 wr32(hw, I40E_QINT_RQCTL(qp), val); 4579 4580 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4581 4582 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4583 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4584 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4585 I40E_QINT_TQCTL_INTEVENT_MASK); 4586 4587 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4588 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4589 4590 wr32(hw, I40E_QINT_TQCTL(qp), val); 4591 } 4592 } 4593 4594 /** 4595 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 4596 * @vsi: the VSI being configured 4597 * @v_idx: Index of vector to be freed 4598 * 4599 * This function frees the memory allocated to the q_vector. In addition if 4600 * NAPI is enabled it will delete any references to the NAPI struct prior 4601 * to freeing the q_vector. 4602 **/ 4603 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 4604 { 4605 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 4606 struct i40e_ring *ring; 4607 4608 if (!q_vector) 4609 return; 4610 4611 /* disassociate q_vector from rings */ 4612 i40e_for_each_ring(ring, q_vector->tx) 4613 ring->q_vector = NULL; 4614 4615 i40e_for_each_ring(ring, q_vector->rx) 4616 ring->q_vector = NULL; 4617 4618 /* only VSI w/ an associated netdev is set up w/ NAPI */ 4619 if (vsi->netdev) 4620 netif_napi_del(&q_vector->napi); 4621 4622 vsi->q_vectors[v_idx] = NULL; 4623 4624 kfree_rcu(q_vector, rcu); 4625 } 4626 4627 /** 4628 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 4629 * @vsi: the VSI being un-configured 4630 * 4631 * This frees the memory allocated to the q_vectors and 4632 * deletes references to the NAPI struct. 
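 * The q_vector memory itself is released via kfree_rcu(), so readers
 * still inside an RCU read-side section can finish before it goes.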
4633 **/ 4634 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 4635 { 4636 int v_idx; 4637 4638 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 4639 i40e_free_q_vector(vsi, v_idx); 4640 } 4641 4642 /** 4643 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 4644 * @pf: board private structure 4645 **/ 4646 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 4647 { 4648 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 4649 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4650 pci_disable_msix(pf->pdev); 4651 kfree(pf->msix_entries); 4652 pf->msix_entries = NULL; 4653 kfree(pf->irq_pile); 4654 pf->irq_pile = NULL; 4655 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 4656 pci_disable_msi(pf->pdev); 4657 } 4658 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 4659 } 4660 4661 /** 4662 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 4663 * @pf: board private structure 4664 * 4665 * We go through and clear interrupt specific resources and reset the structure 4666 * to pre-load conditions 4667 **/ 4668 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 4669 { 4670 int i; 4671 4672 i40e_free_misc_vector(pf); 4673 4674 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, 4675 I40E_IWARP_IRQ_PILE_ID); 4676 4677 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 4678 for (i = 0; i < pf->num_alloc_vsi; i++) 4679 if (pf->vsi[i]) 4680 i40e_vsi_free_q_vectors(pf->vsi[i]); 4681 i40e_reset_interrupt_capability(pf); 4682 } 4683 4684 /** 4685 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 4686 * @vsi: the VSI being configured 4687 **/ 4688 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 4689 { 4690 int q_idx; 4691 4692 if (!vsi->netdev) 4693 return; 4694 4695 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { 4696 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; 4697 4698 if (q_vector->rx.ring || q_vector->tx.ring) 4699 napi_enable(&q_vector->napi); 4700 } 4701 } 4702 4703 /** 4704 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4705 * @vsi: the VSI being configured 4706 **/ 4707 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 4708 { 4709 int q_idx; 4710 4711 if (!vsi->netdev) 4712 return; 4713 4714 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { 4715 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; 4716 4717 if (q_vector->rx.ring || q_vector->tx.ring) 4718 napi_disable(&q_vector->napi); 4719 } 4720 } 4721 4722 /** 4723 * i40e_vsi_close - Shut down a VSI 4724 * @vsi: the vsi to be quelled 4725 **/ 4726 static void i40e_vsi_close(struct i40e_vsi *vsi) 4727 { 4728 struct i40e_pf *pf = vsi->back; 4729 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) 4730 i40e_down(vsi); 4731 i40e_vsi_free_irq(vsi); 4732 i40e_vsi_free_tx_resources(vsi); 4733 i40e_vsi_free_rx_resources(vsi); 4734 vsi->current_netdev_flags = 0; 4735 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; 4736 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 4737 pf->flags |= I40E_FLAG_CLIENT_RESET; 4738 } 4739 4740 /** 4741 * i40e_quiesce_vsi - Pause a given VSI 4742 * @vsi: the VSI being paused 4743 **/ 4744 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 4745 { 4746 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 4747 return; 4748 4749 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); 4750 if (vsi->netdev && netif_running(vsi->netdev)) 4751 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 4752 else 4753 i40e_vsi_close(vsi); 4754 } 4755 4756 /** 4757 * 
i40e_unquiesce_vsi - Resume a given VSI 4758 * @vsi: the VSI being resumed 4759 **/ 4760 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 4761 { 4762 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) 4763 return; 4764 4765 if (vsi->netdev && netif_running(vsi->netdev)) 4766 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 4767 else 4768 i40e_vsi_open(vsi); /* this clears the DOWN bit */ 4769 } 4770 4771 /** 4772 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 4773 * @pf: the PF 4774 **/ 4775 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 4776 { 4777 int v; 4778 4779 for (v = 0; v < pf->num_alloc_vsi; v++) { 4780 if (pf->vsi[v]) 4781 i40e_quiesce_vsi(pf->vsi[v]); 4782 } 4783 } 4784 4785 /** 4786 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 4787 * @pf: the PF 4788 **/ 4789 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 4790 { 4791 int v; 4792 4793 for (v = 0; v < pf->num_alloc_vsi; v++) { 4794 if (pf->vsi[v]) 4795 i40e_unquiesce_vsi(pf->vsi[v]); 4796 } 4797 } 4798 4799 /** 4800 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled 4801 * @vsi: the VSI being configured 4802 * 4803 * Wait until all queues on a given VSI have been disabled. 4804 **/ 4805 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) 4806 { 4807 struct i40e_pf *pf = vsi->back; 4808 int i, pf_q, ret; 4809 4810 pf_q = vsi->base_queue; 4811 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4812 /* Check and wait for the Tx queue */ 4813 ret = i40e_pf_txq_wait(pf, pf_q, false); 4814 if (ret) { 4815 dev_info(&pf->pdev->dev, 4816 "VSI seid %d Tx ring %d disable timeout\n", 4817 vsi->seid, pf_q); 4818 return ret; 4819 } 4820 4821 if (!i40e_enabled_xdp_vsi(vsi)) 4822 goto wait_rx; 4823 4824 /* Check and wait for the XDP Tx queue */ 4825 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, 4826 false); 4827 if (ret) { 4828 dev_info(&pf->pdev->dev, 4829 "VSI seid %d XDP Tx ring %d disable timeout\n", 4830 vsi->seid, pf_q); 4831 return ret; 4832 } 4833 wait_rx: 4834 /* Check and wait for the Rx queue */ 4835 ret = i40e_pf_rxq_wait(pf, pf_q, false); 4836 if (ret) { 4837 dev_info(&pf->pdev->dev, 4838 "VSI seid %d Rx ring %d disable timeout\n", 4839 vsi->seid, pf_q); 4840 return ret; 4841 } 4842 } 4843 4844 return 0; 4845 } 4846 4847 #ifdef CONFIG_I40E_DCB 4848 /** 4849 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled 4850 * @pf: the PF 4851 * 4852 * This function waits for the queues to be in disabled state for all the 4853 * VSIs that are managed by this PF. 4854 **/ 4855 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) 4856 { 4857 int v, ret = 0; 4858 4859 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4860 if (pf->vsi[v]) { 4861 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); 4862 if (ret) 4863 break; 4864 } 4865 } 4866 4867 return ret; 4868 } 4869 4870 #endif 4871 4872 /** 4873 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP 4874 * @pf: pointer to PF 4875 * 4876 * Get TC map for ISCSI PF type that will include iSCSI TC 4877 * and LAN TC. 
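 *
 * For example, if the iSCSI APP TLV maps its priority to TC1, the
 * returned bitmap is 0x3: TC0 (always set) plus TC1.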
4878 **/ 4879 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) 4880 { 4881 struct i40e_dcb_app_priority_table app; 4882 struct i40e_hw *hw = &pf->hw; 4883 u8 enabled_tc = 1; /* TC0 is always enabled */ 4884 u8 tc, i; 4885 /* Get the iSCSI APP TLV */ 4886 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4887 4888 for (i = 0; i < dcbcfg->numapps; i++) { 4889 app = dcbcfg->app[i]; 4890 if (app.selector == I40E_APP_SEL_TCPIP && 4891 app.protocolid == I40E_APP_PROTOID_ISCSI) { 4892 tc = dcbcfg->etscfg.prioritytable[app.priority]; 4893 enabled_tc |= BIT(tc); 4894 break; 4895 } 4896 } 4897 4898 return enabled_tc; 4899 } 4900 4901 /** 4902 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 4903 * @dcbcfg: the corresponding DCBx configuration structure 4904 * 4905 * Return the number of TCs from given DCBx configuration 4906 **/ 4907 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 4908 { 4909 int i, tc_unused = 0; 4910 u8 num_tc = 0; 4911 u8 ret = 0; 4912 4913 /* Scan the ETS Config Priority Table to find 4914 * traffic class enabled for a given priority 4915 * and create a bitmask of enabled TCs 4916 */ 4917 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) 4918 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); 4919 4920 /* Now scan the bitmask to check for 4921 * contiguous TCs starting with TC0 4922 */ 4923 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4924 if (num_tc & BIT(i)) { 4925 if (!tc_unused) { 4926 ret++; 4927 } else { 4928 pr_err("Non-contiguous TC - Disabling DCB\n"); 4929 return 1; 4930 } 4931 } else { 4932 tc_unused = 1; 4933 } 4934 } 4935 4936 /* There is always at least TC0 */ 4937 if (!ret) 4938 ret = 1; 4939 4940 return ret; 4941 } 4942 4943 /** 4944 * i40e_dcb_get_enabled_tc - Get enabled traffic classes 4945 * @dcbcfg: the corresponding DCBx configuration structure 4946 * 4947 * Query the current DCB configuration and return the number of 4948 * traffic classes enabled from the given DCBX config 4949 **/ 4950 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) 4951 { 4952 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); 4953 u8 enabled_tc = 1; 4954 u8 i; 4955 4956 for (i = 0; i < num_tc; i++) 4957 enabled_tc |= BIT(i); 4958 4959 return enabled_tc; 4960 } 4961 4962 /** 4963 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes 4964 * @pf: PF being queried 4965 * 4966 * Query the current MQPRIO configuration and return the number of 4967 * traffic classes enabled. 
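 *
 * Note the value is returned as a TC bitmap: e.g. num_tc == 3 yields
 * 0x7 (TC0-TC2), and TC0 is always set.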
4968 **/ 4969 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) 4970 { 4971 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 4972 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; 4973 u8 enabled_tc = 1, i; 4974 4975 for (i = 1; i < num_tc; i++) 4976 enabled_tc |= BIT(i); 4977 return enabled_tc; 4978 } 4979 4980 /** 4981 * i40e_pf_get_num_tc - Get enabled traffic classes for PF 4982 * @pf: PF being queried 4983 * 4984 * Return number of traffic classes enabled for the given PF 4985 **/ 4986 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) 4987 { 4988 struct i40e_hw *hw = &pf->hw; 4989 u8 i, enabled_tc = 1; 4990 u8 num_tc = 0; 4991 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4992 4993 if (pf->flags & I40E_FLAG_TC_MQPRIO) 4994 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; 4995 4996 /* If neither MQPRIO nor DCB is enabled, then always use single TC */ 4997 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4998 return 1; 4999 5000 /* SFP mode will be enabled for all TCs on port */ 5001 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 5002 return i40e_dcb_get_num_tc(dcbcfg); 5003 5004 /* In MFP mode return the count of enabled TCs for this PF */ 5005 if (pf->hw.func_caps.iscsi) 5006 enabled_tc = i40e_get_iscsi_tc_map(pf); 5007 else 5008 return 1; /* Only TC0 */ 5009 5010 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 5011 if (enabled_tc & BIT(i)) 5012 num_tc++; 5013 } 5014 return num_tc; 5015 } 5016 5017 /** 5018 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes 5019 * @pf: PF being queried 5020 * 5021 * Return a bitmap for enabled traffic classes for this PF. 5022 **/ 5023 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) 5024 { 5025 if (pf->flags & I40E_FLAG_TC_MQPRIO) 5026 return i40e_mqprio_get_enabled_tc(pf); 5027 5028 /* If neither MQPRIO nor DCB is enabled for this PF then just return 5029 * default TC 5030 */ 5031 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 5032 return I40E_DEFAULT_TRAFFIC_CLASS; 5033 5034 /* SFP mode we want PF to be enabled for all TCs */ 5035 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 5036 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); 5037 5038 /* MFP enabled and iSCSI PF type */ 5039 if (pf->hw.func_caps.iscsi) 5040 return i40e_get_iscsi_tc_map(pf); 5041 else 5042 return I40E_DEFAULT_TRAFFIC_CLASS; 5043 } 5044 5045 /** 5046 * i40e_vsi_get_bw_info - Query VSI BW Information 5047 * @vsi: the VSI being queried 5048 * 5049 * Returns 0 on success, negative value on failure 5050 **/ 5051 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) 5052 { 5053 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; 5054 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 5055 struct i40e_pf *pf = vsi->back; 5056 struct i40e_hw *hw = &pf->hw; 5057 i40e_status ret; 5058 u32 tc_bw_max; 5059 int i; 5060 5061 /* Get the VSI level BW configuration */ 5062 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 5063 if (ret) { 5064 dev_info(&pf->pdev->dev, 5065 "couldn't get PF vsi bw config, err %s aq_err %s\n", 5066 i40e_stat_str(&pf->hw, ret), 5067 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5068 return -EINVAL; 5069 } 5070 5071 /* Get the VSI level BW configuration per TC */ 5072 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, 5073 NULL); 5074 if (ret) { 5075 dev_info(&pf->pdev->dev, 5076 "couldn't get PF vsi ets bw config, err %s aq_err %s\n", 5077 i40e_stat_str(&pf->hw, ret), 5078 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5079 return -EINVAL; 5080 } 5081 5082 if (bw_config.tc_valid_bits !=
bw_ets_config.tc_valid_bits) { 5083 dev_info(&pf->pdev->dev, 5084 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", 5085 bw_config.tc_valid_bits, 5086 bw_ets_config.tc_valid_bits); 5087 /* Still continuing */ 5088 } 5089 5090 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); 5091 vsi->bw_max_quanta = bw_config.max_bw; 5092 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | 5093 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); 5094 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 5095 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; 5096 vsi->bw_ets_limit_credits[i] = 5097 le16_to_cpu(bw_ets_config.credits[i]); 5098 /* 3 bits out of 4 for each TC */ 5099 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 5100 } 5101 5102 return 0; 5103 } 5104 5105 /** 5106 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC 5107 * @vsi: the VSI being configured 5108 * @enabled_tc: TC bitmap 5109 * @bw_credits: BW shared credits per TC 5110 * 5111 * Returns 0 on success, negative value on failure 5112 **/ 5113 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, 5114 u8 *bw_share) 5115 { 5116 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 5117 i40e_status ret; 5118 int i; 5119 5120 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) 5121 return 0; 5122 if (!vsi->mqprio_qopt.qopt.hw) { 5123 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); 5124 if (ret) 5125 dev_info(&vsi->back->pdev->dev, 5126 "Failed to reset tx rate for vsi->seid %u\n", 5127 vsi->seid); 5128 return ret; 5129 } 5130 bw_data.tc_valid_bits = enabled_tc; 5131 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 5132 bw_data.tc_bw_credits[i] = bw_share[i]; 5133 5134 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, 5135 NULL); 5136 if (ret) { 5137 dev_info(&vsi->back->pdev->dev, 5138 "AQ command Config VSI BW allocation per TC failed = %d\n", 5139 vsi->back->hw.aq.asq_last_status); 5140 return -EINVAL; 5141 } 5142 5143 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 5144 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 5145 5146 return 0; 5147 } 5148 5149 /** 5150 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration 5151 * @vsi: the VSI being configured 5152 * @enabled_tc: TC map to be enabled 5153 * 5154 **/ 5155 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) 5156 { 5157 struct net_device *netdev = vsi->netdev; 5158 struct i40e_pf *pf = vsi->back; 5159 struct i40e_hw *hw = &pf->hw; 5160 u8 netdev_tc = 0; 5161 int i; 5162 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 5163 5164 if (!netdev) 5165 return; 5166 5167 if (!enabled_tc) { 5168 netdev_reset_tc(netdev); 5169 return; 5170 } 5171 5172 /* Set up actual enabled TCs on the VSI */ 5173 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) 5174 return; 5175 5176 /* set per TC queues for the VSI */ 5177 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 5178 /* Only set TC queues for enabled tcs 5179 * 5180 * e.g. For a VSI that has TC0 and TC3 enabled the 5181 * enabled_tc bitmap would be 0x00001001; the driver 5182 * will set the numtc for netdev as 2 that will be 5183 * referenced by the netdev layer as TC 0 and 1. 
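 * (The "0x00001001" above is the bitmap written out bit by bit; as a
 * hex value it is 0x9, i.e. BIT(3) | BIT(0).)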
5184 */ 5185 if (vsi->tc_config.enabled_tc & BIT(i)) 5186 netdev_set_tc_queue(netdev, 5187 vsi->tc_config.tc_info[i].netdev_tc, 5188 vsi->tc_config.tc_info[i].qcount, 5189 vsi->tc_config.tc_info[i].qoffset); 5190 } 5191 5192 if (pf->flags & I40E_FLAG_TC_MQPRIO) 5193 return; 5194 5195 /* Assign UP2TC map for the VSI */ 5196 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 5197 /* Get the actual TC# for the UP */ 5198 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; 5199 /* Get the mapped netdev TC# for the UP */ 5200 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; 5201 netdev_set_prio_tc_map(netdev, i, netdev_tc); 5202 } 5203 } 5204 5205 /** 5206 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map 5207 * @vsi: the VSI being configured 5208 * @ctxt: the ctxt buffer returned from AQ VSI update param command 5209 **/ 5210 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, 5211 struct i40e_vsi_context *ctxt) 5212 { 5213 /* copy just the sections touched, not the entire info, 5214 * since not all sections are valid as returned by 5215 * update vsi params 5216 */ 5217 vsi->info.mapping_flags = ctxt->info.mapping_flags; 5218 memcpy(&vsi->info.queue_mapping, 5219 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); 5220 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, 5221 sizeof(vsi->info.tc_mapping)); 5222 } 5223 5224 /** 5225 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map 5226 * @vsi: VSI to be configured 5227 * @enabled_tc: TC bitmap 5228 * 5229 * This configures a particular VSI for TCs that are mapped to the 5230 * given TC bitmap. It uses default bandwidth share for TCs across 5231 * VSIs to configure TC for a particular VSI. 5232 * 5233 * NOTE: 5234 * It is expected that the VSI queues have been quiesced before calling 5235 * this function. 5236 **/ 5237 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) 5238 { 5239 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; 5240 struct i40e_pf *pf = vsi->back; 5241 struct i40e_hw *hw = &pf->hw; 5242 struct i40e_vsi_context ctxt; 5243 int ret = 0; 5244 int i; 5245 5246 /* Check if enabled_tc is same as existing or new TCs */ 5247 if (vsi->tc_config.enabled_tc == enabled_tc && 5248 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) 5249 return ret; 5250 5251 /* Enable ETS TCs with equal BW Share for now across all VSIs */ 5252 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 5253 if (enabled_tc & BIT(i)) 5254 bw_share[i] = 1; 5255 } 5256 5257 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); 5258 if (ret) { 5259 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 5260 5261 dev_info(&pf->pdev->dev, 5262 "Failed configuring TC map %d for VSI %d\n", 5263 enabled_tc, vsi->seid); 5264 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, 5265 &bw_config, NULL); 5266 if (ret) { 5267 dev_info(&pf->pdev->dev, 5268 "Failed querying vsi bw info, err %s aq_err %s\n", 5269 i40e_stat_str(hw, ret), 5270 i40e_aq_str(hw, hw->aq.asq_last_status)); 5271 goto out; 5272 } 5273 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) { 5274 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc; 5275 5276 if (!valid_tc) 5277 valid_tc = bw_config.tc_valid_bits; 5278 /* Always enable TC0, no matter what */ 5279 valid_tc |= 1; 5280 dev_info(&pf->pdev->dev, 5281 "Requested tc 0x%x, but FW reports 0x%x as valid.
Attempting to use 0x%x.\n", 5282 enabled_tc, bw_config.tc_valid_bits, valid_tc); 5283 enabled_tc = valid_tc; 5284 } 5285 5286 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); 5287 if (ret) { 5288 dev_err(&pf->pdev->dev, 5289 "Unable to configure TC map %d for VSI %d\n", 5290 enabled_tc, vsi->seid); 5291 goto out; 5292 } 5293 } 5294 5295 /* Update Queue Pairs Mapping for currently enabled UPs */ 5296 ctxt.seid = vsi->seid; 5297 ctxt.pf_num = vsi->back->hw.pf_id; 5298 ctxt.vf_num = 0; 5299 ctxt.uplink_seid = vsi->uplink_seid; 5300 ctxt.info = vsi->info; 5301 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { 5302 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); 5303 if (ret) 5304 goto out; 5305 } else { 5306 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 5307 } 5308 5309 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled 5310 * queues changed. 5311 */ 5312 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { 5313 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, 5314 vsi->num_queue_pairs); 5315 ret = i40e_vsi_config_rss(vsi); 5316 if (ret) { 5317 dev_info(&vsi->back->pdev->dev, 5318 "Failed to reconfig rss for num_queues\n"); 5319 return ret; 5320 } 5321 vsi->reconfig_rss = false; 5322 } 5323 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { 5324 ctxt.info.valid_sections |= 5325 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); 5326 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; 5327 } 5328 5329 /* Update the VSI after updating the VSI queue-mapping 5330 * information 5331 */ 5332 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 5333 if (ret) { 5334 dev_info(&pf->pdev->dev, 5335 "Update vsi tc config failed, err %s aq_err %s\n", 5336 i40e_stat_str(hw, ret), 5337 i40e_aq_str(hw, hw->aq.asq_last_status)); 5338 goto out; 5339 } 5340 /* update the local VSI info with updated queue map */ 5341 i40e_vsi_update_queue_map(vsi, &ctxt); 5342 vsi->info.valid_sections = 0; 5343 5344 /* Update current VSI BW information */ 5345 ret = i40e_vsi_get_bw_info(vsi); 5346 if (ret) { 5347 dev_info(&pf->pdev->dev, 5348 "Failed updating vsi bw info, err %s aq_err %s\n", 5349 i40e_stat_str(hw, ret), 5350 i40e_aq_str(hw, hw->aq.asq_last_status)); 5351 goto out; 5352 } 5353 5354 /* Update the netdev TC setup */ 5355 i40e_vsi_config_netdev_tc(vsi, enabled_tc); 5356 out: 5357 return ret; 5358 } 5359 5360 /** 5361 * i40e_get_link_speed - Returns link speed for the interface 5362 * @vsi: VSI to be configured 5363 * 5364 **/ 5365 static int i40e_get_link_speed(struct i40e_vsi *vsi) 5366 { 5367 struct i40e_pf *pf = vsi->back; 5368 5369 switch (pf->hw.phy.link_info.link_speed) { 5370 case I40E_LINK_SPEED_40GB: 5371 return 40000; 5372 case I40E_LINK_SPEED_25GB: 5373 return 25000; 5374 case I40E_LINK_SPEED_20GB: 5375 return 20000; 5376 case I40E_LINK_SPEED_10GB: 5377 return 10000; 5378 case I40E_LINK_SPEED_1GB: 5379 return 1000; 5380 default: 5381 return -EINVAL; 5382 } 5383 } 5384 5385 /** 5386 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate 5387 * @vsi: VSI to be configured 5388 * @seid: seid of the channel/VSI 5389 * @max_tx_rate: max TX rate to be configured as BW limit 5390 * 5391 * Helper function to set BW limit for a given VSI 5392 **/ 5393 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) 5394 { 5395 struct i40e_pf *pf = vsi->back; 5396 u64 credits = 0; 5397 int speed = 0; 5398 int ret = 0; 5399 5400 speed = i40e_get_link_speed(vsi); 5401 if (max_tx_rate > speed) { 5402 dev_err(&pf->pdev->dev, 5403 
"Invalid max tx rate %llu specified for VSI seid %d.", 5404 max_tx_rate, seid); 5405 return -EINVAL; 5406 } 5407 if (max_tx_rate && max_tx_rate < 50) { 5408 dev_warn(&pf->pdev->dev, 5409 "Setting max tx rate to minimum usable value of 50Mbps.\n"); 5410 max_tx_rate = 50; 5411 } 5412 5413 /* Tx rate credits are in values of 50Mbps, 0 is disabled */ 5414 credits = max_tx_rate; 5415 do_div(credits, I40E_BW_CREDIT_DIVISOR); 5416 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, 5417 I40E_MAX_BW_INACTIVE_ACCUM, NULL); 5418 if (ret) 5419 dev_err(&pf->pdev->dev, 5420 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", 5421 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), 5422 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5423 return ret; 5424 } 5425 5426 /** 5427 * i40e_remove_queue_channels - Remove queue channels for the TCs 5428 * @vsi: VSI to be configured 5429 * 5430 * Remove queue channels for the TCs 5431 **/ 5432 static void i40e_remove_queue_channels(struct i40e_vsi *vsi) 5433 { 5434 enum i40e_admin_queue_err last_aq_status; 5435 struct i40e_cloud_filter *cfilter; 5436 struct i40e_channel *ch, *ch_tmp; 5437 struct i40e_pf *pf = vsi->back; 5438 struct hlist_node *node; 5439 int ret, i; 5440 5441 /* Reset rss size that was stored when reconfiguring rss for 5442 * channel VSIs with non-power-of-2 queue count. 5443 */ 5444 vsi->current_rss_size = 0; 5445 5446 /* perform cleanup for channels if they exist */ 5447 if (list_empty(&vsi->ch_list)) 5448 return; 5449 5450 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { 5451 struct i40e_vsi *p_vsi; 5452 5453 list_del(&ch->list); 5454 p_vsi = ch->parent_vsi; 5455 if (!p_vsi || !ch->initialized) { 5456 kfree(ch); 5457 continue; 5458 } 5459 /* Reset queue contexts */ 5460 for (i = 0; i < ch->num_queue_pairs; i++) { 5461 struct i40e_ring *tx_ring, *rx_ring; 5462 u16 pf_q; 5463 5464 pf_q = ch->base_queue + i; 5465 tx_ring = vsi->tx_rings[pf_q]; 5466 tx_ring->ch = NULL; 5467 5468 rx_ring = vsi->rx_rings[pf_q]; 5469 rx_ring->ch = NULL; 5470 } 5471 5472 /* Reset BW configured for this VSI via mqprio */ 5473 ret = i40e_set_bw_limit(vsi, ch->seid, 0); 5474 if (ret) 5475 dev_info(&vsi->back->pdev->dev, 5476 "Failed to reset tx rate for ch->seid %u\n", 5477 ch->seid); 5478 5479 /* delete cloud filters associated with this channel */ 5480 hlist_for_each_entry_safe(cfilter, node, 5481 &pf->cloud_filter_list, cloud_node) { 5482 if (cfilter->seid != ch->seid) 5483 continue; 5484 5485 hash_del(&cfilter->cloud_node); 5486 if (cfilter->dst_port) 5487 ret = i40e_add_del_cloud_filter_big_buf(vsi, 5488 cfilter, 5489 false); 5490 else 5491 ret = i40e_add_del_cloud_filter(vsi, cfilter, 5492 false); 5493 last_aq_status = pf->hw.aq.asq_last_status; 5494 if (ret) 5495 dev_info(&pf->pdev->dev, 5496 "Failed to delete cloud filter, err %s aq_err %s\n", 5497 i40e_stat_str(&pf->hw, ret), 5498 i40e_aq_str(&pf->hw, last_aq_status)); 5499 kfree(cfilter); 5500 } 5501 5502 /* delete VSI from FW */ 5503 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, 5504 NULL); 5505 if (ret) 5506 dev_err(&vsi->back->pdev->dev, 5507 "unable to remove channel (%d) for parent VSI(%d)\n", 5508 ch->seid, p_vsi->seid); 5509 kfree(ch); 5510 } 5511 INIT_LIST_HEAD(&vsi->ch_list); 5512 } 5513 5514 /** 5515 * i40e_is_any_channel - channel exist or not 5516 * @vsi: ptr to VSI to which channels are associated with 5517 * 5518 * Returns true or false if channel(s) exist for associated VSI or not 5519 **/ 5520 static bool i40e_is_any_channel(struct i40e_vsi *vsi) 5521 { 

/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
        enum i40e_admin_queue_err last_aq_status;
        struct i40e_cloud_filter *cfilter;
        struct i40e_channel *ch, *ch_tmp;
        struct i40e_pf *pf = vsi->back;
        struct hlist_node *node;
        int ret, i;

        /* Reset rss size that was stored when reconfiguring rss for
         * channel VSIs with non-power-of-2 queue count.
         */
        vsi->current_rss_size = 0;

        /* perform cleanup for channels if they exist */
        if (list_empty(&vsi->ch_list))
                return;

        list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
                struct i40e_vsi *p_vsi;

                list_del(&ch->list);
                p_vsi = ch->parent_vsi;
                if (!p_vsi || !ch->initialized) {
                        kfree(ch);
                        continue;
                }
                /* Reset queue contexts */
                for (i = 0; i < ch->num_queue_pairs; i++) {
                        struct i40e_ring *tx_ring, *rx_ring;
                        u16 pf_q;

                        pf_q = ch->base_queue + i;
                        tx_ring = vsi->tx_rings[pf_q];
                        tx_ring->ch = NULL;

                        rx_ring = vsi->rx_rings[pf_q];
                        rx_ring->ch = NULL;
                }

                /* Reset BW configured for this VSI via mqprio */
                ret = i40e_set_bw_limit(vsi, ch->seid, 0);
                if (ret)
                        dev_info(&vsi->back->pdev->dev,
                                 "Failed to reset tx rate for ch->seid %u\n",
                                 ch->seid);

                /* delete cloud filters associated with this channel */
                hlist_for_each_entry_safe(cfilter, node,
                                          &pf->cloud_filter_list, cloud_node) {
                        if (cfilter->seid != ch->seid)
                                continue;

                        hash_del(&cfilter->cloud_node);
                        if (cfilter->dst_port)
                                ret = i40e_add_del_cloud_filter_big_buf(vsi,
                                                                        cfilter,
                                                                        false);
                        else
                                ret = i40e_add_del_cloud_filter(vsi, cfilter,
                                                                false);
                        last_aq_status = pf->hw.aq.asq_last_status;
                        if (ret)
                                dev_info(&pf->pdev->dev,
                                         "Failed to delete cloud filter, err %s aq_err %s\n",
                                         i40e_stat_str(&pf->hw, ret),
                                         i40e_aq_str(&pf->hw, last_aq_status));
                        kfree(cfilter);
                }

                /* delete VSI from FW */
                ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
                                             NULL);
                if (ret)
                        dev_err(&vsi->back->pdev->dev,
                                "unable to remove channel (%d) for parent VSI(%d)\n",
                                ch->seid, p_vsi->seid);
                kfree(ch);
        }
        INIT_LIST_HEAD(&vsi->ch_list);
}

/**
 * i40e_is_any_channel - is any channel present
 * @vsi: ptr to VSI with which the channels are associated
 *
 * Returns true if at least one initialized channel exists for the
 * given VSI, false otherwise.
 **/
static bool i40e_is_any_channel(struct i40e_vsi *vsi)
{
        struct i40e_channel *ch, *ch_tmp;

        list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
                if (ch->initialized)
                        return true;
        }

        return false;
}

/**
 * i40e_get_max_queues_for_channel - max queue count among channels
 * @vsi: ptr to VSI with which the channels are associated
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
        struct i40e_channel *ch, *ch_tmp;
        int max = 0;

        list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
                if (!ch->initialized)
                        continue;
                if (ch->num_queue_pairs > max)
                        max = ch->num_queue_pairs;
        }

        return max;
}

/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates whether RSS should be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
                                    struct i40e_vsi *vsi, bool *reconfig_rss)
{
        int max_ch_queues;

        if (!reconfig_rss)
                return -EINVAL;

        *reconfig_rss = false;
        if (vsi->current_rss_size) {
                if (num_queues > vsi->current_rss_size) {
                        dev_dbg(&pf->pdev->dev,
                                "Error: num_queues (%d) > vsi's current_size(%d)\n",
                                num_queues, vsi->current_rss_size);
                        return -EINVAL;
                } else if ((num_queues < vsi->current_rss_size) &&
                           (!is_power_of_2(num_queues))) {
                        dev_dbg(&pf->pdev->dev,
                                "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
                                num_queues, vsi->current_rss_size);
                        return -EINVAL;
                }
        }

        if (!is_power_of_2(num_queues)) {
                /* If channels exist, enforce that 'num_queues' is at least
                 * the maximum queue count configured on any existing channel.
                 */
                max_ch_queues = i40e_get_max_queues_for_channel(vsi);
                if (num_queues < max_ch_queues) {
                        dev_dbg(&pf->pdev->dev,
                                "Error: num_queues (%d) < max queues configured for channel(%d)\n",
                                num_queues, max_ch_queues);
                        return -EINVAL;
                }
                *reconfig_rss = true;
        }

        return 0;
}
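
/* Examples of the validation above (illustrative numbers): with a
 * stored current_rss_size of 8, requesting 10 queues fails (> 8) and
 * requesting 6 fails (< 8 but not a power of 2), while 4 passes.  A
 * non-power-of-2 request that does pass (e.g. 6 queues for the first
 * channel) sets *reconfig_rss so the parent VSI's RSS spread is redone
 * before the channel is created.
 */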
5640 */ 5641 if (vsi->rss_hkey_user) 5642 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); 5643 else 5644 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 5645 5646 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); 5647 if (ret) { 5648 dev_info(&pf->pdev->dev, 5649 "Cannot set RSS lut, err %s aq_err %s\n", 5650 i40e_stat_str(hw, ret), 5651 i40e_aq_str(hw, hw->aq.asq_last_status)); 5652 kfree(lut); 5653 return ret; 5654 } 5655 kfree(lut); 5656 5657 /* Do the update w.r.t. storing rss_size */ 5658 if (!vsi->orig_rss_size) 5659 vsi->orig_rss_size = vsi->rss_size; 5660 vsi->current_rss_size = local_rss_size; 5661 5662 return ret; 5663 } 5664 5665 /** 5666 * i40e_channel_setup_queue_map - Setup a channel queue map 5667 * @pf: ptr to PF device 5668 * @vsi: the VSI being setup 5669 * @ctxt: VSI context structure 5670 * @ch: ptr to channel structure 5671 * 5672 * Setup queue map for a specific channel 5673 **/ 5674 static void i40e_channel_setup_queue_map(struct i40e_pf *pf, 5675 struct i40e_vsi_context *ctxt, 5676 struct i40e_channel *ch) 5677 { 5678 u16 qcount, qmap, sections = 0; 5679 u8 offset = 0; 5680 int pow; 5681 5682 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; 5683 sections |= I40E_AQ_VSI_PROP_SCHED_VALID; 5684 5685 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); 5686 ch->num_queue_pairs = qcount; 5687 5688 /* find the next higher power-of-2 of num queue pairs */ 5689 pow = ilog2(qcount); 5690 if (!is_power_of_2(qcount)) 5691 pow++; 5692 5693 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | 5694 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); 5695 5696 /* Setup queue TC[0].qmap for given VSI context */ 5697 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); 5698 5699 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ 5700 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); 5701 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); 5702 ctxt->info.valid_sections |= cpu_to_le16(sections); 5703 } 5704 5705 /** 5706 * i40e_add_channel - add a channel by adding VSI 5707 * @pf: ptr to PF device 5708 * @uplink_seid: underlying HW switching element (VEB) ID 5709 * @ch: ptr to channel structure 5710 * 5711 * Add a channel (VSI) using add_vsi and queue_map 5712 **/ 5713 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, 5714 struct i40e_channel *ch) 5715 { 5716 struct i40e_hw *hw = &pf->hw; 5717 struct i40e_vsi_context ctxt; 5718 u8 enabled_tc = 0x1; /* TC0 enabled */ 5719 int ret; 5720 5721 if (ch->type != I40E_VSI_VMDQ2) { 5722 dev_info(&pf->pdev->dev, 5723 "add new vsi failed, ch->type %d\n", ch->type); 5724 return -EINVAL; 5725 } 5726 5727 memset(&ctxt, 0, sizeof(ctxt)); 5728 ctxt.pf_num = hw->pf_id; 5729 ctxt.vf_num = 0; 5730 ctxt.uplink_seid = uplink_seid; 5731 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 5732 if (ch->type == I40E_VSI_VMDQ2) 5733 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 5734 5735 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { 5736 ctxt.info.valid_sections |= 5737 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 5738 ctxt.info.switch_id = 5739 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 5740 } 5741 5742 /* Set queue map for a given VSI context */ 5743 i40e_channel_setup_queue_map(pf, &ctxt, ch); 5744 5745 /* Now time to create VSI */ 5746 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 5747 if (ret) { 5748 dev_info(&pf->pdev->dev, 5749 "add new vsi failed, err %s aq_err %s\n", 5750 i40e_stat_str(&pf->hw, ret), 5751 i40e_aq_str(&pf->hw, 5752 pf->hw.aq.asq_last_status)); 5753 return -ENOENT; 5754 } 5755 5756 /* Success, 

/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup queue map for a specific channel
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
                                         struct i40e_vsi_context *ctxt,
                                         struct i40e_channel *ch)
{
        u16 qcount, qmap, sections = 0;
        u8 offset = 0;
        int pow;

        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

        qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
        ch->num_queue_pairs = qcount;

        /* find the next higher power-of-2 of num queue pairs */
        pow = ilog2(qcount);
        if (!is_power_of_2(qcount))
                pow++;

        qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

        /* Setup queue TC[0].qmap for given VSI context */
        ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

        ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
        ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
        ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
        ctxt->info.valid_sections |= cpu_to_le16(sections);
}
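
/* Worked example of the qmap encoding above (illustrative values): for
 * a channel asking for 6 queue pairs, ilog2(6) = 2 and 6 is not a
 * power of 2, so pow becomes 3 and the hardware is told that TC0 spans
 * 2^3 = 8 contiguous queue slots starting at offset 0; only the first
 * 6 of those slots are actually backed by rings.
 */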

/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
                            struct i40e_channel *ch)
{
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vsi_context ctxt;
        u8 enabled_tc = 0x1; /* TC0 enabled */
        int ret;

        if (ch->type != I40E_VSI_VMDQ2) {
                dev_info(&pf->pdev->dev,
                         "add new vsi failed, ch->type %d\n", ch->type);
                return -EINVAL;
        }

        memset(&ctxt, 0, sizeof(ctxt));
        ctxt.pf_num = hw->pf_id;
        ctxt.vf_num = 0;
        ctxt.uplink_seid = uplink_seid;
        ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
        if (ch->type == I40E_VSI_VMDQ2)
                ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

        if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
                ctxt.info.valid_sections |=
                        cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
                ctxt.info.switch_id =
                        cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
        }

        /* Set queue map for a given VSI context */
        i40e_channel_setup_queue_map(pf, &ctxt, ch);

        /* Now time to create VSI */
        ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "add new vsi failed, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw,
                                     pf->hw.aq.asq_last_status));
                return -ENOENT;
        }

        /* Success, update channel */
        ch->enabled_tc = enabled_tc;
        ch->seid = ctxt.seid;
        ch->vsi_number = ctxt.vsi_number;
        ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);

        /* copy just the sections touched not the entire info
         * since not all sections are valid as returned by
         * update vsi params
         */
        ch->info.mapping_flags = ctxt.info.mapping_flags;
        memcpy(&ch->info.queue_mapping,
               &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
        memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
               sizeof(ctxt.info.tc_mapping));

        return 0;
}

/**
 * i40e_channel_config_bw - configure per-TC BW shares for a channel VSI
 * @vsi: the parent VSI
 * @ch: ptr to channel structure
 * @bw_share: per-TC BW share credits
 *
 * Program the relative BW share credits for the enabled TCs of the
 * channel VSI and cache the qset handles returned by the firmware.
 **/
static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
                                  u8 *bw_share)
{
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
        i40e_status ret;
        int i;

        bw_data.tc_valid_bits = ch->enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];

        ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
                                       &bw_data, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
                         vsi->back->hw.aq.asq_last_status, ch->seid);
                return -EINVAL;
        }

        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                ch->info.qs_handle[i] = bw_data.qs_handles[i];

        return 0;
}

/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure TX rings associated with the channel (VSI), since its queues
 * are borrowed from the parent VSI.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
                                       struct i40e_vsi *vsi,
                                       struct i40e_channel *ch)
{
        i40e_status ret;
        int i;
        u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};

        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (ch->enabled_tc & BIT(i))
                        bw_share[i] = 1;
        }

        /* configure BW for new VSI */
        ret = i40e_channel_config_bw(vsi, ch, bw_share);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "Failed configuring TC map %d for channel (seid %u)\n",
                         ch->enabled_tc, ch->seid);
                return ret;
        }

        for (i = 0; i < ch->num_queue_pairs; i++) {
                struct i40e_ring *tx_ring, *rx_ring;
                u16 pf_q;

                pf_q = ch->base_queue + i;

                /* Get to TX ring ptr of main VSI, for re-setup TX queue
                 * context
                 */
                tx_ring = vsi->tx_rings[pf_q];
                tx_ring->ch = ch;

                /* Get the RX ring ptr */
                rx_ring = vsi->rx_rings[pf_q];
                rx_ring->ch = ch;
        }

        return 0;
}
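
/* Note on the bw_share values used above: the credits are relative
 * weights, not absolute rates, so giving every enabled TC a share of 1
 * (e.g. bw_share = {1, 1} for an enabled_tc of 0x3) asks the scheduler
 * for an even split; absolute per-channel limits are applied separately
 * via i40e_set_bw_limit().
 */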
5950 **/ 5951 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) 5952 { 5953 u8 mode; 5954 struct i40e_pf *pf = vsi->back; 5955 struct i40e_hw *hw = &pf->hw; 5956 int ret; 5957 5958 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); 5959 if (ret) 5960 return -EINVAL; 5961 5962 if (hw->dev_caps.switch_mode) { 5963 /* if switch mode is set, support mode2 (non-tunneled for 5964 * cloud filter) for now 5965 */ 5966 u32 switch_mode = hw->dev_caps.switch_mode & 5967 I40E_SWITCH_MODE_MASK; 5968 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) { 5969 if (switch_mode == I40E_CLOUD_FILTER_MODE2) 5970 return 0; 5971 dev_err(&pf->pdev->dev, 5972 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", 5973 hw->dev_caps.switch_mode); 5974 return -EINVAL; 5975 } 5976 } 5977 5978 /* Set Bit 7 to be valid */ 5979 mode = I40E_AQ_SET_SWITCH_BIT7_VALID; 5980 5981 /* Set L4type for TCP support */ 5982 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP; 5983 5984 /* Set cloud filter mode */ 5985 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; 5986 5987 /* Prep mode field for set_switch_config */ 5988 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, 5989 pf->last_sw_conf_valid_flags, 5990 mode, NULL); 5991 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) 5992 dev_err(&pf->pdev->dev, 5993 "couldn't set switch config bits, err %s aq_err %s\n", 5994 i40e_stat_str(hw, ret), 5995 i40e_aq_str(hw, 5996 hw->aq.asq_last_status)); 5997 5998 return ret; 5999 } 6000 6001 /** 6002 * i40e_create_queue_channel - function to create channel 6003 * @vsi: VSI to be configured 6004 * @ch: ptr to channel (it contains channel specific params) 6005 * 6006 * This function creates channel (VSI) using num_queues specified by user, 6007 * reconfigs RSS if needed. 6008 **/ 6009 int i40e_create_queue_channel(struct i40e_vsi *vsi, 6010 struct i40e_channel *ch) 6011 { 6012 struct i40e_pf *pf = vsi->back; 6013 bool reconfig_rss; 6014 int err; 6015 6016 if (!ch) 6017 return -EINVAL; 6018 6019 if (!ch->num_queue_pairs) { 6020 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", 6021 ch->num_queue_pairs); 6022 return -EINVAL; 6023 } 6024 6025 /* validate user requested num_queues for channel */ 6026 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, 6027 &reconfig_rss); 6028 if (err) { 6029 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", 6030 ch->num_queue_pairs); 6031 return -EINVAL; 6032 } 6033 6034 /* By default we are in VEPA mode, if this is the first VF/VMDq 6035 * VSI to be added switch to VEB mode. 6036 */ 6037 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || 6038 (!i40e_is_any_channel(vsi))) { 6039 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { 6040 dev_dbg(&pf->pdev->dev, 6041 "Failed to create channel. 

/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up the switch mode if it needs to be changed, after validating
 * that the current mode is one of the allowed modes.
 **/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
        u8 mode;
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int ret;

        ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
        if (ret)
                return -EINVAL;

        if (hw->dev_caps.switch_mode) {
                /* if switch mode is set, support mode2 (non-tunneled for
                 * cloud filter) for now
                 */
                u32 switch_mode = hw->dev_caps.switch_mode &
                                  I40E_SWITCH_MODE_MASK;
                if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
                        if (switch_mode == I40E_CLOUD_FILTER_MODE2)
                                return 0;
                        dev_err(&pf->pdev->dev,
                                "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
                                hw->dev_caps.switch_mode);
                        return -EINVAL;
                }
        }

        /* Set Bit 7 to be valid */
        mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

        /* Set L4type for TCP support */
        mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;

        /* Set cloud filter mode */
        mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;

        /* Prep mode field for set_switch_config */
        ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
                                        pf->last_sw_conf_valid_flags,
                                        mode, NULL);
        if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
                dev_err(&pf->pdev->dev,
                        "couldn't set switch config bits, err %s aq_err %s\n",
                        i40e_stat_str(hw, ret),
                        i40e_aq_str(hw,
                                    hw->aq.asq_last_status));

        return ret;
}
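
/* The mode byte assembled above ORs three independent fields: a
 * validity marker (bit 7, which flags the mode field as meaningful to
 * the firmware), the L4 type used for cloud filter matching (TCP), and
 * the cloud filtering mode (non-tunneled).  All three are programmed
 * in a single set_switch_config call.
 */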

/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates a channel (VSI) using the num_queues specified by
 * the user and reconfigures RSS if needed.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
                              struct i40e_channel *ch)
{
        struct i40e_pf *pf = vsi->back;
        bool reconfig_rss;
        int err;

        if (!ch)
                return -EINVAL;

        if (!ch->num_queue_pairs) {
                dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
                        ch->num_queue_pairs);
                return -EINVAL;
        }

        /* validate user requested num_queues for channel */
        err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
                                       &reconfig_rss);
        if (err) {
                dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
                         ch->num_queue_pairs);
                return -EINVAL;
        }

        /* By default we are in VEPA mode; if this is the first VF/VMDq
         * VSI to be added, switch to VEB mode.
         */
        if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
            (!i40e_is_any_channel(vsi))) {
                if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
                        dev_dbg(&pf->pdev->dev,
                                "Failed to create channel. Override queues (%u) not power of 2\n",
                                vsi->tc_config.tc_info[0].qcount);
                        return -EINVAL;
                }

                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

                        if (vsi->type == I40E_VSI_MAIN) {
                                if (pf->flags & I40E_FLAG_TC_MQPRIO)
                                        i40e_do_reset(pf, I40E_PF_RESET_FLAG,
                                                      true);
                                else
                                        i40e_do_reset_safe(pf,
                                                           I40E_PF_RESET_FLAG);
                        }
                }
                /* From now on, the main VSI's number of queues is TC0's
                 * queue count.
                 */
        }

        /* By this time, vsi->cnt_q_avail should be non-zero and at least
         * num_queues.
         */
        if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
                dev_dbg(&pf->pdev->dev,
                        "Error: cnt_q_avail (%u) less than num_queues %d\n",
                        vsi->cnt_q_avail, ch->num_queue_pairs);
                return -EINVAL;
        }

        /* reconfig_rss only if vsi type is MAIN_VSI */
        if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
                err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
                if (err) {
                        dev_info(&pf->pdev->dev,
                                 "Error: unable to reconfig rss for num_queues (%u)\n",
                                 ch->num_queue_pairs);
                        return -EINVAL;
                }
        }

        if (!i40e_setup_channel(pf, vsi, ch)) {
                dev_info(&pf->pdev->dev, "Failed to setup channel\n");
                return -EINVAL;
        }

        dev_info(&pf->pdev->dev,
                 "Setup channel (id:%u) utilizing num_queues %d\n",
                 ch->seid, ch->num_queue_pairs);

        /* configure VSI for BW limit */
        if (ch->max_tx_rate) {
                u64 credits = ch->max_tx_rate;

                if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
                        return -EINVAL;

                do_div(credits, I40E_BW_CREDIT_DIVISOR);
                dev_dbg(&pf->pdev->dev,
                        "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
                        ch->max_tx_rate,
                        credits,
                        ch->seid);
        }

        /* in case of VF, this will be main SRIOV VSI */
        ch->parent_vsi = vsi;

        /* and update main_vsi's count for queue_available to use */
        vsi->cnt_q_avail -= ch->num_queue_pairs;

        return 0;
}

/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
        struct i40e_channel *ch;
        u64 max_rate = 0;
        int ret = 0, i;

        /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
        vsi->tc_seid_map[0] = vsi->seid;
        for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (vsi->tc_config.enabled_tc & BIT(i)) {
                        ch = kzalloc(sizeof(*ch), GFP_KERNEL);
                        if (!ch) {
                                ret = -ENOMEM;
                                goto err_free;
                        }

                        INIT_LIST_HEAD(&ch->list);
                        ch->num_queue_pairs =
                                vsi->tc_config.tc_info[i].qcount;
                        ch->base_queue =
                                vsi->tc_config.tc_info[i].qoffset;

                        /* Bandwidth limit through tc interface is in bytes/s,
                         * change to Mbit/s
                         */
                        max_rate = vsi->mqprio_qopt.max_rate[i];
                        do_div(max_rate, I40E_BW_MBPS_DIVISOR);
                        ch->max_tx_rate = max_rate;

                        list_add_tail(&ch->list, &vsi->ch_list);

                        ret = i40e_create_queue_channel(vsi, ch);
                        if (ret) {
                                dev_err(&vsi->back->pdev->dev,
                                        "Failed creating queue channel with TC%d: queues %d\n",
                                        i, ch->num_queue_pairs);
                                goto err_free;
                        }
                        vsi->tc_seid_map[i] = ch->seid;
                }
        }
        return ret;

err_free:
        i40e_remove_queue_channels(vsi);
        return ret;
}
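
/* Worked example of the rate conversion above: the tc layer hands the
 * driver max_rate in bytes per second, so a user-requested 1 Gbit/s
 * arrives as 125000000; dividing by I40E_BW_MBPS_DIVISOR (125000,
 * i.e. bytes/s per Mbit/s) yields ch->max_tx_rate = 1000 Mbps.
 */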

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
        struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
        struct i40e_pf *pf = veb->pf;
        int ret = 0;
        int i;

        /* No TCs or already enabled TCs just return */
        if (!enabled_tc || veb->enabled_tc == enabled_tc)
                return ret;

        bw_data.tc_valid_bits = enabled_tc;
        /* bw_data.absolute_credits is not set (relative) */

        /* Enable ETS TCs with equal BW Share for now */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                if (enabled_tc & BIT(i))
                        bw_data.tc_bw_share_credits[i] = 1;
        }

        ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
                                                   &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "VEB bw config failed, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto out;
        }

        /* Update the BW information */
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "Failed getting veb bw config, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }

out:
        return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has already quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
        u8 tc_map = 0;
        int ret;
        u8 v;

        /* Enable the TCs available on PF to all VEBs */
        tc_map = i40e_pf_get_tc_map(pf);
        for (v = 0; v < I40E_MAX_VEB; v++) {
                if (!pf->veb[v])
                        continue;
                ret = i40e_veb_config_tc(pf->veb[v], tc_map);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Failed configuring TC for VEB seid=%d\n",
                                 pf->veb[v]->seid);
                        /* Will try to configure as many components as possible */
                }
        }

        /* Update each VSI */
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (!pf->vsi[v])
                        continue;

                /* - Enable all TCs for the LAN VSI
                 * - For all others keep them at TC0 for now
                 */
                if (v == pf->lan_vsi)
                        tc_map = i40e_pf_get_tc_map(pf);
                else
                        tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

                ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Failed configuring TC for VSI seid=%d\n",
                                 pf->vsi[v]->seid);
                        /* Will try to configure as many components as possible */
                } else {
                        /* Re-configure VSI vectors based on updated TC map */
                        i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
                        if (pf->vsi[v]->netdev)
                                i40e_dcbnl_set_all(pf->vsi[v]);
                }
        }
}
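
/* The tc_map arguments above are plain bitmaps of enabled traffic
 * classes, bit N standing for TC N: for example a map of 0x3 enables
 * TC0 and TC1, while I40E_DEFAULT_TRAFFIC_CLASS keeps a VSI on TC0
 * only.
 */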

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        int ret;

        ret = i40e_aq_resume_port_tx(hw, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "Resume Port Tx failed, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
                i40e_service_event_schedule(pf);
        }

        return ret;
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        int err = 0;

        /* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
         * Also do not enable DCBx if the FW LLDP agent is disabled.
         */
        if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
            (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
                goto out;

        /* Get the initial DCB configuration */
        err = i40e_init_dcb(hw);
        if (!err) {
                /* Device/Function is not DCBX capable */
                if ((!hw->func_caps.dcb) ||
                    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
                        dev_info(&pf->pdev->dev,
                                 "DCBX offload is not supported or is disabled for this PF.\n");
                } else {
                        /* When status is not DISABLED, DCBX is managed in FW */
                        pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
                                       DCB_CAP_DCBX_VER_IEEE;

                        pf->flags |= I40E_FLAG_DCB_CAPABLE;
                        /* Enable DCB tagging only when more than one TC
                         * or explicitly disable if only one TC
                         */
                        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                                pf->flags |= I40E_FLAG_DCB_ENABLED;
                        else
                                pf->flags &= ~I40E_FLAG_DCB_ENABLED;
                        dev_dbg(&pf->pdev->dev,
                                "DCBX offload is supported for this PF.\n");
                }
        } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
                dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
                pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
        } else {
                dev_info(&pf->pdev->dev,
                         "Query for DCB configuration failed, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, err),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }

out:
        return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
 **/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
        enum i40e_aq_link_speed new_speed;
        struct i40e_pf *pf = vsi->back;
        char *speed = "Unknown";
        char *fc = "Unknown";
        char *fec = "";
        char *req_fec = "";
        char *an = "";

        new_speed = pf->hw.phy.link_info.link_speed;

        if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
                return;
        vsi->current_isup = isup;
        vsi->current_speed = new_speed;
        if (!isup) {
                netdev_info(vsi->netdev, "NIC Link is Down\n");
                return;
        }

        /* Warn user if link speed on NPAR enabled partition is not at
         * least 10 Gbps
         */
        if (pf->hw.func_caps.npar_enable &&
            (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
             pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
                netdev_warn(vsi->netdev,
                            "The partition detected link speed that is less than 10Gbps\n");

        switch (pf->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
                speed = "40 G";
                break;
        case I40E_LINK_SPEED_20GB:
                speed = "20 G";
                break;
        case I40E_LINK_SPEED_25GB:
                speed = "25 G";
                break;
        case I40E_LINK_SPEED_10GB:
                speed = "10 G";
                break;
        case I40E_LINK_SPEED_1GB:
                speed = "1000 M";
                break;
        case I40E_LINK_SPEED_100MB:
                speed = "100 M";
                break;
        default:
                break;
        }

        switch (pf->hw.fc.current_mode) {
        case I40E_FC_FULL:
                fc = "RX/TX";
                break;
        case I40E_FC_TX_PAUSE:
                fc = "TX";
                break;
        case I40E_FC_RX_PAUSE:
                fc = "RX";
                break;
        default:
                fc = "None";
                break;
        }

        if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
                req_fec = ", Requested FEC: None";
                fec = ", FEC: None";
                an = ", Autoneg: False";

                if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
                        an = ", Autoneg: True";

                if (pf->hw.phy.link_info.fec_info &
                    I40E_AQ_CONFIG_FEC_KR_ENA)
                        fec = ", FEC: CL74 FC-FEC/BASE-R";
                else if (pf->hw.phy.link_info.fec_info &
                         I40E_AQ_CONFIG_FEC_RS_ENA)
                        fec = ", FEC: CL108 RS-FEC";

                /* 'CL108 RS-FEC' should be displayed when RS is requested, or
                 * both RS and FC are requested
                 */
                if (vsi->back->hw.phy.link_info.req_fec_info &
                    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
                        if (vsi->back->hw.phy.link_info.req_fec_info &
                            I40E_AQ_REQUEST_FEC_RS)
                                req_fec = ", Requested FEC: CL108 RS-FEC";
                        else
                                req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
                }
        }

        netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
                    speed, req_fec, fec, an, fc);
}
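
/* For example, a 25G link that negotiated RS-FEC would log something
 * like:
 *
 *   NIC Link is Up, 25 Gbps Full Duplex, Requested FEC: CL108 RS-FEC,
 *   FEC: CL108 RS-FEC, Autoneg: True, Flow Control: None
 *
 * since "%sbps" above is filled with the "25 G" speed string.
 */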

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        int err;

        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                i40e_vsi_configure_msix(vsi);
        else
                i40e_configure_msi_and_legacy(vsi);

        /* start rings */
        err = i40e_vsi_start_rings(vsi);
        if (err)
                return err;

        clear_bit(__I40E_VSI_DOWN, vsi->state);
        i40e_napi_enable_all(vsi);
        i40e_vsi_enable_irq(vsi);

        if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
            (vsi->netdev)) {
                i40e_print_link_message(vsi, true);
                netif_tx_start_all_queues(vsi->netdev);
                netif_carrier_on(vsi->netdev);
        }

        /* replay FDIR SB filters */
        if (vsi->type == I40E_VSI_FDIR) {
                /* reset fd counters */
                pf->fd_add_err = 0;
                pf->fd_atr_cnt = 0;
                i40e_fdir_filter_restore(vsi);
        }

        /* On the next run of the service_task, notify any clients of the new
         * opened netdev
         */
        pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
        i40e_service_event_schedule(pf);

        return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;

        WARN_ON(in_interrupt());
        while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
                usleep_range(1000, 2000);
        i40e_down(vsi);

        i40e_up(vsi);
        clear_bit(__I40E_CONFIG_BUSY, pf->state);
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
        int err;

        err = i40e_vsi_configure(vsi);
        if (!err)
                err = i40e_up_complete(vsi);

        return err;
}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
        int i;

        /* It is assumed that the caller of this function
         * sets the vsi->state __I40E_VSI_DOWN bit.
         */
        if (vsi->netdev) {
                netif_carrier_off(vsi->netdev);
                netif_tx_disable(vsi->netdev);
        }
        i40e_vsi_disable_irq(vsi);
        i40e_vsi_stop_rings(vsi);
        i40e_napi_disable_all(vsi);

        for (i = 0; i < vsi->num_queue_pairs; i++) {
                i40e_clean_tx_ring(vsi->tx_rings[i]);
                if (i40e_enabled_xdp_vsi(vsi))
                        i40e_clean_tx_ring(vsi->xdp_rings[i]);
                i40e_clean_rx_ring(vsi->rx_rings[i]);
        }
}
6837 **/ 6838 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 6839 struct i40e_cloud_filter *filter, bool add) 6840 { 6841 struct i40e_aqc_cloud_filters_element_data cld_filter; 6842 struct i40e_pf *pf = vsi->back; 6843 int ret; 6844 static const u16 flag_table[128] = { 6845 [I40E_CLOUD_FILTER_FLAGS_OMAC] = 6846 I40E_AQC_ADD_CLOUD_FILTER_OMAC, 6847 [I40E_CLOUD_FILTER_FLAGS_IMAC] = 6848 I40E_AQC_ADD_CLOUD_FILTER_IMAC, 6849 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] = 6850 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN, 6851 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] = 6852 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID, 6853 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] = 6854 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC, 6855 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] = 6856 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID, 6857 [I40E_CLOUD_FILTER_FLAGS_IIP] = 6858 I40E_AQC_ADD_CLOUD_FILTER_IIP, 6859 }; 6860 6861 if (filter->flags >= ARRAY_SIZE(flag_table)) 6862 return I40E_ERR_CONFIG; 6863 6864 /* copy element needed to add cloud filter from filter */ 6865 i40e_set_cld_element(filter, &cld_filter); 6866 6867 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) 6868 cld_filter.flags = cpu_to_le16(filter->tunnel_type << 6869 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); 6870 6871 if (filter->n_proto == ETH_P_IPV6) 6872 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | 6873 I40E_AQC_ADD_CLOUD_FLAGS_IPV6); 6874 else 6875 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | 6876 I40E_AQC_ADD_CLOUD_FLAGS_IPV4); 6877 6878 if (add) 6879 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, 6880 &cld_filter, 1); 6881 else 6882 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, 6883 &cld_filter, 1); 6884 if (ret) 6885 dev_dbg(&pf->pdev->dev, 6886 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n", 6887 add ? "add" : "delete", filter->dst_port, ret, 6888 pf->hw.aq.asq_last_status); 6889 else 6890 dev_info(&pf->pdev->dev, 6891 "%s cloud filter for VSI: %d\n", 6892 add ? "Added" : "Deleted", filter->seid); 6893 return ret; 6894 } 6895 6896 /** 6897 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf 6898 * @vsi: pointer to VSI 6899 * @filter: cloud filter rule 6900 * @add: if true, add, if false, delete 6901 * 6902 * Add or delete a cloud filter for a specific flow spec using big buffer. 6903 * Returns 0 if the filter were successfully added. 6904 **/ 6905 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, 6906 struct i40e_cloud_filter *filter, 6907 bool add) 6908 { 6909 struct i40e_aqc_cloud_filters_element_bb cld_filter; 6910 struct i40e_pf *pf = vsi->back; 6911 int ret; 6912 6913 /* Both (src/dst) valid mac_addr are not supported */ 6914 if ((is_valid_ether_addr(filter->dst_mac) && 6915 is_valid_ether_addr(filter->src_mac)) || 6916 (is_multicast_ether_addr(filter->dst_mac) && 6917 is_multicast_ether_addr(filter->src_mac))) 6918 return -EOPNOTSUPP; 6919 6920 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP 6921 * ports are not supported via big buffer now. 
6922 */ 6923 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) 6924 return -EOPNOTSUPP; 6925 6926 /* adding filter using src_port/src_ip is not supported at this stage */ 6927 if (filter->src_port || filter->src_ipv4 || 6928 !ipv6_addr_any(&filter->ip.v6.src_ip6)) 6929 return -EOPNOTSUPP; 6930 6931 /* copy element needed to add cloud filter from filter */ 6932 i40e_set_cld_element(filter, &cld_filter.element); 6933 6934 if (is_valid_ether_addr(filter->dst_mac) || 6935 is_valid_ether_addr(filter->src_mac) || 6936 is_multicast_ether_addr(filter->dst_mac) || 6937 is_multicast_ether_addr(filter->src_mac)) { 6938 /* MAC + IP : unsupported mode */ 6939 if (filter->dst_ipv4) 6940 return -EOPNOTSUPP; 6941 6942 /* since we validated that L4 port must be valid before 6943 * we get here, start with respective "flags" value 6944 * and update if vlan is present or not 6945 */ 6946 cld_filter.element.flags = 6947 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT); 6948 6949 if (filter->vlan_id) { 6950 cld_filter.element.flags = 6951 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); 6952 } 6953 6954 } else if (filter->dst_ipv4 || 6955 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { 6956 cld_filter.element.flags = 6957 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); 6958 if (filter->n_proto == ETH_P_IPV6) 6959 cld_filter.element.flags |= 6960 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6); 6961 else 6962 cld_filter.element.flags |= 6963 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4); 6964 } else { 6965 dev_err(&pf->pdev->dev, 6966 "either mac or ip has to be valid for cloud filter\n"); 6967 return -EINVAL; 6968 } 6969 6970 /* Now copy L4 port in Byte 6..7 in general fields */ 6971 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = 6972 be16_to_cpu(filter->dst_port); 6973 6974 if (add) { 6975 /* Validate current device switch mode, change if necessary */ 6976 ret = i40e_validate_and_set_switch_mode(vsi); 6977 if (ret) { 6978 dev_err(&pf->pdev->dev, 6979 "failed to set switch mode, ret %d\n", 6980 ret); 6981 return ret; 6982 } 6983 6984 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, 6985 &cld_filter, 1); 6986 } else { 6987 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, 6988 &cld_filter, 1); 6989 } 6990 6991 if (ret) 6992 dev_dbg(&pf->pdev->dev, 6993 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n", 6994 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); 6995 else 6996 dev_info(&pf->pdev->dev, 6997 "%s cloud filter for VSI: %d, L4 port: %d\n", 6998 add ? 
"add" : "delete", filter->seid, 6999 ntohs(filter->dst_port)); 7000 return ret; 7001 } 7002 7003 /** 7004 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel 7005 * @vsi: Pointer to VSI 7006 * @cls_flower: Pointer to struct tc_cls_flower_offload 7007 * @filter: Pointer to cloud filter structure 7008 * 7009 **/ 7010 static int i40e_parse_cls_flower(struct i40e_vsi *vsi, 7011 struct tc_cls_flower_offload *f, 7012 struct i40e_cloud_filter *filter) 7013 { 7014 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; 7015 struct i40e_pf *pf = vsi->back; 7016 u8 field_flags = 0; 7017 7018 if (f->dissector->used_keys & 7019 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 7020 BIT(FLOW_DISSECTOR_KEY_BASIC) | 7021 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 7022 BIT(FLOW_DISSECTOR_KEY_VLAN) | 7023 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 7024 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 7025 BIT(FLOW_DISSECTOR_KEY_PORTS) | 7026 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 7027 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", 7028 f->dissector->used_keys); 7029 return -EOPNOTSUPP; 7030 } 7031 7032 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 7033 struct flow_dissector_key_keyid *key = 7034 skb_flow_dissector_target(f->dissector, 7035 FLOW_DISSECTOR_KEY_ENC_KEYID, 7036 f->key); 7037 7038 struct flow_dissector_key_keyid *mask = 7039 skb_flow_dissector_target(f->dissector, 7040 FLOW_DISSECTOR_KEY_ENC_KEYID, 7041 f->mask); 7042 7043 if (mask->keyid != 0) 7044 field_flags |= I40E_CLOUD_FIELD_TEN_ID; 7045 7046 filter->tenant_id = be32_to_cpu(key->keyid); 7047 } 7048 7049 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 7050 struct flow_dissector_key_basic *key = 7051 skb_flow_dissector_target(f->dissector, 7052 FLOW_DISSECTOR_KEY_BASIC, 7053 f->key); 7054 7055 struct flow_dissector_key_basic *mask = 7056 skb_flow_dissector_target(f->dissector, 7057 FLOW_DISSECTOR_KEY_BASIC, 7058 f->mask); 7059 7060 n_proto_key = ntohs(key->n_proto); 7061 n_proto_mask = ntohs(mask->n_proto); 7062 7063 if (n_proto_key == ETH_P_ALL) { 7064 n_proto_key = 0; 7065 n_proto_mask = 0; 7066 } 7067 filter->n_proto = n_proto_key & n_proto_mask; 7068 filter->ip_proto = key->ip_proto; 7069 } 7070 7071 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 7072 struct flow_dissector_key_eth_addrs *key = 7073 skb_flow_dissector_target(f->dissector, 7074 FLOW_DISSECTOR_KEY_ETH_ADDRS, 7075 f->key); 7076 7077 struct flow_dissector_key_eth_addrs *mask = 7078 skb_flow_dissector_target(f->dissector, 7079 FLOW_DISSECTOR_KEY_ETH_ADDRS, 7080 f->mask); 7081 7082 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 7083 if (!is_zero_ether_addr(mask->dst)) { 7084 if (is_broadcast_ether_addr(mask->dst)) { 7085 field_flags |= I40E_CLOUD_FIELD_OMAC; 7086 } else { 7087 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", 7088 mask->dst); 7089 return I40E_ERR_CONFIG; 7090 } 7091 } 7092 7093 if (!is_zero_ether_addr(mask->src)) { 7094 if (is_broadcast_ether_addr(mask->src)) { 7095 field_flags |= I40E_CLOUD_FIELD_IMAC; 7096 } else { 7097 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", 7098 mask->src); 7099 return I40E_ERR_CONFIG; 7100 } 7101 } 7102 ether_addr_copy(filter->dst_mac, key->dst); 7103 ether_addr_copy(filter->src_mac, key->src); 7104 } 7105 7106 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { 7107 struct flow_dissector_key_vlan *key = 7108 skb_flow_dissector_target(f->dissector, 7109 FLOW_DISSECTOR_KEY_VLAN, 7110 f->key); 7111 struct flow_dissector_key_vlan *mask = 7112 

/**
 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
 * @vsi: Pointer to VSI
 * @f: Pointer to struct tc_cls_flower_offload
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
                                 struct tc_cls_flower_offload *f,
                                 struct i40e_cloud_filter *filter)
{
        u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
        struct i40e_pf *pf = vsi->back;
        u8 field_flags = 0;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
                dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
                        f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);

                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);

                if (mask->keyid != 0)
                        field_flags |= I40E_CLOUD_FIELD_TEN_ID;

                filter->tenant_id = be32_to_cpu(key->keyid);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);

                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);

                n_proto_key = ntohs(key->n_proto);
                n_proto_mask = ntohs(mask->n_proto);

                if (n_proto_key == ETH_P_ALL) {
                        n_proto_key = 0;
                        n_proto_mask = 0;
                }
                filter->n_proto = n_proto_key & n_proto_mask;
                filter->ip_proto = key->ip_proto;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);

                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                /* use is_broadcast and is_zero to check for all 0xf or 0 */
                if (!is_zero_ether_addr(mask->dst)) {
                        if (is_broadcast_ether_addr(mask->dst)) {
                                field_flags |= I40E_CLOUD_FIELD_OMAC;
                        } else {
                                dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
                                        mask->dst);
                                return I40E_ERR_CONFIG;
                        }
                }

                if (!is_zero_ether_addr(mask->src)) {
                        if (is_broadcast_ether_addr(mask->src)) {
                                field_flags |= I40E_CLOUD_FIELD_IMAC;
                        } else {
                                dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
                                        mask->src);
                                return I40E_ERR_CONFIG;
                        }
                }
                ether_addr_copy(filter->dst_mac, key->dst);
                ether_addr_copy(filter->src_mac, key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);

                if (mask->vlan_id) {
                        if (mask->vlan_id == VLAN_VID_MASK) {
                                field_flags |= I40E_CLOUD_FIELD_IVLAN;

                        } else {
                                dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
                                        mask->vlan_id);
                                return I40E_ERR_CONFIG;
                        }
                }

                filter->vlan_id = cpu_to_be16(key->vlan_id);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);

                addr_type = key->addr_type;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                if (mask->dst) {
                        if (mask->dst == cpu_to_be32(0xffffffff)) {
                                field_flags |= I40E_CLOUD_FIELD_IIP;
                        } else {
                                mask->dst = be32_to_cpu(mask->dst);
                                dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
                                        &mask->dst);
                                return I40E_ERR_CONFIG;
                        }
                }

                if (mask->src) {
                        if (mask->src == cpu_to_be32(0xffffffff)) {
                                field_flags |= I40E_CLOUD_FIELD_IIP;
                        } else {
                                mask->src = be32_to_cpu(mask->src);
                                dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
                                        &mask->src);
                                return I40E_ERR_CONFIG;
                        }
                }

                if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
                        dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
                        return I40E_ERR_CONFIG;
                }
                filter->dst_ipv4 = key->dst;
                filter->src_ipv4 = key->src;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                /* src and dest IPV6 address should not be LOOPBACK
                 * (0:0:0:0:0:0:0:1), which can be represented as ::1
                 */
                if (ipv6_addr_loopback(&key->dst) ||
                    ipv6_addr_loopback(&key->src)) {
                        dev_err(&pf->pdev->dev,
                                "Bad ipv6, addr is LOOPBACK\n");
                        return I40E_ERR_CONFIG;
                }
                if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
                        field_flags |= I40E_CLOUD_FIELD_IIP;

                memcpy(&filter->src_ipv6, &key->src.s6_addr32,
                       sizeof(filter->src_ipv6));
                memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
                       sizeof(filter->dst_ipv6));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);

                if (mask->src) {
                        if (mask->src == cpu_to_be16(0xffff)) {
                                field_flags |= I40E_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
                                        be16_to_cpu(mask->src));
                                return I40E_ERR_CONFIG;
                        }
                }

                if (mask->dst) {
                        if (mask->dst == cpu_to_be16(0xffff)) {
                                field_flags |= I40E_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
                                        be16_to_cpu(mask->dst));
                                return I40E_ERR_CONFIG;
                        }
                }

                filter->dst_port = key->dst;
                filter->src_port = key->src;

                switch (filter->ip_proto) {
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                        break;
                default:
                        dev_err(&pf->pdev->dev,
                                "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }
        }
        filter->flags = field_flags;
        return 0;
}
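
/* An example tc-flower rule that this parser accepts (illustrative
 * interface, address and class id):
 *
 *   tc filter add dev eth0 protocol ip parent ffff: flower \
 *      dst_ip 192.168.1.10 ip_proto tcp dst_port 80 \
 *      skip_sw hw_tc 1
 *
 * Exact-match masks are required: a partially-masked IP or port
 * (e.g. dst_ip 192.168.1.0/24) is rejected above with I40E_ERR_CONFIG.
 */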

/**
 * i40e_handle_tclass - Forward to a traffic class on the device
 * @vsi: Pointer to VSI
 * @tc: traffic class index on the device
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
                              struct i40e_cloud_filter *filter)
{
        struct i40e_channel *ch, *ch_tmp;

        /* direct to a traffic class on the same device */
        if (tc == 0) {
                filter->seid = vsi->seid;
                return 0;
        } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
                if (!filter->dst_port) {
                        dev_err(&vsi->back->pdev->dev,
                                "Specify destination port to direct to traffic class that is not default\n");
                        return -EINVAL;
                }
                if (list_empty(&vsi->ch_list))
                        return -EINVAL;
                list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
                                         list) {
                        if (ch->seid == vsi->tc_seid_map[tc])
                                filter->seid = ch->seid;
                }
                return 0;
        }
        dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
        return -EINVAL;
}

/**
 * i40e_configure_clsflower - Configure tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
                                    struct tc_cls_flower_offload *cls_flower)
{
        int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
        struct i40e_cloud_filter *filter = NULL;
        struct i40e_pf *pf = vsi->back;
        int err = 0;

        if (tc < 0) {
                dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
                return -EOPNOTSUPP;
        }

        if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
            test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
                return -EBUSY;

        if (pf->fdir_pf_active_filters ||
            (!hlist_empty(&pf->fdir_filter_list))) {
                dev_err(&vsi->back->pdev->dev,
                        "Flow Director Sideband filters exist, turn ntuple off to configure cloud filters\n");
                return -EINVAL;
        }

        if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
                dev_err(&vsi->back->pdev->dev,
                        "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
                vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
                vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
        }

        filter = kzalloc(sizeof(*filter), GFP_KERNEL);
        if (!filter)
                return -ENOMEM;

        filter->cookie = cls_flower->cookie;

        err = i40e_parse_cls_flower(vsi, cls_flower, filter);
        if (err < 0)
                goto err;

        err = i40e_handle_tclass(vsi, tc, filter);
        if (err < 0)
                goto err;

        /* Add cloud filter */
        if (filter->dst_port)
                err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
        else
                err = i40e_add_del_cloud_filter(vsi, filter, true);

        if (err) {
                dev_err(&pf->pdev->dev,
                        "Failed to add cloud filter, err %s\n",
                        i40e_stat_str(&pf->hw, err));
                goto err;
        }

        /* add filter to the ordered list */
        INIT_HLIST_NODE(&filter->cloud_node);

        hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

        pf->num_cloud_filters++;

        return err;
err:
        kfree(filter);
        return err;
}

/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
 * @vsi: Pointer to VSI
 * @cookie: filter specific cookie
 *
 **/
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
                                                        unsigned long *cookie)
{
        struct i40e_cloud_filter *filter = NULL;
        struct hlist_node *node2;

        hlist_for_each_entry_safe(filter, node2,
                                  &vsi->back->cloud_filter_list, cloud_node)
                if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
                        return filter;
        return NULL;
}

/**
 * i40e_delete_clsflower - Remove tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
                                 struct tc_cls_flower_offload *cls_flower)
{
        struct i40e_cloud_filter *filter = NULL;
        struct i40e_pf *pf = vsi->back;
        int err = 0;

        filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);

        if (!filter)
                return -EINVAL;

        hash_del(&filter->cloud_node);

        if (filter->dst_port)
                err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
        else
                err = i40e_add_del_cloud_filter(vsi, filter, false);

        kfree(filter);
        if (err) {
                dev_err(&pf->pdev->dev,
                        "Failed to delete cloud filter, err %s\n",
                        i40e_stat_str(&pf->hw, err));
                return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
        }

        pf->num_cloud_filters--;
        if (!pf->num_cloud_filters)
                if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
                    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
                        pf->flags |= I40E_FLAG_FD_SB_ENABLED;
                        pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
                        pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
                }
        return 0;
}

/**
 * i40e_setup_tc_cls_flower - flower classifier offloads
 * @np: Pointer to netdev private structure
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 **/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
                                    struct tc_cls_flower_offload *cls_flower)
{
        struct i40e_vsi *vsi = np->vsi;

        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return i40e_configure_clsflower(vsi, cls_flower);
        case TC_CLSFLOWER_DESTROY:
                return i40e_delete_clsflower(vsi, cls_flower);
        case TC_CLSFLOWER_STATS:
                return -EOPNOTSUPP;
        default:
                return -EINVAL;
        }
}

static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                  void *cb_priv)
{
        struct i40e_netdev_priv *np = cb_priv;

        if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return i40e_setup_tc_cls_flower(np, type_data);

        default:
                return -EOPNOTSUPP;
        }
}
i40e_setup_tc_block_cb, np); 7482 return 0; 7483 default: 7484 return -EOPNOTSUPP; 7485 } 7486 } 7487 7488 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, 7489 void *type_data) 7490 { 7491 switch (type) { 7492 case TC_SETUP_QDISC_MQPRIO: 7493 return i40e_setup_tc(netdev, type_data); 7494 case TC_SETUP_BLOCK: 7495 return i40e_setup_tc_block(netdev, type_data); 7496 default: 7497 return -EOPNOTSUPP; 7498 } 7499 } 7500 7501 /** 7502 * i40e_open - Called when a network interface is made active 7503 * @netdev: network interface device structure 7504 * 7505 * The open entry point is called when a network interface is made 7506 * active by the system (IFF_UP). At this point all resources needed 7507 * for transmit and receive operations are allocated, the interrupt 7508 * handler is registered with the OS, the netdev watchdog subtask is 7509 * enabled, and the stack is notified that the interface is ready. 7510 * 7511 * Returns 0 on success, negative value on failure 7512 **/ 7513 int i40e_open(struct net_device *netdev) 7514 { 7515 struct i40e_netdev_priv *np = netdev_priv(netdev); 7516 struct i40e_vsi *vsi = np->vsi; 7517 struct i40e_pf *pf = vsi->back; 7518 int err; 7519 7520 /* disallow open during test or if eeprom is broken */ 7521 if (test_bit(__I40E_TESTING, pf->state) || 7522 test_bit(__I40E_BAD_EEPROM, pf->state)) 7523 return -EBUSY; 7524 7525 netif_carrier_off(netdev); 7526 7527 err = i40e_vsi_open(vsi); 7528 if (err) 7529 return err; 7530 7531 /* configure global TSO hardware offload settings */ 7532 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | 7533 TCP_FLAG_FIN) >> 16); 7534 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | 7535 TCP_FLAG_FIN | 7536 TCP_FLAG_CWR) >> 16); 7537 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 7538 7539 udp_tunnel_get_rx_info(netdev); 7540 7541 return 0; 7542 } 7543 7544 /** 7545 * i40e_vsi_open - 7546 * @vsi: the VSI to open 7547 * 7548 * Finish initialization of the VSI. 7549 * 7550 * Returns 0 on success, negative value on failure 7551 * 7552 * Note: expects to be called while under rtnl_lock() 7553 **/ 7554 int i40e_vsi_open(struct i40e_vsi *vsi) 7555 { 7556 struct i40e_pf *pf = vsi->back; 7557 char int_name[I40E_INT_NAME_STR_LEN]; 7558 int err; 7559 7560 /* allocate descriptors */ 7561 err = i40e_vsi_setup_tx_resources(vsi); 7562 if (err) 7563 goto err_setup_tx; 7564 err = i40e_vsi_setup_rx_resources(vsi); 7565 if (err) 7566 goto err_setup_rx; 7567 7568 err = i40e_vsi_configure(vsi); 7569 if (err) 7570 goto err_setup_rx; 7571 7572 if (vsi->netdev) { 7573 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 7574 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 7575 err = i40e_vsi_request_irq(vsi, int_name); 7576 if (err) 7577 goto err_setup_rx; 7578 7579 /* Notify the stack of the actual queue counts. 
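 * For example (illustrative numbers only), a VSI configured with
 * 4 queue pairs reports 4 real Tx and 4 real Rx queues to the
 * stack here, even if the netdev was registered with more.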
*/ 7580 err = netif_set_real_num_tx_queues(vsi->netdev, 7581 vsi->num_queue_pairs); 7582 if (err) 7583 goto err_set_queues; 7584 7585 err = netif_set_real_num_rx_queues(vsi->netdev, 7586 vsi->num_queue_pairs); 7587 if (err) 7588 goto err_set_queues; 7589 7590 } else if (vsi->type == I40E_VSI_FDIR) { 7591 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", 7592 dev_driver_string(&pf->pdev->dev), 7593 dev_name(&pf->pdev->dev)); 7594 err = i40e_vsi_request_irq(vsi, int_name); 7595 7596 } else { 7597 err = -EINVAL; 7598 goto err_setup_rx; 7599 } 7600 7601 err = i40e_up_complete(vsi); 7602 if (err) 7603 goto err_up_complete; 7604 7605 return 0; 7606 7607 err_up_complete: 7608 i40e_down(vsi); 7609 err_set_queues: 7610 i40e_vsi_free_irq(vsi); 7611 err_setup_rx: 7612 i40e_vsi_free_rx_resources(vsi); 7613 err_setup_tx: 7614 i40e_vsi_free_tx_resources(vsi); 7615 if (vsi == pf->vsi[pf->lan_vsi]) 7616 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); 7617 7618 return err; 7619 } 7620 7621 /** 7622 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting 7623 * @pf: Pointer to PF 7624 * 7625 * This function destroys the hlist where all the Flow Director 7626 * filters were saved. 7627 **/ 7628 static void i40e_fdir_filter_exit(struct i40e_pf *pf) 7629 { 7630 struct i40e_fdir_filter *filter; 7631 struct i40e_flex_pit *pit_entry, *tmp; 7632 struct hlist_node *node2; 7633 7634 hlist_for_each_entry_safe(filter, node2, 7635 &pf->fdir_filter_list, fdir_node) { 7636 hlist_del(&filter->fdir_node); 7637 kfree(filter); 7638 } 7639 7640 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { 7641 list_del(&pit_entry->list); 7642 kfree(pit_entry); 7643 } 7644 INIT_LIST_HEAD(&pf->l3_flex_pit_list); 7645 7646 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { 7647 list_del(&pit_entry->list); 7648 kfree(pit_entry); 7649 } 7650 INIT_LIST_HEAD(&pf->l4_flex_pit_list); 7651 7652 pf->fdir_pf_active_filters = 0; 7653 pf->fd_tcp4_filter_cnt = 0; 7654 pf->fd_udp4_filter_cnt = 0; 7655 pf->fd_sctp4_filter_cnt = 0; 7656 pf->fd_ip4_filter_cnt = 0; 7657 7658 /* Reprogram the default input set for TCP/IPv4 */ 7659 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, 7660 I40E_L3_SRC_MASK | I40E_L3_DST_MASK | 7661 I40E_L4_SRC_MASK | I40E_L4_DST_MASK); 7662 7663 /* Reprogram the default input set for UDP/IPv4 */ 7664 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, 7665 I40E_L3_SRC_MASK | I40E_L3_DST_MASK | 7666 I40E_L4_SRC_MASK | I40E_L4_DST_MASK); 7667 7668 /* Reprogram the default input set for SCTP/IPv4 */ 7669 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, 7670 I40E_L3_SRC_MASK | I40E_L3_DST_MASK | 7671 I40E_L4_SRC_MASK | I40E_L4_DST_MASK); 7672 7673 /* Reprogram the default input set for Other/IPv4 */ 7674 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, 7675 I40E_L3_SRC_MASK | I40E_L3_DST_MASK); 7676 7677 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, 7678 I40E_L3_SRC_MASK | I40E_L3_DST_MASK); 7679 } 7680 7681 /** 7682 * i40e_cloud_filter_exit - Cleans up the cloud filters 7683 * @pf: Pointer to PF 7684 * 7685 * This function destroys the hlist where all the cloud filters 7686 * were saved. 
7687 **/ 7688 static void i40e_cloud_filter_exit(struct i40e_pf *pf) 7689 { 7690 struct i40e_cloud_filter *cfilter; 7691 struct hlist_node *node; 7692 7693 hlist_for_each_entry_safe(cfilter, node, 7694 &pf->cloud_filter_list, cloud_node) { 7695 hlist_del(&cfilter->cloud_node); 7696 kfree(cfilter); 7697 } 7698 pf->num_cloud_filters = 0; 7699 7700 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && 7701 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { 7702 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 7703 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; 7704 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; 7705 } 7706 } 7707 7708 /** 7709 * i40e_close - Disables a network interface 7710 * @netdev: network interface device structure 7711 * 7712 * The close entry point is called when an interface is de-activated 7713 * by the OS. The hardware is still under the driver's control, but 7714 * this netdev interface is disabled. 7715 * 7716 * Returns 0, this is not allowed to fail 7717 **/ 7718 int i40e_close(struct net_device *netdev) 7719 { 7720 struct i40e_netdev_priv *np = netdev_priv(netdev); 7721 struct i40e_vsi *vsi = np->vsi; 7722 7723 i40e_vsi_close(vsi); 7724 7725 return 0; 7726 } 7727 7728 /** 7729 * i40e_do_reset - Start a PF or Core Reset sequence 7730 * @pf: board private structure 7731 * @reset_flags: which reset is requested 7732 * @lock_acquired: indicates whether or not the lock has been acquired 7733 * before this function was called. 7734 * 7735 * The essential difference in resets is that the PF Reset 7736 * doesn't clear the packet buffers, doesn't reset the PE 7737 * firmware, and doesn't bother the other PFs on the chip. 7738 **/ 7739 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) 7740 { 7741 u32 val; 7742 7743 WARN_ON(in_interrupt()); 7744 7745 7746 /* do the biggest reset indicated */ 7747 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { 7748 7749 /* Request a Global Reset 7750 * 7751 * This will start the chip's countdown to the actual full 7752 * chip reset event, and a warning interrupt to be sent 7753 * to all PFs, including the requestor. Our handler 7754 * for the warning interrupt will deal with the shutdown 7755 * and recovery of the switch setup. 7756 */ 7757 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 7758 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 7759 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 7760 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 7761 7762 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { 7763 7764 /* Request a Core Reset 7765 * 7766 * Same as Global Reset, except does *not* include the MAC/PHY 7767 */ 7768 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 7769 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 7770 val |= I40E_GLGEN_RTRIG_CORER_MASK; 7771 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 7772 i40e_flush(&pf->hw); 7773 7774 } else if (reset_flags & I40E_PF_RESET_FLAG) { 7775 7776 /* Request a PF Reset 7777 * 7778 * Resets only the PF-specific registers 7779 * 7780 * This goes directly to the tear-down and rebuild of 7781 * the switch, since we need to do all the recovery as 7782 * for the Core Reset. 
7783 */ 7784 dev_dbg(&pf->pdev->dev, "PFR requested\n"); 7785 i40e_handle_reset_warning(pf, lock_acquired); 7786 7787 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { 7788 int v; 7789 7790 /* Find the VSI(s) that requested a re-init */ 7791 dev_info(&pf->pdev->dev, 7792 "VSI reinit requested\n"); 7793 for (v = 0; v < pf->num_alloc_vsi; v++) { 7794 struct i40e_vsi *vsi = pf->vsi[v]; 7795 7796 if (vsi != NULL && 7797 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, 7798 vsi->state)) 7799 i40e_vsi_reinit_locked(pf->vsi[v]); 7800 } 7801 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { 7802 int v; 7803 7804 /* Find the VSI(s) that needs to be brought down */ 7805 dev_info(&pf->pdev->dev, "VSI down requested\n"); 7806 for (v = 0; v < pf->num_alloc_vsi; v++) { 7807 struct i40e_vsi *vsi = pf->vsi[v]; 7808 7809 if (vsi != NULL && 7810 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, 7811 vsi->state)) { 7812 set_bit(__I40E_VSI_DOWN, vsi->state); 7813 i40e_down(vsi); 7814 } 7815 } 7816 } else { 7817 dev_info(&pf->pdev->dev, 7818 "bad reset request 0x%08x\n", reset_flags); 7819 } 7820 } 7821 7822 #ifdef CONFIG_I40E_DCB 7823 /** 7824 * i40e_dcb_need_reconfig - Check if DCB needs reconfig 7825 * @pf: board private structure 7826 * @old_cfg: current DCB config 7827 * @new_cfg: new DCB config 7828 **/ 7829 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 7830 struct i40e_dcbx_config *old_cfg, 7831 struct i40e_dcbx_config *new_cfg) 7832 { 7833 bool need_reconfig = false; 7834 7835 /* Check if ETS configuration has changed */ 7836 if (memcmp(&new_cfg->etscfg, 7837 &old_cfg->etscfg, 7838 sizeof(new_cfg->etscfg))) { 7839 /* If Priority Table has changed reconfig is needed */ 7840 if (memcmp(&new_cfg->etscfg.prioritytable, 7841 &old_cfg->etscfg.prioritytable, 7842 sizeof(new_cfg->etscfg.prioritytable))) { 7843 need_reconfig = true; 7844 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); 7845 } 7846 7847 if (memcmp(&new_cfg->etscfg.tcbwtable, 7848 &old_cfg->etscfg.tcbwtable, 7849 sizeof(new_cfg->etscfg.tcbwtable))) 7850 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 7851 7852 if (memcmp(&new_cfg->etscfg.tsatable, 7853 &old_cfg->etscfg.tsatable, 7854 sizeof(new_cfg->etscfg.tsatable))) 7855 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); 7856 } 7857 7858 /* Check if PFC configuration has changed */ 7859 if (memcmp(&new_cfg->pfc, 7860 &old_cfg->pfc, 7861 sizeof(new_cfg->pfc))) { 7862 need_reconfig = true; 7863 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); 7864 } 7865 7866 /* Check if APP Table has changed */ 7867 if (memcmp(&new_cfg->app, 7868 &old_cfg->app, 7869 sizeof(new_cfg->app))) { 7870 need_reconfig = true; 7871 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); 7872 } 7873 7874 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); 7875 return need_reconfig; 7876 } 7877 7878 /** 7879 * i40e_handle_lldp_event - Handle LLDP Change MIB event 7880 * @pf: board private structure 7881 * @e: event info posted on ARQ 7882 **/ 7883 static int i40e_handle_lldp_event(struct i40e_pf *pf, 7884 struct i40e_arq_event_info *e) 7885 { 7886 struct i40e_aqc_lldp_get_mib *mib = 7887 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; 7888 struct i40e_hw *hw = &pf->hw; 7889 struct i40e_dcbx_config tmp_dcbx_cfg; 7890 bool need_reconfig = false; 7891 int ret = 0; 7892 u8 type; 7893 7894 /* Not DCB capable or capability disabled */ 7895 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) 7896 return ret; 7897 7898 /* Ignore if event is not for Nearest Bridge */ 7899 type = 
((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) 7900 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 7901 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); 7902 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) 7903 return ret; 7904 7905 /* Check MIB Type and return if event for Remote MIB update */ 7906 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; 7907 dev_dbg(&pf->pdev->dev, 7908 "LLDP event mib type %s\n", type ? "remote" : "local"); 7909 if (type == I40E_AQ_LLDP_MIB_REMOTE) { 7910 /* Update the remote cached instance and return */ 7911 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, 7912 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, 7913 &hw->remote_dcbx_config); 7914 goto exit; 7915 } 7916 7917 /* Store the old configuration */ 7918 tmp_dcbx_cfg = hw->local_dcbx_config; 7919 7920 /* Reset the old DCBx configuration data */ 7921 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); 7922 /* Get updated DCBX data from firmware */ 7923 ret = i40e_get_dcb_config(&pf->hw); 7924 if (ret) { 7925 dev_info(&pf->pdev->dev, 7926 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", 7927 i40e_stat_str(&pf->hw, ret), 7928 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 7929 goto exit; 7930 } 7931 7932 /* No change detected in DCBX configs */ 7933 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, 7934 sizeof(tmp_dcbx_cfg))) { 7935 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 7936 goto exit; 7937 } 7938 7939 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, 7940 &hw->local_dcbx_config); 7941 7942 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); 7943 7944 if (!need_reconfig) 7945 goto exit; 7946 7947 /* Enable DCB tagging only when more than one TC */ 7948 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 7949 pf->flags |= I40E_FLAG_DCB_ENABLED; 7950 else 7951 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 7952 7953 set_bit(__I40E_PORT_SUSPENDED, pf->state); 7954 /* Reconfiguration needed quiesce all VSIs */ 7955 i40e_pf_quiesce_all_vsi(pf); 7956 7957 /* Changes in configuration update VEB/VSI */ 7958 i40e_dcb_reconfigure(pf); 7959 7960 ret = i40e_resume_port_tx(pf); 7961 7962 clear_bit(__I40E_PORT_SUSPENDED, pf->state); 7963 /* In case of error no point in resuming VSIs */ 7964 if (ret) 7965 goto exit; 7966 7967 /* Wait for the PF's queues to be disabled */ 7968 ret = i40e_pf_wait_queues_disabled(pf); 7969 if (ret) { 7970 /* Schedule PF reset to recover */ 7971 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); 7972 i40e_service_event_schedule(pf); 7973 } else { 7974 i40e_pf_unquiesce_all_vsi(pf); 7975 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | 7976 I40E_FLAG_CLIENT_L2_CHANGE); 7977 } 7978 7979 exit: 7980 return ret; 7981 } 7982 #endif /* CONFIG_I40E_DCB */ 7983 7984 /** 7985 * i40e_do_reset_safe - Protected reset path for userland calls. 
7986 * @pf: board private structure
7987 * @reset_flags: which reset is requested
7988 *
7989 **/
7990 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
7991 {
7992 rtnl_lock();
7993 i40e_do_reset(pf, reset_flags, true);
7994 rtnl_unlock();
7995 }
7996
7997 /**
7998 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
7999 * @pf: board private structure
8000 * @e: event info posted on ARQ
8001 *
8002 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8003 * and VF queues
8004 **/
8005 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8006 struct i40e_arq_event_info *e)
8007 {
8008 struct i40e_aqc_lan_overflow *data =
8009 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8010 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8011 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8012 struct i40e_hw *hw = &pf->hw;
8013 struct i40e_vf *vf;
8014 u16 vf_id;
8015
8016 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8017 queue, qtx_ctl);
8018
8019 /* Queue belongs to VF, find the VF and issue VF reset */
8020 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8021 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8022 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8023 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8024 vf_id -= hw->func_caps.vf_base_id;
8025 vf = &pf->vf[vf_id];
8026 i40e_vc_notify_vf_reset(vf);
8027 /* Allow VF to process pending reset notification */
8028 msleep(20);
8029 i40e_reset_vf(vf, false);
8030 }
8031 }
8032
8033 /**
8034 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8035 * @pf: board private structure
8036 **/
8037 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8038 {
8039 u32 val, fcnt_prog;
8040
8041 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8042 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8043 return fcnt_prog;
8044 }
8045
8046 /**
8047 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8048 * @pf: board private structure
8049 **/
8050 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8051 {
8052 u32 val, fcnt_prog;
8053
8054 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8055 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8056 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8057 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8058 return fcnt_prog;
8059 }
8060
8061 /**
8062 * i40e_get_global_fd_count - Get total FD filters programmed on device
8063 * @pf: board private structure
8064 **/
8065 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8066 {
8067 u32 val, fcnt_prog;
8068
8069 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8070 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8071 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8072 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8073 return fcnt_prog;
8074 }
8075
8076 /**
8077 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
8078 * @pf: board private structure
8079 **/
8080 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8081 {
8082 struct i40e_fdir_filter *filter;
8083 u32 fcnt_prog, fcnt_avail;
8084 struct hlist_node *node;
8085
8086 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8087 return;
8088
8089 /* Check if we have enough room to re-enable FDir SB capability.
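 * (Illustrative numbers, not driver defaults: if the PF owned 8192
 * guaranteed filters and the headroom constant were 32, sideband
 * would be considered to have room again once the global programmed
 * count dropped below 8192 - 32 = 8160.)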
*/
8090 fcnt_prog = i40e_get_global_fd_count(pf);
8091 fcnt_avail = pf->fdir_pf_filter_count;
8092 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8093 (pf->fd_add_err == 0) ||
8094 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
8095 if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
8096 pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
8097 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8098 (I40E_DEBUG_FD & pf->hw.debug_mask))
8099 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8100 }
8101 }
8102
8103 /* We should wait for even more space before re-enabling ATR.
8104 * Additionally, we cannot enable ATR as long as we still have TCP SB
8105 * rules active.
8106 */
8107 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8108 (pf->fd_tcp4_filter_cnt == 0)) {
8109 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
8110 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
8111 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8112 (I40E_DEBUG_FD & pf->hw.debug_mask))
8113 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8114 }
8115 }
8116
8117 /* if hw had a problem adding a filter, delete it */
8118 if (pf->fd_inv > 0) {
8119 hlist_for_each_entry_safe(filter, node,
8120 &pf->fdir_filter_list, fdir_node) {
8121 if (filter->fd_id == pf->fd_inv) {
8122 hlist_del(&filter->fdir_node);
8123 kfree(filter);
8124 pf->fdir_pf_active_filters--;
8125 pf->fd_inv = 0;
8126 }
8127 }
8128 }
8129 }
8130
8131 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8132 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8133 /**
8134 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8135 * @pf: board private structure
8136 **/
8137 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8138 {
8139 unsigned long min_flush_time;
8140 int flush_wait_retry = 50;
8141 bool disable_atr = false;
8142 int fd_room;
8143 int reg;
8144
8145 if (!time_after(jiffies, pf->fd_flush_timestamp +
8146 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8147 return;
8148
8149 /* If the flush is happening too quickly and we have mostly SB rules we
8150 * should not re-enable ATR for some time.
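 * Concretely, per the defines above, ATR stays auto-disabled until at
 * least I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE (30) seconds after the last
 * flush whenever the free sideband filter room is below the ATR
 * headroom threshold.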
8151 */
8152 min_flush_time = pf->fd_flush_timestamp +
8153 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8154 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8155
8156 if (!(time_after(jiffies, min_flush_time)) &&
8157 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8158 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8159 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8160 disable_atr = true;
8161 }
8162
8163 pf->fd_flush_timestamp = jiffies;
8164 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
8165 /* flush all filters */
8166 wr32(&pf->hw, I40E_PFQF_CTL_1,
8167 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8168 i40e_flush(&pf->hw);
8169 pf->fd_flush_cnt++;
8170 pf->fd_add_err = 0;
8171 do {
8172 /* Check FD flush status every 5-6 msec */
8173 usleep_range(5000, 6000);
8174 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8175 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8176 break;
8177 } while (flush_wait_retry--);
8178 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8179 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8180 } else {
8181 /* replay sideband filters */
8182 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8183 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8184 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
8185 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8186 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8187 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8188 }
8189 }
8190
8191 /**
8192 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8193 * @pf: board private structure
8194 **/
8195 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8196 {
8197 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8198 }
8199
8200 /* We can see up to 256 filter programming descriptors in transit if the
8201 * filters are being applied really fast, before we see the first filter
8202 * miss error on Rx queue 0. Accumulating enough error messages before
8203 * reacting will make sure we don't cause a flush too often.
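 * (I40E_MAX_FD_PROGRAM_ERROR below is that accumulation threshold;
 * the flush itself is requested through the __I40E_FD_FLUSH_REQUESTED
 * state bit and serviced by i40e_fdir_reinit_subtask() below.)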
8204 */ 8205 #define I40E_MAX_FD_PROGRAM_ERROR 256 8206 8207 /** 8208 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 8209 * @pf: board private structure 8210 **/ 8211 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 8212 { 8213 8214 /* if interface is down do nothing */ 8215 if (test_bit(__I40E_DOWN, pf->state)) 8216 return; 8217 8218 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) 8219 i40e_fdir_flush_and_replay(pf); 8220 8221 i40e_fdir_check_and_reenable(pf); 8222 8223 } 8224 8225 /** 8226 * i40e_vsi_link_event - notify VSI of a link event 8227 * @vsi: vsi to be notified 8228 * @link_up: link up or down 8229 **/ 8230 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 8231 { 8232 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) 8233 return; 8234 8235 switch (vsi->type) { 8236 case I40E_VSI_MAIN: 8237 if (!vsi->netdev || !vsi->netdev_registered) 8238 break; 8239 8240 if (link_up) { 8241 netif_carrier_on(vsi->netdev); 8242 netif_tx_wake_all_queues(vsi->netdev); 8243 } else { 8244 netif_carrier_off(vsi->netdev); 8245 netif_tx_stop_all_queues(vsi->netdev); 8246 } 8247 break; 8248 8249 case I40E_VSI_SRIOV: 8250 case I40E_VSI_VMDQ2: 8251 case I40E_VSI_CTRL: 8252 case I40E_VSI_IWARP: 8253 case I40E_VSI_MIRROR: 8254 default: 8255 /* there is no notification for other VSIs */ 8256 break; 8257 } 8258 } 8259 8260 /** 8261 * i40e_veb_link_event - notify elements on the veb of a link event 8262 * @veb: veb to be notified 8263 * @link_up: link up or down 8264 **/ 8265 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 8266 { 8267 struct i40e_pf *pf; 8268 int i; 8269 8270 if (!veb || !veb->pf) 8271 return; 8272 pf = veb->pf; 8273 8274 /* depth first... */ 8275 for (i = 0; i < I40E_MAX_VEB; i++) 8276 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 8277 i40e_veb_link_event(pf->veb[i], link_up); 8278 8279 /* ... 
now the local VSIs */ 8280 for (i = 0; i < pf->num_alloc_vsi; i++) 8281 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 8282 i40e_vsi_link_event(pf->vsi[i], link_up); 8283 } 8284 8285 /** 8286 * i40e_link_event - Update netif_carrier status 8287 * @pf: board private structure 8288 **/ 8289 static void i40e_link_event(struct i40e_pf *pf) 8290 { 8291 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 8292 u8 new_link_speed, old_link_speed; 8293 i40e_status status; 8294 bool new_link, old_link; 8295 8296 /* save off old link status information */ 8297 pf->hw.phy.link_info_old = pf->hw.phy.link_info; 8298 8299 /* set this to force the get_link_status call to refresh state */ 8300 pf->hw.phy.get_link_info = true; 8301 8302 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 8303 8304 status = i40e_get_link_status(&pf->hw, &new_link); 8305 8306 /* On success, disable temp link polling */ 8307 if (status == I40E_SUCCESS) { 8308 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING) 8309 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING; 8310 } else { 8311 /* Enable link polling temporarily until i40e_get_link_status 8312 * returns I40E_SUCCESS 8313 */ 8314 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING; 8315 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", 8316 status); 8317 return; 8318 } 8319 8320 old_link_speed = pf->hw.phy.link_info_old.link_speed; 8321 new_link_speed = pf->hw.phy.link_info.link_speed; 8322 8323 if (new_link == old_link && 8324 new_link_speed == old_link_speed && 8325 (test_bit(__I40E_VSI_DOWN, vsi->state) || 8326 new_link == netif_carrier_ok(vsi->netdev))) 8327 return; 8328 8329 i40e_print_link_message(vsi, new_link); 8330 8331 /* Notify the base of the switch tree connected to 8332 * the link. Floating VEBs are not notified. 
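 * The notification fans out depth-first: i40e_veb_link_event()
 * above recurses through any child VEBs before updating the VSIs
 * attached to each VEB.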
8333 */ 8334 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 8335 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 8336 else 8337 i40e_vsi_link_event(vsi, new_link); 8338 8339 if (pf->vf) 8340 i40e_vc_notify_link_state(pf); 8341 8342 if (pf->flags & I40E_FLAG_PTP) 8343 i40e_ptp_set_increment(pf); 8344 } 8345 8346 /** 8347 * i40e_watchdog_subtask - periodic checks not using event driven response 8348 * @pf: board private structure 8349 **/ 8350 static void i40e_watchdog_subtask(struct i40e_pf *pf) 8351 { 8352 int i; 8353 8354 /* if interface is down do nothing */ 8355 if (test_bit(__I40E_DOWN, pf->state) || 8356 test_bit(__I40E_CONFIG_BUSY, pf->state)) 8357 return; 8358 8359 /* make sure we don't do these things too often */ 8360 if (time_before(jiffies, (pf->service_timer_previous + 8361 pf->service_timer_period))) 8362 return; 8363 pf->service_timer_previous = jiffies; 8364 8365 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || 8366 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)) 8367 i40e_link_event(pf); 8368 8369 /* Update the stats for active netdevs so the network stack 8370 * can look at updated numbers whenever it cares to 8371 */ 8372 for (i = 0; i < pf->num_alloc_vsi; i++) 8373 if (pf->vsi[i] && pf->vsi[i]->netdev) 8374 i40e_update_stats(pf->vsi[i]); 8375 8376 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { 8377 /* Update the stats for the active switching components */ 8378 for (i = 0; i < I40E_MAX_VEB; i++) 8379 if (pf->veb[i]) 8380 i40e_update_veb_stats(pf->veb[i]); 8381 } 8382 8383 i40e_ptp_rx_hang(pf); 8384 i40e_ptp_tx_hang(pf); 8385 } 8386 8387 /** 8388 * i40e_reset_subtask - Set up for resetting the device and driver 8389 * @pf: board private structure 8390 **/ 8391 static void i40e_reset_subtask(struct i40e_pf *pf) 8392 { 8393 u32 reset_flags = 0; 8394 8395 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { 8396 reset_flags |= BIT(__I40E_REINIT_REQUESTED); 8397 clear_bit(__I40E_REINIT_REQUESTED, pf->state); 8398 } 8399 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { 8400 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); 8401 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); 8402 } 8403 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { 8404 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); 8405 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); 8406 } 8407 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { 8408 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 8409 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); 8410 } 8411 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { 8412 reset_flags |= BIT(__I40E_DOWN_REQUESTED); 8413 clear_bit(__I40E_DOWN_REQUESTED, pf->state); 8414 } 8415 8416 /* If there's a recovery already waiting, it takes 8417 * precedence before starting a new reset sequence. 
8418 */ 8419 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { 8420 i40e_prep_for_reset(pf, false); 8421 i40e_reset(pf); 8422 i40e_rebuild(pf, false, false); 8423 } 8424 8425 /* If we're already down or resetting, just bail */ 8426 if (reset_flags && 8427 !test_bit(__I40E_DOWN, pf->state) && 8428 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { 8429 i40e_do_reset(pf, reset_flags, false); 8430 } 8431 } 8432 8433 /** 8434 * i40e_handle_link_event - Handle link event 8435 * @pf: board private structure 8436 * @e: event info posted on ARQ 8437 **/ 8438 static void i40e_handle_link_event(struct i40e_pf *pf, 8439 struct i40e_arq_event_info *e) 8440 { 8441 struct i40e_aqc_get_link_status *status = 8442 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 8443 8444 /* Do a new status request to re-enable LSE reporting 8445 * and load new status information into the hw struct 8446 * This completely ignores any state information 8447 * in the ARQ event info, instead choosing to always 8448 * issue the AQ update link status command. 8449 */ 8450 i40e_link_event(pf); 8451 8452 /* Check if module meets thermal requirements */ 8453 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) { 8454 dev_err(&pf->pdev->dev, 8455 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n"); 8456 dev_err(&pf->pdev->dev, 8457 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 8458 } else { 8459 /* check for unqualified module, if link is down, suppress 8460 * the message if link was forced to be down. 8461 */ 8462 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 8463 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 8464 (!(status->link_info & I40E_AQ_LINK_UP)) && 8465 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { 8466 dev_err(&pf->pdev->dev, 8467 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); 8468 dev_err(&pf->pdev->dev, 8469 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 8470 } 8471 } 8472 } 8473 8474 /** 8475 * i40e_clean_adminq_subtask - Clean the AdminQ rings 8476 * @pf: board private structure 8477 **/ 8478 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 8479 { 8480 struct i40e_arq_event_info event; 8481 struct i40e_hw *hw = &pf->hw; 8482 u16 pending, i = 0; 8483 i40e_status ret; 8484 u16 opcode; 8485 u32 oldval; 8486 u32 val; 8487 8488 /* Do not run clean AQ when PF reset fails */ 8489 if (test_bit(__I40E_RESET_FAILED, pf->state)) 8490 return; 8491 8492 /* check for error indications */ 8493 val = rd32(&pf->hw, pf->hw.aq.arq.len); 8494 oldval = val; 8495 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 8496 if (hw->debug_mask & I40E_DEBUG_AQ) 8497 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 8498 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 8499 } 8500 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 8501 if (hw->debug_mask & I40E_DEBUG_AQ) 8502 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 8503 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 8504 pf->arq_overflows++; 8505 } 8506 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 8507 if (hw->debug_mask & I40E_DEBUG_AQ) 8508 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 8509 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 8510 } 8511 if (oldval != val) 8512 wr32(&pf->hw, pf->hw.aq.arq.len, val); 8513 8514 val = rd32(&pf->hw, pf->hw.aq.asq.len); 8515 oldval = val; 8516 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 8517 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 8518 
dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 8519 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 8520 } 8521 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 8522 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 8523 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 8524 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 8525 } 8526 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 8527 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 8528 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 8529 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 8530 } 8531 if (oldval != val) 8532 wr32(&pf->hw, pf->hw.aq.asq.len, val); 8533 8534 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 8535 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 8536 if (!event.msg_buf) 8537 return; 8538 8539 do { 8540 ret = i40e_clean_arq_element(hw, &event, &pending); 8541 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 8542 break; 8543 else if (ret) { 8544 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 8545 break; 8546 } 8547 8548 opcode = le16_to_cpu(event.desc.opcode); 8549 switch (opcode) { 8550 8551 case i40e_aqc_opc_get_link_status: 8552 i40e_handle_link_event(pf, &event); 8553 break; 8554 case i40e_aqc_opc_send_msg_to_pf: 8555 ret = i40e_vc_process_vf_msg(pf, 8556 le16_to_cpu(event.desc.retval), 8557 le32_to_cpu(event.desc.cookie_high), 8558 le32_to_cpu(event.desc.cookie_low), 8559 event.msg_buf, 8560 event.msg_len); 8561 break; 8562 case i40e_aqc_opc_lldp_update_mib: 8563 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 8564 #ifdef CONFIG_I40E_DCB 8565 rtnl_lock(); 8566 ret = i40e_handle_lldp_event(pf, &event); 8567 rtnl_unlock(); 8568 #endif /* CONFIG_I40E_DCB */ 8569 break; 8570 case i40e_aqc_opc_event_lan_overflow: 8571 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 8572 i40e_handle_lan_overflow_event(pf, &event); 8573 break; 8574 case i40e_aqc_opc_send_msg_to_peer: 8575 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 8576 break; 8577 case i40e_aqc_opc_nvm_erase: 8578 case i40e_aqc_opc_nvm_update: 8579 case i40e_aqc_opc_oem_post_update: 8580 i40e_debug(&pf->hw, I40E_DEBUG_NVM, 8581 "ARQ NVM operation 0x%04x completed\n", 8582 opcode); 8583 break; 8584 default: 8585 dev_info(&pf->pdev->dev, 8586 "ARQ: Unknown event 0x%04x ignored\n", 8587 opcode); 8588 break; 8589 } 8590 } while (i++ < pf->adminq_work_limit); 8591 8592 if (i < pf->adminq_work_limit) 8593 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); 8594 8595 /* re-enable Admin queue interrupt cause */ 8596 val = rd32(hw, I40E_PFINT_ICR0_ENA); 8597 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 8598 wr32(hw, I40E_PFINT_ICR0_ENA, val); 8599 i40e_flush(hw); 8600 8601 kfree(event.msg_buf); 8602 } 8603 8604 /** 8605 * i40e_verify_eeprom - make sure eeprom is good to use 8606 * @pf: board private structure 8607 **/ 8608 static void i40e_verify_eeprom(struct i40e_pf *pf) 8609 { 8610 int err; 8611 8612 err = i40e_diag_eeprom_test(&pf->hw); 8613 if (err) { 8614 /* retry in case of garbage read */ 8615 err = i40e_diag_eeprom_test(&pf->hw); 8616 if (err) { 8617 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 8618 err); 8619 set_bit(__I40E_BAD_EEPROM, pf->state); 8620 } 8621 } 8622 8623 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { 8624 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 8625 clear_bit(__I40E_BAD_EEPROM, pf->state); 8626 } 8627 } 8628 8629 /** 8630 * i40e_enable_pf_switch_lb 8631 * @pf: pointer to the PF structure 8632 * 8633 * enable switch loop back or die - no point in a return value 8634 **/ 8635 static 
void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 8636 { 8637 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 8638 struct i40e_vsi_context ctxt; 8639 int ret; 8640 8641 ctxt.seid = pf->main_vsi_seid; 8642 ctxt.pf_num = pf->hw.pf_id; 8643 ctxt.vf_num = 0; 8644 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 8645 if (ret) { 8646 dev_info(&pf->pdev->dev, 8647 "couldn't get PF vsi config, err %s aq_err %s\n", 8648 i40e_stat_str(&pf->hw, ret), 8649 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 8650 return; 8651 } 8652 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8653 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8654 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8655 8656 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 8657 if (ret) { 8658 dev_info(&pf->pdev->dev, 8659 "update vsi switch failed, err %s aq_err %s\n", 8660 i40e_stat_str(&pf->hw, ret), 8661 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 8662 } 8663 } 8664 8665 /** 8666 * i40e_disable_pf_switch_lb 8667 * @pf: pointer to the PF structure 8668 * 8669 * disable switch loop back or die - no point in a return value 8670 **/ 8671 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 8672 { 8673 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 8674 struct i40e_vsi_context ctxt; 8675 int ret; 8676 8677 ctxt.seid = pf->main_vsi_seid; 8678 ctxt.pf_num = pf->hw.pf_id; 8679 ctxt.vf_num = 0; 8680 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 8681 if (ret) { 8682 dev_info(&pf->pdev->dev, 8683 "couldn't get PF vsi config, err %s aq_err %s\n", 8684 i40e_stat_str(&pf->hw, ret), 8685 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 8686 return; 8687 } 8688 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8689 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8690 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8691 8692 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 8693 if (ret) { 8694 dev_info(&pf->pdev->dev, 8695 "update vsi switch failed, err %s aq_err %s\n", 8696 i40e_stat_str(&pf->hw, ret), 8697 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 8698 } 8699 } 8700 8701 /** 8702 * i40e_config_bridge_mode - Configure the HW bridge mode 8703 * @veb: pointer to the bridge instance 8704 * 8705 * Configure the loop back mode for the LAN VSI that is downlink to the 8706 * specified HW bridge instance. It is expected this function is called 8707 * when a new HW bridge is instantiated. 8708 **/ 8709 static void i40e_config_bridge_mode(struct i40e_veb *veb) 8710 { 8711 struct i40e_pf *pf = veb->pf; 8712 8713 if (pf->hw.debug_mask & I40E_DEBUG_LAN) 8714 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 8715 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 8716 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 8717 i40e_disable_pf_switch_lb(pf); 8718 else 8719 i40e_enable_pf_switch_lb(pf); 8720 } 8721 8722 /** 8723 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 8724 * @veb: pointer to the VEB instance 8725 * 8726 * This is a recursive function that first builds the attached VSIs then 8727 * recurses in to build the next layer of VEB. We track the connections 8728 * through our own index numbers because the seid's from the HW could 8729 * change across the reset. 
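 * The rebuild order for each VEB is: the owner VSI first, then the
 * VEB itself, then the remaining member VSIs, and finally any child
 * VEBs, each of which recurses through the same sequence.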
8730 **/ 8731 static int i40e_reconstitute_veb(struct i40e_veb *veb) 8732 { 8733 struct i40e_vsi *ctl_vsi = NULL; 8734 struct i40e_pf *pf = veb->pf; 8735 int v, veb_idx; 8736 int ret; 8737 8738 /* build VSI that owns this VEB, temporarily attached to base VEB */ 8739 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 8740 if (pf->vsi[v] && 8741 pf->vsi[v]->veb_idx == veb->idx && 8742 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 8743 ctl_vsi = pf->vsi[v]; 8744 break; 8745 } 8746 } 8747 if (!ctl_vsi) { 8748 dev_info(&pf->pdev->dev, 8749 "missing owner VSI for veb_idx %d\n", veb->idx); 8750 ret = -ENOENT; 8751 goto end_reconstitute; 8752 } 8753 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 8754 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 8755 ret = i40e_add_vsi(ctl_vsi); 8756 if (ret) { 8757 dev_info(&pf->pdev->dev, 8758 "rebuild of veb_idx %d owner VSI failed: %d\n", 8759 veb->idx, ret); 8760 goto end_reconstitute; 8761 } 8762 i40e_vsi_reset_stats(ctl_vsi); 8763 8764 /* create the VEB in the switch and move the VSI onto the VEB */ 8765 ret = i40e_add_veb(veb, ctl_vsi); 8766 if (ret) 8767 goto end_reconstitute; 8768 8769 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 8770 veb->bridge_mode = BRIDGE_MODE_VEB; 8771 else 8772 veb->bridge_mode = BRIDGE_MODE_VEPA; 8773 i40e_config_bridge_mode(veb); 8774 8775 /* create the remaining VSIs attached to this VEB */ 8776 for (v = 0; v < pf->num_alloc_vsi; v++) { 8777 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 8778 continue; 8779 8780 if (pf->vsi[v]->veb_idx == veb->idx) { 8781 struct i40e_vsi *vsi = pf->vsi[v]; 8782 8783 vsi->uplink_seid = veb->seid; 8784 ret = i40e_add_vsi(vsi); 8785 if (ret) { 8786 dev_info(&pf->pdev->dev, 8787 "rebuild of vsi_idx %d failed: %d\n", 8788 v, ret); 8789 goto end_reconstitute; 8790 } 8791 i40e_vsi_reset_stats(vsi); 8792 } 8793 } 8794 8795 /* create any VEBs attached to this VEB - RECURSION */ 8796 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 8797 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 8798 pf->veb[veb_idx]->uplink_seid = veb->seid; 8799 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 8800 if (ret) 8801 break; 8802 } 8803 } 8804 8805 end_reconstitute: 8806 return ret; 8807 } 8808 8809 /** 8810 * i40e_get_capabilities - get info about the HW 8811 * @pf: the PF struct 8812 **/ 8813 static int i40e_get_capabilities(struct i40e_pf *pf, 8814 enum i40e_admin_queue_opc list_type) 8815 { 8816 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 8817 u16 data_size; 8818 int buf_len; 8819 int err; 8820 8821 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 8822 do { 8823 cap_buf = kzalloc(buf_len, GFP_KERNEL); 8824 if (!cap_buf) 8825 return -ENOMEM; 8826 8827 /* this loads the data into the hw struct for us */ 8828 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 8829 &data_size, list_type, 8830 NULL); 8831 /* data loaded, buffer no longer needed */ 8832 kfree(cap_buf); 8833 8834 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 8835 /* retry with a larger buffer */ 8836 buf_len = data_size; 8837 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 8838 dev_info(&pf->pdev->dev, 8839 "capability discovery failed, err %s aq_err %s\n", 8840 i40e_stat_str(&pf->hw, err), 8841 i40e_aq_str(&pf->hw, 8842 pf->hw.aq.asq_last_status)); 8843 return -ENODEV; 8844 } 8845 } while (err); 8846 8847 if (pf->hw.debug_mask & I40E_DEBUG_USER) { 8848 if (list_type == i40e_aqc_opc_list_func_capabilities) { 8849 dev_info(&pf->pdev->dev, 8850 "pf=%d, num_vfs=%d, 
msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 8851 pf->hw.pf_id, pf->hw.func_caps.num_vfs, 8852 pf->hw.func_caps.num_msix_vectors, 8853 pf->hw.func_caps.num_msix_vectors_vf, 8854 pf->hw.func_caps.fd_filters_guaranteed, 8855 pf->hw.func_caps.fd_filters_best_effort, 8856 pf->hw.func_caps.num_tx_qp, 8857 pf->hw.func_caps.num_vsis); 8858 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) { 8859 dev_info(&pf->pdev->dev, 8860 "switch_mode=0x%04x, function_valid=0x%08x\n", 8861 pf->hw.dev_caps.switch_mode, 8862 pf->hw.dev_caps.valid_functions); 8863 dev_info(&pf->pdev->dev, 8864 "SR-IOV=%d, num_vfs for all function=%u\n", 8865 pf->hw.dev_caps.sr_iov_1_1, 8866 pf->hw.dev_caps.num_vfs); 8867 dev_info(&pf->pdev->dev, 8868 "num_vsis=%u, num_rx:%u, num_tx=%u\n", 8869 pf->hw.dev_caps.num_vsis, 8870 pf->hw.dev_caps.num_rx_qp, 8871 pf->hw.dev_caps.num_tx_qp); 8872 } 8873 } 8874 if (list_type == i40e_aqc_opc_list_func_capabilities) { 8875 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 8876 + pf->hw.func_caps.num_vfs) 8877 if (pf->hw.revision_id == 0 && 8878 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { 8879 dev_info(&pf->pdev->dev, 8880 "got num_vsis %d, setting num_vsis to %d\n", 8881 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 8882 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 8883 } 8884 } 8885 return 0; 8886 } 8887 8888 static int i40e_vsi_clear(struct i40e_vsi *vsi); 8889 8890 /** 8891 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 8892 * @pf: board private structure 8893 **/ 8894 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 8895 { 8896 struct i40e_vsi *vsi; 8897 8898 /* quick workaround for an NVM issue that leaves a critical register 8899 * uninitialized 8900 */ 8901 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 8902 static const u32 hkey[] = { 8903 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 8904 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 8905 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 8906 0x95b3a76d}; 8907 int i; 8908 8909 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 8910 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 8911 } 8912 8913 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 8914 return; 8915 8916 /* find existing VSI and see if it needs configuring */ 8917 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); 8918 8919 /* create a new VSI if none exists */ 8920 if (!vsi) { 8921 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 8922 pf->vsi[pf->lan_vsi]->seid, 0); 8923 if (!vsi) { 8924 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 8925 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 8926 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; 8927 return; 8928 } 8929 } 8930 8931 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 8932 } 8933 8934 /** 8935 * i40e_fdir_teardown - release the Flow Director resources 8936 * @pf: board private structure 8937 **/ 8938 static void i40e_fdir_teardown(struct i40e_pf *pf) 8939 { 8940 struct i40e_vsi *vsi; 8941 8942 i40e_fdir_filter_exit(pf); 8943 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); 8944 if (vsi) 8945 i40e_vsi_release(vsi); 8946 } 8947 8948 /** 8949 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs 8950 * @vsi: PF main vsi 8951 * @seid: seid of main or channel VSIs 8952 * 8953 * Rebuilds cloud filters associated with main VSI and channel VSIs if they 8954 * existed before reset 8955 **/ 8956 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) 8957 { 8958 struct i40e_cloud_filter *cfilter; 8959 struct i40e_pf *pf = vsi->back; 8960 struct hlist_node *node; 8961 
i40e_status ret; 8962 8963 /* Add cloud filters back if they exist */ 8964 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, 8965 cloud_node) { 8966 if (cfilter->seid != seid) 8967 continue; 8968 8969 if (cfilter->dst_port) 8970 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 8971 true); 8972 else 8973 ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 8974 8975 if (ret) { 8976 dev_dbg(&pf->pdev->dev, 8977 "Failed to rebuild cloud filter, err %s aq_err %s\n", 8978 i40e_stat_str(&pf->hw, ret), 8979 i40e_aq_str(&pf->hw, 8980 pf->hw.aq.asq_last_status)); 8981 return ret; 8982 } 8983 } 8984 return 0; 8985 } 8986 8987 /** 8988 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset 8989 * @vsi: PF main vsi 8990 * 8991 * Rebuilds channel VSIs if they existed before reset 8992 **/ 8993 static int i40e_rebuild_channels(struct i40e_vsi *vsi) 8994 { 8995 struct i40e_channel *ch, *ch_tmp; 8996 i40e_status ret; 8997 8998 if (list_empty(&vsi->ch_list)) 8999 return 0; 9000 9001 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { 9002 if (!ch->initialized) 9003 break; 9004 /* Proceed with creation of channel (VMDq2) VSI */ 9005 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); 9006 if (ret) { 9007 dev_info(&vsi->back->pdev->dev, 9008 "failed to rebuild channels using uplink_seid %u\n", 9009 vsi->uplink_seid); 9010 return ret; 9011 } 9012 /* Reconfigure TX queues using QTX_CTL register */ 9013 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch); 9014 if (ret) { 9015 dev_info(&vsi->back->pdev->dev, 9016 "failed to configure TX rings for channel %u\n", 9017 ch->seid); 9018 return ret; 9019 } 9020 /* update 'next_base_queue' */ 9021 vsi->next_base_queue = vsi->next_base_queue + 9022 ch->num_queue_pairs; 9023 if (ch->max_tx_rate) { 9024 u64 credits = ch->max_tx_rate; 9025 9026 if (i40e_set_bw_limit(vsi, ch->seid, 9027 ch->max_tx_rate)) 9028 return -EINVAL; 9029 9030 do_div(credits, I40E_BW_CREDIT_DIVISOR); 9031 dev_dbg(&vsi->back->pdev->dev, 9032 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", 9033 ch->max_tx_rate, 9034 credits, 9035 ch->seid); 9036 } 9037 ret = i40e_rebuild_cloud_filters(vsi, ch->seid); 9038 if (ret) { 9039 dev_dbg(&vsi->back->pdev->dev, 9040 "Failed to rebuild cloud filters for channel VSI %u\n", 9041 ch->seid); 9042 return ret; 9043 } 9044 } 9045 return 0; 9046 } 9047 9048 /** 9049 * i40e_prep_for_reset - prep for the core to reset 9050 * @pf: board private structure 9051 * @lock_acquired: indicates whether or not the lock has been acquired 9052 * before this function was called. 9053 * 9054 * Close up the VFs and other things in prep for PF Reset. 
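 * Roughly: notify the VFs (if the admin queue is still alive),
 * quiesce all VSIs, then shut down the admin queue and the LAN HMC
 * so the reset can proceed cleanly.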
9055 **/ 9056 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) 9057 { 9058 struct i40e_hw *hw = &pf->hw; 9059 i40e_status ret = 0; 9060 u32 v; 9061 9062 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); 9063 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 9064 return; 9065 if (i40e_check_asq_alive(&pf->hw)) 9066 i40e_vc_notify_reset(pf); 9067 9068 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 9069 9070 /* quiesce the VSIs and their queues that are not already DOWN */ 9071 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */ 9072 if (!lock_acquired) 9073 rtnl_lock(); 9074 i40e_pf_quiesce_all_vsi(pf); 9075 if (!lock_acquired) 9076 rtnl_unlock(); 9077 9078 for (v = 0; v < pf->num_alloc_vsi; v++) { 9079 if (pf->vsi[v]) 9080 pf->vsi[v]->seid = 0; 9081 } 9082 9083 i40e_shutdown_adminq(&pf->hw); 9084 9085 /* call shutdown HMC */ 9086 if (hw->hmc.hmc_obj) { 9087 ret = i40e_shutdown_lan_hmc(hw); 9088 if (ret) 9089 dev_warn(&pf->pdev->dev, 9090 "shutdown_lan_hmc failed: %d\n", ret); 9091 } 9092 } 9093 9094 /** 9095 * i40e_send_version - update firmware with driver version 9096 * @pf: PF struct 9097 */ 9098 static void i40e_send_version(struct i40e_pf *pf) 9099 { 9100 struct i40e_driver_version dv; 9101 9102 dv.major_version = DRV_VERSION_MAJOR; 9103 dv.minor_version = DRV_VERSION_MINOR; 9104 dv.build_version = DRV_VERSION_BUILD; 9105 dv.subbuild_version = 0; 9106 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); 9107 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 9108 } 9109 9110 /** 9111 * i40e_get_oem_version - get OEM specific version information 9112 * @hw: pointer to the hardware structure 9113 **/ 9114 static void i40e_get_oem_version(struct i40e_hw *hw) 9115 { 9116 u16 block_offset = 0xffff; 9117 u16 block_length = 0; 9118 u16 capabilities = 0; 9119 u16 gen_snap = 0; 9120 u16 release = 0; 9121 9122 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B 9123 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00 9124 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01 9125 #define I40E_NVM_OEM_GEN_OFFSET 0x02 9126 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03 9127 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F 9128 #define I40E_NVM_OEM_LENGTH 3 9129 9130 /* Check if pointer to OEM version block is valid. */ 9131 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset); 9132 if (block_offset == 0xffff) 9133 return; 9134 9135 /* Check if OEM version block has correct length. */ 9136 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET, 9137 &block_length); 9138 if (block_length < I40E_NVM_OEM_LENGTH) 9139 return; 9140 9141 /* Check if OEM version format is as expected. 
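 * (All bits covered by I40E_NVM_OEM_CAPABILITIES_MASK must be clear;
 * otherwise the block is not in the expected layout and the OEM
 * version is left unreported.)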
*/
9142 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9143 &capabilities);
9144 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9145 return;
9146
9147 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9148 &gen_snap);
9149 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9150 &release);
9151 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9152 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9153 }
9154
9155 /**
9156 * i40e_reset - wait for core reset to finish; reset the PF if a CoreR is not seen
9157 * @pf: board private structure
9158 **/
9159 static int i40e_reset(struct i40e_pf *pf)
9160 {
9161 struct i40e_hw *hw = &pf->hw;
9162 i40e_status ret;
9163
9164 ret = i40e_pf_reset(hw);
9165 if (ret) {
9166 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9167 set_bit(__I40E_RESET_FAILED, pf->state);
9168 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9169 } else {
9170 pf->pfr_count++;
9171 }
9172 return ret;
9173 }
9174
9175 /**
9176 * i40e_rebuild - rebuild using a saved config
9177 * @pf: board private structure
9178 * @reinit: if the Main VSI needs to be re-initialized.
9179 * @lock_acquired: indicates whether or not the lock has been acquired
9180 * before this function was called.
9181 **/
9182 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9183 {
9184 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9185 struct i40e_hw *hw = &pf->hw;
9186 u8 set_fc_aq_fail = 0;
9187 i40e_status ret;
9188 u32 val;
9189 int v;
9190
9191 if (test_bit(__I40E_DOWN, pf->state))
9192 goto clear_recovery;
9193 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9194
9195 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9196 ret = i40e_init_adminq(&pf->hw);
9197 if (ret) {
9198 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9199 i40e_stat_str(&pf->hw, ret),
9200 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9201 goto clear_recovery;
9202 }
9203 i40e_get_oem_version(&pf->hw);
9204
9205 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9206 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9207 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9208 /* The following delay is necessary for 4.33 firmware and older
9209 * to recover after EMP reset. 200 ms should suffice but we
9210 * wait 300 ms here to be sure that FW is ready to operate
9211 * after reset.
9212 */
9213 mdelay(300);
9214 }
9215
9216 /* re-verify the eeprom if we just had an EMP reset */
9217 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9218 i40e_verify_eeprom(pf);
9219
9220 i40e_clear_pxe_mode(hw);
9221 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9222 if (ret)
9223 goto end_core_reset;
9224
9225 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9226 hw->func_caps.num_rx_qp, 0, 0);
9227 if (ret) {
9228 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9229 goto end_core_reset;
9230 }
9231 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9232 if (ret) {
9233 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9234 goto end_core_reset;
9235 }
9236
9237 /* Enable FW to write a default DCB config on link-up */
9238 i40e_aq_set_dcb_parameters(hw, true, NULL);
9239
9240 #ifdef CONFIG_I40E_DCB
9241 ret = i40e_init_pf_dcb(pf);
9242 if (ret) {
9243 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9244 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9245 /* Continue without DCB enabled */
9246 }
9247 #endif /* CONFIG_I40E_DCB */
9248 /* do basic switch setup */
9249 if (!lock_acquired)
9250 rtnl_lock();
9251 ret = i40e_setup_pf_switch(pf, reinit);
9252 if (ret)
9253 goto end_unlock;
9254
9255 /* The driver only wants link up/down and module qualification
9256 * reports from firmware. Note the negative logic.
9257 */
9258 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9259 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9260 I40E_AQ_EVENT_MEDIA_NA |
9261 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9262 if (ret)
9263 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9264 i40e_stat_str(&pf->hw, ret),
9265 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9266
9267 /* make sure our flow control settings are restored */
9268 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
9269 if (ret)
9270 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
9271 i40e_stat_str(&pf->hw, ret),
9272 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9273
9274 /* Rebuild the VSIs and VEBs that existed before reset.
9275 * They are still in our local switch element arrays, so only
9276 * need to rebuild the switch model in the HW.
9277 *
9278 * If there were VEBs but the reconstitution failed, we'll try
9279 * to recover minimal use by getting the basic PF VSI working.
9280 */
9281 if (vsi->uplink_seid != pf->mac_seid) {
9282 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9283 /* find the one VEB connected to the MAC, and find orphans */
9284 for (v = 0; v < I40E_MAX_VEB; v++) {
9285 if (!pf->veb[v])
9286 continue;
9287
9288 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9289 pf->veb[v]->uplink_seid == 0) {
9290 ret = i40e_reconstitute_veb(pf->veb[v]);
9291
9292 if (!ret)
9293 continue;
9294
9295 /* If Main VEB failed, we're in deep doodoo,
9296 * so give up rebuilding the switch and set up
9297 * for minimal rebuild of PF VSI.
9298 * If orphan failed, we'll report the error
9299 * but try to keep going.
9300 */ 9301 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 9302 dev_info(&pf->pdev->dev, 9303 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 9304 ret); 9305 vsi->uplink_seid = pf->mac_seid; 9306 break; 9307 } else if (pf->veb[v]->uplink_seid == 0) { 9308 dev_info(&pf->pdev->dev, 9309 "rebuild of orphan VEB failed: %d\n", 9310 ret); 9311 } 9312 } 9313 } 9314 } 9315 9316 if (vsi->uplink_seid == pf->mac_seid) { 9317 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 9318 /* no VEB, so rebuild only the Main VSI */ 9319 ret = i40e_add_vsi(vsi); 9320 if (ret) { 9321 dev_info(&pf->pdev->dev, 9322 "rebuild of Main VSI failed: %d\n", ret); 9323 goto end_unlock; 9324 } 9325 } 9326 9327 if (vsi->mqprio_qopt.max_rate[0]) { 9328 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; 9329 u64 credits = 0; 9330 9331 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); 9332 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); 9333 if (ret) 9334 goto end_unlock; 9335 9336 credits = max_tx_rate; 9337 do_div(credits, I40E_BW_CREDIT_DIVISOR); 9338 dev_dbg(&vsi->back->pdev->dev, 9339 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", 9340 max_tx_rate, 9341 credits, 9342 vsi->seid); 9343 } 9344 9345 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); 9346 if (ret) 9347 goto end_unlock; 9348 9349 /* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs 9350 * for this main VSI if they exist 9351 */ 9352 ret = i40e_rebuild_channels(vsi); 9353 if (ret) 9354 goto end_unlock; 9355 9356 /* Reconfigure hardware for allowing smaller MSS in the case 9357 * of TSO, so that we avoid the MDD being fired and causing 9358 * a reset in the case of small MSS+TSO. 9359 */ 9360 #define I40E_REG_MSS 0x000E64DC 9361 #define I40E_REG_MSS_MIN_MASK 0x3FF0000 9362 #define I40E_64BYTE_MSS 0x400000 9363 val = rd32(hw, I40E_REG_MSS); 9364 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 9365 val &= ~I40E_REG_MSS_MIN_MASK; 9366 val |= I40E_64BYTE_MSS; 9367 wr32(hw, I40E_REG_MSS, val); 9368 } 9369 9370 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { 9371 msleep(75); 9372 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 9373 if (ret) 9374 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 9375 i40e_stat_str(&pf->hw, ret), 9376 i40e_aq_str(&pf->hw, 9377 pf->hw.aq.asq_last_status)); 9378 } 9379 /* reinit the misc interrupt */ 9380 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 9381 ret = i40e_setup_misc_vector(pf); 9382 9383 /* Add a filter to drop all Flow control frames from any VSI, so they 9384 * cannot be transmitted. By doing so we stop a malicious VF from sending out 9385 * PAUSE or PFC frames and potentially controlling traffic for other 9386 * PF/VF VSIs. 9387 * The FW can still send Flow control frames if enabled. 9388 */ 9389 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, 9390 pf->main_vsi_seid); 9391 9392 /* restart the VSIs that were rebuilt and running before the reset */ 9393 i40e_pf_unquiesce_all_vsi(pf); 9394 9395 /* Release the RTNL lock before we start resetting VFs */ 9396 if (!lock_acquired) 9397 rtnl_unlock(); 9398 9399 /* Restore promiscuous settings */ 9400 ret = i40e_set_promiscuous(pf, pf->cur_promisc); 9401 if (ret) 9402 dev_warn(&pf->pdev->dev, 9403 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n", 9404 pf->cur_promisc ?
"on" : "off", 9405 i40e_stat_str(&pf->hw, ret), 9406 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9407 9408 i40e_reset_all_vfs(pf, true); 9409 9410 /* tell the firmware that we're starting */ 9411 i40e_send_version(pf); 9412 9413 /* We've already released the lock, so don't do it again */ 9414 goto end_core_reset; 9415 9416 end_unlock: 9417 if (!lock_acquired) 9418 rtnl_unlock(); 9419 end_core_reset: 9420 clear_bit(__I40E_RESET_FAILED, pf->state); 9421 clear_recovery: 9422 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); 9423 } 9424 9425 /** 9426 * i40e_reset_and_rebuild - reset and rebuild using a saved config 9427 * @pf: board private structure 9428 * @reinit: if the Main VSI needs to re-initialized. 9429 * @lock_acquired: indicates whether or not the lock has been acquired 9430 * before this function was called. 9431 **/ 9432 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, 9433 bool lock_acquired) 9434 { 9435 int ret; 9436 /* Now we wait for GRST to settle out. 9437 * We don't have to delete the VEBs or VSIs from the hw switch 9438 * because the reset will make them disappear. 9439 */ 9440 ret = i40e_reset(pf); 9441 if (!ret) 9442 i40e_rebuild(pf, reinit, lock_acquired); 9443 } 9444 9445 /** 9446 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 9447 * @pf: board private structure 9448 * 9449 * Close up the VFs and other things in prep for a Core Reset, 9450 * then get ready to rebuild the world. 9451 * @lock_acquired: indicates whether or not the lock has been acquired 9452 * before this function was called. 9453 **/ 9454 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) 9455 { 9456 i40e_prep_for_reset(pf, lock_acquired); 9457 i40e_reset_and_rebuild(pf, false, lock_acquired); 9458 } 9459 9460 /** 9461 * i40e_handle_mdd_event 9462 * @pf: pointer to the PF structure 9463 * 9464 * Called from the MDD irq handler to identify possibly malicious vfs 9465 **/ 9466 static void i40e_handle_mdd_event(struct i40e_pf *pf) 9467 { 9468 struct i40e_hw *hw = &pf->hw; 9469 bool mdd_detected = false; 9470 bool pf_mdd_detected = false; 9471 struct i40e_vf *vf; 9472 u32 reg; 9473 int i; 9474 9475 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) 9476 return; 9477 9478 /* find what triggered the MDD event */ 9479 reg = rd32(hw, I40E_GL_MDET_TX); 9480 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 9481 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 9482 I40E_GL_MDET_TX_PF_NUM_SHIFT; 9483 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 9484 I40E_GL_MDET_TX_VF_NUM_SHIFT; 9485 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 9486 I40E_GL_MDET_TX_EVENT_SHIFT; 9487 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 9488 I40E_GL_MDET_TX_QUEUE_SHIFT) - 9489 pf->hw.func_caps.base_queue; 9490 if (netif_msg_tx_err(pf)) 9491 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 9492 event, queue, pf_num, vf_num); 9493 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 9494 mdd_detected = true; 9495 } 9496 reg = rd32(hw, I40E_GL_MDET_RX); 9497 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 9498 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 9499 I40E_GL_MDET_RX_FUNCTION_SHIFT; 9500 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 9501 I40E_GL_MDET_RX_EVENT_SHIFT; 9502 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 9503 I40E_GL_MDET_RX_QUEUE_SHIFT) - 9504 pf->hw.func_caps.base_queue; 9505 if (netif_msg_rx_err(pf)) 9506 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x 
on RX queue %d of function 0x%02x\n", 9507 event, queue, func); 9508 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 9509 mdd_detected = true; 9510 } 9511 9512 if (mdd_detected) { 9513 reg = rd32(hw, I40E_PF_MDET_TX); 9514 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 9515 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 9516 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 9517 pf_mdd_detected = true; 9518 } 9519 reg = rd32(hw, I40E_PF_MDET_RX); 9520 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 9521 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 9522 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 9523 pf_mdd_detected = true; 9524 } 9525 /* Queue belongs to the PF, initiate a reset */ 9526 if (pf_mdd_detected) { 9527 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); 9528 i40e_service_event_schedule(pf); 9529 } 9530 } 9531 9532 /* see if one of the VFs needs its hand slapped */ 9533 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 9534 vf = &(pf->vf[i]); 9535 reg = rd32(hw, I40E_VP_MDET_TX(i)); 9536 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 9537 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 9538 vf->num_mdd_events++; 9539 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 9540 i); 9541 } 9542 9543 reg = rd32(hw, I40E_VP_MDET_RX(i)); 9544 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 9545 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 9546 vf->num_mdd_events++; 9547 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 9548 i); 9549 } 9550 9551 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 9552 dev_info(&pf->pdev->dev, 9553 "Too many MDD events on VF %d, disabled\n", i); 9554 dev_info(&pf->pdev->dev, 9555 "Use PF Control I/F to re-enable the VF\n"); 9556 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); 9557 } 9558 } 9559 9560 /* re-enable mdd interrupt cause */ 9561 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); 9562 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 9563 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 9564 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 9565 i40e_flush(hw); 9566 } 9567 9568 static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) 9569 { 9570 switch (port->type) { 9571 case UDP_TUNNEL_TYPE_VXLAN: 9572 return "vxlan"; 9573 case UDP_TUNNEL_TYPE_GENEVE: 9574 return "geneve"; 9575 default: 9576 return "unknown"; 9577 } 9578 } 9579 9580 /** 9581 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters 9582 * @pf: board private structure 9583 **/ 9584 static void i40e_sync_udp_filters(struct i40e_pf *pf) 9585 { 9586 int i; 9587 9588 /* loop through and set pending bit for all active UDP filters */ 9589 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 9590 if (pf->udp_ports[i].port) 9591 pf->pending_udp_bitmap |= BIT_ULL(i); 9592 } 9593 9594 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 9595 } 9596 9597 /** 9598 * i40e_sync_udp_filters_subtask - Sync pending UDP tunnel ports with HW 9599 * @pf: board private structure 9600 **/ 9601 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) 9602 { 9603 struct i40e_hw *hw = &pf->hw; 9604 i40e_status ret; 9605 u16 port; 9606 int i; 9607 9608 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) 9609 return; 9610 9611 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; 9612 9613 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 9614 if (pf->pending_udp_bitmap & BIT_ULL(i)) { 9615 pf->pending_udp_bitmap &= ~BIT_ULL(i); 9616 port = pf->udp_ports[i].port; 9617 if (port) 9618 ret = i40e_aq_add_udp_tunnel(hw, port, 9619 pf->udp_ports[i].type, 9620 NULL, NULL); 9621 else 9622 ret = i40e_aq_del_udp_tunnel(hw, i,
NULL); 9623 9624 if (ret) { 9625 dev_info(&pf->pdev->dev, 9626 "%s %s port %d, index %d failed, err %s aq_err %s\n", 9627 i40e_tunnel_name(&pf->udp_ports[i]), 9628 port ? "add" : "delete", 9629 port, i, 9630 i40e_stat_str(&pf->hw, ret), 9631 i40e_aq_str(&pf->hw, 9632 pf->hw.aq.asq_last_status)); 9633 pf->udp_ports[i].port = 0; 9634 } 9635 } 9636 } 9637 } 9638 9639 /** 9640 * i40e_service_task - Run the driver's async subtasks 9641 * @work: pointer to work_struct containing our data 9642 **/ 9643 static void i40e_service_task(struct work_struct *work) 9644 { 9645 struct i40e_pf *pf = container_of(work, 9646 struct i40e_pf, 9647 service_task); 9648 unsigned long start_time = jiffies; 9649 9650 /* don't bother with service tasks if a reset is in progress */ 9651 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 9652 return; 9653 9654 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) 9655 return; 9656 9657 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); 9658 i40e_sync_filters_subtask(pf); 9659 i40e_reset_subtask(pf); 9660 i40e_handle_mdd_event(pf); 9661 i40e_vc_process_vflr_event(pf); 9662 i40e_watchdog_subtask(pf); 9663 i40e_fdir_reinit_subtask(pf); 9664 if (pf->flags & I40E_FLAG_CLIENT_RESET) { 9665 /* Client subtask will reopen next time through. */ 9666 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); 9667 pf->flags &= ~I40E_FLAG_CLIENT_RESET; 9668 } else { 9669 i40e_client_subtask(pf); 9670 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) { 9671 i40e_notify_client_of_l2_param_changes( 9672 pf->vsi[pf->lan_vsi]); 9673 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE; 9674 } 9675 } 9676 i40e_sync_filters_subtask(pf); 9677 i40e_sync_udp_filters_subtask(pf); 9678 i40e_clean_adminq_subtask(pf); 9679 9680 /* flush memory to make sure state is correct before next watchdog */ 9681 smp_mb__before_atomic(); 9682 clear_bit(__I40E_SERVICE_SCHED, pf->state); 9683 9684 /* If the tasks have taken longer than one timer cycle or there 9685 * is more work to be done, reschedule the service task now 9686 * rather than wait for the timer to tick again. 
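* (The __I40E_SERVICE_SCHED bit was cleared just above, so the reentry
* check at the top of this function will not discard the rescheduled run.)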
9687 */ 9688 if (time_after(jiffies, (start_time + pf->service_timer_period)) || 9689 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || 9690 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || 9691 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) 9692 i40e_service_event_schedule(pf); 9693 } 9694 9695 /** 9696 * i40e_service_timer - timer callback 9697 * @t: pointer to the timer_list embedded in our PF struct 9698 **/ 9699 static void i40e_service_timer(struct timer_list *t) 9700 { 9701 struct i40e_pf *pf = from_timer(pf, t, service_timer); 9702 9703 mod_timer(&pf->service_timer, 9704 round_jiffies(jiffies + pf->service_timer_period)); 9705 i40e_service_event_schedule(pf); 9706 } 9707 9708 /** 9709 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI 9710 * @vsi: the VSI being configured 9711 **/ 9712 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) 9713 { 9714 struct i40e_pf *pf = vsi->back; 9715 9716 switch (vsi->type) { 9717 case I40E_VSI_MAIN: 9718 vsi->alloc_queue_pairs = pf->num_lan_qps; 9719 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 9720 I40E_REQ_DESCRIPTOR_MULTIPLE); 9721 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 9722 vsi->num_q_vectors = pf->num_lan_msix; 9723 else 9724 vsi->num_q_vectors = 1; 9725 9726 break; 9727 9728 case I40E_VSI_FDIR: 9729 vsi->alloc_queue_pairs = 1; 9730 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, 9731 I40E_REQ_DESCRIPTOR_MULTIPLE); 9732 vsi->num_q_vectors = pf->num_fdsb_msix; 9733 break; 9734 9735 case I40E_VSI_VMDQ2: 9736 vsi->alloc_queue_pairs = pf->num_vmdq_qps; 9737 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 9738 I40E_REQ_DESCRIPTOR_MULTIPLE); 9739 vsi->num_q_vectors = pf->num_vmdq_msix; 9740 break; 9741 9742 case I40E_VSI_SRIOV: 9743 vsi->alloc_queue_pairs = pf->num_vf_qps; 9744 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 9745 I40E_REQ_DESCRIPTOR_MULTIPLE); 9746 break; 9747 9748 default: 9749 WARN_ON(1); 9750 return -ENODATA; 9751 } 9752 9753 return 0; 9754 } 9755 9756 /** 9757 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi 9758 * @vsi: VSI pointer 9759 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 9760 * 9761 * On error: returns error code (negative) 9762 * On success: returns 0 9763 **/ 9764 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) 9765 { 9766 struct i40e_ring **next_rings; 9767 int size; 9768 int ret = 0; 9769 9770 /* allocate memory for the Tx, XDP Tx and Rx ring pointers */ 9771 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 9772 (i40e_enabled_xdp_vsi(vsi) ?
3 : 2); 9773 vsi->tx_rings = kzalloc(size, GFP_KERNEL); 9774 if (!vsi->tx_rings) 9775 return -ENOMEM; 9776 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs; 9777 if (i40e_enabled_xdp_vsi(vsi)) { 9778 vsi->xdp_rings = next_rings; 9779 next_rings += vsi->alloc_queue_pairs; 9780 } 9781 vsi->rx_rings = next_rings; 9782 9783 if (alloc_qvectors) { 9784 /* allocate memory for q_vector pointers */ 9785 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; 9786 vsi->q_vectors = kzalloc(size, GFP_KERNEL); 9787 if (!vsi->q_vectors) { 9788 ret = -ENOMEM; 9789 goto err_vectors; 9790 } 9791 } 9792 return ret; 9793 9794 err_vectors: 9795 kfree(vsi->tx_rings); 9796 return ret; 9797 } 9798 9799 /** 9800 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF 9801 * @pf: board private structure 9802 * @type: type of VSI 9803 * 9804 * On error: returns error code (negative) 9805 * On success: returns vsi index in PF (positive) 9806 **/ 9807 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) 9808 { 9809 int ret = -ENODEV; 9810 struct i40e_vsi *vsi; 9811 int vsi_idx; 9812 int i; 9813 9814 /* Need to protect the allocation of the VSIs at the PF level */ 9815 mutex_lock(&pf->switch_mutex); 9816 9817 /* VSI list may be fragmented if VSI creation/destruction has 9818 * been happening. We can afford to do a quick scan to look 9819 * for any free VSIs in the list. 9820 * 9821 * find next empty vsi slot, looping back around if necessary 9822 */ 9823 i = pf->next_vsi; 9824 while (i < pf->num_alloc_vsi && pf->vsi[i]) 9825 i++; 9826 if (i >= pf->num_alloc_vsi) { 9827 i = 0; 9828 while (i < pf->next_vsi && pf->vsi[i]) 9829 i++; 9830 } 9831 9832 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { 9833 vsi_idx = i; /* Found one! */ 9834 } else { 9835 ret = -ENODEV; 9836 goto unlock_pf; /* out of VSI slots! */ 9837 } 9838 pf->next_vsi = ++i; 9839 9840 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); 9841 if (!vsi) { 9842 ret = -ENOMEM; 9843 goto unlock_pf; 9844 } 9845 vsi->type = type; 9846 vsi->back = pf; 9847 set_bit(__I40E_VSI_DOWN, vsi->state); 9848 vsi->flags = 0; 9849 vsi->idx = vsi_idx; 9850 vsi->int_rate_limit = 0; 9851 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 9852 pf->rss_table_size : 64; 9853 vsi->netdev_registered = false; 9854 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; 9855 hash_init(vsi->mac_filter_hash); 9856 vsi->irqs_ready = false; 9857 9858 ret = i40e_set_num_rings_in_vsi(vsi); 9859 if (ret) 9860 goto err_rings; 9861 9862 ret = i40e_vsi_alloc_arrays(vsi, true); 9863 if (ret) 9864 goto err_rings; 9865 9866 /* Setup default MSIX irq handler for VSI */ 9867 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); 9868 9869 /* Initialize VSI lock */ 9870 spin_lock_init(&vsi->mac_filter_hash_lock); 9871 pf->vsi[vsi_idx] = vsi; 9872 ret = vsi_idx; 9873 goto unlock_pf; 9874 9875 err_rings: 9876 pf->next_vsi = i - 1; 9877 kfree(vsi); 9878 unlock_pf: 9879 mutex_unlock(&pf->switch_mutex); 9880 return ret; 9881 } 9882 9883 /** 9884 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI 9885 * @vsi: VSI pointer 9886 * @free_qvectors: a bool to specify if q_vectors need to be freed.
9887 * 9888 * Note: this function returns nothing; the ring and 9889 * q_vector pointer containers are simply freed. 9890 **/ 9891 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) 9892 { 9893 /* free the ring and vector containers */ 9894 if (free_qvectors) { 9895 kfree(vsi->q_vectors); 9896 vsi->q_vectors = NULL; 9897 } 9898 kfree(vsi->tx_rings); 9899 vsi->tx_rings = NULL; 9900 vsi->rx_rings = NULL; 9901 vsi->xdp_rings = NULL; 9902 } 9903 9904 /** 9905 * i40e_clear_rss_config_user - clear the user configured RSS hash keys 9906 * and lookup table 9907 * @vsi: Pointer to VSI structure 9908 */ 9909 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) 9910 { 9911 if (!vsi) 9912 return; 9913 9914 kfree(vsi->rss_hkey_user); 9915 vsi->rss_hkey_user = NULL; 9916 9917 kfree(vsi->rss_lut_user); 9918 vsi->rss_lut_user = NULL; 9919 } 9920 9921 /** 9922 * i40e_vsi_clear - Deallocate the VSI provided 9923 * @vsi: the VSI being un-configured 9924 **/ 9925 static int i40e_vsi_clear(struct i40e_vsi *vsi) 9926 { 9927 struct i40e_pf *pf; 9928 9929 if (!vsi) 9930 return 0; 9931 9932 if (!vsi->back) 9933 goto free_vsi; 9934 pf = vsi->back; 9935 9936 mutex_lock(&pf->switch_mutex); 9937 if (!pf->vsi[vsi->idx]) { 9938 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", 9939 vsi->idx, vsi->idx, vsi->type); 9940 goto unlock_vsi; 9941 } 9942 9943 if (pf->vsi[vsi->idx] != vsi) { 9944 dev_err(&pf->pdev->dev, 9945 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", 9946 pf->vsi[vsi->idx]->idx, 9947 pf->vsi[vsi->idx]->type, 9948 vsi->idx, vsi->type); 9949 goto unlock_vsi; 9950 } 9951 9952 /* updates the PF for this cleared vsi */ 9953 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 9954 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 9955 9956 i40e_vsi_free_arrays(vsi, true); 9957 i40e_clear_rss_config_user(vsi); 9958 9959 pf->vsi[vsi->idx] = NULL; 9960 if (vsi->idx < pf->next_vsi) 9961 pf->next_vsi = vsi->idx; 9962 9963 unlock_vsi: 9964 mutex_unlock(&pf->switch_mutex); 9965 free_vsi: 9966 kfree(vsi); 9967 9968 return 0; 9969 } 9970 9971 /** 9972 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI 9973 * @vsi: the VSI being cleaned 9974 **/ 9975 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) 9976 { 9977 int i; 9978 9979 if (vsi->tx_rings && vsi->tx_rings[0]) { 9980 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 9981 kfree_rcu(vsi->tx_rings[i], rcu); 9982 vsi->tx_rings[i] = NULL; 9983 vsi->rx_rings[i] = NULL; 9984 if (vsi->xdp_rings) 9985 vsi->xdp_rings[i] = NULL; 9986 } 9987 } 9988 } 9989 9990 /** 9991 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 9992 * @vsi: the VSI being configured 9993 **/ 9994 static int i40e_alloc_rings(struct i40e_vsi *vsi) 9995 { 9996 int i, qpv = i40e_enabled_xdp_vsi(vsi) ?
3 : 2; 9997 struct i40e_pf *pf = vsi->back; 9998 struct i40e_ring *ring; 9999 10000 /* Set basic values in the rings to be used later during open() */ 10001 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 10002 /* allocate space for Tx, Rx and, if enabled, XDP Tx in one shot */ 10003 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL); 10004 if (!ring) 10005 goto err_out; 10006 10007 ring->queue_index = i; 10008 ring->reg_idx = vsi->base_queue + i; 10009 ring->ring_active = false; 10010 ring->vsi = vsi; 10011 ring->netdev = vsi->netdev; 10012 ring->dev = &pf->pdev->dev; 10013 ring->count = vsi->num_desc; 10014 ring->size = 0; 10015 ring->dcb_tc = 0; 10016 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) 10017 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; 10018 ring->itr_setting = pf->tx_itr_default; 10019 vsi->tx_rings[i] = ring++; 10020 10021 if (!i40e_enabled_xdp_vsi(vsi)) 10022 goto setup_rx; 10023 10024 ring->queue_index = vsi->alloc_queue_pairs + i; 10025 ring->reg_idx = vsi->base_queue + ring->queue_index; 10026 ring->ring_active = false; 10027 ring->vsi = vsi; 10028 ring->netdev = NULL; 10029 ring->dev = &pf->pdev->dev; 10030 ring->count = vsi->num_desc; 10031 ring->size = 0; 10032 ring->dcb_tc = 0; 10033 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) 10034 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; 10035 set_ring_xdp(ring); 10036 ring->itr_setting = pf->tx_itr_default; 10037 vsi->xdp_rings[i] = ring++; 10038 10039 setup_rx: 10040 ring->queue_index = i; 10041 ring->reg_idx = vsi->base_queue + i; 10042 ring->ring_active = false; 10043 ring->vsi = vsi; 10044 ring->netdev = vsi->netdev; 10045 ring->dev = &pf->pdev->dev; 10046 ring->count = vsi->num_desc; 10047 ring->size = 0; 10048 ring->dcb_tc = 0; 10049 ring->itr_setting = pf->rx_itr_default; 10050 vsi->rx_rings[i] = ring; 10051 } 10052 10053 return 0; 10054 10055 err_out: 10056 i40e_vsi_clear_rings(vsi); 10057 return -ENOMEM; 10058 } 10059 10060 /** 10061 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 10062 * @pf: board private structure 10063 * @vectors: the number of MSI-X vectors to request 10064 * 10065 * Returns the number of vectors reserved, or error 10066 **/ 10067 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 10068 { 10069 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 10070 I40E_MIN_MSIX, vectors); 10071 if (vectors < 0) { 10072 dev_info(&pf->pdev->dev, 10073 "MSI-X vector reservation failed: %d\n", vectors); 10074 vectors = 0; 10075 } 10076 10077 return vectors; 10078 } 10079 10080 /** 10081 * i40e_init_msix - Setup the MSIX capability 10082 * @pf: board private structure 10083 * 10084 * Work with the OS to set up the MSIX vectors needed. 10085 * 10086 * Returns the number of vectors reserved or negative on failure 10087 **/ 10088 static int i40e_init_msix(struct i40e_pf *pf) 10089 { 10090 struct i40e_hw *hw = &pf->hw; 10091 int cpus, extra_vectors; 10092 int vectors_left; 10093 int v_budget, i; 10094 int v_actual; 10095 int iwarp_requested = 0; 10096 10097 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 10098 return -ENODEV; 10099 10100 /* The number of vectors we'll request will consist of: 10101 * - Add 1 for "other" cause for Admin Queue events, etc. 10102 * - The number of LAN queue pairs 10103 * - Queues being used for RSS. 10104 * We don't need as many as max_rss_size vectors; 10105 * use rss_size instead in the calculation, since that 10106 * is governed by the number of CPUs in the system.
10107 * - assumes symmetric Tx/Rx pairing 10108 * - The number of VMDq pairs 10109 * - The CPU count within the NUMA node if iWARP is enabled 10110 * Once we count this up, try the request. 10111 * 10112 * If we can't get what we want, we'll simplify to nearly nothing 10113 * and try again. If that still fails, we punt. 10114 */ 10115 vectors_left = hw->func_caps.num_msix_vectors; 10116 v_budget = 0; 10117 10118 /* reserve one vector for miscellaneous handler */ 10119 if (vectors_left) { 10120 v_budget++; 10121 vectors_left--; 10122 } 10123 10124 /* reserve some vectors for the main PF traffic queues. Initially we 10125 * only reserve at most 50% of the available vectors, in the case that 10126 * the number of online CPUs is large. This ensures that we can enable 10127 * extra features as well. Once we've enabled the other features, we 10128 * will use any remaining vectors to reach as close as we can to the 10129 * number of online CPUs. 10130 */ 10131 cpus = num_online_cpus(); 10132 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); 10133 vectors_left -= pf->num_lan_msix; 10134 10135 /* reserve one vector for sideband flow director */ 10136 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10137 if (vectors_left) { 10138 pf->num_fdsb_msix = 1; 10139 v_budget++; 10140 vectors_left--; 10141 } else { 10142 pf->num_fdsb_msix = 0; 10143 } 10144 } 10145 10146 /* can we reserve enough for iWARP? */ 10147 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 10148 iwarp_requested = pf->num_iwarp_msix; 10149 10150 if (!vectors_left) 10151 pf->num_iwarp_msix = 0; 10152 else if (vectors_left < pf->num_iwarp_msix) 10153 pf->num_iwarp_msix = 1; 10154 v_budget += pf->num_iwarp_msix; 10155 vectors_left -= pf->num_iwarp_msix; 10156 } 10157 10158 /* any vectors left over go for VMDq support */ 10159 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 10160 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 10161 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 10162 10163 if (!vectors_left) { 10164 pf->num_vmdq_msix = 0; 10165 pf->num_vmdq_qps = 0; 10166 } else { 10167 /* if we're short on vectors for what's desired, we limit 10168 * the queues per vmdq. If this is still more than are 10169 * available, the user will need to change the number of 10170 * queues/vectors used by the PF later with the ethtool 10171 * channels command 10172 */ 10173 if (vmdq_vecs < vmdq_vecs_wanted) 10174 pf->num_vmdq_qps = 1; 10175 pf->num_vmdq_msix = pf->num_vmdq_qps; 10176 10177 v_budget += vmdq_vecs; 10178 vectors_left -= vmdq_vecs; 10179 } 10180 } 10181 10182 /* On systems with a large number of SMP cores, we previously limited 10183 * the number of vectors for num_lan_msix to be at most 50% of the 10184 * available vectors, to allow for other features. Now, we add back 10185 * the remaining vectors. However, we ensure that the total 10186 * num_lan_msix will not exceed num_online_cpus(). To do this, we 10187 * calculate the number of vectors we can add without going over the 10188 * cap of CPUs. For systems with a small number of CPUs this will be 10189 * zero. 10190 */ 10191 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); 10192 pf->num_lan_msix += extra_vectors; 10193 vectors_left -= extra_vectors; 10194 10195 WARN(vectors_left < 0, 10196 "Calculation of remaining vectors underflowed. 
This is an accounting bug when determining total MSI-X vectors.\n"); 10197 10198 v_budget += pf->num_lan_msix; 10199 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 10200 GFP_KERNEL); 10201 if (!pf->msix_entries) 10202 return -ENOMEM; 10203 10204 for (i = 0; i < v_budget; i++) 10205 pf->msix_entries[i].entry = i; 10206 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 10207 10208 if (v_actual < I40E_MIN_MSIX) { 10209 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 10210 kfree(pf->msix_entries); 10211 pf->msix_entries = NULL; 10212 pci_disable_msix(pf->pdev); 10213 return -ENODEV; 10214 10215 } else if (v_actual == I40E_MIN_MSIX) { 10216 /* Adjust for minimal MSIX use */ 10217 pf->num_vmdq_vsis = 0; 10218 pf->num_vmdq_qps = 0; 10219 pf->num_lan_qps = 1; 10220 pf->num_lan_msix = 1; 10221 10222 } else if (v_actual != v_budget) { 10223 /* If we have limited resources, we will start with no vectors 10224 * for the special features and then allocate vectors to some 10225 * of these features based on the policy and at the end disable 10226 * the features that did not get any vectors. 10227 */ 10228 int vec; 10229 10230 dev_info(&pf->pdev->dev, 10231 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", 10232 v_actual, v_budget); 10233 /* reserve the misc vector */ 10234 vec = v_actual - 1; 10235 10236 /* Scale vector usage down */ 10237 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 10238 pf->num_vmdq_vsis = 1; 10239 pf->num_vmdq_qps = 1; 10240 10241 /* partition out the remaining vectors */ 10242 switch (vec) { 10243 case 2: 10244 pf->num_lan_msix = 1; 10245 break; 10246 case 3: 10247 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 10248 pf->num_lan_msix = 1; 10249 pf->num_iwarp_msix = 1; 10250 } else { 10251 pf->num_lan_msix = 2; 10252 } 10253 break; 10254 default: 10255 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 10256 pf->num_iwarp_msix = min_t(int, (vec / 3), 10257 iwarp_requested); 10258 pf->num_vmdq_vsis = min_t(int, (vec / 3), 10259 I40E_DEFAULT_NUM_VMDQ_VSI); 10260 } else { 10261 pf->num_vmdq_vsis = min_t(int, (vec / 2), 10262 I40E_DEFAULT_NUM_VMDQ_VSI); 10263 } 10264 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10265 pf->num_fdsb_msix = 1; 10266 vec--; 10267 } 10268 pf->num_lan_msix = min_t(int, 10269 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), 10270 pf->num_lan_msix); 10271 pf->num_lan_qps = pf->num_lan_msix; 10272 break; 10273 } 10274 } 10275 10276 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 10277 (pf->num_fdsb_msix == 0)) { 10278 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); 10279 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 10280 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; 10281 } 10282 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 10283 (pf->num_vmdq_msix == 0)) { 10284 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 10285 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 10286 } 10287 10288 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 10289 (pf->num_iwarp_msix == 0)) { 10290 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); 10291 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; 10292 } 10293 i40e_debug(&pf->hw, I40E_DEBUG_INIT, 10294 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", 10295 pf->num_lan_msix, 10296 pf->num_vmdq_msix * pf->num_vmdq_vsis, 10297 pf->num_fdsb_msix, 10298 pf->num_iwarp_msix); 10299 10300 return v_actual; 10301 } 10302 10303 /** 10304 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 10305 * @vsi: the VSI being 
configured 10306 * @v_idx: index of the vector in the vsi struct 10307 * @cpu: cpu to be used on affinity_mask 10308 * 10309 * We allocate one q_vector. If allocation fails we return -ENOMEM. 10310 **/ 10311 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) 10312 { 10313 struct i40e_q_vector *q_vector; 10314 10315 /* allocate q_vector */ 10316 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 10317 if (!q_vector) 10318 return -ENOMEM; 10319 10320 q_vector->vsi = vsi; 10321 q_vector->v_idx = v_idx; 10322 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 10323 10324 if (vsi->netdev) 10325 netif_napi_add(vsi->netdev, &q_vector->napi, 10326 i40e_napi_poll, NAPI_POLL_WEIGHT); 10327 10328 /* tie q_vector and vsi together */ 10329 vsi->q_vectors[v_idx] = q_vector; 10330 10331 return 0; 10332 } 10333 10334 /** 10335 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 10336 * @vsi: the VSI being configured 10337 * 10338 * We allocate one q_vector per queue interrupt. If allocation fails we 10339 * return -ENOMEM. 10340 **/ 10341 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 10342 { 10343 struct i40e_pf *pf = vsi->back; 10344 int err, v_idx, num_q_vectors, current_cpu; 10345 10346 /* if not MSIX, give the one vector only to the LAN VSI */ 10347 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 10348 num_q_vectors = vsi->num_q_vectors; 10349 else if (vsi == pf->vsi[pf->lan_vsi]) 10350 num_q_vectors = 1; 10351 else 10352 return -EINVAL; 10353 10354 current_cpu = cpumask_first(cpu_online_mask); 10355 10356 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 10357 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu); 10358 if (err) 10359 goto err_out; 10360 current_cpu = cpumask_next(current_cpu, cpu_online_mask); 10361 if (unlikely(current_cpu >= nr_cpu_ids)) 10362 current_cpu = cpumask_first(cpu_online_mask); 10363 } 10364 10365 return 0; 10366 10367 err_out: 10368 while (v_idx--) 10369 i40e_free_q_vector(vsi, v_idx); 10370 10371 return err; 10372 } 10373 10374 /** 10375 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 10376 * @pf: board private structure to initialize 10377 **/ 10378 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 10379 { 10380 int vectors = 0; 10381 ssize_t size; 10382 10383 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 10384 vectors = i40e_init_msix(pf); 10385 if (vectors < 0) { 10386 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 10387 I40E_FLAG_IWARP_ENABLED | 10388 I40E_FLAG_RSS_ENABLED | 10389 I40E_FLAG_DCB_CAPABLE | 10390 I40E_FLAG_DCB_ENABLED | 10391 I40E_FLAG_SRIOV_ENABLED | 10392 I40E_FLAG_FD_SB_ENABLED | 10393 I40E_FLAG_FD_ATR_ENABLED | 10394 I40E_FLAG_VMDQ_ENABLED); 10395 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; 10396 10397 /* rework the queue expectations without MSIX */ 10398 i40e_determine_queue_usage(pf); 10399 } 10400 } 10401 10402 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 10403 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 10404 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 10405 vectors = pci_enable_msi(pf->pdev); 10406 if (vectors < 0) { 10407 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 10408 vectors); 10409 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 10410 } 10411 vectors = 1; /* one MSI or Legacy vector */ 10412 } 10413 10414 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 10415 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 10416 10417 /* set up vector assignment tracking */ 10418 size = sizeof(struct 
i40e_lump_tracking) + (sizeof(u16) * vectors); 10419 pf->irq_pile = kzalloc(size, GFP_KERNEL); 10420 if (!pf->irq_pile) 10421 return -ENOMEM; 10422 10423 pf->irq_pile->num_entries = vectors; 10424 pf->irq_pile->search_hint = 0; 10425 10426 /* track first vector for misc interrupts, ignore return */ 10427 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); 10428 10429 return 0; 10430 } 10431 10432 /** 10433 * i40e_restore_interrupt_scheme - Restore the interrupt scheme 10434 * @pf: private board data structure 10435 * 10436 * Restore the interrupt scheme that was cleared when we suspended the 10437 * device. This should be called during resume to re-allocate the q_vectors 10438 * and reacquire IRQs. 10439 */ 10440 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) 10441 { 10442 int err, i; 10443 10444 /* We cleared the MSI and MSI-X flags when disabling the old interrupt 10445 * scheme. We need to re-enable them here in order to attempt to 10446 * re-acquire the MSI or MSI-X vectors 10447 */ 10448 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 10449 10450 err = i40e_init_interrupt_scheme(pf); 10451 if (err) 10452 return err; 10453 10454 /* Now that we've re-acquired IRQs, we need to remap the vectors and 10455 * rings together again. 10456 */ 10457 for (i = 0; i < pf->num_alloc_vsi; i++) { 10458 if (pf->vsi[i]) { 10459 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); 10460 if (err) 10461 goto err_unwind; 10462 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); 10463 } 10464 } 10465 10466 err = i40e_setup_misc_vector(pf); 10467 if (err) 10468 goto err_unwind; 10469 10470 return 0; 10471 10472 err_unwind: 10473 while (i--) { 10474 if (pf->vsi[i]) 10475 i40e_vsi_free_q_vectors(pf->vsi[i]); 10476 } 10477 10478 return err; 10479 } 10480 10481 /** 10482 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events 10483 * @pf: board private structure 10484 * 10485 * This sets up the handler for MSIX 0, which is used to manage the 10486 * non-queue interrupts, e.g. AdminQ and errors. This is not used 10487 * when in MSI or Legacy interrupt mode. 10488 **/ 10489 static int i40e_setup_misc_vector(struct i40e_pf *pf) 10490 { 10491 struct i40e_hw *hw = &pf->hw; 10492 int err = 0; 10493 10494 /* Only request the IRQ once, the first time through.
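* (The __I40E_MISC_IRQ_REQUESTED state bit stays set once the IRQ is
* acquired, so later calls, e.g. when i40e_rebuild() reinits the misc
* interrupt, skip request_irq() and only re-program the registers below.)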
*/ 10495 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { 10496 err = request_irq(pf->msix_entries[0].vector, 10497 i40e_intr, 0, pf->int_name, pf); 10498 if (err) { 10499 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); 10500 dev_info(&pf->pdev->dev, 10501 "request_irq for %s failed: %d\n", 10502 pf->int_name, err); 10503 return -EFAULT; 10504 } 10505 } 10506 10507 i40e_enable_misc_int_causes(pf); 10508 10509 /* associate no queues to the misc vector */ 10510 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 10511 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); 10512 10513 i40e_flush(hw); 10514 10515 i40e_irq_dynamic_enable_icr0(pf); 10516 10517 return err; 10518 } 10519 10520 /** 10521 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands 10522 * @vsi: Pointer to vsi structure 10523 * @seed: Buffer to store the hash keys 10524 * @lut: Buffer to store the lookup table entries 10525 * @lut_size: Size of buffer to store the lookup table entries 10526 * 10527 * Return 0 on success, negative on failure 10528 */ 10529 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, 10530 u8 *lut, u16 lut_size) 10531 { 10532 struct i40e_pf *pf = vsi->back; 10533 struct i40e_hw *hw = &pf->hw; 10534 int ret = 0; 10535 10536 if (seed) { 10537 ret = i40e_aq_get_rss_key(hw, vsi->id, 10538 (struct i40e_aqc_get_set_rss_key_data *)seed); 10539 if (ret) { 10540 dev_info(&pf->pdev->dev, 10541 "Cannot get RSS key, err %s aq_err %s\n", 10542 i40e_stat_str(&pf->hw, ret), 10543 i40e_aq_str(&pf->hw, 10544 pf->hw.aq.asq_last_status)); 10545 return ret; 10546 } 10547 } 10548 10549 if (lut) { 10550 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; 10551 10552 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); 10553 if (ret) { 10554 dev_info(&pf->pdev->dev, 10555 "Cannot get RSS lut, err %s aq_err %s\n", 10556 i40e_stat_str(&pf->hw, ret), 10557 i40e_aq_str(&pf->hw, 10558 pf->hw.aq.asq_last_status)); 10559 return ret; 10560 } 10561 } 10562 10563 return ret; 10564 } 10565 10566 /** 10567 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers 10568 * @vsi: Pointer to vsi structure 10569 * @seed: RSS hash seed 10570 * @lut: Lookup table 10571 * @lut_size: Lookup table size 10572 * 10573 * Returns 0 on success, negative on failure 10574 **/ 10575 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, 10576 const u8 *lut, u16 lut_size) 10577 { 10578 struct i40e_pf *pf = vsi->back; 10579 struct i40e_hw *hw = &pf->hw; 10580 u16 vf_id = vsi->vf_id; 10581 u8 i; 10582 10583 /* Fill out hash function seed */ 10584 if (seed) { 10585 u32 *seed_dw = (u32 *)seed; 10586 10587 if (vsi->type == I40E_VSI_MAIN) { 10588 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 10589 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); 10590 } else if (vsi->type == I40E_VSI_SRIOV) { 10591 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) 10592 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]); 10593 } else { 10594 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); 10595 } 10596 } 10597 10598 if (lut) { 10599 u32 *lut_dw = (u32 *)lut; 10600 10601 if (vsi->type == I40E_VSI_MAIN) { 10602 if (lut_size != I40E_HLUT_ARRAY_SIZE) 10603 return -EINVAL; 10604 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) 10605 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); 10606 } else if (vsi->type == I40E_VSI_SRIOV) { 10607 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) 10608 return -EINVAL; 10609 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) 10610 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
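/* the VF LUT registers are indexed by VF id, so the PF programs them on the VF's behalf */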
10611 } else { 10612 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type"); 10613 } 10614 } 10615 i40e_flush(hw); 10616 10617 return 0; 10618 } 10619 10620 /** 10621 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers 10622 * @vsi: Pointer to VSI structure 10623 * @seed: Buffer to store the keys 10624 * @lut: Buffer to store the lookup table entries 10625 * @lut_size: Size of buffer to store the lookup table entries 10626 * 10627 * Returns 0 on success, negative on failure 10628 */ 10629 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, 10630 u8 *lut, u16 lut_size) 10631 { 10632 struct i40e_pf *pf = vsi->back; 10633 struct i40e_hw *hw = &pf->hw; 10634 u16 i; 10635 10636 if (seed) { 10637 u32 *seed_dw = (u32 *)seed; 10638 10639 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 10640 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 10641 } 10642 if (lut) { 10643 u32 *lut_dw = (u32 *)lut; 10644 10645 if (lut_size != I40E_HLUT_ARRAY_SIZE) 10646 return -EINVAL; 10647 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) 10648 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); 10649 } 10650 10651 return 0; 10652 } 10653 10654 /** 10655 * i40e_config_rss - Configure RSS keys and lut 10656 * @vsi: Pointer to VSI structure 10657 * @seed: RSS hash seed 10658 * @lut: Lookup table 10659 * @lut_size: Lookup table size 10660 * 10661 * Returns 0 on success, negative on failure 10662 */ 10663 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 10664 { 10665 struct i40e_pf *pf = vsi->back; 10666 10667 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) 10668 return i40e_config_rss_aq(vsi, seed, lut, lut_size); 10669 else 10670 return i40e_config_rss_reg(vsi, seed, lut, lut_size); 10671 } 10672 10673 /** 10674 * i40e_get_rss - Get RSS keys and lut 10675 * @vsi: Pointer to VSI structure 10676 * @seed: Buffer to store the keys 10677 * @lut: Buffer to store the lookup table entries 10678 * @lut_size: Size of buffer to store the lookup table entries 10679 * 10680 * Returns 0 on success, negative on failure 10681 */ 10682 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 10683 { 10684 struct i40e_pf *pf = vsi->back; 10685 10686 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) 10687 return i40e_get_rss_aq(vsi, seed, lut, lut_size); 10688 else 10689 return i40e_get_rss_reg(vsi, seed, lut, lut_size); 10690 } 10691 10692 /** 10693 * i40e_fill_rss_lut - Fill the RSS lookup table with default values 10694 * @pf: Pointer to board private structure 10695 * @lut: Lookup table 10696 * @rss_table_size: Lookup table size 10697 * @rss_size: Number of queues to spread the entries across; e.g. with rss_table_size 8 and rss_size 3 the table becomes 0 1 2 0 1 2 0 1 10698 */ 10699 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, 10700 u16 rss_table_size, u16 rss_size) 10701 { 10702 u16 i; 10703 10704 for (i = 0; i < rss_table_size; i++) 10705 lut[i] = i % rss_size; 10706 } 10707 10708 /** 10709 * i40e_pf_config_rss - Prepare for RSS if used 10710 * @pf: board private structure 10711 **/ 10712 static int i40e_pf_config_rss(struct i40e_pf *pf) 10713 { 10714 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 10715 u8 seed[I40E_HKEY_ARRAY_SIZE]; 10716 u8 *lut; 10717 struct i40e_hw *hw = &pf->hw; 10718 u32 reg_val; 10719 u64 hena; 10720 int ret; 10721 10722 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ 10723 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 10724 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 10725 hena |= i40e_pf_get_default_rss_hena(pf); 10726 10727 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); 10728
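/* hena is a 64-bit enable mask split across a pair of 32-bit registers */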
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 10729 10730 /* Determine the RSS table size based on the hardware capabilities */ 10731 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 10732 reg_val = (pf->rss_table_size == 512) ? 10733 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : 10734 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); 10735 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); 10736 10737 /* Determine the RSS size of the VSI */ 10738 if (!vsi->rss_size) { 10739 u16 qcount; 10740 /* If the firmware does something weird during VSI init, we 10741 * could end up with zero TCs. Check for that to avoid 10742 * divide-by-zero. It probably won't pass traffic, but it also 10743 * won't panic. 10744 */ 10745 qcount = vsi->num_queue_pairs / 10746 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); 10747 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); 10748 } 10749 if (!vsi->rss_size) 10750 return -EINVAL; 10751 10752 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 10753 if (!lut) 10754 return -ENOMEM; 10755 10756 /* Use user configured lut if there is one, otherwise use default */ 10757 if (vsi->rss_lut_user) 10758 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 10759 else 10760 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); 10761 10762 /* Use user configured hash key if there is one, otherwise 10763 * use default. 10764 */ 10765 if (vsi->rss_hkey_user) 10766 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); 10767 else 10768 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 10769 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); 10770 kfree(lut); 10771 10772 return ret; 10773 } 10774 10775 /** 10776 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 10777 * @pf: board private structure 10778 * @queue_count: the requested queue count for rss. 10779 * 10780 * returns 0 if rss is not enabled; if enabled, returns the final rss queue 10781 * count, which may differ from the requested queue count. 10782 * Note: expects to be called while under rtnl_lock() 10783 **/ 10784 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 10785 { 10786 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 10787 int new_rss_size; 10788 10789 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 10790 return 0; 10791 10792 new_rss_size = min_t(int, queue_count, pf->rss_size_max); 10793 10794 if (queue_count != vsi->num_queue_pairs) { 10795 u16 qcount; 10796 10797 vsi->req_queue_pairs = queue_count; 10798 i40e_prep_for_reset(pf, true); 10799 10800 pf->alloc_rss_size = new_rss_size; 10801 10802 i40e_reset_and_rebuild(pf, true, true); 10803 10804 /* Discard the user configured hash keys and lut, if fewer 10805 * queues are enabled.
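* (A stale user-supplied LUT could point at queue indices that no longer
* exist once the queue count shrinks.)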
10806 */ 10807 if (queue_count < vsi->rss_size) { 10808 i40e_clear_rss_config_user(vsi); 10809 dev_dbg(&pf->pdev->dev, 10810 "discard user configured hash keys and lut\n"); 10811 } 10812 10813 /* Reset vsi->rss_size, as number of enabled queues changed */ 10814 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; 10815 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); 10816 10817 i40e_pf_config_rss(pf); 10818 } 10819 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", 10820 vsi->req_queue_pairs, pf->rss_size_max); 10821 return pf->alloc_rss_size; 10822 } 10823 10824 /** 10825 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition 10826 * @pf: board private structure 10827 **/ 10828 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) 10829 { 10830 i40e_status status; 10831 bool min_valid, max_valid; 10832 u32 max_bw, min_bw; 10833 10834 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, 10835 &min_valid, &max_valid); 10836 10837 if (!status) { 10838 if (min_valid) 10839 pf->min_bw = min_bw; 10840 if (max_valid) 10841 pf->max_bw = max_bw; 10842 } 10843 10844 return status; 10845 } 10846 10847 /** 10848 * i40e_set_partition_bw_setting - Set BW settings for this PF partition 10849 * @pf: board private structure 10850 **/ 10851 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) 10852 { 10853 struct i40e_aqc_configure_partition_bw_data bw_data; 10854 i40e_status status; 10855 10856 /* Set the valid bit for this PF */ 10857 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); 10858 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; 10859 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; 10860 10861 /* Set the new bandwidths */ 10862 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); 10863 10864 return status; 10865 } 10866 10867 /** 10868 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition 10869 * @pf: board private structure 10870 **/ 10871 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) 10872 { 10873 /* Commit temporary BW setting to permanent NVM image */ 10874 enum i40e_admin_queue_err last_aq_status; 10875 i40e_status ret; 10876 u16 nvm_word; 10877 10878 if (pf->hw.partition_id != 1) { 10879 dev_info(&pf->pdev->dev, 10880 "Commit BW only works on partition 1! 
This is partition %d", 10881 pf->hw.partition_id); 10882 ret = I40E_NOT_SUPPORTED; 10883 goto bw_commit_out; 10884 } 10885 10886 /* Acquire NVM for read access */ 10887 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 10888 last_aq_status = pf->hw.aq.asq_last_status; 10889 if (ret) { 10890 dev_info(&pf->pdev->dev, 10891 "Cannot acquire NVM for read access, err %s aq_err %s\n", 10892 i40e_stat_str(&pf->hw, ret), 10893 i40e_aq_str(&pf->hw, last_aq_status)); 10894 goto bw_commit_out; 10895 } 10896 10897 /* Read word 0x10 of NVM - SW compatibility word 1 */ 10898 ret = i40e_aq_read_nvm(&pf->hw, 10899 I40E_SR_NVM_CONTROL_WORD, 10900 0x10, sizeof(nvm_word), &nvm_word, 10901 false, NULL); 10902 /* Save off last admin queue command status before releasing 10903 * the NVM 10904 */ 10905 last_aq_status = pf->hw.aq.asq_last_status; 10906 i40e_release_nvm(&pf->hw); 10907 if (ret) { 10908 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", 10909 i40e_stat_str(&pf->hw, ret), 10910 i40e_aq_str(&pf->hw, last_aq_status)); 10911 goto bw_commit_out; 10912 } 10913 10914 /* Wait a bit for NVM release to complete */ 10915 msleep(50); 10916 10917 /* Acquire NVM for write access */ 10918 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 10919 last_aq_status = pf->hw.aq.asq_last_status; 10920 if (ret) { 10921 dev_info(&pf->pdev->dev, 10922 "Cannot acquire NVM for write access, err %s aq_err %s\n", 10923 i40e_stat_str(&pf->hw, ret), 10924 i40e_aq_str(&pf->hw, last_aq_status)); 10925 goto bw_commit_out; 10926 } 10927 /* Write it back out unchanged to initiate update NVM, 10928 * which will force a write of the shadow (alt) RAM to 10929 * the NVM - thus storing the bandwidth values permanently. 10930 */ 10931 ret = i40e_aq_update_nvm(&pf->hw, 10932 I40E_SR_NVM_CONTROL_WORD, 10933 0x10, sizeof(nvm_word), 10934 &nvm_word, true, 0, NULL); 10935 /* Save off last admin queue command status before releasing 10936 * the NVM 10937 */ 10938 last_aq_status = pf->hw.aq.asq_last_status; 10939 i40e_release_nvm(&pf->hw); 10940 if (ret) 10941 dev_info(&pf->pdev->dev, 10942 "BW settings NOT SAVED, err %s aq_err %s\n", 10943 i40e_stat_str(&pf->hw, ret), 10944 i40e_aq_str(&pf->hw, last_aq_status)); 10945 bw_commit_out: 10946 10947 return ret; 10948 } 10949 10950 /** 10951 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 10952 * @pf: board private structure to initialize 10953 * 10954 * i40e_sw_init initializes the Adapter private data structure. 10955 * Fields are initialized based on PCI device information and 10956 * OS network device settings (MTU size). 
10957 **/ 10958 static int i40e_sw_init(struct i40e_pf *pf) 10959 { 10960 int err = 0; 10961 int size; 10962 10963 /* Set default capability flags */ 10964 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 10965 I40E_FLAG_MSI_ENABLED | 10966 I40E_FLAG_MSIX_ENABLED; 10967 10968 /* Set default ITR */ 10969 pf->rx_itr_default = I40E_ITR_RX_DEF; 10970 pf->tx_itr_default = I40E_ITR_TX_DEF; 10971 10972 /* Depending on PF configurations, it is possible that the RSS 10973 * maximum might end up larger than the available queues 10974 */ 10975 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); 10976 pf->alloc_rss_size = 1; 10977 pf->rss_table_size = pf->hw.func_caps.rss_table_size; 10978 pf->rss_size_max = min_t(int, pf->rss_size_max, 10979 pf->hw.func_caps.num_tx_qp); 10980 if (pf->hw.func_caps.rss) { 10981 pf->flags |= I40E_FLAG_RSS_ENABLED; 10982 pf->alloc_rss_size = min_t(int, pf->rss_size_max, 10983 num_online_cpus()); 10984 } 10985 10986 /* MFP mode enabled */ 10987 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { 10988 pf->flags |= I40E_FLAG_MFP_ENABLED; 10989 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 10990 if (i40e_get_partition_bw_setting(pf)) { 10991 dev_warn(&pf->pdev->dev, 10992 "Could not get partition bw settings\n"); 10993 } else { 10994 dev_info(&pf->pdev->dev, 10995 "Partition BW Min = %8.8x, Max = %8.8x\n", 10996 pf->min_bw, pf->max_bw); 10997 10998 /* nudge the Tx scheduler */ 10999 i40e_set_partition_bw_setting(pf); 11000 } 11001 } 11002 11003 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 11004 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 11005 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 11006 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 11007 if (pf->flags & I40E_FLAG_MFP_ENABLED && 11008 pf->hw.num_partitions > 1) 11009 dev_info(&pf->pdev->dev, 11010 "Flow Director Sideband mode Disabled in MFP mode\n"); 11011 else 11012 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 11013 pf->fdir_pf_filter_count = 11014 pf->hw.func_caps.fd_filters_guaranteed; 11015 pf->hw.fdir_shared_filter_count = 11016 pf->hw.func_caps.fd_filters_best_effort; 11017 } 11018 11019 if (pf->hw.mac.type == I40E_MAC_X722) { 11020 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | 11021 I40E_HW_128_QP_RSS_CAPABLE | 11022 I40E_HW_ATR_EVICT_CAPABLE | 11023 I40E_HW_WB_ON_ITR_CAPABLE | 11024 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | 11025 I40E_HW_NO_PCI_LINK_CHECK | 11026 I40E_HW_USE_SET_LLDP_MIB | 11027 I40E_HW_GENEVE_OFFLOAD_CAPABLE | 11028 I40E_HW_PTP_L4_CAPABLE | 11029 I40E_HW_WOL_MC_MAGIC_PKT_WAKE | 11030 I40E_HW_OUTER_UDP_CSUM_CAPABLE); 11031 11032 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 11033 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != 11034 I40E_FDEVICT_PCTYPE_DEFAULT) { 11035 dev_warn(&pf->pdev->dev, 11036 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); 11037 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; 11038 } 11039 } else if ((pf->hw.aq.api_maj_ver > 1) || 11040 ((pf->hw.aq.api_maj_ver == 1) && 11041 (pf->hw.aq.api_min_ver > 4))) { 11042 /* Supported in FW API version higher than 1.4 */ 11043 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; 11044 } 11045 11046 /* Enable HW ATR eviction if possible */ 11047 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) 11048 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; 11049 11050 if ((pf->hw.mac.type == I40E_MAC_XL710) && 11051 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 11052 (pf->hw.aq.fw_maj_ver < 4))) { 11053 pf->hw_features |= I40E_HW_RESTART_AUTONEG; 11054 /* No DCB support for FW < v4.33 */ 11055 
pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; 11056 } 11057 11058 /* Disable FW LLDP if FW < v4.3 */ 11059 if ((pf->hw.mac.type == I40E_MAC_XL710) && 11060 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || 11061 (pf->hw.aq.fw_maj_ver < 4))) 11062 pf->hw_features |= I40E_HW_STOP_FW_LLDP; 11063 11064 /* Use the FW Set LLDP MIB API if FW >= v4.40 */ 11065 if ((pf->hw.mac.type == I40E_MAC_XL710) && 11066 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || 11067 (pf->hw.aq.fw_maj_ver >= 5))) 11068 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; 11069 11070 /* Enable PTP L4 if FW >= v6.0 */ 11071 if (pf->hw.mac.type == I40E_MAC_XL710 && 11072 pf->hw.aq.fw_maj_ver >= 6) 11073 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; 11074 11075 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { 11076 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 11077 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 11078 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); 11079 } 11080 11081 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { 11082 pf->flags |= I40E_FLAG_IWARP_ENABLED; 11083 /* iWARP needs one extra vector for CQP, just like MISC. */ 11084 pf->num_iwarp_msix = (int)num_online_cpus() + 1; 11085 } 11086 /* Stopping the FW LLDP engine is only supported on the 11087 * XL710 with a FW API version >= 1.7. Also, the FW LLDP 11088 * engine cannot be stopped if NPAR is enabled on this 11089 * part 11090 */ 11091 if (pf->hw.mac.type == I40E_MAC_XL710 && 11092 !pf->hw.func_caps.npar_enable && 11093 (pf->hw.aq.api_maj_ver > 1 || 11094 (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6))) 11095 pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP; 11096 11097 #ifdef CONFIG_PCI_IOV 11098 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { 11099 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 11100 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 11101 pf->num_req_vfs = min_t(int, 11102 pf->hw.func_caps.num_vfs, 11103 I40E_MAX_VF_COUNT); 11104 } 11105 #endif /* CONFIG_PCI_IOV */ 11106 pf->eeprom_version = 0xDEAD; 11107 pf->lan_veb = I40E_NO_VEB; 11108 pf->lan_vsi = I40E_NO_VSI; 11109 11110 /* By default FW has this off for performance reasons */ 11111 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; 11112 11113 /* set up queue assignment tracking */ 11114 size = sizeof(struct i40e_lump_tracking) 11115 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 11116 pf->qp_pile = kzalloc(size, GFP_KERNEL); 11117 if (!pf->qp_pile) { 11118 err = -ENOMEM; 11119 goto sw_init_done; 11120 } 11121 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 11122 pf->qp_pile->search_hint = 0; 11123 11124 pf->tx_timeout_recovery_level = 1; 11125 11126 mutex_init(&pf->switch_mutex); 11127 11128 sw_init_done: 11129 return err; 11130 } 11131 11132 /** 11133 * i40e_set_ntuple - set the ntuple feature flag and take action 11134 * @pf: board private structure to initialize 11135 * @features: the feature set that the stack is suggesting 11136 * 11137 * returns a bool to indicate whether a reset needs to happen 11138 **/ 11139 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 11140 { 11141 bool need_reset = false; 11142 11143 /* Check if Flow Director n-tuple support was enabled or disabled. If 11144 * the state changed, we need to reset.
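* (The caller acts on the returned flag; e.g. i40e_set_features() below
* issues a PF reset via i40e_do_reset() when this returns true.)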
11145 	 */
11146 	if (features & NETIF_F_NTUPLE) {
11147 		/* Enable filters and mark for reset */
11148 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11149 			need_reset = true;
11150 		/* enable FD_SB only if MSI-X vectors are available and no
11151 		 * cloud filters exist
11152 		 */
11153 		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11154 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11155 			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11156 		}
11157 	} else {
11158 		/* turn off filters, mark for reset and clear SW filter list */
11159 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11160 			need_reset = true;
11161 			i40e_fdir_filter_exit(pf);
11162 		}
11163 		pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
11164 			       I40E_FLAG_FD_SB_AUTO_DISABLED);
11165 		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11166 
11167 		/* reset fd counters */
11168 		pf->fd_add_err = 0;
11169 		pf->fd_atr_cnt = 0;
11170 		/* if ATR was auto disabled it can be re-enabled. */
11171 		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
11172 			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
11173 			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11174 			    (I40E_DEBUG_FD & pf->hw.debug_mask))
11175 				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
11176 		}
11177 	}
11178 	return need_reset;
11179 }
11180 
11181 /**
11182  * i40e_clear_rss_lut - clear the rx hash lookup table
11183  * @vsi: the VSI being configured
11184  **/
11185 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11186 {
11187 	struct i40e_pf *pf = vsi->back;
11188 	struct i40e_hw *hw = &pf->hw;
11189 	u16 vf_id = vsi->vf_id;
11190 	u8 i;
11191 
11192 	if (vsi->type == I40E_VSI_MAIN) {
11193 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11194 			wr32(hw, I40E_PFQF_HLUT(i), 0);
11195 	} else if (vsi->type == I40E_VSI_SRIOV) {
11196 		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11197 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11198 	} else {
11199 		dev_err(&pf->pdev->dev, "Cannot clear RSS LUT - invalid VSI type\n");
11200 	}
11201 }
11202 
11203 /**
11204  * i40e_set_features - set the netdev feature flags
11205  * @netdev: ptr to the netdev being adjusted
11206  * @features: the feature set that the stack is suggesting
11207  * Note: expects to be called while under rtnl_lock()
11208  **/
11209 static int i40e_set_features(struct net_device *netdev,
11210 			     netdev_features_t features)
11211 {
11212 	struct i40e_netdev_priv *np = netdev_priv(netdev);
11213 	struct i40e_vsi *vsi = np->vsi;
11214 	struct i40e_pf *pf = vsi->back;
11215 	bool need_reset;
11216 
11217 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11218 		i40e_pf_config_rss(pf);
11219 	else if (!(features & NETIF_F_RXHASH) &&
11220 		 netdev->features & NETIF_F_RXHASH)
11221 		i40e_clear_rss_lut(vsi);
11222 
11223 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
11224 		i40e_vlan_stripping_enable(vsi);
11225 	else
11226 		i40e_vlan_stripping_disable(vsi);
11227 
11228 	if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11229 		dev_err(&pf->pdev->dev,
11230 			"Offloaded tc filters active, can't turn hw_tc_offload off\n");
11231 		return -EINVAL;
11232 	}
11233 
11234 	need_reset = i40e_set_ntuple(pf, features);
11235 
11236 	if (need_reset)
11237 		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11238 
11239 	return 0;
11240 }
11241 
11242 /**
11243  * i40e_get_udp_port_idx - Look up a UDP port possibly offloaded for Rx
11244  * @pf: board private structure
11245  * @port: The UDP port to look up
11246  *
11247  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11248  **/
11249 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11250 {
11251 	u8 i;
11252 
11253 	for (i = 0;
i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 11254 if (pf->udp_ports[i].port == port) 11255 return i; 11256 } 11257 11258 return i; 11259 } 11260 11261 /** 11262 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up 11263 * @netdev: This physical port's netdev 11264 * @ti: Tunnel endpoint information 11265 **/ 11266 static void i40e_udp_tunnel_add(struct net_device *netdev, 11267 struct udp_tunnel_info *ti) 11268 { 11269 struct i40e_netdev_priv *np = netdev_priv(netdev); 11270 struct i40e_vsi *vsi = np->vsi; 11271 struct i40e_pf *pf = vsi->back; 11272 u16 port = ntohs(ti->port); 11273 u8 next_idx; 11274 u8 idx; 11275 11276 idx = i40e_get_udp_port_idx(pf, port); 11277 11278 /* Check if port already exists */ 11279 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 11280 netdev_info(netdev, "port %d already offloaded\n", port); 11281 return; 11282 } 11283 11284 /* Now check if there is space to add the new port */ 11285 next_idx = i40e_get_udp_port_idx(pf, 0); 11286 11287 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 11288 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", 11289 port); 11290 return; 11291 } 11292 11293 switch (ti->type) { 11294 case UDP_TUNNEL_TYPE_VXLAN: 11295 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; 11296 break; 11297 case UDP_TUNNEL_TYPE_GENEVE: 11298 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE)) 11299 return; 11300 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; 11301 break; 11302 default: 11303 return; 11304 } 11305 11306 /* New port: add it and mark its index in the bitmap */ 11307 pf->udp_ports[next_idx].port = port; 11308 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 11309 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 11310 } 11311 11312 /** 11313 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away 11314 * @netdev: This physical port's netdev 11315 * @ti: Tunnel endpoint information 11316 **/ 11317 static void i40e_udp_tunnel_del(struct net_device *netdev, 11318 struct udp_tunnel_info *ti) 11319 { 11320 struct i40e_netdev_priv *np = netdev_priv(netdev); 11321 struct i40e_vsi *vsi = np->vsi; 11322 struct i40e_pf *pf = vsi->back; 11323 u16 port = ntohs(ti->port); 11324 u8 idx; 11325 11326 idx = i40e_get_udp_port_idx(pf, port); 11327 11328 /* Check if port already exists */ 11329 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) 11330 goto not_found; 11331 11332 switch (ti->type) { 11333 case UDP_TUNNEL_TYPE_VXLAN: 11334 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) 11335 goto not_found; 11336 break; 11337 case UDP_TUNNEL_TYPE_GENEVE: 11338 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) 11339 goto not_found; 11340 break; 11341 default: 11342 goto not_found; 11343 } 11344 11345 /* if port exists, set it to 0 (mark for deletion) 11346 * and make it pending 11347 */ 11348 pf->udp_ports[idx].port = 0; 11349 pf->pending_udp_bitmap |= BIT_ULL(idx); 11350 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 11351 11352 return; 11353 not_found: 11354 netdev_warn(netdev, "UDP port %d was not found, not deleting\n", 11355 port); 11356 } 11357 11358 static int i40e_get_phys_port_id(struct net_device *netdev, 11359 struct netdev_phys_item_id *ppid) 11360 { 11361 struct i40e_netdev_priv *np = netdev_priv(netdev); 11362 struct i40e_pf *pf = np->vsi->back; 11363 struct i40e_hw *hw = &pf->hw; 11364 11365 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) 11366 return -EOPNOTSUPP; 11367 11368 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); 11369 
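	/* Copy only as many bytes of the port MAC address as fit in
	 * ppid->id; the id_len set above tells the stack how many bytes
	 * are valid. Userspace can typically read the result with
	 * "ip -d link show", where it appears as the port id.
	 */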
memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11370 
11371 	return 0;
11372 }
11373 
11374 /**
11375  * i40e_ndo_fdb_add - add an entry to the hardware database
11376  * @ndm: the input from the stack
11377  * @tb: pointer to array of nladdr (unused)
11378  * @dev: the net device pointer
11379  * @addr: the MAC address entry being added
 * @vid: VLAN ID of the MAC address entry (only 0 is accepted)
11380  * @flags: instructions from stack about fdb operation
11381  **/
11382 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11383 			    struct net_device *dev,
11384 			    const unsigned char *addr, u16 vid,
11385 			    u16 flags)
11386 {
11387 	struct i40e_netdev_priv *np = netdev_priv(dev);
11388 	struct i40e_pf *pf = np->vsi->back;
11389 	int err = 0;
11390 
11391 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11392 		return -EOPNOTSUPP;
11393 
11394 	if (vid) {
11395 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11396 		return -EINVAL;
11397 	}
11398 
11399 	/* Hardware does not support aging addresses so if an
11400 	 * ndm_state is given only allow permanent addresses
11401 	 */
11402 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11403 		netdev_info(dev, "FDB only supports static addresses\n");
11404 		return -EINVAL;
11405 	}
11406 
11407 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11408 		err = dev_uc_add_excl(dev, addr);
11409 	else if (is_multicast_ether_addr(addr))
11410 		err = dev_mc_add_excl(dev, addr);
11411 	else
11412 		err = -EINVAL;
11413 
11414 	/* Only return duplicate errors if NLM_F_EXCL is set */
11415 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
11416 		err = 0;
11417 
11418 	return err;
11419 }
11420 
11421 /**
11422  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11423  * @dev: the netdev being configured
11424  * @nlh: RTNL message
 * @flags: bridge setlink flags (currently unused by this driver)
11425  *
11426  * Inserts a new hardware bridge if not already created and
11427  * enables the bridging mode requested (VEB or VEPA). If the
11428  * hardware bridge has already been inserted and the request
11429  * is to change the mode then that requires a PF reset to
11430  * allow rebuild of the components with required hardware
11431  * bridge mode enabled.
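 * From userspace this is normally driven with iproute2, e.g.
 * "bridge link set dev <ifname> hwmode veb".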
11432 * 11433 * Note: expects to be called while under rtnl_lock() 11434 **/ 11435 static int i40e_ndo_bridge_setlink(struct net_device *dev, 11436 struct nlmsghdr *nlh, 11437 u16 flags) 11438 { 11439 struct i40e_netdev_priv *np = netdev_priv(dev); 11440 struct i40e_vsi *vsi = np->vsi; 11441 struct i40e_pf *pf = vsi->back; 11442 struct i40e_veb *veb = NULL; 11443 struct nlattr *attr, *br_spec; 11444 int i, rem; 11445 11446 /* Only for PF VSI for now */ 11447 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) 11448 return -EOPNOTSUPP; 11449 11450 /* Find the HW bridge for PF VSI */ 11451 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 11452 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 11453 veb = pf->veb[i]; 11454 } 11455 11456 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 11457 11458 nla_for_each_nested(attr, br_spec, rem) { 11459 __u16 mode; 11460 11461 if (nla_type(attr) != IFLA_BRIDGE_MODE) 11462 continue; 11463 11464 mode = nla_get_u16(attr); 11465 if ((mode != BRIDGE_MODE_VEPA) && 11466 (mode != BRIDGE_MODE_VEB)) 11467 return -EINVAL; 11468 11469 /* Insert a new HW bridge */ 11470 if (!veb) { 11471 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 11472 vsi->tc_config.enabled_tc); 11473 if (veb) { 11474 veb->bridge_mode = mode; 11475 i40e_config_bridge_mode(veb); 11476 } else { 11477 /* No Bridge HW offload available */ 11478 return -ENOENT; 11479 } 11480 break; 11481 } else if (mode != veb->bridge_mode) { 11482 /* Existing HW bridge but different mode needs reset */ 11483 veb->bridge_mode = mode; 11484 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ 11485 if (mode == BRIDGE_MODE_VEB) 11486 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 11487 else 11488 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 11489 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); 11490 break; 11491 } 11492 } 11493 11494 return 0; 11495 } 11496 11497 /** 11498 * i40e_ndo_bridge_getlink - Get the hardware bridge mode 11499 * @skb: skb buff 11500 * @pid: process id 11501 * @seq: RTNL message seq # 11502 * @dev: the netdev being configured 11503 * @filter_mask: unused 11504 * @nlflags: netlink flags passed in 11505 * 11506 * Return the mode in which the hardware bridge is operating in 11507 * i.e VEB or VEPA. 
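 * (From userspace the current mode can be queried with, e.g.,
 * "bridge -d link show dev <ifname>".)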
11508  **/
11509 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11510 				   struct net_device *dev,
11511 				   u32 __always_unused filter_mask,
11512 				   int nlflags)
11513 {
11514 	struct i40e_netdev_priv *np = netdev_priv(dev);
11515 	struct i40e_vsi *vsi = np->vsi;
11516 	struct i40e_pf *pf = vsi->back;
11517 	struct i40e_veb *veb = NULL;
11518 	int i;
11519 
11520 	/* Only for PF VSI for now */
11521 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11522 		return -EOPNOTSUPP;
11523 
11524 	/* Find the HW bridge for the PF VSI */
11525 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11526 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11527 			veb = pf->veb[i];
11528 	}
11529 
11530 	if (!veb)
11531 		return 0;
11532 
11533 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11534 				       0, 0, nlflags, filter_mask, NULL);
11535 }
11536 
11537 /**
11538  * i40e_features_check - Validate encapsulated packet conforms to limits
11539  * @skb: skb buff
11540  * @dev: This physical port's netdev
11541  * @features: Offload features that the stack believes apply
11542  **/
11543 static netdev_features_t i40e_features_check(struct sk_buff *skb,
11544 					     struct net_device *dev,
11545 					     netdev_features_t features)
11546 {
11547 	size_t len;
11548 
11549 	/* No point in doing any of this if neither checksum nor GSO are
11550 	 * being requested for this frame. We can rule out both by just
11551 	 * checking for CHECKSUM_PARTIAL
11552 	 */
11553 	if (skb->ip_summed != CHECKSUM_PARTIAL)
11554 		return features;
11555 
11556 	/* We cannot support GSO if the MSS is going to be less than
11557 	 * 64 bytes. If it is then we need to drop support for GSO.
11558 	 */
11559 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11560 		features &= ~NETIF_F_GSO_MASK;
11561 
11562 	/* MACLEN can support at most 63 words */
11563 	len = skb_network_header(skb) - skb->data;
11564 	if (len & ~(63 * 2))
11565 		goto out_err;
11566 
11567 	/* IPLEN and EIPLEN can support at most 127 dwords */
11568 	len = skb_transport_header(skb) - skb_network_header(skb);
11569 	if (len & ~(127 * 4))
11570 		goto out_err;
11571 
11572 	if (skb->encapsulation) {
11573 		/* L4TUNLEN can support 127 words */
11574 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
11575 		if (len & ~(127 * 2))
11576 			goto out_err;
11577 
11578 		/* IPLEN can support at most 127 dwords */
11579 		len = skb_inner_transport_header(skb) -
11580 		      skb_inner_network_header(skb);
11581 		if (len & ~(127 * 4))
11582 			goto out_err;
11583 	}
11584 
11585 	/* No need to validate L4LEN as TCP is the only protocol with a
11586 	 * flexible value and we support all possible values supported
11587 	 * by TCP, which is at most 15 dwords
11588 	 */
11589 
11590 	return features;
11591 out_err:
11592 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11593 }
11594 
11595 /**
11596  * i40e_xdp_setup - add/remove an XDP program
11597  * @vsi: the VSI being changed
11598  * @prog: XDP program
11599  **/
11600 static int i40e_xdp_setup(struct i40e_vsi *vsi,
11601 			  struct bpf_prog *prog)
11602 {
11603 	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11604 	struct i40e_pf *pf = vsi->back;
11605 	struct bpf_prog *old_prog;
11606 	bool need_reset;
11607 	int i;
11608 
11609 	/* Don't allow frames that span over multiple buffers */
11610 	if (frame_size > vsi->rx_buf_len)
11611 		return -EINVAL;
11612 
11613 	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
11614 		return 0;
11615 
11616 	/* When turning XDP on->off/off->on we reset and rebuild the rings.
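	 * The reset is needed because enabling or disabling XDP changes
	 * the queue layout: dedicated XDP Tx rings are added or removed
	 * (note the alloc_queue_pairs doubling for XDP-enabled VSIs in
	 * i40e_vsi_setup()). Merely swapping one program for another
	 * reuses the existing rings and needs no reset.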
*/ 11617 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); 11618 11619 if (need_reset) 11620 i40e_prep_for_reset(pf, true); 11621 11622 old_prog = xchg(&vsi->xdp_prog, prog); 11623 11624 if (need_reset) 11625 i40e_reset_and_rebuild(pf, true, true); 11626 11627 for (i = 0; i < vsi->num_queue_pairs; i++) 11628 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 11629 11630 if (old_prog) 11631 bpf_prog_put(old_prog); 11632 11633 return 0; 11634 } 11635 11636 /** 11637 * i40e_xdp - implements ndo_bpf for i40e 11638 * @dev: netdevice 11639 * @xdp: XDP command 11640 **/ 11641 static int i40e_xdp(struct net_device *dev, 11642 struct netdev_bpf *xdp) 11643 { 11644 struct i40e_netdev_priv *np = netdev_priv(dev); 11645 struct i40e_vsi *vsi = np->vsi; 11646 11647 if (vsi->type != I40E_VSI_MAIN) 11648 return -EINVAL; 11649 11650 switch (xdp->command) { 11651 case XDP_SETUP_PROG: 11652 return i40e_xdp_setup(vsi, xdp->prog); 11653 case XDP_QUERY_PROG: 11654 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); 11655 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; 11656 return 0; 11657 default: 11658 return -EINVAL; 11659 } 11660 } 11661 11662 static const struct net_device_ops i40e_netdev_ops = { 11663 .ndo_open = i40e_open, 11664 .ndo_stop = i40e_close, 11665 .ndo_start_xmit = i40e_lan_xmit_frame, 11666 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 11667 .ndo_set_rx_mode = i40e_set_rx_mode, 11668 .ndo_validate_addr = eth_validate_addr, 11669 .ndo_set_mac_address = i40e_set_mac, 11670 .ndo_change_mtu = i40e_change_mtu, 11671 .ndo_do_ioctl = i40e_ioctl, 11672 .ndo_tx_timeout = i40e_tx_timeout, 11673 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 11674 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 11675 #ifdef CONFIG_NET_POLL_CONTROLLER 11676 .ndo_poll_controller = i40e_netpoll, 11677 #endif 11678 .ndo_setup_tc = __i40e_setup_tc, 11679 .ndo_set_features = i40e_set_features, 11680 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 11681 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 11682 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 11683 .ndo_get_vf_config = i40e_ndo_get_vf_config, 11684 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 11685 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 11686 .ndo_set_vf_trust = i40e_ndo_set_vf_trust, 11687 .ndo_udp_tunnel_add = i40e_udp_tunnel_add, 11688 .ndo_udp_tunnel_del = i40e_udp_tunnel_del, 11689 .ndo_get_phys_port_id = i40e_get_phys_port_id, 11690 .ndo_fdb_add = i40e_ndo_fdb_add, 11691 .ndo_features_check = i40e_features_check, 11692 .ndo_bridge_getlink = i40e_ndo_bridge_getlink, 11693 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 11694 .ndo_bpf = i40e_xdp, 11695 }; 11696 11697 /** 11698 * i40e_config_netdev - Setup the netdev flags 11699 * @vsi: the VSI being configured 11700 * 11701 * Returns 0 on success, negative value on failure 11702 **/ 11703 static int i40e_config_netdev(struct i40e_vsi *vsi) 11704 { 11705 struct i40e_pf *pf = vsi->back; 11706 struct i40e_hw *hw = &pf->hw; 11707 struct i40e_netdev_priv *np; 11708 struct net_device *netdev; 11709 u8 broadcast[ETH_ALEN]; 11710 u8 mac_addr[ETH_ALEN]; 11711 int etherdev_size; 11712 netdev_features_t hw_enc_features; 11713 netdev_features_t hw_features; 11714 11715 etherdev_size = sizeof(struct i40e_netdev_priv); 11716 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 11717 if (!netdev) 11718 return -ENOMEM; 11719 11720 vsi->netdev = netdev; 11721 np = netdev_priv(netdev); 11722 np->vsi = vsi; 11723 11724 hw_enc_features = NETIF_F_SG | 11725 NETIF_F_IP_CSUM | 11726 NETIF_F_IPV6_CSUM | 11727 
NETIF_F_HIGHDMA |
11728 			  NETIF_F_SOFT_FEATURES |
11729 			  NETIF_F_TSO |
11730 			  NETIF_F_TSO_ECN |
11731 			  NETIF_F_TSO6 |
11732 			  NETIF_F_GSO_GRE |
11733 			  NETIF_F_GSO_GRE_CSUM |
11734 			  NETIF_F_GSO_PARTIAL |
11735 			  NETIF_F_GSO_UDP_TUNNEL |
11736 			  NETIF_F_GSO_UDP_TUNNEL_CSUM |
11737 			  NETIF_F_SCTP_CRC |
11738 			  NETIF_F_RXHASH |
11739 			  NETIF_F_RXCSUM |
11740 			  0;
11741 
11742 	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
11743 		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
11744 
11745 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
11746 
11747 	netdev->hw_enc_features |= hw_enc_features;
11748 
11749 	/* record features VLANs can make use of */
11750 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
11751 
11752 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
11753 		netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
11754 
11755 	hw_features = hw_enc_features |
11756 		      NETIF_F_HW_VLAN_CTAG_TX |
11757 		      NETIF_F_HW_VLAN_CTAG_RX;
11758 
11759 	netdev->hw_features |= hw_features;
11760 
11761 	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
11762 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
11763 
11764 	if (vsi->type == I40E_VSI_MAIN) {
11765 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
11766 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
11767 		/* The following steps are necessary for two reasons. First,
11768 		 * some older NVM configurations load a default MAC-VLAN
11769 		 * filter that will accept any tagged packet, and we want to
11770 		 * replace this with a normal filter. Additionally, it is
11771 		 * possible our MAC address was provided by the platform using
11772 		 * Open Firmware or similar.
11773 		 *
11774 		 * Thus, we need to remove the default filter and install one
11775 		 * specific to the MAC address.
11776 		 */
11777 		i40e_rm_default_mac_filter(vsi, mac_addr);
11778 		spin_lock_bh(&vsi->mac_filter_hash_lock);
11779 		i40e_add_mac_filter(vsi, mac_addr);
11780 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
11781 	} else {
11782 		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
11783 		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
11784 		 * the end, which is 4 bytes long, so force truncation of the
11785 		 * original name by IFNAMSIZ - 4
11786 		 */
11787 		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
11788 			 IFNAMSIZ - 4,
11789 			 pf->vsi[pf->lan_vsi]->netdev->name);
11790 		random_ether_addr(mac_addr);
11791 
11792 		spin_lock_bh(&vsi->mac_filter_hash_lock);
11793 		i40e_add_mac_filter(vsi, mac_addr);
11794 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
11795 	}
11796 
11797 	/* Add the broadcast filter so that we initially will receive
11798 	 * broadcast packets. Note that when a new VLAN is first added the
11799 	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
11800 	 * specific filters as part of transitioning into "vlan" operation.
11801 	 * When more VLANs are added, the driver will copy each existing MAC
11802 	 * filter and add it for the new VLAN.
11803 	 *
11804 	 * Broadcast filters are handled specially by
11805 	 * i40e_sync_filters_subtask, as the driver must set the broadcast
11806 	 * promiscuous bit instead of adding this directly as a MAC/VLAN
11807 	 * filter. The subtask will update the correct broadcast promiscuous
11808 	 * bits as VLANs become active or inactive.
11809 	 */
11810 	eth_broadcast_addr(broadcast);
11811 	spin_lock_bh(&vsi->mac_filter_hash_lock);
11812 	i40e_add_mac_filter(vsi, broadcast);
11813 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
11814 
11815 	ether_addr_copy(netdev->dev_addr, mac_addr);
11816 	ether_addr_copy(netdev->perm_addr, mac_addr);
11817 
11818 	netdev->priv_flags |= IFF_UNICAST_FLT;
11819 	netdev->priv_flags |= IFF_SUPP_NOFCS;
11820 	/* Setup netdev TC information */
11821 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
11822 
11823 	netdev->netdev_ops = &i40e_netdev_ops;
11824 	netdev->watchdog_timeo = 5 * HZ;
11825 	i40e_set_ethtool_ops(netdev);
11826 
11827 	/* MTU range: 68 - 9706 */
11828 	netdev->min_mtu = ETH_MIN_MTU;
11829 	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
11830 
11831 	return 0;
11832 }
11833 
11834 /**
11835  * i40e_vsi_delete - Delete a VSI from the switch
11836  * @vsi: the VSI being removed
11837  *
11838  * There is no return value; the default (main LAN) VSI is never removed.
11839  **/
11840 static void i40e_vsi_delete(struct i40e_vsi *vsi)
11841 {
11842 	/* removing the default VSI is not allowed */
11843 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
11844 		return;
11845 
11846 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
11847 }
11848 
11849 /**
11850  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
11851  * @vsi: the VSI being queried
11852  *
11853  * Returns 1 if the HW bridge mode is VEB, 0 if it is VEPA, or -ENOENT
 * if the VEB the VSI points at no longer exists.
11854  **/
11855 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
11856 {
11857 	struct i40e_veb *veb;
11858 	struct i40e_pf *pf = vsi->back;
11859 
11860 	/* Uplink is not a bridge so default to VEB */
11861 	if (vsi->veb_idx == I40E_NO_VEB)
11862 		return 1;
11863 
11864 	veb = pf->veb[vsi->veb_idx];
11865 	if (!veb) {
11866 		dev_info(&pf->pdev->dev,
11867 			 "There is no veb associated with the bridge\n");
11868 		return -ENOENT;
11869 	}
11870 
11871 	/* Uplink is a bridge in VEPA mode */
11872 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
11873 		return 0;
11874 
11875 	/* Uplink is a bridge in VEB mode */
11876 	return 1;
11881 }
11882 
11883 /**
11884  * i40e_add_vsi - Add a VSI to the switch
11885  * @vsi: the VSI being configured
11886  *
11887  * This initializes a VSI context depending on the VSI type to be added and
11888  * passes it down to the add_vsi aq command.
11889  **/
11890 static int i40e_add_vsi(struct i40e_vsi *vsi)
11891 {
11892 	int ret = -ENODEV;
11893 	struct i40e_pf *pf = vsi->back;
11894 	struct i40e_hw *hw = &pf->hw;
11895 	struct i40e_vsi_context ctxt;
11896 	struct i40e_mac_filter *f;
11897 	struct hlist_node *h;
11898 	int bkt;
11899 
11900 	u8 enabled_tc = 0x1; /* TC0 enabled */
11901 	int f_count = 0;
11902 
11903 	memset(&ctxt, 0, sizeof(ctxt));
11904 	switch (vsi->type) {
11905 	case I40E_VSI_MAIN:
11906 		/* The PF's main VSI is already set up as part of the
11907 		 * device initialization, so we'll not bother with
11908 		 * the add_vsi call, but we will retrieve the current
11909 		 * VSI context.
11910 */ 11911 ctxt.seid = pf->main_vsi_seid; 11912 ctxt.pf_num = pf->hw.pf_id; 11913 ctxt.vf_num = 0; 11914 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 11915 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 11916 if (ret) { 11917 dev_info(&pf->pdev->dev, 11918 "couldn't get PF vsi config, err %s aq_err %s\n", 11919 i40e_stat_str(&pf->hw, ret), 11920 i40e_aq_str(&pf->hw, 11921 pf->hw.aq.asq_last_status)); 11922 return -ENOENT; 11923 } 11924 vsi->info = ctxt.info; 11925 vsi->info.valid_sections = 0; 11926 11927 vsi->seid = ctxt.seid; 11928 vsi->id = ctxt.vsi_number; 11929 11930 enabled_tc = i40e_pf_get_tc_map(pf); 11931 11932 /* Source pruning is enabled by default, so the flag is 11933 * negative logic - if it's set, we need to fiddle with 11934 * the VSI to disable source pruning. 11935 */ 11936 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { 11937 memset(&ctxt, 0, sizeof(ctxt)); 11938 ctxt.seid = pf->main_vsi_seid; 11939 ctxt.pf_num = pf->hw.pf_id; 11940 ctxt.vf_num = 0; 11941 ctxt.info.valid_sections |= 11942 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 11943 ctxt.info.switch_id = 11944 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); 11945 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 11946 if (ret) { 11947 dev_info(&pf->pdev->dev, 11948 "update vsi failed, err %s aq_err %s\n", 11949 i40e_stat_str(&pf->hw, ret), 11950 i40e_aq_str(&pf->hw, 11951 pf->hw.aq.asq_last_status)); 11952 ret = -ENOENT; 11953 goto err; 11954 } 11955 } 11956 11957 /* MFP mode setup queue map and update VSI */ 11958 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && 11959 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ 11960 memset(&ctxt, 0, sizeof(ctxt)); 11961 ctxt.seid = pf->main_vsi_seid; 11962 ctxt.pf_num = pf->hw.pf_id; 11963 ctxt.vf_num = 0; 11964 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 11965 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 11966 if (ret) { 11967 dev_info(&pf->pdev->dev, 11968 "update vsi failed, err %s aq_err %s\n", 11969 i40e_stat_str(&pf->hw, ret), 11970 i40e_aq_str(&pf->hw, 11971 pf->hw.aq.asq_last_status)); 11972 ret = -ENOENT; 11973 goto err; 11974 } 11975 /* update the local VSI info queue map */ 11976 i40e_vsi_update_queue_map(vsi, &ctxt); 11977 vsi->info.valid_sections = 0; 11978 } else { 11979 /* Default/Main VSI is only enabled for TC0 11980 * reconfigure it to enable all TCs that are 11981 * available on the port in SFP mode. 11982 * For MFP case the iSCSI PF would use this 11983 * flow to enable LAN+iSCSI TC. 
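			 * (enabled_tc is a bitmap with bit 0 = TC0, so the 0x1
			 * default at the top of this function means "TC0 only".)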
11984 */ 11985 ret = i40e_vsi_config_tc(vsi, enabled_tc); 11986 if (ret) { 11987 /* Single TC condition is not fatal, 11988 * message and continue 11989 */ 11990 dev_info(&pf->pdev->dev, 11991 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", 11992 enabled_tc, 11993 i40e_stat_str(&pf->hw, ret), 11994 i40e_aq_str(&pf->hw, 11995 pf->hw.aq.asq_last_status)); 11996 } 11997 } 11998 break; 11999 12000 case I40E_VSI_FDIR: 12001 ctxt.pf_num = hw->pf_id; 12002 ctxt.vf_num = 0; 12003 ctxt.uplink_seid = vsi->uplink_seid; 12004 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 12005 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 12006 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && 12007 (i40e_is_vsi_uplink_mode_veb(vsi))) { 12008 ctxt.info.valid_sections |= 12009 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 12010 ctxt.info.switch_id = 12011 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 12012 } 12013 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 12014 break; 12015 12016 case I40E_VSI_VMDQ2: 12017 ctxt.pf_num = hw->pf_id; 12018 ctxt.vf_num = 0; 12019 ctxt.uplink_seid = vsi->uplink_seid; 12020 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 12021 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 12022 12023 /* This VSI is connected to VEB so the switch_id 12024 * should be set to zero by default. 12025 */ 12026 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 12027 ctxt.info.valid_sections |= 12028 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 12029 ctxt.info.switch_id = 12030 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 12031 } 12032 12033 /* Setup the VSI tx/rx queue map for TC0 only for now */ 12034 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 12035 break; 12036 12037 case I40E_VSI_SRIOV: 12038 ctxt.pf_num = hw->pf_id; 12039 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 12040 ctxt.uplink_seid = vsi->uplink_seid; 12041 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 12042 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 12043 12044 /* This VSI is connected to VEB so the switch_id 12045 * should be set to zero by default. 
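		 * When the uplink really is a VEB, the ALLOW_LB flag is
		 * set instead so the VEB may locally switch (loop back)
		 * frames between the VSIs sharing it rather than sending
		 * them out on the wire.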
12046 */ 12047 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 12048 ctxt.info.valid_sections |= 12049 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 12050 ctxt.info.switch_id = 12051 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 12052 } 12053 12054 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { 12055 ctxt.info.valid_sections |= 12056 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); 12057 ctxt.info.queueing_opt_flags |= 12058 (I40E_AQ_VSI_QUE_OPT_TCP_ENA | 12059 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI); 12060 } 12061 12062 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 12063 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 12064 if (pf->vf[vsi->vf_id].spoofchk) { 12065 ctxt.info.valid_sections |= 12066 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 12067 ctxt.info.sec_flags |= 12068 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 12069 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 12070 } 12071 /* Setup the VSI tx/rx queue map for TC0 only for now */ 12072 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 12073 break; 12074 12075 case I40E_VSI_IWARP: 12076 /* send down message to iWARP */ 12077 break; 12078 12079 default: 12080 return -ENODEV; 12081 } 12082 12083 if (vsi->type != I40E_VSI_MAIN) { 12084 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 12085 if (ret) { 12086 dev_info(&vsi->back->pdev->dev, 12087 "add vsi failed, err %s aq_err %s\n", 12088 i40e_stat_str(&pf->hw, ret), 12089 i40e_aq_str(&pf->hw, 12090 pf->hw.aq.asq_last_status)); 12091 ret = -ENOENT; 12092 goto err; 12093 } 12094 vsi->info = ctxt.info; 12095 vsi->info.valid_sections = 0; 12096 vsi->seid = ctxt.seid; 12097 vsi->id = ctxt.vsi_number; 12098 } 12099 12100 vsi->active_filters = 0; 12101 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); 12102 spin_lock_bh(&vsi->mac_filter_hash_lock); 12103 /* If macvlan filters already exist, force them to get loaded */ 12104 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 12105 f->state = I40E_FILTER_NEW; 12106 f_count++; 12107 } 12108 spin_unlock_bh(&vsi->mac_filter_hash_lock); 12109 12110 if (f_count) { 12111 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 12112 pf->flags |= I40E_FLAG_FILTER_SYNC; 12113 } 12114 12115 /* Update VSI BW information */ 12116 ret = i40e_vsi_get_bw_info(vsi); 12117 if (ret) { 12118 dev_info(&pf->pdev->dev, 12119 "couldn't get vsi bw info, err %s aq_err %s\n", 12120 i40e_stat_str(&pf->hw, ret), 12121 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 12122 /* VSI is already added so not tearing that up */ 12123 ret = 0; 12124 } 12125 12126 err: 12127 return ret; 12128 } 12129 12130 /** 12131 * i40e_vsi_release - Delete a VSI and free its resources 12132 * @vsi: the VSI being removed 12133 * 12134 * Returns 0 on success or < 0 on error 12135 **/ 12136 int i40e_vsi_release(struct i40e_vsi *vsi) 12137 { 12138 struct i40e_mac_filter *f; 12139 struct hlist_node *h; 12140 struct i40e_veb *veb = NULL; 12141 struct i40e_pf *pf; 12142 u16 uplink_seid; 12143 int i, n, bkt; 12144 12145 pf = vsi->back; 12146 12147 /* release of a VEB-owner or last VSI is not allowed */ 12148 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 12149 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 12150 vsi->seid, vsi->uplink_seid); 12151 return -ENODEV; 12152 } 12153 if (vsi == pf->vsi[pf->lan_vsi] && 12154 !test_bit(__I40E_DOWN, pf->state)) { 12155 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 12156 return -ENODEV; 12157 } 12158 12159 uplink_seid = vsi->uplink_seid; 12160 if (vsi->type != I40E_VSI_SRIOV) { 12161 if (vsi->netdev_registered) { 12162 vsi->netdev_registered = 
false; 12163 if (vsi->netdev) { 12164 /* results in a call to i40e_close() */ 12165 unregister_netdev(vsi->netdev); 12166 } 12167 } else { 12168 i40e_vsi_close(vsi); 12169 } 12170 i40e_vsi_disable_irq(vsi); 12171 } 12172 12173 spin_lock_bh(&vsi->mac_filter_hash_lock); 12174 12175 /* clear the sync flag on all filters */ 12176 if (vsi->netdev) { 12177 __dev_uc_unsync(vsi->netdev, NULL); 12178 __dev_mc_unsync(vsi->netdev, NULL); 12179 } 12180 12181 /* make sure any remaining filters are marked for deletion */ 12182 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) 12183 __i40e_del_filter(vsi, f); 12184 12185 spin_unlock_bh(&vsi->mac_filter_hash_lock); 12186 12187 i40e_sync_vsi_filters(vsi); 12188 12189 i40e_vsi_delete(vsi); 12190 i40e_vsi_free_q_vectors(vsi); 12191 if (vsi->netdev) { 12192 free_netdev(vsi->netdev); 12193 vsi->netdev = NULL; 12194 } 12195 i40e_vsi_clear_rings(vsi); 12196 i40e_vsi_clear(vsi); 12197 12198 /* If this was the last thing on the VEB, except for the 12199 * controlling VSI, remove the VEB, which puts the controlling 12200 * VSI onto the next level down in the switch. 12201 * 12202 * Well, okay, there's one more exception here: don't remove 12203 * the orphan VEBs yet. We'll wait for an explicit remove request 12204 * from up the network stack. 12205 */ 12206 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 12207 if (pf->vsi[i] && 12208 pf->vsi[i]->uplink_seid == uplink_seid && 12209 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 12210 n++; /* count the VSIs */ 12211 } 12212 } 12213 for (i = 0; i < I40E_MAX_VEB; i++) { 12214 if (!pf->veb[i]) 12215 continue; 12216 if (pf->veb[i]->uplink_seid == uplink_seid) 12217 n++; /* count the VEBs */ 12218 if (pf->veb[i]->seid == uplink_seid) 12219 veb = pf->veb[i]; 12220 } 12221 if (n == 0 && veb && veb->uplink_seid != 0) 12222 i40e_veb_release(veb); 12223 12224 return 0; 12225 } 12226 12227 /** 12228 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 12229 * @vsi: ptr to the VSI 12230 * 12231 * This should only be called after i40e_vsi_mem_alloc() which allocates the 12232 * corresponding SW VSI structure and initializes num_queue_pairs for the 12233 * newly allocated VSI. 12234 * 12235 * Returns 0 on success or negative on failure 12236 **/ 12237 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 12238 { 12239 int ret = -ENOENT; 12240 struct i40e_pf *pf = vsi->back; 12241 12242 if (vsi->q_vectors[0]) { 12243 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 12244 vsi->seid); 12245 return -EEXIST; 12246 } 12247 12248 if (vsi->base_vector) { 12249 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 12250 vsi->seid, vsi->base_vector); 12251 return -EEXIST; 12252 } 12253 12254 ret = i40e_vsi_alloc_q_vectors(vsi); 12255 if (ret) { 12256 dev_info(&pf->pdev->dev, 12257 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 12258 vsi->num_q_vectors, vsi->seid, ret); 12259 vsi->num_q_vectors = 0; 12260 goto vector_setup_out; 12261 } 12262 12263 /* In Legacy mode, we do not have to get any other vector since we 12264 * piggyback on the misc/ICR0 for queue interrupts. 
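	 * In that case base_vector is left at zero and nothing is
	 * claimed from the PF's irq_pile vector tracker.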
12265 */ 12266 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 12267 return ret; 12268 if (vsi->num_q_vectors) 12269 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 12270 vsi->num_q_vectors, vsi->idx); 12271 if (vsi->base_vector < 0) { 12272 dev_info(&pf->pdev->dev, 12273 "failed to get tracking for %d vectors for VSI %d, err=%d\n", 12274 vsi->num_q_vectors, vsi->seid, vsi->base_vector); 12275 i40e_vsi_free_q_vectors(vsi); 12276 ret = -ENOENT; 12277 goto vector_setup_out; 12278 } 12279 12280 vector_setup_out: 12281 return ret; 12282 } 12283 12284 /** 12285 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 12286 * @vsi: pointer to the vsi. 12287 * 12288 * This re-allocates a vsi's queue resources. 12289 * 12290 * Returns pointer to the successfully allocated and configured VSI sw struct 12291 * on success, otherwise returns NULL on failure. 12292 **/ 12293 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 12294 { 12295 u16 alloc_queue_pairs; 12296 struct i40e_pf *pf; 12297 u8 enabled_tc; 12298 int ret; 12299 12300 if (!vsi) 12301 return NULL; 12302 12303 pf = vsi->back; 12304 12305 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 12306 i40e_vsi_clear_rings(vsi); 12307 12308 i40e_vsi_free_arrays(vsi, false); 12309 i40e_set_num_rings_in_vsi(vsi); 12310 ret = i40e_vsi_alloc_arrays(vsi, false); 12311 if (ret) 12312 goto err_vsi; 12313 12314 alloc_queue_pairs = vsi->alloc_queue_pairs * 12315 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); 12316 12317 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); 12318 if (ret < 0) { 12319 dev_info(&pf->pdev->dev, 12320 "failed to get tracking for %d queues for VSI %d err %d\n", 12321 alloc_queue_pairs, vsi->seid, ret); 12322 goto err_vsi; 12323 } 12324 vsi->base_queue = ret; 12325 12326 /* Update the FW view of the VSI. Force a reset of TC and queue 12327 * layout configurations. 12328 */ 12329 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 12330 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 12331 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 12332 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 12333 if (vsi->type == I40E_VSI_MAIN) 12334 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); 12335 12336 /* assign it some queues */ 12337 ret = i40e_alloc_rings(vsi); 12338 if (ret) 12339 goto err_rings; 12340 12341 /* map all of the rings to the q_vectors */ 12342 i40e_vsi_map_rings_to_vectors(vsi); 12343 return vsi; 12344 12345 err_rings: 12346 i40e_vsi_free_q_vectors(vsi); 12347 if (vsi->netdev_registered) { 12348 vsi->netdev_registered = false; 12349 unregister_netdev(vsi->netdev); 12350 free_netdev(vsi->netdev); 12351 vsi->netdev = NULL; 12352 } 12353 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 12354 err_vsi: 12355 i40e_vsi_clear(vsi); 12356 return NULL; 12357 } 12358 12359 /** 12360 * i40e_vsi_setup - Set up a VSI by a given type 12361 * @pf: board private structure 12362 * @type: VSI type 12363 * @uplink_seid: the switch element to link to 12364 * @param1: usage depends upon VSI type. For VF types, indicates VF id 12365 * 12366 * This allocates the sw VSI structure and its queue resources, then add a VSI 12367 * to the identified VEB. 12368 * 12369 * Returns pointer to the successfully allocated and configure VSI sw struct on 12370 * success, otherwise returns NULL on failure. 
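 *
 * For example, elsewhere in this driver the sideband Flow Director VSI
 * is created along these lines:
 *	i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->vsi[pf->lan_vsi]->seid, 0);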
12371 **/ 12372 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, 12373 u16 uplink_seid, u32 param1) 12374 { 12375 struct i40e_vsi *vsi = NULL; 12376 struct i40e_veb *veb = NULL; 12377 u16 alloc_queue_pairs; 12378 int ret, i; 12379 int v_idx; 12380 12381 /* The requested uplink_seid must be either 12382 * - the PF's port seid 12383 * no VEB is needed because this is the PF 12384 * or this is a Flow Director special case VSI 12385 * - seid of an existing VEB 12386 * - seid of a VSI that owns an existing VEB 12387 * - seid of a VSI that doesn't own a VEB 12388 * a new VEB is created and the VSI becomes the owner 12389 * - seid of the PF VSI, which is what creates the first VEB 12390 * this is a special case of the previous 12391 * 12392 * Find which uplink_seid we were given and create a new VEB if needed 12393 */ 12394 for (i = 0; i < I40E_MAX_VEB; i++) { 12395 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { 12396 veb = pf->veb[i]; 12397 break; 12398 } 12399 } 12400 12401 if (!veb && uplink_seid != pf->mac_seid) { 12402 12403 for (i = 0; i < pf->num_alloc_vsi; i++) { 12404 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 12405 vsi = pf->vsi[i]; 12406 break; 12407 } 12408 } 12409 if (!vsi) { 12410 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", 12411 uplink_seid); 12412 return NULL; 12413 } 12414 12415 if (vsi->uplink_seid == pf->mac_seid) 12416 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, 12417 vsi->tc_config.enabled_tc); 12418 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) 12419 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 12420 vsi->tc_config.enabled_tc); 12421 if (veb) { 12422 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { 12423 dev_info(&vsi->back->pdev->dev, 12424 "New VSI creation error, uplink seid of LAN VSI expected.\n"); 12425 return NULL; 12426 } 12427 /* We come up by default in VEPA mode if SRIOV is not 12428 * already enabled, in which case we can't force VEPA 12429 * mode. 12430 */ 12431 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 12432 veb->bridge_mode = BRIDGE_MODE_VEPA; 12433 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 12434 } 12435 i40e_config_bridge_mode(veb); 12436 } 12437 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 12438 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 12439 veb = pf->veb[i]; 12440 } 12441 if (!veb) { 12442 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); 12443 return NULL; 12444 } 12445 12446 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 12447 uplink_seid = veb->seid; 12448 } 12449 12450 /* get vsi sw struct */ 12451 v_idx = i40e_vsi_mem_alloc(pf, type); 12452 if (v_idx < 0) 12453 goto err_alloc; 12454 vsi = pf->vsi[v_idx]; 12455 if (!vsi) 12456 goto err_alloc; 12457 vsi->type = type; 12458 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); 12459 12460 if (type == I40E_VSI_MAIN) 12461 pf->lan_vsi = v_idx; 12462 else if (type == I40E_VSI_SRIOV) 12463 vsi->vf_id = param1; 12464 /* assign it some queues */ 12465 alloc_queue_pairs = vsi->alloc_queue_pairs * 12466 (i40e_enabled_xdp_vsi(vsi) ? 
2 : 1); 12467 12468 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); 12469 if (ret < 0) { 12470 dev_info(&pf->pdev->dev, 12471 "failed to get tracking for %d queues for VSI %d err=%d\n", 12472 alloc_queue_pairs, vsi->seid, ret); 12473 goto err_vsi; 12474 } 12475 vsi->base_queue = ret; 12476 12477 /* get a VSI from the hardware */ 12478 vsi->uplink_seid = uplink_seid; 12479 ret = i40e_add_vsi(vsi); 12480 if (ret) 12481 goto err_vsi; 12482 12483 switch (vsi->type) { 12484 /* setup the netdev if needed */ 12485 case I40E_VSI_MAIN: 12486 case I40E_VSI_VMDQ2: 12487 ret = i40e_config_netdev(vsi); 12488 if (ret) 12489 goto err_netdev; 12490 ret = register_netdev(vsi->netdev); 12491 if (ret) 12492 goto err_netdev; 12493 vsi->netdev_registered = true; 12494 netif_carrier_off(vsi->netdev); 12495 #ifdef CONFIG_I40E_DCB 12496 /* Setup DCB netlink interface */ 12497 i40e_dcbnl_setup(vsi); 12498 #endif /* CONFIG_I40E_DCB */ 12499 /* fall through */ 12500 12501 case I40E_VSI_FDIR: 12502 /* set up vectors and rings if needed */ 12503 ret = i40e_vsi_setup_vectors(vsi); 12504 if (ret) 12505 goto err_msix; 12506 12507 ret = i40e_alloc_rings(vsi); 12508 if (ret) 12509 goto err_rings; 12510 12511 /* map all of the rings to the q_vectors */ 12512 i40e_vsi_map_rings_to_vectors(vsi); 12513 12514 i40e_vsi_reset_stats(vsi); 12515 break; 12516 12517 default: 12518 /* no netdev or rings for the other VSI types */ 12519 break; 12520 } 12521 12522 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && 12523 (vsi->type == I40E_VSI_VMDQ2)) { 12524 ret = i40e_vsi_config_rss(vsi); 12525 } 12526 return vsi; 12527 12528 err_rings: 12529 i40e_vsi_free_q_vectors(vsi); 12530 err_msix: 12531 if (vsi->netdev_registered) { 12532 vsi->netdev_registered = false; 12533 unregister_netdev(vsi->netdev); 12534 free_netdev(vsi->netdev); 12535 vsi->netdev = NULL; 12536 } 12537 err_netdev: 12538 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 12539 err_vsi: 12540 i40e_vsi_clear(vsi); 12541 err_alloc: 12542 return NULL; 12543 } 12544 12545 /** 12546 * i40e_veb_get_bw_info - Query VEB BW information 12547 * @veb: the veb to query 12548 * 12549 * Query the Tx scheduler BW configuration data for given VEB 12550 **/ 12551 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 12552 { 12553 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 12554 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; 12555 struct i40e_pf *pf = veb->pf; 12556 struct i40e_hw *hw = &pf->hw; 12557 u32 tc_bw_max; 12558 int ret = 0; 12559 int i; 12560 12561 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 12562 &bw_data, NULL); 12563 if (ret) { 12564 dev_info(&pf->pdev->dev, 12565 "query veb bw config failed, err %s aq_err %s\n", 12566 i40e_stat_str(&pf->hw, ret), 12567 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 12568 goto out; 12569 } 12570 12571 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 12572 &ets_data, NULL); 12573 if (ret) { 12574 dev_info(&pf->pdev->dev, 12575 "query veb bw ets config failed, err %s aq_err %s\n", 12576 i40e_stat_str(&pf->hw, ret), 12577 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 12578 goto out; 12579 } 12580 12581 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 12582 veb->bw_max_quanta = ets_data.tc_bw_max; 12583 veb->is_abs_credits = bw_data.absolute_credits_enable; 12584 veb->enabled_tc = ets_data.tc_valid_bits; 12585 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 12586 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 12587 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 12588 
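		/* tc_bw_max packs one 3-bit "max quanta" field per TC at
		 * 4-bit strides, across the two 16-bit words combined
		 * above; hence the ((tc_bw_max >> (i * 4)) & 0x7)
		 * extraction below.
		 */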
veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
12589 		veb->bw_tc_limit_credits[i] =
12590 			le16_to_cpu(bw_data.tc_bw_limits[i]);
12591 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
12592 	}
12593 
12594 out:
12595 	return ret;
12596 }
12597 
12598 /**
12599  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
12600  * @pf: board private structure
12601  *
12602  * On error: returns error code (negative)
12603  * On success: returns veb index in PF (positive)
12604  **/
12605 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
12606 {
12607 	int ret = -ENOENT;
12608 	struct i40e_veb *veb;
12609 	int i;
12610 
12611 	/* Need to protect the allocation of switch elements at the PF level */
12612 	mutex_lock(&pf->switch_mutex);
12613 
12614 	/* VEB list may be fragmented if VEB creation/destruction has
12615 	 * been happening. We can afford to do a quick scan to look
12616 	 * for any free slots in the list.
12617 	 *
12618 	 * find the next empty veb slot
12619 	 */
12620 	i = 0;
12621 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
12622 		i++;
12623 	if (i >= I40E_MAX_VEB) {
12624 		ret = -ENOMEM;
12625 		goto err_alloc_veb; /* out of VEB slots! */
12626 	}
12627 
12628 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
12629 	if (!veb) {
12630 		ret = -ENOMEM;
12631 		goto err_alloc_veb;
12632 	}
12633 	veb->pf = pf;
12634 	veb->idx = i;
12635 	veb->enabled_tc = 1;
12636 
12637 	pf->veb[i] = veb;
12638 	ret = i;
12639 err_alloc_veb:
12640 	mutex_unlock(&pf->switch_mutex);
12641 	return ret;
12642 }
12643 
12644 /**
12645  * i40e_switch_branch_release - Delete a branch of the switch tree
12646  * @branch: where to start deleting
12647  *
12648  * This uses recursion to find the tips of the branch to be
12649  * removed, deleting until we get back to and can delete this VEB.
12650  **/
12651 static void i40e_switch_branch_release(struct i40e_veb *branch)
12652 {
12653 	struct i40e_pf *pf = branch->pf;
12654 	u16 branch_seid = branch->seid;
12655 	u16 veb_idx = branch->idx;
12656 	int i;
12657 
12658 	/* release any VEBs on this VEB - RECURSION */
12659 	for (i = 0; i < I40E_MAX_VEB; i++) {
12660 		if (!pf->veb[i])
12661 			continue;
12662 		if (pf->veb[i]->uplink_seid == branch->seid)
12663 			i40e_switch_branch_release(pf->veb[i]);
12664 	}
12665 
12666 	/* Release the VSIs on this VEB, but not the owner VSI.
12667 	 *
12668 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
12669 	 * the VEB itself, so don't use (*branch) after this loop.
12670 	 */
12671 	for (i = 0; i < pf->num_alloc_vsi; i++) {
12672 		if (!pf->vsi[i])
12673 			continue;
12674 		if (pf->vsi[i]->uplink_seid == branch_seid &&
12675 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12676 			i40e_vsi_release(pf->vsi[i]);
12677 		}
12678 	}
12679 
12680 	/* There's one corner case where the VEB might not have been
12681 	 * removed, so double check it here and remove it if needed.
12682 	 * This case happens if the veb was created from the debugfs
12683 	 * commands and no VSIs were added to it.
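	 * (e.g. a VEB created with the debugfs "add relay" command,
	 * assuming CONFIG_DEBUG_FS support is built in)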
12684 */ 12685 if (pf->veb[veb_idx]) 12686 i40e_veb_release(pf->veb[veb_idx]); 12687 } 12688 12689 /** 12690 * i40e_veb_clear - remove veb struct 12691 * @veb: the veb to remove 12692 **/ 12693 static void i40e_veb_clear(struct i40e_veb *veb) 12694 { 12695 if (!veb) 12696 return; 12697 12698 if (veb->pf) { 12699 struct i40e_pf *pf = veb->pf; 12700 12701 mutex_lock(&pf->switch_mutex); 12702 if (pf->veb[veb->idx] == veb) 12703 pf->veb[veb->idx] = NULL; 12704 mutex_unlock(&pf->switch_mutex); 12705 } 12706 12707 kfree(veb); 12708 } 12709 12710 /** 12711 * i40e_veb_release - Delete a VEB and free its resources 12712 * @veb: the VEB being removed 12713 **/ 12714 void i40e_veb_release(struct i40e_veb *veb) 12715 { 12716 struct i40e_vsi *vsi = NULL; 12717 struct i40e_pf *pf; 12718 int i, n = 0; 12719 12720 pf = veb->pf; 12721 12722 /* find the remaining VSI and check for extras */ 12723 for (i = 0; i < pf->num_alloc_vsi; i++) { 12724 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 12725 n++; 12726 vsi = pf->vsi[i]; 12727 } 12728 } 12729 if (n != 1) { 12730 dev_info(&pf->pdev->dev, 12731 "can't remove VEB %d with %d VSIs left\n", 12732 veb->seid, n); 12733 return; 12734 } 12735 12736 /* move the remaining VSI to uplink veb */ 12737 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 12738 if (veb->uplink_seid) { 12739 vsi->uplink_seid = veb->uplink_seid; 12740 if (veb->uplink_seid == pf->mac_seid) 12741 vsi->veb_idx = I40E_NO_VEB; 12742 else 12743 vsi->veb_idx = veb->veb_idx; 12744 } else { 12745 /* floating VEB */ 12746 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 12747 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 12748 } 12749 12750 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 12751 i40e_veb_clear(veb); 12752 } 12753 12754 /** 12755 * i40e_add_veb - create the VEB in the switch 12756 * @veb: the VEB to be instantiated 12757 * @vsi: the controlling VSI 12758 **/ 12759 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 12760 { 12761 struct i40e_pf *pf = veb->pf; 12762 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); 12763 int ret; 12764 12765 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 12766 veb->enabled_tc, false, 12767 &veb->seid, enable_stats, NULL); 12768 12769 /* get a VEB from the hardware */ 12770 if (ret) { 12771 dev_info(&pf->pdev->dev, 12772 "couldn't add VEB, err %s aq_err %s\n", 12773 i40e_stat_str(&pf->hw, ret), 12774 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 12775 return -EPERM; 12776 } 12777 12778 /* get statistics counter */ 12779 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, 12780 &veb->stats_idx, NULL, NULL, NULL); 12781 if (ret) { 12782 dev_info(&pf->pdev->dev, 12783 "couldn't get VEB statistics idx, err %s aq_err %s\n", 12784 i40e_stat_str(&pf->hw, ret), 12785 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 12786 return -EPERM; 12787 } 12788 ret = i40e_veb_get_bw_info(veb); 12789 if (ret) { 12790 dev_info(&pf->pdev->dev, 12791 "couldn't get VEB bw info, err %s aq_err %s\n", 12792 i40e_stat_str(&pf->hw, ret), 12793 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 12794 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 12795 return -ENOENT; 12796 } 12797 12798 vsi->uplink_seid = veb->seid; 12799 vsi->veb_idx = veb->idx; 12800 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 12801 12802 return 0; 12803 } 12804 12805 /** 12806 * i40e_veb_setup - Set up a VEB 12807 * @pf: board private structure 12808 * @flags: VEB setup flags 12809 * @uplink_seid: the switch element to link to 12810 * @vsi_seid: the 
initial VSI seid 12811 * @enabled_tc: Enabled TC bit-map 12812 * 12813 * This allocates the sw VEB structure and links it into the switch 12814 * It is possible and legal for this to be a duplicate of an already 12815 * existing VEB. It is also possible for both uplink and vsi seids 12816 * to be zero, in order to create a floating VEB. 12817 * 12818 * Returns pointer to the successfully allocated VEB sw struct on 12819 * success, otherwise returns NULL on failure. 12820 **/ 12821 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 12822 u16 uplink_seid, u16 vsi_seid, 12823 u8 enabled_tc) 12824 { 12825 struct i40e_veb *veb, *uplink_veb = NULL; 12826 int vsi_idx, veb_idx; 12827 int ret; 12828 12829 /* if one seid is 0, the other must be 0 to create a floating relay */ 12830 if ((uplink_seid == 0 || vsi_seid == 0) && 12831 (uplink_seid + vsi_seid != 0)) { 12832 dev_info(&pf->pdev->dev, 12833 "one, not both seid's are 0: uplink=%d vsi=%d\n", 12834 uplink_seid, vsi_seid); 12835 return NULL; 12836 } 12837 12838 /* make sure there is such a vsi and uplink */ 12839 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) 12840 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 12841 break; 12842 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { 12843 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 12844 vsi_seid); 12845 return NULL; 12846 } 12847 12848 if (uplink_seid && uplink_seid != pf->mac_seid) { 12849 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 12850 if (pf->veb[veb_idx] && 12851 pf->veb[veb_idx]->seid == uplink_seid) { 12852 uplink_veb = pf->veb[veb_idx]; 12853 break; 12854 } 12855 } 12856 if (!uplink_veb) { 12857 dev_info(&pf->pdev->dev, 12858 "uplink seid %d not found\n", uplink_seid); 12859 return NULL; 12860 } 12861 } 12862 12863 /* get veb sw struct */ 12864 veb_idx = i40e_veb_mem_alloc(pf); 12865 if (veb_idx < 0) 12866 goto err_alloc; 12867 veb = pf->veb[veb_idx]; 12868 veb->flags = flags; 12869 veb->uplink_seid = uplink_seid; 12870 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 12871 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 12872 12873 /* create the VEB in the switch */ 12874 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 12875 if (ret) 12876 goto err_veb; 12877 if (vsi_idx == pf->lan_vsi) 12878 pf->lan_veb = veb->idx; 12879 12880 return veb; 12881 12882 err_veb: 12883 i40e_veb_clear(veb); 12884 err_alloc: 12885 return NULL; 12886 } 12887 12888 /** 12889 * i40e_setup_pf_switch_element - set PF vars based on switch type 12890 * @pf: board private structure 12891 * @ele: element we are building info from 12892 * @num_reported: total number of elements 12893 * @printconfig: should we print the contents 12894 * 12895 * helper function to assist in extracting a few useful SEID values. 12896 **/ 12897 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 12898 struct i40e_aqc_switch_config_element_resp *ele, 12899 u16 num_reported, bool printconfig) 12900 { 12901 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 12902 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 12903 u8 element_type = ele->element_type; 12904 u16 seid = le16_to_cpu(ele->seid); 12905 12906 if (printconfig) 12907 dev_info(&pf->pdev->dev, 12908 "type=%d seid=%d uplink=%d downlink=%d\n", 12909 element_type, seid, uplink_seid, downlink_seid); 12910 12911 switch (element_type) { 12912 case I40E_SWITCH_ELEMENT_TYPE_MAC: 12913 pf->mac_seid = seid; 12914 break; 12915 case I40E_SWITCH_ELEMENT_TYPE_VEB: 12916 /* Main VEB? 
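		 * That is, a VEB whose uplink is the MAC itself; only
		 * such a VEB is tracked here as pf->lan_veb.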
*/ 12917 if (uplink_seid != pf->mac_seid) 12918 break; 12919 if (pf->lan_veb == I40E_NO_VEB) { 12920 int v; 12921 12922 /* find existing or else empty VEB */ 12923 for (v = 0; v < I40E_MAX_VEB; v++) { 12924 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 12925 pf->lan_veb = v; 12926 break; 12927 } 12928 } 12929 if (pf->lan_veb == I40E_NO_VEB) { 12930 v = i40e_veb_mem_alloc(pf); 12931 if (v < 0) 12932 break; 12933 pf->lan_veb = v; 12934 } 12935 } 12936 12937 pf->veb[pf->lan_veb]->seid = seid; 12938 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 12939 pf->veb[pf->lan_veb]->pf = pf; 12940 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 12941 break; 12942 case I40E_SWITCH_ELEMENT_TYPE_VSI: 12943 if (num_reported != 1) 12944 break; 12945 /* This is immediately after a reset so we can assume this is 12946 * the PF's VSI 12947 */ 12948 pf->mac_seid = uplink_seid; 12949 pf->pf_seid = downlink_seid; 12950 pf->main_vsi_seid = seid; 12951 if (printconfig) 12952 dev_info(&pf->pdev->dev, 12953 "pf_seid=%d main_vsi_seid=%d\n", 12954 pf->pf_seid, pf->main_vsi_seid); 12955 break; 12956 case I40E_SWITCH_ELEMENT_TYPE_PF: 12957 case I40E_SWITCH_ELEMENT_TYPE_VF: 12958 case I40E_SWITCH_ELEMENT_TYPE_EMP: 12959 case I40E_SWITCH_ELEMENT_TYPE_BMC: 12960 case I40E_SWITCH_ELEMENT_TYPE_PE: 12961 case I40E_SWITCH_ELEMENT_TYPE_PA: 12962 /* ignore these for now */ 12963 break; 12964 default: 12965 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 12966 element_type, seid); 12967 break; 12968 } 12969 } 12970 12971 /** 12972 * i40e_fetch_switch_configuration - Get switch config from firmware 12973 * @pf: board private structure 12974 * @printconfig: should we print the contents 12975 * 12976 * Get the current switch configuration from the device and 12977 * extract a few useful SEID values. 12978 **/ 12979 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) 12980 { 12981 struct i40e_aqc_get_switch_config_resp *sw_config; 12982 u16 next_seid = 0; 12983 int ret = 0; 12984 u8 *aq_buf; 12985 int i; 12986 12987 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); 12988 if (!aq_buf) 12989 return -ENOMEM; 12990 12991 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 12992 do { 12993 u16 num_reported, num_total; 12994 12995 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, 12996 I40E_AQ_LARGE_BUF, 12997 &next_seid, NULL); 12998 if (ret) { 12999 dev_info(&pf->pdev->dev, 13000 "get switch config failed err %s aq_err %s\n", 13001 i40e_stat_str(&pf->hw, ret), 13002 i40e_aq_str(&pf->hw, 13003 pf->hw.aq.asq_last_status)); 13004 kfree(aq_buf); 13005 return -ENOENT; 13006 } 13007 13008 num_reported = le16_to_cpu(sw_config->header.num_reported); 13009 num_total = le16_to_cpu(sw_config->header.num_total); 13010 13011 if (printconfig) 13012 dev_info(&pf->pdev->dev, 13013 "header: %d reported %d total\n", 13014 num_reported, num_total); 13015 13016 for (i = 0; i < num_reported; i++) { 13017 struct i40e_aqc_switch_config_element_resp *ele = 13018 &sw_config->element[i]; 13019 13020 i40e_setup_pf_switch_element(pf, ele, num_reported, 13021 printconfig); 13022 } 13023 } while (next_seid != 0); 13024 13025 kfree(aq_buf); 13026 return ret; 13027 } 13028 13029 /** 13030 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset 13031 * @pf: board private structure 13032 * @reinit: if the Main VSI needs to re-initialized. 
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* Set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when the user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}
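/* Sketch (illustrative only): I40E_AQ_SET_SWITCH_CFG_PROMISC acts as
 * both a value and a validity bit. With the bit set in valid_flags, the
 * flags value selects the mode, so a hypothetical switch to true
 * promiscuous support would clear the bit instead:
 *
 *	u16 valid = I40E_AQ_SET_SWITCH_CFG_PROMISC;
 *
 *	i40e_aq_set_switch_config(&pf->hw, 0, valid, 0, NULL);
 *
 * whereas flags == valid (as coded above) requests limited promiscuous
 * mode, the default.
 */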
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use. We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* allow up to the larger of the RSS size or the CPU count,
		 * capped by the available queue pairs and MSI-X vectors
		 */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
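/* Worked example (hypothetical capabilities, for illustration only):
 * with num_tx_qp = 64, rss_size_max = 16, 12 online CPUs and
 * num_msix_vectors = 32, the LAN path gets
 * min(max(16, 12), 64, 32) = 16 queue pairs, leaving 48. Flow Director
 * then reserves 1, and with 4 requested VFs at num_vf_qps = 4 another
 * 16 are consumed, so queues_left ends up as 31 for VMDq and later use.
 */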
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per-PF
 * based filter sizes required for them. It also enables Flow Director,
 * ethertype and macvlan type filter settings for the PF.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
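/* Illustrative note: either Flow Director flavor flips the same switch,
 * so a hypothetical configuration with only ATR enabled still programs
 * the FD filter space:
 *
 *	pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 *	if (i40e_setup_pf_filter_control(pf))
 *		dev_warn(&pf->pdev->dev, "filter control setup failed\n");
 *
 * The 128-entry hash LUT size is the fixed PF default used above.
 */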
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which checks Open Firmware or an
 * arch-specific fallback. Otherwise we default to the value stored
 * in firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_mem_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;
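	/* Illustrative note on the "debug" module parameter handled above:
	 * non-negative values are netif message levels, while a value with
	 * the top bit set (e.g. a hypothetical debug=0x80000035) is
	 * negative as a signed int and below -1, so it is passed through
	 * verbatim as the admin-queue/firmware debug_mask instead.
	 */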
	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}
	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this call will fail.
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
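	/* Worked example (hypothetical NVM contents, for illustration): if
	 * this function is on port 2 and the NVM word reads 0x0004, then
	 * BIT(2) & 0x0004 is nonzero, so WoL is left disabled; a word of
	 * 0x0000 on partition 1 would enable it instead.
	 */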
	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}
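	/* Illustrative note on the read-modify-write clamp above
	 * (hypothetical register value): if I40E_REG_MSS reads back with a
	 * minimum-MSS field larger than the 64-byte encoding, only that
	 * field is cleared and rewritten; all other bits in the register
	 * are preserved by the mask-then-OR sequence.
	 */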
	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}
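	/* Example log output (hypothetical link, for illustration): a board
	 * negotiated at Gen3 x8 logs "PCI-Express: Speed 8.0GT/s Width x8"
	 * via the format above, while anything below 8.0GT/s or x8 also
	 * triggers the two bandwidth warnings.
	 */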
	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the channel state that was detected
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
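/* Illustrative note on the AER recovery flow driven by the PCI core
 * (sequence per the kernel's PCI error recovery model, not driver code):
 * error_detected() quiesces and returns PCI_ERS_RESULT_NEED_RESET,
 * the core then calls slot_reset() after the link has been reset, and
 * finally resume() rebuilds driver state once recovery succeeds.
 */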
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 */
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}

/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up\n");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf, true);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	return 0;
}

/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, false);

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);