/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#if IS_ENABLED(CONFIG_VXLAN)
#include <net/vxlan.h>
#endif
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
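/* With the values above, DRV_VERSION expands at build time to the string
 * "1.5.16-k" (major.minor.build plus the "-k" in-kernel build tag).
 */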
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
			      u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
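/* i40e_allocate_dma_mem_d() above and its i40e_free_dma_mem_d()
 * counterpart below back the OS-abstraction wrappers used by the
 * hardware-shared code (AdminQ rings, HMC backing pages and the like).
 * A rough sketch of a call site, with illustrative values only:
 *
 *	struct i40e_dma_mem mem;
 *
 *	if (i40e_allocate_dma_mem_d(hw, &mem, ring_size, 4096))
 *		return -ENOMEM;
 *	(mem.va is the CPU address, mem.pa the bus address)
 *	i40e_free_dma_mem_d(hw, &mem);
 */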
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
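/* Worked example for the pile allocator above (values are illustrative
 * only): with num_entries = 8, an empty list and search_hint = 0, the
 * call i40e_get_lump(pf, pile, 3, 5) returns base index 0, marks entries
 * 0-2 as (5 | I40E_PILE_VALID_BIT) and moves search_hint to 3.  A later
 * i40e_put_lump(pile, 0, 5) clears those three entries, returns 3, and
 * pulls search_hint back to 0 so the freed lump can be found again.
 */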
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi to search for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
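/* Notes on the recovery logic above: back-to-back timeouts within one
 * watchdog period are ignored, repeated timeouts escalate from a PF
 * reset (level 1) through a CORE reset (2) to a GLOBAL reset (3), and
 * twenty quiet seconds (HZ*20) drop the escalation back to level 1.
 */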
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: pointer to the stats struct to be filled
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}
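/* Note for both reset helpers above: clearing *stat_offsets_loaded does
 * the real "zeroing".  The hardware counters themselves are not reset;
 * the next i40e_stat_update32/48() call simply re-latches the current
 * raw values as the new offsets, so reported stats count from zero.
 */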
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
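/* Roll-over example for i40e_stat_update48(), with illustrative numbers:
 * if the latched offset is 0xFFFFFFFFFFF0 and the counter wraps at 2^48
 * so that new_data reads back as 0x10, then new_data < *offset and the
 * reported stat becomes (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32
 * events since the offset was taken.  The final mask keeps the result
 * inside the 48-bit counter domain.
 */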
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
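/* A note on the register families used by the stats updaters in this
 * file: the I40E_GLV_* counters read above are kept per VSI (indexed by
 * stat_counter_idx), the I40E_GLSW_* counters in i40e_update_veb_stats()
 * are per switch element, and the I40E_GLPRT_* counters in
 * i40e_update_pf_stats() are per physical port.
 */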
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
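/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pairs used
 * above are the standard lockless way to read 64-bit counters that the
 * Tx/Rx hot paths update: the reader snapshots packets/bytes and retries
 * if the writer's sequence count changed mid-read, so no lock is taken
 * in the datapath and torn 64-bit reads are avoided on 32-bit kernels.
 */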
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
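	/* Per-priority pause (PFC) counters follow, one set for each of
	 * the eight priority levels.
	 */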
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
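	/* fd_atr_match counts flow director filters matched via the
	 * automatic ATR (Application Targeted Routing) path, fd_sb_match
	 * those added explicitly through the sideband (ethtool)
	 * interface, and fd_atr_tunnel_match ATR hits on tunneled
	 * traffic.
	 */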
	/* FDIR stats */
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
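/* Both lookup helpers above key a filter on the (MAC address, VLAN)
 * pair; the is_vf/is_netdev arguments only narrow the match to filters
 * owned by a VF or by the netdev, and are ignored when false.
 */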
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only vlan == -1 on every filter denotes not being in VLAN
	 * mode, so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0 || vsi->info.pvid)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Removes a given MAC address from a VSI, regardless of VLAN
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
			  bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f = NULL;
	int changed = 0;

	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
	     "Missing mac_filter_list_lock\n");
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (is_vf == f->is_vf) &&
		    (is_netdev == f->is_netdev)) {
			f->counter--;
			f->changed = true;
			changed = 1;
		}
	}
	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		return 0;
	}
	return -ENOENT;
}
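/* VLAN id -1 (I40E_VLAN_ANY) marks a filter that is not tied to any
 * VLAN.  As soon as one filter carries a real VLAN id (>= 0), or the
 * VSI has a port VLAN (pvid), the VSI is in VLAN mode and new MAC
 * addresses must be replicated across every active VLAN, which is what
 * i40e_put_mac_in_vlan() above does.
 */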
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add_tail(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
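/* Typical calling pattern for i40e_add_filter() above and
 * i40e_del_filter() below, as a sketch (it mirrors what i40e_set_mac()
 * does): take the list lock, edit the list, drop the lock, then let the
 * service task push the result to the firmware:
 *
 *	spin_lock_bh(&vsi->mac_filter_list_lock);
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);
 *	spin_unlock_bh(&vsi->mac_filter_list_lock);
 *	if (f)
 *		i40e_service_event_schedule(vsi->back);
 */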
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSI-X
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	else
		qcount = vsi->alloc_queue_pairs;
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled, so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
		    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
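/* qmap encoding example for the map built above (illustrative values):
 * a TC at queue offset 0 with qcount = 8 needs pow = 3 (2^3 = 8), so
 *	qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 * and a following enabled TC would then start at offset 8.
 */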
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {

		if (!f->is_netdev)
			continue;

		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);

bottom_of_search_loop:
		continue;
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}
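/* i40e_set_rx_mode() runs with the netdev's address lists locked (in
 * atomic context), which is why i40e_add_filter() allocates with
 * GFP_ATOMIC and why nothing is sent to the firmware here: the list
 * edits only mark state, and i40e_sync_vsi_filters() pushes the changes
 * later from the service task.
 */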
/**
 * i40e_mac_filter_entry_clone - Clones a MAC filter entry
 * @src: source MAC filter entry to be cloned
 *
 * Returns the pointer to newly cloned MAC filter entry or NULL
 * in case of error
 **/
static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
				   struct i40e_mac_filter *src)
{
	struct i40e_mac_filter *f;

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return NULL;
	*f = *src;

	INIT_LIST_HEAD(&f->list);

	return f;
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from list were slated to be removed from device.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct list_head *from)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, from, list) {
		f->changed = true;
		/* Move the element back into MAC filter list */
		list_move_tail(&f->list, &vsi->mac_filter_list);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 *
 * MAC filter entries from list were slated to be added to the device.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		if (!f->changed && f->counter)
			f->changed = true;
	}
}

/**
 * i40e_cleanup_add_list - Deletes the element from add list and release
 *			   memory
 * @add_list: Pointer to list which contains MAC filter entries
 **/
static void i40e_cleanup_add_list(struct list_head *add_list)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, add_list, list) {
		list_del(&f->list);
		kfree(f);
	}
}
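/* i40e_sync_vsi_filters() below works in two phases to keep the
 * spinlock hold time short: under mac_filter_list_lock it moves
 * to-be-deleted entries onto a temporary list and clones to-be-added
 * ones, then issues the (sleeping) AdminQ add/remove calls outside the
 * lock.  The undo/cleanup helpers above exist to roll those temporary
 * lists back when an allocation fails partway through.
 */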
1841 * 1842 * Returns 0 or error value 1843 **/ 1844 int i40e_sync_vsi_filters(struct i40e_vsi *vsi) 1845 { 1846 struct list_head tmp_del_list, tmp_add_list; 1847 struct i40e_mac_filter *f, *ftmp, *fclone; 1848 bool promisc_forced_on = false; 1849 bool add_happened = false; 1850 int filter_list_len = 0; 1851 u32 changed_flags = 0; 1852 i40e_status aq_ret = 0; 1853 bool err_cond = false; 1854 int retval = 0; 1855 struct i40e_pf *pf; 1856 int num_add = 0; 1857 int num_del = 0; 1858 int aq_err = 0; 1859 u16 cmd_flags; 1860 1861 /* empty array typed pointers, kcalloc later */ 1862 struct i40e_aqc_add_macvlan_element_data *add_list; 1863 struct i40e_aqc_remove_macvlan_element_data *del_list; 1864 1865 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) 1866 usleep_range(1000, 2000); 1867 pf = vsi->back; 1868 1869 if (vsi->netdev) { 1870 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 1871 vsi->current_netdev_flags = vsi->netdev->flags; 1872 } 1873 1874 INIT_LIST_HEAD(&tmp_del_list); 1875 INIT_LIST_HEAD(&tmp_add_list); 1876 1877 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 1878 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 1879 1880 spin_lock_bh(&vsi->mac_filter_list_lock); 1881 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1882 if (!f->changed) 1883 continue; 1884 1885 if (f->counter != 0) 1886 continue; 1887 f->changed = false; 1888 1889 /* Move the element into temporary del_list */ 1890 list_move_tail(&f->list, &tmp_del_list); 1891 } 1892 1893 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 1894 if (!f->changed) 1895 continue; 1896 1897 if (f->counter == 0) 1898 continue; 1899 f->changed = false; 1900 1901 /* Clone MAC filter entry and add into temporary list */ 1902 fclone = i40e_mac_filter_entry_clone(f); 1903 if (!fclone) { 1904 err_cond = true; 1905 break; 1906 } 1907 list_add_tail(&fclone->list, &tmp_add_list); 1908 } 1909 1910 /* if failed to clone MAC filter entry - undo */ 1911 if (err_cond) { 1912 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1913 i40e_undo_add_filter_entries(vsi); 1914 } 1915 spin_unlock_bh(&vsi->mac_filter_list_lock); 1916 1917 if (err_cond) { 1918 i40e_cleanup_add_list(&tmp_add_list); 1919 retval = -ENOMEM; 1920 goto out; 1921 } 1922 } 1923 1924 /* Now process 'del_list' outside the lock */ 1925 if (!list_empty(&tmp_del_list)) { 1926 int del_list_size; 1927 1928 filter_list_len = pf->hw.aq.asq_buf_size / 1929 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1930 del_list_size = filter_list_len * 1931 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1932 del_list = kzalloc(del_list_size, GFP_ATOMIC); 1933 if (!del_list) { 1934 i40e_cleanup_add_list(&tmp_add_list); 1935 1936 /* Undo VSI's MAC filter entry element updates */ 1937 spin_lock_bh(&vsi->mac_filter_list_lock); 1938 i40e_undo_del_filter_entries(vsi, &tmp_del_list); 1939 i40e_undo_add_filter_entries(vsi); 1940 spin_unlock_bh(&vsi->mac_filter_list_lock); 1941 retval = -ENOMEM; 1942 goto out; 1943 } 1944 1945 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { 1946 cmd_flags = 0; 1947 1948 /* add to delete list */ 1949 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); 1950 del_list[num_del].vlan_tag = 1951 cpu_to_le16((u16)(f->vlan == 1952 I40E_VLAN_ANY ? 
0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
								vsi->seid,
								del_list,
								num_del,
								NULL);
				aq_err = pf->hw.aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, del_list_size);

				if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
					retval = -EIO;
					dev_err(&pf->pdev->dev,
						"ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
						i40e_stat_str(&pf->hw, aq_ret),
						i40e_aq_str(&pf->hw, aq_err));
				}
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			list_del(&f->list);
			kfree(f);
		}

		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			aq_err = pf->hw.aq.asq_last_status;
			num_del = 0;

			if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, aq_ret),
					 i40e_aq_str(&pf->hw, aq_err));
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!list_empty(&tmp_add_list)) {
		int add_list_size;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list_size = filter_list_len *
			sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(add_list_size, GFP_ATOMIC);
		if (!add_list) {
			/* Purge element from temporary lists */
			i40e_cleanup_add_list(&tmp_add_list);

			/* Undo add filter entries from VSI MAC filter list */
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_undo_add_filter_entries(vsi);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			retval = -ENOMEM;
			goto out;
		}

		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {

			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ?
0 : f->vlan)); 2033 add_list[num_add].queue_number = 0; 2034 2035 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 2036 add_list[num_add].flags = cpu_to_le16(cmd_flags); 2037 num_add++; 2038 2039 /* flush a full buffer */ 2040 if (num_add == filter_list_len) { 2041 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2042 add_list, num_add, 2043 NULL); 2044 aq_err = pf->hw.aq.asq_last_status; 2045 num_add = 0; 2046 2047 if (aq_ret) 2048 break; 2049 memset(add_list, 0, add_list_size); 2050 } 2051 /* Entries from tmp_add_list were cloned from MAC 2052 * filter list, hence clean those cloned entries 2053 */ 2054 list_del(&f->list); 2055 kfree(f); 2056 } 2057 2058 if (num_add) { 2059 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2060 add_list, num_add, NULL); 2061 aq_err = pf->hw.aq.asq_last_status; 2062 num_add = 0; 2063 } 2064 kfree(add_list); 2065 add_list = NULL; 2066 2067 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { 2068 retval = i40e_aq_rc_to_posix(aq_ret, aq_err); 2069 dev_info(&pf->pdev->dev, 2070 "add filter failed, err %s aq_err %s\n", 2071 i40e_stat_str(&pf->hw, aq_ret), 2072 i40e_aq_str(&pf->hw, aq_err)); 2073 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 2074 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2075 &vsi->state)) { 2076 promisc_forced_on = true; 2077 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2078 &vsi->state); 2079 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); 2080 } 2081 } 2082 } 2083 2084 /* if the VF is not trusted do not do promisc */ 2085 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { 2086 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); 2087 goto out; 2088 } 2089 2090 /* check for changes in promiscuous modes */ 2091 if (changed_flags & IFF_ALLMULTI) { 2092 bool cur_multipromisc; 2093 2094 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 2095 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 2096 vsi->seid, 2097 cur_multipromisc, 2098 NULL); 2099 if (aq_ret) { 2100 retval = i40e_aq_rc_to_posix(aq_ret, 2101 pf->hw.aq.asq_last_status); 2102 dev_info(&pf->pdev->dev, 2103 "set multi promisc failed, err %s aq_err %s\n", 2104 i40e_stat_str(&pf->hw, aq_ret), 2105 i40e_aq_str(&pf->hw, 2106 pf->hw.aq.asq_last_status)); 2107 } 2108 } 2109 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 2110 bool cur_promisc; 2111 2112 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 2113 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2114 &vsi->state)); 2115 if ((vsi->type == I40E_VSI_MAIN) && 2116 (pf->lan_veb != I40E_NO_VEB) && 2117 !(pf->flags & I40E_FLAG_MFP_ENABLED)) { 2118 /* set defport ON for Main VSI instead of true promisc 2119 * this way we will get all unicast/multicast and VLAN 2120 * promisc behavior but will not get VF or VMDq traffic 2121 * replicated on the Main VSI. 
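			 * (A default port soaks up any frame that no other
			 * switch filter claims, so the Main VSI still sees
			 * all traffic without it being replicated to other
			 * VSIs the way true promiscuous mode would.  Note
			 * that toggling defport means re-running the switch
			 * setup, which is why the code below requests a PF
			 * reset rather than sending a promiscuous AQ
			 * command.)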
2122 */ 2123 if (pf->cur_promisc != cur_promisc) { 2124 pf->cur_promisc = cur_promisc; 2125 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2126 } 2127 } else { 2128 aq_ret = i40e_aq_set_vsi_unicast_promiscuous( 2129 &vsi->back->hw, 2130 vsi->seid, 2131 cur_promisc, NULL, 2132 true); 2133 if (aq_ret) { 2134 retval = 2135 i40e_aq_rc_to_posix(aq_ret, 2136 pf->hw.aq.asq_last_status); 2137 dev_info(&pf->pdev->dev, 2138 "set unicast promisc failed, err %d, aq_err %d\n", 2139 aq_ret, pf->hw.aq.asq_last_status); 2140 } 2141 aq_ret = i40e_aq_set_vsi_multicast_promiscuous( 2142 &vsi->back->hw, 2143 vsi->seid, 2144 cur_promisc, NULL); 2145 if (aq_ret) { 2146 retval = 2147 i40e_aq_rc_to_posix(aq_ret, 2148 pf->hw.aq.asq_last_status); 2149 dev_info(&pf->pdev->dev, 2150 "set multicast promisc failed, err %d, aq_err %d\n", 2151 aq_ret, pf->hw.aq.asq_last_status); 2152 } 2153 } 2154 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 2155 vsi->seid, 2156 cur_promisc, NULL); 2157 if (aq_ret) { 2158 retval = i40e_aq_rc_to_posix(aq_ret, 2159 pf->hw.aq.asq_last_status); 2160 dev_info(&pf->pdev->dev, 2161 "set brdcast promisc failed, err %s, aq_err %s\n", 2162 i40e_stat_str(&pf->hw, aq_ret), 2163 i40e_aq_str(&pf->hw, 2164 pf->hw.aq.asq_last_status)); 2165 } 2166 } 2167 out: 2168 /* if something went wrong then set the changed flag so we try again */ 2169 if (retval) 2170 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 2171 2172 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 2173 return retval; 2174 } 2175 2176 /** 2177 * i40e_sync_filters_subtask - Sync the VSI filter list with HW 2178 * @pf: board private structure 2179 **/ 2180 static void i40e_sync_filters_subtask(struct i40e_pf *pf) 2181 { 2182 int v; 2183 2184 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) 2185 return; 2186 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 2187 2188 for (v = 0; v < pf->num_alloc_vsi; v++) { 2189 if (pf->vsi[v] && 2190 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { 2191 int ret = i40e_sync_vsi_filters(pf->vsi[v]); 2192 2193 if (ret) { 2194 /* come back and try again later */ 2195 pf->flags |= I40E_FLAG_FILTER_SYNC; 2196 break; 2197 } 2198 } 2199 } 2200 } 2201 2202 /** 2203 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit 2204 * @netdev: network interface device structure 2205 * @new_mtu: new value for maximum frame size 2206 * 2207 * Returns 0 on success, negative on failure 2208 **/ 2209 static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 2210 { 2211 struct i40e_netdev_priv *np = netdev_priv(netdev); 2212 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2213 struct i40e_vsi *vsi = np->vsi; 2214 2215 /* MTU < 68 is an error and causes problems on some kernels */ 2216 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) 2217 return -EINVAL; 2218 2219 netdev_info(netdev, "changing MTU from %d to %d\n", 2220 netdev->mtu, new_mtu); 2221 netdev->mtu = new_mtu; 2222 if (netif_running(netdev)) 2223 i40e_vsi_reinit_locked(vsi); 2224 i40e_notify_client_of_l2_param_changes(vsi); 2225 return 0; 2226 } 2227 2228 /** 2229 * i40e_ioctl - Access the hwtstamp interface 2230 * @netdev: network interface device structure 2231 * @ifr: interface request data 2232 * @cmd: ioctl command 2233 **/ 2234 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2235 { 2236 struct i40e_netdev_priv *np = netdev_priv(netdev); 2237 struct i40e_pf *pf = np->vsi->back; 2238 2239 switch (cmd) { 2240 case SIOCGHWTSTAMP: 2241 return i40e_ptp_get_ts_config(pf, ifr); 2242 case SIOCSHWTSTAMP: 2243 return 
i40e_ptp_set_ts_config(pf, ifr); 2244 default: 2245 return -EOPNOTSUPP; 2246 } 2247 } 2248 2249 /** 2250 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 2251 * @vsi: the vsi being adjusted 2252 **/ 2253 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 2254 { 2255 struct i40e_vsi_context ctxt; 2256 i40e_status ret; 2257 2258 if ((vsi->info.valid_sections & 2259 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2260 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 2261 return; /* already enabled */ 2262 2263 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2264 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2265 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 2266 2267 ctxt.seid = vsi->seid; 2268 ctxt.info = vsi->info; 2269 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2270 if (ret) { 2271 dev_info(&vsi->back->pdev->dev, 2272 "update vlan stripping failed, err %s aq_err %s\n", 2273 i40e_stat_str(&vsi->back->hw, ret), 2274 i40e_aq_str(&vsi->back->hw, 2275 vsi->back->hw.aq.asq_last_status)); 2276 } 2277 } 2278 2279 /** 2280 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI 2281 * @vsi: the vsi being adjusted 2282 **/ 2283 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) 2284 { 2285 struct i40e_vsi_context ctxt; 2286 i40e_status ret; 2287 2288 if ((vsi->info.valid_sections & 2289 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2290 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == 2291 I40E_AQ_VSI_PVLAN_EMOD_MASK)) 2292 return; /* already disabled */ 2293 2294 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2295 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2296 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2297 2298 ctxt.seid = vsi->seid; 2299 ctxt.info = vsi->info; 2300 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2301 if (ret) { 2302 dev_info(&vsi->back->pdev->dev, 2303 "update vlan stripping failed, err %s aq_err %s\n", 2304 i40e_stat_str(&vsi->back->hw, ret), 2305 i40e_aq_str(&vsi->back->hw, 2306 vsi->back->hw.aq.asq_last_status)); 2307 } 2308 } 2309 2310 /** 2311 * i40e_vlan_rx_register - Setup or shutdown vlan offload 2312 * @netdev: network interface to be adjusted 2313 * @features: netdev features to test if VLAN offload is enabled or not 2314 **/ 2315 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) 2316 { 2317 struct i40e_netdev_priv *np = netdev_priv(netdev); 2318 struct i40e_vsi *vsi = np->vsi; 2319 2320 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2321 i40e_vlan_stripping_enable(vsi); 2322 else 2323 i40e_vlan_stripping_disable(vsi); 2324 } 2325 2326 /** 2327 * i40e_vsi_add_vlan - Add vsi membership for given vlan 2328 * @vsi: the vsi being configured 2329 * @vid: vlan id to be added (0 = untagged only , -1 = any) 2330 **/ 2331 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) 2332 { 2333 struct i40e_mac_filter *f, *add_f; 2334 bool is_netdev, is_vf; 2335 2336 is_vf = (vsi->type == I40E_VSI_SRIOV); 2337 is_netdev = !!(vsi->netdev); 2338 2339 /* Locked once because all functions invoked below iterates list*/ 2340 spin_lock_bh(&vsi->mac_filter_list_lock); 2341 2342 if (is_netdev) { 2343 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, 2344 is_vf, is_netdev); 2345 if (!add_f) { 2346 dev_info(&vsi->back->pdev->dev, 2347 "Could not add vlan filter %d for %pM\n", 2348 vid, vsi->netdev->dev_addr); 2349 spin_unlock_bh(&vsi->mac_filter_list_lock); 2350 return -ENOMEM; 2351 } 2352 } 2353 2354 list_for_each_entry(f, 
&vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and the specified tagged traffic
	 * (and not any tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					      is_vf, is_netdev))
				continue;
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr,
						0, is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1.
This signifies that we should from now 2446 * on accept any traffic (with any tag present, or untagged) 2447 */ 2448 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2449 if (is_netdev) { 2450 if (f->vlan && 2451 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2452 filter_count++; 2453 } 2454 2455 if (f->vlan) 2456 filter_count++; 2457 } 2458 2459 if (!filter_count && is_netdev) { 2460 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 2461 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 2462 is_vf, is_netdev); 2463 if (!f) { 2464 dev_info(&vsi->back->pdev->dev, 2465 "Could not add filter %d for %pM\n", 2466 I40E_VLAN_ANY, netdev->dev_addr); 2467 spin_unlock_bh(&vsi->mac_filter_list_lock); 2468 return -ENOMEM; 2469 } 2470 } 2471 2472 if (!filter_count) { 2473 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2474 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 2475 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2476 is_vf, is_netdev); 2477 if (!add_f) { 2478 dev_info(&vsi->back->pdev->dev, 2479 "Could not add filter %d for %pM\n", 2480 I40E_VLAN_ANY, f->macaddr); 2481 spin_unlock_bh(&vsi->mac_filter_list_lock); 2482 return -ENOMEM; 2483 } 2484 } 2485 } 2486 2487 spin_unlock_bh(&vsi->mac_filter_list_lock); 2488 2489 /* schedule our worker thread which will take care of 2490 * applying the new filter changes 2491 */ 2492 i40e_service_event_schedule(vsi->back); 2493 return 0; 2494 } 2495 2496 /** 2497 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2498 * @netdev: network interface to be adjusted 2499 * @vid: vlan id to be added 2500 * 2501 * net_device_ops implementation for adding vlan ids 2502 **/ 2503 #ifdef I40E_FCOE 2504 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2505 __always_unused __be16 proto, u16 vid) 2506 #else 2507 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2508 __always_unused __be16 proto, u16 vid) 2509 #endif 2510 { 2511 struct i40e_netdev_priv *np = netdev_priv(netdev); 2512 struct i40e_vsi *vsi = np->vsi; 2513 int ret = 0; 2514 2515 if (vid > 4095) 2516 return -EINVAL; 2517 2518 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 2519 2520 /* If the network stack called us with vid = 0 then 2521 * it is asking to receive priority tagged packets with 2522 * vlan id 0. Our HW receives them by default when configured 2523 * to receive untagged packets so there is no need to add an 2524 * extra filter for vlan 0 tagged packets. 
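	 *
	 * For reference, the vlan field used by the MAC/VLAN filters in
	 * this file encodes three cases (see i40e_vsi_add_vlan() and
	 * i40e_vsi_kill_vlan() above):
	 *   -1 (I40E_VLAN_ANY): match frames with any tag, and untagged
	 *    0:                 match untagged/priority-tagged frames only
	 *    a positive vid:    match that specific VLAN id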
2525 */ 2526 if (vid) 2527 ret = i40e_vsi_add_vlan(vsi, vid); 2528 2529 if (!ret && (vid < VLAN_N_VID)) 2530 set_bit(vid, vsi->active_vlans); 2531 2532 return ret; 2533 } 2534 2535 /** 2536 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 2537 * @netdev: network interface to be adjusted 2538 * @vid: vlan id to be removed 2539 * 2540 * net_device_ops implementation for removing vlan ids 2541 **/ 2542 #ifdef I40E_FCOE 2543 int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2544 __always_unused __be16 proto, u16 vid) 2545 #else 2546 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2547 __always_unused __be16 proto, u16 vid) 2548 #endif 2549 { 2550 struct i40e_netdev_priv *np = netdev_priv(netdev); 2551 struct i40e_vsi *vsi = np->vsi; 2552 2553 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); 2554 2555 /* return code is ignored as there is nothing a user 2556 * can do about failure to remove and a log message was 2557 * already printed from the other function 2558 */ 2559 i40e_vsi_kill_vlan(vsi, vid); 2560 2561 clear_bit(vid, vsi->active_vlans); 2562 2563 return 0; 2564 } 2565 2566 /** 2567 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 2568 * @vsi: the vsi being brought back up 2569 **/ 2570 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2571 { 2572 u16 vid; 2573 2574 if (!vsi->netdev) 2575 return; 2576 2577 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2578 2579 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 2580 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2581 vid); 2582 } 2583 2584 /** 2585 * i40e_vsi_add_pvid - Add pvid for the VSI 2586 * @vsi: the vsi being adjusted 2587 * @vid: the vlan id to set as a PVID 2588 **/ 2589 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2590 { 2591 struct i40e_vsi_context ctxt; 2592 i40e_status ret; 2593 2594 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2595 vsi->info.pvid = cpu_to_le16(vid); 2596 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2597 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2598 I40E_AQ_VSI_PVLAN_EMOD_STR; 2599 2600 ctxt.seid = vsi->seid; 2601 ctxt.info = vsi->info; 2602 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2603 if (ret) { 2604 dev_info(&vsi->back->pdev->dev, 2605 "add pvid failed, err %s aq_err %s\n", 2606 i40e_stat_str(&vsi->back->hw, ret), 2607 i40e_aq_str(&vsi->back->hw, 2608 vsi->back->hw.aq.asq_last_status)); 2609 return -ENOENT; 2610 } 2611 2612 return 0; 2613 } 2614 2615 /** 2616 * i40e_vsi_remove_pvid - Remove the pvid from the VSI 2617 * @vsi: the vsi being adjusted 2618 * 2619 * Just use the vlan_rx_register() service to put it back to normal 2620 **/ 2621 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2622 { 2623 i40e_vlan_stripping_disable(vsi); 2624 2625 vsi->info.pvid = 0; 2626 } 2627 2628 /** 2629 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources 2630 * @vsi: ptr to the VSI 2631 * 2632 * If this function returns with an error, then it's possible one or 2633 * more of the rings is populated (while the rest are not). It is the 2634 * callers duty to clean those orphaned rings. 
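 *
 * A sketch of the pairing a caller is expected to honor (illustrative
 * only; see the real call sites elsewhere in this file):
 *
 *	err = i40e_vsi_setup_tx_resources(vsi);
 *	if (err)
 *		i40e_vsi_free_tx_resources(vsi);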
2635 * 2636 * Return 0 on success, negative on failure 2637 **/ 2638 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) 2639 { 2640 int i, err = 0; 2641 2642 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2643 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); 2644 2645 return err; 2646 } 2647 2648 /** 2649 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues 2650 * @vsi: ptr to the VSI 2651 * 2652 * Free VSI's transmit software resources 2653 **/ 2654 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) 2655 { 2656 int i; 2657 2658 if (!vsi->tx_rings) 2659 return; 2660 2661 for (i = 0; i < vsi->num_queue_pairs; i++) 2662 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2663 i40e_free_tx_resources(vsi->tx_rings[i]); 2664 } 2665 2666 /** 2667 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources 2668 * @vsi: ptr to the VSI 2669 * 2670 * If this function returns with an error, then it's possible one or 2671 * more of the rings is populated (while the rest are not). It is the 2672 * callers duty to clean those orphaned rings. 2673 * 2674 * Return 0 on success, negative on failure 2675 **/ 2676 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) 2677 { 2678 int i, err = 0; 2679 2680 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2681 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); 2682 #ifdef I40E_FCOE 2683 i40e_fcoe_setup_ddp_resources(vsi); 2684 #endif 2685 return err; 2686 } 2687 2688 /** 2689 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues 2690 * @vsi: ptr to the VSI 2691 * 2692 * Free all receive software resources 2693 **/ 2694 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) 2695 { 2696 int i; 2697 2698 if (!vsi->rx_rings) 2699 return; 2700 2701 for (i = 0; i < vsi->num_queue_pairs; i++) 2702 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2703 i40e_free_rx_resources(vsi->rx_rings[i]); 2704 #ifdef I40E_FCOE 2705 i40e_fcoe_free_ddp_resources(vsi); 2706 #endif 2707 } 2708 2709 /** 2710 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring 2711 * @ring: The Tx ring to configure 2712 * 2713 * This enables/disables XPS for a given Tx descriptor ring 2714 * based on the TCs enabled for the VSI that ring belongs to. 2715 **/ 2716 static void i40e_config_xps_tx_ring(struct i40e_ring *ring) 2717 { 2718 struct i40e_vsi *vsi = ring->vsi; 2719 cpumask_var_t mask; 2720 2721 if (!ring->q_vector || !ring->netdev) 2722 return; 2723 2724 /* Single TC mode enable XPS */ 2725 if (vsi->tc_config.numtc <= 1) { 2726 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) 2727 netif_set_xps_queue(ring->netdev, 2728 &ring->q_vector->affinity_mask, 2729 ring->queue_index); 2730 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { 2731 /* Disable XPS to allow selection based on TC */ 2732 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); 2733 netif_set_xps_queue(ring->netdev, mask, ring->queue_index); 2734 free_cpumask_var(mask); 2735 } 2736 2737 /* schedule our worker thread which will take care of 2738 * applying the new filter changes 2739 */ 2740 i40e_service_event_schedule(vsi->back); 2741 } 2742 2743 /** 2744 * i40e_configure_tx_ring - Configure a transmit ring context and rest 2745 * @ring: The Tx ring to configure 2746 * 2747 * Configure the Tx descriptor ring in the HMC context. 
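 *
 * One point worth calling out: the head-writeback area is addressed
 * immediately past the descriptors,
 *
 *	head_wb_addr = ring->dma + ring->count * sizeof(struct i40e_tx_desc)
 *
 * which assumes the ring allocation reserved room for the extra word
 * (as i40e_setup_tx_descriptors() is expected to have done when sizing
 * the ring).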
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache the tail offset for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
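 *
 * Buffer sizes are programmed in units of BIT(I40E_RXQ_CTX_DBUFF_SHIFT)
 * bytes; assuming the usual shift of 7 (128-byte granularity), the
 * default 2048-byte buffer programs dbuff = 2048 >> 7 = 16.  rxmax is
 * capped at min(vsi->max_frame, chain_len * rx_buf_len) so a chained
 * receive never exceeds what the buffer chain can hold.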
2843 **/ 2844 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2845 { 2846 struct i40e_vsi *vsi = ring->vsi; 2847 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2848 u16 pf_q = vsi->base_queue + ring->queue_index; 2849 struct i40e_hw *hw = &vsi->back->hw; 2850 struct i40e_hmc_obj_rxq rx_ctx; 2851 i40e_status err = 0; 2852 2853 ring->state = 0; 2854 2855 /* clear the context structure first */ 2856 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2857 2858 ring->rx_buf_len = vsi->rx_buf_len; 2859 2860 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2861 2862 rx_ctx.base = (ring->dma / 128); 2863 rx_ctx.qlen = ring->count; 2864 2865 /* use 32 byte descriptors */ 2866 rx_ctx.dsize = 1; 2867 2868 /* descriptor type is always zero 2869 * rx_ctx.dtype = 0; 2870 */ 2871 rx_ctx.hsplit_0 = 0; 2872 2873 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); 2874 if (hw->revision_id == 0) 2875 rx_ctx.lrxqthresh = 0; 2876 else 2877 rx_ctx.lrxqthresh = 2; 2878 rx_ctx.crcstrip = 1; 2879 rx_ctx.l2tsel = 1; 2880 /* this controls whether VLAN is stripped from inner headers */ 2881 rx_ctx.showiv = 0; 2882 #ifdef I40E_FCOE 2883 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2884 #endif 2885 /* set the prefena field to 1 because the manual says to */ 2886 rx_ctx.prefena = 1; 2887 2888 /* clear the context in the HMC */ 2889 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2890 if (err) { 2891 dev_info(&vsi->back->pdev->dev, 2892 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2893 ring->queue_index, pf_q, err); 2894 return -ENOMEM; 2895 } 2896 2897 /* set the context in the HMC */ 2898 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2899 if (err) { 2900 dev_info(&vsi->back->pdev->dev, 2901 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2902 ring->queue_index, pf_q, err); 2903 return -ENOMEM; 2904 } 2905 2906 /* cache tail for quicker writes, and clear the reg before use */ 2907 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2908 writel(0, ring->tail); 2909 2910 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 2911 2912 return 0; 2913 } 2914 2915 /** 2916 * i40e_vsi_configure_tx - Configure the VSI for Tx 2917 * @vsi: VSI structure describing this set of rings and resources 2918 * 2919 * Configure the Tx VSI for operation. 2920 **/ 2921 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2922 { 2923 int err = 0; 2924 u16 i; 2925 2926 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2927 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2928 2929 return err; 2930 } 2931 2932 /** 2933 * i40e_vsi_configure_rx - Configure the VSI for Rx 2934 * @vsi: the VSI being configured 2935 * 2936 * Configure the Rx VSI for operation. 
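 *
 * For example, a 9000-byte jumbo MTU gives
 * max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 9022,
 * while any MTU at or below ETH_DATA_LEN falls back to I40E_RXBUFFER_2048.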
2937 **/ 2938 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2939 { 2940 int err = 0; 2941 u16 i; 2942 2943 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2944 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2945 + ETH_FCS_LEN + VLAN_HLEN; 2946 else 2947 vsi->max_frame = I40E_RXBUFFER_2048; 2948 2949 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2950 2951 #ifdef I40E_FCOE 2952 /* setup rx buffer for FCoE */ 2953 if ((vsi->type == I40E_VSI_FCOE) && 2954 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 2955 vsi->rx_buf_len = I40E_RXBUFFER_3072; 2956 vsi->max_frame = I40E_RXBUFFER_3072; 2957 } 2958 2959 #endif /* I40E_FCOE */ 2960 /* round up for the chip's needs */ 2961 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2962 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); 2963 2964 /* set up individual rings */ 2965 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2966 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2967 2968 return err; 2969 } 2970 2971 /** 2972 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2973 * @vsi: ptr to the VSI 2974 **/ 2975 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2976 { 2977 struct i40e_ring *tx_ring, *rx_ring; 2978 u16 qoffset, qcount; 2979 int i, n; 2980 2981 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 2982 /* Reset the TC information */ 2983 for (i = 0; i < vsi->num_queue_pairs; i++) { 2984 rx_ring = vsi->rx_rings[i]; 2985 tx_ring = vsi->tx_rings[i]; 2986 rx_ring->dcb_tc = 0; 2987 tx_ring->dcb_tc = 0; 2988 } 2989 } 2990 2991 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2992 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) 2993 continue; 2994 2995 qoffset = vsi->tc_config.tc_info[n].qoffset; 2996 qcount = vsi->tc_config.tc_info[n].qcount; 2997 for (i = qoffset; i < (qoffset + qcount); i++) { 2998 rx_ring = vsi->rx_rings[i]; 2999 tx_ring = vsi->tx_rings[i]; 3000 rx_ring->dcb_tc = n; 3001 tx_ring->dcb_tc = n; 3002 } 3003 } 3004 } 3005 3006 /** 3007 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 3008 * @vsi: ptr to the VSI 3009 **/ 3010 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 3011 { 3012 if (vsi->netdev) 3013 i40e_set_rx_mode(vsi->netdev); 3014 } 3015 3016 /** 3017 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 3018 * @vsi: Pointer to the targeted VSI 3019 * 3020 * This function replays the hlist on the hw where all the SB Flow Director 3021 * filters were saved. 
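 *
 * Each saved entry is simply replayed through i40e_add_del_fdir() with
 * add == true; this typically runs after a reset, once the queues and
 * the filter control options have been reprogrammed.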
3022 **/ 3023 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) 3024 { 3025 struct i40e_fdir_filter *filter; 3026 struct i40e_pf *pf = vsi->back; 3027 struct hlist_node *node; 3028 3029 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 3030 return; 3031 3032 hlist_for_each_entry_safe(filter, node, 3033 &pf->fdir_filter_list, fdir_node) { 3034 i40e_add_del_fdir(vsi, filter, true); 3035 } 3036 } 3037 3038 /** 3039 * i40e_vsi_configure - Set up the VSI for action 3040 * @vsi: the VSI being configured 3041 **/ 3042 static int i40e_vsi_configure(struct i40e_vsi *vsi) 3043 { 3044 int err; 3045 3046 i40e_set_vsi_rx_mode(vsi); 3047 i40e_restore_vlan(vsi); 3048 i40e_vsi_config_dcb_rings(vsi); 3049 err = i40e_vsi_configure_tx(vsi); 3050 if (!err) 3051 err = i40e_vsi_configure_rx(vsi); 3052 3053 return err; 3054 } 3055 3056 /** 3057 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW 3058 * @vsi: the VSI being configured 3059 **/ 3060 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) 3061 { 3062 struct i40e_pf *pf = vsi->back; 3063 struct i40e_hw *hw = &pf->hw; 3064 u16 vector; 3065 int i, q; 3066 u32 qp; 3067 3068 /* The interrupt indexing is offset by 1 in the PFINT_ITRn 3069 * and PFINT_LNKLSTn registers, e.g.: 3070 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) 3071 */ 3072 qp = vsi->base_queue; 3073 vector = vsi->base_vector; 3074 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 3075 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; 3076 3077 q_vector->itr_countdown = ITR_COUNTDOWN_START; 3078 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting); 3079 q_vector->rx.latency_range = I40E_LOW_LATENCY; 3080 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 3081 q_vector->rx.itr); 3082 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting); 3083 q_vector->tx.latency_range = I40E_LOW_LATENCY; 3084 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), 3085 q_vector->tx.itr); 3086 wr32(hw, I40E_PFINT_RATEN(vector - 1), 3087 INTRL_USEC_TO_REG(vsi->int_rate_limit)); 3088 3089 /* Linked list for the queuepairs assigned to this vector */ 3090 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); 3091 for (q = 0; q < q_vector->num_ringpairs; q++) { 3092 u32 val; 3093 3094 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3095 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3096 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 3097 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| 3098 (I40E_QUEUE_TYPE_TX 3099 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); 3100 3101 wr32(hw, I40E_QINT_RQCTL(qp), val); 3102 3103 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3104 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3105 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 3106 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| 3107 (I40E_QUEUE_TYPE_RX 3108 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3109 3110 /* Terminate the linked list */ 3111 if (q == (q_vector->num_ringpairs - 1)) 3112 val |= (I40E_QUEUE_END_OF_LIST 3113 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3114 3115 wr32(hw, I40E_QINT_TQCTL(qp), val); 3116 qp++; 3117 } 3118 } 3119 3120 i40e_flush(hw); 3121 } 3122 3123 /** 3124 * i40e_enable_misc_int_causes - enable the non-queue interrupts 3125 * @hw: ptr to the hardware info 3126 **/ 3127 static void i40e_enable_misc_int_causes(struct i40e_pf *pf) 3128 { 3129 struct i40e_hw *hw = &pf->hw; 3130 u32 val; 3131 3132 /* clear things first */ 3133 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 3134 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 3135 3136 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 3137 
I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 3138 I40E_PFINT_ICR0_ENA_GRST_MASK | 3139 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 3140 I40E_PFINT_ICR0_ENA_GPIO_MASK | 3141 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 3142 I40E_PFINT_ICR0_ENA_VFLR_MASK | 3143 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3144 3145 if (pf->flags & I40E_FLAG_IWARP_ENABLED) 3146 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3147 3148 if (pf->flags & I40E_FLAG_PTP) 3149 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3150 3151 wr32(hw, I40E_PFINT_ICR0_ENA, val); 3152 3153 /* SW_ITR_IDX = 0, but don't change INTENA */ 3154 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 3155 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 3156 3157 /* OTHER_ITR_IDX = 0 */ 3158 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 3159 } 3160 3161 /** 3162 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 3163 * @vsi: the VSI being configured 3164 **/ 3165 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 3166 { 3167 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3168 struct i40e_pf *pf = vsi->back; 3169 struct i40e_hw *hw = &pf->hw; 3170 u32 val; 3171 3172 /* set the ITR configuration */ 3173 q_vector->itr_countdown = ITR_COUNTDOWN_START; 3174 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting); 3175 q_vector->rx.latency_range = I40E_LOW_LATENCY; 3176 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 3177 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting); 3178 q_vector->tx.latency_range = I40E_LOW_LATENCY; 3179 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 3180 3181 i40e_enable_misc_int_causes(pf); 3182 3183 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 3184 wr32(hw, I40E_PFINT_LNKLST0, 0); 3185 3186 /* Associate the queue pair to the vector and enable the queue int */ 3187 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3188 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3189 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3190 3191 wr32(hw, I40E_QINT_RQCTL(0), val); 3192 3193 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3194 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3195 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3196 3197 wr32(hw, I40E_QINT_TQCTL(0), val); 3198 i40e_flush(hw); 3199 } 3200 3201 /** 3202 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 3203 * @pf: board private structure 3204 **/ 3205 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 3206 { 3207 struct i40e_hw *hw = &pf->hw; 3208 3209 wr32(hw, I40E_PFINT_DYN_CTL0, 3210 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3211 i40e_flush(hw); 3212 } 3213 3214 /** 3215 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 3216 * @pf: board private structure 3217 * @clearpba: true when all pending interrupt events should be cleared 3218 **/ 3219 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba) 3220 { 3221 struct i40e_hw *hw = &pf->hw; 3222 u32 val; 3223 3224 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 3225 (clearpba ? 
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) | 3226 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 3227 3228 wr32(hw, I40E_PFINT_DYN_CTL0, val); 3229 i40e_flush(hw); 3230 } 3231 3232 /** 3233 * i40e_msix_clean_rings - MSIX mode Interrupt Handler 3234 * @irq: interrupt number 3235 * @data: pointer to a q_vector 3236 **/ 3237 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 3238 { 3239 struct i40e_q_vector *q_vector = data; 3240 3241 if (!q_vector->tx.ring && !q_vector->rx.ring) 3242 return IRQ_HANDLED; 3243 3244 napi_schedule_irqoff(&q_vector->napi); 3245 3246 return IRQ_HANDLED; 3247 } 3248 3249 /** 3250 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts 3251 * @vsi: the VSI being configured 3252 * @basename: name for the vector 3253 * 3254 * Allocates MSI-X vectors and requests interrupts from the kernel. 3255 **/ 3256 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) 3257 { 3258 int q_vectors = vsi->num_q_vectors; 3259 struct i40e_pf *pf = vsi->back; 3260 int base = vsi->base_vector; 3261 int rx_int_idx = 0; 3262 int tx_int_idx = 0; 3263 int vector, err; 3264 3265 for (vector = 0; vector < q_vectors; vector++) { 3266 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; 3267 3268 if (q_vector->tx.ring && q_vector->rx.ring) { 3269 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3270 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 3271 tx_int_idx++; 3272 } else if (q_vector->rx.ring) { 3273 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3274 "%s-%s-%d", basename, "rx", rx_int_idx++); 3275 } else if (q_vector->tx.ring) { 3276 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3277 "%s-%s-%d", basename, "tx", tx_int_idx++); 3278 } else { 3279 /* skip this unused q_vector */ 3280 continue; 3281 } 3282 err = request_irq(pf->msix_entries[base + vector].vector, 3283 vsi->irq_handler, 3284 0, 3285 q_vector->name, 3286 q_vector); 3287 if (err) { 3288 dev_info(&pf->pdev->dev, 3289 "MSIX request_irq failed, error: %d\n", err); 3290 goto free_queue_irqs; 3291 } 3292 /* assign the mask for this irq */ 3293 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 3294 &q_vector->affinity_mask); 3295 } 3296 3297 vsi->irqs_ready = true; 3298 return 0; 3299 3300 free_queue_irqs: 3301 while (vector) { 3302 vector--; 3303 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 3304 NULL); 3305 free_irq(pf->msix_entries[base + vector].vector, 3306 &(vsi->q_vectors[vector])); 3307 } 3308 return err; 3309 } 3310 3311 /** 3312 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI 3313 * @vsi: the VSI being un-configured 3314 **/ 3315 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) 3316 { 3317 struct i40e_pf *pf = vsi->back; 3318 struct i40e_hw *hw = &pf->hw; 3319 int base = vsi->base_vector; 3320 int i; 3321 3322 for (i = 0; i < vsi->num_queue_pairs; i++) { 3323 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); 3324 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); 3325 } 3326 3327 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3328 for (i = vsi->base_vector; 3329 i < (vsi->num_q_vectors + vsi->base_vector); i++) 3330 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); 3331 3332 i40e_flush(hw); 3333 for (i = 0; i < vsi->num_q_vectors; i++) 3334 synchronize_irq(pf->msix_entries[i + base].vector); 3335 } else { 3336 /* Legacy and MSI mode - this stops all interrupt handling */ 3337 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 3338 wr32(hw, I40E_PFINT_DYN_CTL0, 0); 3339 i40e_flush(hw); 3340 synchronize_irq(pf->pdev->irq); 3341 } 
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf, true);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}

/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm the queue causes while
		 * leaving the interrupt enabled for all other causes;
		 * ideally the interrupt would be disabled while we are in
		 * NAPI, but this is not a performance path and
		 * napi_schedule() can deal with rescheduling.
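		 *
		 * (Only in MSI/legacy mode does this vector service queue
		 * pair 0 directly; in MSI-X mode the queue causes are routed
		 * to their own vectors and this handler sees just the
		 * "other" causes handled below.)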
3421 */ 3422 if (!test_bit(__I40E_DOWN, &pf->state)) 3423 napi_schedule_irqoff(&q_vector->napi); 3424 } 3425 3426 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 3427 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3428 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 3429 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); 3430 } 3431 3432 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 3433 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 3434 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 3435 } 3436 3437 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 3438 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 3439 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 3440 } 3441 3442 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 3443 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 3444 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 3445 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 3446 val = rd32(hw, I40E_GLGEN_RSTAT); 3447 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3448 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3449 if (val == I40E_RESET_CORER) { 3450 pf->corer_count++; 3451 } else if (val == I40E_RESET_GLOBR) { 3452 pf->globr_count++; 3453 } else if (val == I40E_RESET_EMPR) { 3454 pf->empr_count++; 3455 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); 3456 } 3457 } 3458 3459 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3460 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3461 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3462 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", 3463 rd32(hw, I40E_PFHMC_ERRORINFO), 3464 rd32(hw, I40E_PFHMC_ERRORDATA)); 3465 } 3466 3467 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3468 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 3469 3470 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 3471 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3472 i40e_ptp_tx_hwtstamp(pf); 3473 } 3474 } 3475 3476 /* If a critical error is pending we have no choice but to reset the 3477 * device. 3478 * Report and mask out any remaining unexpected interrupts. 3479 */ 3480 icr0_remaining = icr0 & ena_mask; 3481 if (icr0_remaining) { 3482 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 3483 icr0_remaining); 3484 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 3485 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 3486 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 3487 dev_info(&pf->pdev->dev, "device will be reset\n"); 3488 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 3489 i40e_service_event_schedule(pf); 3490 } 3491 ena_mask &= ~icr0_remaining; 3492 } 3493 ret = IRQ_HANDLED; 3494 3495 enable_intr: 3496 /* re-enable interrupt causes */ 3497 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3498 if (!test_bit(__I40E_DOWN, &pf->state)) { 3499 i40e_service_event_schedule(pf); 3500 i40e_irq_dynamic_enable_icr0(pf, false); 3501 } 3502 3503 return ret; 3504 } 3505 3506 /** 3507 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 3508 * @tx_ring: tx ring to clean 3509 * @budget: how many cleans we're allowed 3510 * 3511 * Returns true if there's any budget left (e.g. 
the clean is finished) 3512 **/ 3513 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3514 { 3515 struct i40e_vsi *vsi = tx_ring->vsi; 3516 u16 i = tx_ring->next_to_clean; 3517 struct i40e_tx_buffer *tx_buf; 3518 struct i40e_tx_desc *tx_desc; 3519 3520 tx_buf = &tx_ring->tx_bi[i]; 3521 tx_desc = I40E_TX_DESC(tx_ring, i); 3522 i -= tx_ring->count; 3523 3524 do { 3525 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3526 3527 /* if next_to_watch is not set then there is no work pending */ 3528 if (!eop_desc) 3529 break; 3530 3531 /* prevent any other reads prior to eop_desc */ 3532 read_barrier_depends(); 3533 3534 /* if the descriptor isn't done, no work yet to do */ 3535 if (!(eop_desc->cmd_type_offset_bsz & 3536 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3537 break; 3538 3539 /* clear next_to_watch to prevent false hangs */ 3540 tx_buf->next_to_watch = NULL; 3541 3542 tx_desc->buffer_addr = 0; 3543 tx_desc->cmd_type_offset_bsz = 0; 3544 /* move past filter desc */ 3545 tx_buf++; 3546 tx_desc++; 3547 i++; 3548 if (unlikely(!i)) { 3549 i -= tx_ring->count; 3550 tx_buf = tx_ring->tx_bi; 3551 tx_desc = I40E_TX_DESC(tx_ring, 0); 3552 } 3553 /* unmap skb header data */ 3554 dma_unmap_single(tx_ring->dev, 3555 dma_unmap_addr(tx_buf, dma), 3556 dma_unmap_len(tx_buf, len), 3557 DMA_TO_DEVICE); 3558 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3559 kfree(tx_buf->raw_buf); 3560 3561 tx_buf->raw_buf = NULL; 3562 tx_buf->tx_flags = 0; 3563 tx_buf->next_to_watch = NULL; 3564 dma_unmap_len_set(tx_buf, len, 0); 3565 tx_desc->buffer_addr = 0; 3566 tx_desc->cmd_type_offset_bsz = 0; 3567 3568 /* move us past the eop_desc for start of next FD desc */ 3569 tx_buf++; 3570 tx_desc++; 3571 i++; 3572 if (unlikely(!i)) { 3573 i -= tx_ring->count; 3574 tx_buf = tx_ring->tx_bi; 3575 tx_desc = I40E_TX_DESC(tx_ring, 0); 3576 } 3577 3578 /* update budget accounting */ 3579 budget--; 3580 } while (likely(budget)); 3581 3582 i += tx_ring->count; 3583 tx_ring->next_to_clean = i; 3584 3585 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) 3586 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); 3587 3588 return budget > 0; 3589 } 3590 3591 /** 3592 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3593 * @irq: interrupt number 3594 * @data: pointer to a q_vector 3595 **/ 3596 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3597 { 3598 struct i40e_q_vector *q_vector = data; 3599 struct i40e_vsi *vsi; 3600 3601 if (!q_vector->tx.ring) 3602 return IRQ_HANDLED; 3603 3604 vsi = q_vector->tx.ring->vsi; 3605 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3606 3607 return IRQ_HANDLED; 3608 } 3609 3610 /** 3611 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3612 * @vsi: the VSI being configured 3613 * @v_idx: vector index 3614 * @qp_idx: queue pair index 3615 **/ 3616 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3617 { 3618 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3619 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3620 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3621 3622 tx_ring->q_vector = q_vector; 3623 tx_ring->next = q_vector->tx.ring; 3624 q_vector->tx.ring = tx_ring; 3625 q_vector->tx.count++; 3626 3627 rx_ring->q_vector = q_vector; 3628 rx_ring->next = q_vector->rx.ring; 3629 q_vector->rx.ring = rx_ring; 3630 q_vector->rx.count++; 3631 } 3632 3633 /** 3634 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3635 * @vsi: the VSI being configured 3636 * 3637 * This 
function maps descriptor rings to the queue-specific vectors 3638 * we were allotted through the MSI-X enabling code. Ideally, we'd have 3639 * one vector per queue pair, but on a constrained vector budget, we 3640 * group the queue pairs as "efficiently" as possible. 3641 **/ 3642 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) 3643 { 3644 int qp_remaining = vsi->num_queue_pairs; 3645 int q_vectors = vsi->num_q_vectors; 3646 int num_ringpairs; 3647 int v_start = 0; 3648 int qp_idx = 0; 3649 3650 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to 3651 * group them so there are multiple queues per vector. 3652 * It is also important to go through all the vectors available to be 3653 * sure that if we don't use all the vectors, that the remaining vectors 3654 * are cleared. This is especially important when decreasing the 3655 * number of queues in use. 3656 */ 3657 for (; v_start < q_vectors; v_start++) { 3658 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; 3659 3660 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 3661 3662 q_vector->num_ringpairs = num_ringpairs; 3663 3664 q_vector->rx.count = 0; 3665 q_vector->tx.count = 0; 3666 q_vector->rx.ring = NULL; 3667 q_vector->tx.ring = NULL; 3668 3669 while (num_ringpairs--) { 3670 i40e_map_vector_to_qp(vsi, v_start, qp_idx); 3671 qp_idx++; 3672 qp_remaining--; 3673 } 3674 } 3675 } 3676 3677 /** 3678 * i40e_vsi_request_irq - Request IRQ from the OS 3679 * @vsi: the VSI being configured 3680 * @basename: name for the vector 3681 **/ 3682 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) 3683 { 3684 struct i40e_pf *pf = vsi->back; 3685 int err; 3686 3687 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 3688 err = i40e_vsi_request_irq_msix(vsi, basename); 3689 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 3690 err = request_irq(pf->pdev->irq, i40e_intr, 0, 3691 pf->int_name, pf); 3692 else 3693 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 3694 pf->int_name, pf); 3695 3696 if (err) 3697 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 3698 3699 return err; 3700 } 3701 3702 #ifdef CONFIG_NET_POLL_CONTROLLER 3703 /** 3704 * i40e_netpoll - A Polling 'interrupt' handler 3705 * @netdev: network interface device structure 3706 * 3707 * This is used by netconsole to send skbs without having to re-enable 3708 * interrupts. It's not called while the normal interrupt routine is executing. 3709 **/ 3710 #ifdef I40E_FCOE 3711 void i40e_netpoll(struct net_device *netdev) 3712 #else 3713 static void i40e_netpoll(struct net_device *netdev) 3714 #endif 3715 { 3716 struct i40e_netdev_priv *np = netdev_priv(netdev); 3717 struct i40e_vsi *vsi = np->vsi; 3718 struct i40e_pf *pf = vsi->back; 3719 int i; 3720 3721 /* if interface is down do nothing */ 3722 if (test_bit(__I40E_DOWN, &vsi->state)) 3723 return; 3724 3725 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3726 for (i = 0; i < vsi->num_q_vectors; i++) 3727 i40e_msix_clean_rings(0, vsi->q_vectors[i]); 3728 } else { 3729 i40e_intr(pf->pdev->irq, netdev); 3730 } 3731 } 3732 #endif 3733 3734 /** 3735 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled 3736 * @pf: the PF being configured 3737 * @pf_q: the PF queue 3738 * @enable: enable or disable state of the queue 3739 * 3740 * This routine will wait for the given Tx queue of the PF to reach the 3741 * enabled or disabled state. 
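 * It polls QTX_ENA and sleeps 10-20 us between reads, so the worst-case
 * wait is roughly I40E_QUEUE_WAIT_RETRY_LIMIT * 20 us before giving up.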

/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue fails to reach the requested state
 * after multiple retries; otherwise returns 0.
 **/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
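
/* i40e_pf_txq_wait() (and its Rx twin below) is an instance of a common
 * bounded-poll pattern: read the register, test the bit against the state
 * we want, back off with usleep_range(), and give up after a fixed retry
 * count. The generic shape, as a sketch with a hypothetical helper name:
 */
#if 0
static int poll_reg_bit_sketch(struct i40e_hw *hw, u32 reg, u32 mask,
			       bool set, int retries)
{
	int i;

	for (i = 0; i < retries; i++) {
		if (set == !!(rd32(hw, reg) & mask))
			return 0;	/* reached the requested state */
		usleep_range(10, 20);	/* brief, schedulable back-off */
	}
	return -ETIMEDOUT;
}
#endif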

/**
 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			usleep_range(10, 20);

		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
		/* No waiting for the Tx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	if (hw->revision_id == 0)
		mdelay(50);
	return ret;
}

/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue fails to reach the requested state
 * after multiple retries; otherwise returns 0.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
		/* No waiting for the Rx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}

/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: start (true) or stop (false) the rings
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	if (request) {
		ret = i40e_vsi_control_rx(vsi, request);
		if (ret)
			return ret;
		ret = i40e_vsi_control_tx(vsi, request);
	} else {
		/* Ignore return value, we need to shutdown whatever we can */
		i40e_vsi_control_tx(vsi, request);
		i40e_vsi_control_rx(vsi, request);
	}

	return ret;
}

/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx. To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
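
/* The teardown above is repeated register read-modify-write: read the
 * queue's interrupt-cause control register, clear the routing fields,
 * force the ITR and next-queue indexes to their "unused" all-ones values,
 * and write back. The generic RMW shape, as a sketch (hypothetical helper
 * and register arguments):
 */
#if 0
static void rmw_sketch(struct i40e_hw *hw, u32 reg, u32 clear, u32 set)
{
	u32 val = rd32(hw, reg);

	val &= ~clear;		/* drop the fields being reprogrammed */
	val |= set;		/* install the new field values */
	wr32(hw, reg, val);
}
#endif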

/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	kfree_rcu(q_vector, rcu);
}

/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}

/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}

/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}

/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
		napi_enable(&vsi->q_vectors[q_idx]->napi);
}

/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
		napi_disable(&vsi->q_vectors[q_idx]->napi);
}

/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	bool reset = false;

	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		reset = true;
	i40e_notify_client_of_netdev_close(vsi, reset);
}

/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	/* No need to disable FCoE VSI when Tx suspended */
	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
	    vsi->type == I40E_VSI_FCOE) {
		dev_dbg(&vsi->back->pdev->dev,
			"VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
		return;
	}

	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}

/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
		return;

	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}
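
/* Quiesce/unquiesce are meant to bracket reconfiguration: pause the VSI,
 * change queue or TC state, then resume only what was actually paused (the
 * __I40E_NEEDS_RESTART bit remembers which VSIs that was). A usage sketch
 * with a hypothetical reconfiguration step in the middle:
 */
#if 0
static void reconfigure_vsi_sketch(struct i40e_vsi *vsi)
{
	i40e_quiesce_vsi(vsi);		/* no-op if the VSI is already down */
	/* ... modify queue or TC configuration here ... */
	i40e_unquiesce_vsi(vsi);	/* no-op unless quiesce paused it */
}
#endif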

/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * This function waits for the given VSI's queues to be disabled.
 **/
static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the disable status of the queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the disable status of the queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}

/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		/* No need to wait for FCoE VSI queues */
		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif

/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks the specified queue of the given VSI for a hung
 * condition. Detection is a two-step process: on the first pass this
 * function sets the 'hung' bit. If napi_poll runs before the next run of
 * the service task, it clears that bit for the respective q_vector; if
 * not, the hung condition is confirmed and on the subsequent run this
 * function issues a SW interrupt to recover.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf *pf;
	u32 head, val, tx_pending_hw;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	head = i40e_get_head(tx_ring);

	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);

	/* HW is done executing descriptors and has updated the HEAD
	 * write-back, but SW hasn't processed those descriptors yet. If
	 * no interrupt is generated from this point on, dev_watchdog may
	 * detect a timeout on those netdev_queues, so proactively trigger
	 * a SW interrupt.
	 */
	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		/* NAPI Poll didn't run and clear since it was set */
		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
				       &tx_ring->q_vector->hung_detected)) {
			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
				    vsi->seid, q_idx, tx_pending_hw,
				    tx_ring->next_to_clean, head,
				    tx_ring->next_to_use,
				    readl(tx_ring->tail));
			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
				    vsi->seid, q_idx, val);
			i40e_force_wb(vsi, tx_ring->q_vector);
		} else {
			/* First Chance - detected possible hung */
			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
				&tx_ring->q_vector->hung_detected);
		}
	}

	/* This is the case where we have interrupts missing,
	 * so the tx_pending in HW will most likely be 0, but we
	 * will have tx_pending in SW since the WB happened but the
	 * interrupt got lost.
	 */
	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		if (napi_reschedule(&tx_ring->q_vector->napi))
			tx_ring->tx_stats.tx_lost_interrupt++;
	}
}
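
/* The detection above is a two-pass state machine per q_vector: the first
 * pass only marks a suspect, NAPI clears the mark if it runs, and the
 * second pass forces recovery only if the mark survived. The skeleton of
 * that pattern, reduced to a sketch with a hypothetical flag word:
 */
#if 0
static void two_pass_detect_sketch(unsigned long *flags, bool stalled)
{
	if (!stalled) {
		clear_bit(0, flags);		/* healthy again */
		return;
	}
	if (test_and_clear_bit(0, flags))
		pr_warn("stall confirmed, forcing recovery\n");
	else
		set_bit(0, flags);		/* first sighting: just mark */
}
#endif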

/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @pf: pointer to PF struct
 *
 * The LAN VSI has a netdev, and the netdev has TX queues. This function
 * checks each of those TX queues for a hung condition and triggers
 * recovery by issuing a SW interrupt.
 **/
static void i40e_detect_recover_hung(struct i40e_pf *pf)
{
	struct net_device *netdev;
	struct i40e_vsi *vsi;
	int i;

	/* Only for LAN VSI */
	vsi = pf->vsi[pf->lan_vsi];

	if (!vsi)
		return;

	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return;

	/* Make sure type is MAIN VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	/* Bail out if netif_carrier is not OK */
	if (!netif_carrier_ok(netdev))
		return;

	/* Go through the TX queues for netdev */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		if (q)
			i40e_detect_recover_hung_queue(i, vsi);
	}
}

/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for iSCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}

/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = 0;
	int i;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and use the traffic class index to get the
	 * number of traffic classes enabled
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
			num_tc = dcbcfg->etscfg.prioritytable[i];
	}

	/* Traffic class index starts from zero so
	 * increment to return the actual count
	 */
	return num_tc + 1;
}

/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}
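
/* enabled_tc here is always a contiguous bitmap: num_tc traffic classes
 * occupy the low num_tc bits. The loop above is therefore equivalent to
 * the closed form BIT(num_tc) - 1, and a population count inverts the
 * relation. A self-checking sketch (hypothetical helper):
 */
#if 0
static void tc_bitmap_sketch(void)
{
	u8 num_tc = 4;
	u8 enabled_tc = BIT(num_tc) - 1;	/* 0x0F: TC0..TC3 enabled */

	WARN_ON(hweight8(enabled_tc) != num_tc);
}
#endif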

/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always in single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	/* At least have TC0 */
	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}

/**
 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
 * @pf: PF being queried
 *
 * Return a bitmap for first enabled traffic class for this PF.
 **/
static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
{
	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
	u8 i = 0;

	if (!enabled_tc)
		return 0x1; /* TC0 */

	/* Find the first enabled TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			break;
	}

	return BIT(i);
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return i40e_pf_get_default_tc(pf);

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return i40e_pf_get_default_tc(pf);
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}
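
/* tc_bw_max above packs one 4-bit field per TC across two little-endian
 * u16 words, and only the low 3 bits of each field are meaningful ("3 bits
 * out of 4"). The unpacking for a single TC index, isolated as a sketch
 * (hypothetical helper name):
 */
#if 0
static u8 tc_max_quanta_sketch(u32 tc_bw_max, int tc)
{
	return (u8)((tc_bw_max >> (tc * 4)) & 0x7);
}
#endif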

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_config.tc_info[i].netdev_tc,
					    vsi->tc_config.tc_info[i].qcount,
					    vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
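
/* The UP2TC pass above chains two lookups: user priority -> hardware TC
 * via the DCB ETS priority table, then hardware TC -> netdev TC via the
 * VSI's tc_info. The chain for one priority, pulled out as a sketch
 * (hypothetical helper and arguments):
 */
#if 0
static void up2tc_sketch(struct net_device *dev, u8 *prio_table,
			 struct i40e_vsi *vsi, u8 prio)
{
	u8 hw_tc = prio_table[prio];	/* ETS table: priority -> HW TC */
	u8 ndev_tc = vsi->tc_config.tc_info[hw_tc].netdev_tc;

	netdev_set_prio_tc_map(dev, prio, ndev_tc);
}
#endif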

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
			cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components as possible */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components as possible */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
		i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
	}
}

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8

/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
 **/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	char *speed = "Unknown";
	char *fc = "Unknown";

	if (vsi->current_isup == isup)
		return;
	vsi->current_isup = isup;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (vsi->back->hw.func_caps.npar_enable &&
	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (vsi->back->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
		    speed, fc);
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here */
		if ((pf->hw.phy.link_info.link_info &
		     I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
		       I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.\n");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		if (pf->fd_tcp_rule > 0) {
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset. The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}

#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		    struct tc_to_netdev *tc)
#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			   struct tc_to_netdev *tc)
#endif
{
	if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;
	return i40e_setup_tc(netdev, tc->tc);
}
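
/* __i40e_setup_tc() is the driver's ndo_setup_tc entry point: the stack
 * calls it for tc offload requests, and the driver accepts only root-qdisc
 * mqprio. How it would be wired into net_device_ops, as a sketch (the
 * driver's real ops table lives elsewhere in this file):
 */
#if 0
static const struct net_device_ops i40e_netdev_ops_sketch = {
	.ndo_setup_tc = __i40e_setup_tc,
};
#endif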

/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
#endif
#ifdef CONFIG_I40E_GENEVE
	if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
		geneve_get_rx_port(netdev);
#endif

	i40e_notify_client_of_netdev_open(vsi);

	return 0;
}

/**
 * i40e_vsi_open - Finish initialization and bring a VSI up
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return err;
}
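
/* i40e_vsi_open() uses the kernel's standard goto-unwind error style: each
 * failure jumps to a label that frees exactly what was already acquired,
 * in reverse order of acquisition. The shape, reduced to a sketch with a
 * hypothetical two-step acquisition:
 */
#if 0
static int goto_unwind_sketch(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_rx;
	return 0;

err_rx:
	i40e_vsi_free_tx_resources(vsi);	/* undo step one only */
err_tx:
	return err;
}
#endif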

/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	pf->fdir_pf_active_filters = 0;
}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}

/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}

/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
	}

exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
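/* The counter reads above are plain mask-and-shift decodes of one CSR.
 * A minimal sketch of consuming the two fields (illustrative only and
 * compiled out; the helper name is hypothetical, not part of the driver):
 */
#if 0
static void i40e_example_show_fd_usage(struct i40e_pf *pf)
{
	u32 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	u32 guaranteed = val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK;
	u32 best = (val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;

	/* guaranteed + best is what i40e_get_current_fd_count() returns */
	dev_info(&pf->pdev->dev, "FD filters in use: %u guaranteed, %u best effort\n",
		 guaranteed, best);
}
#endif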
/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if FD SB or ATR was auto disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
			(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quickly and we have mostly SB rules we
	 * should not re-enable ATR for some time.
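	 * (The two I40E_MIN_FD_FLUSH_* values above are in seconds; the
	 * "* HZ" multiplications convert them to jiffies so that the
	 * time_after() comparisons stay wraparound-safe.)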
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first
 * filter miss error on Rx queue 0.  Accumulating enough error messages
 * before reacting will make sure we don't cause a flush too often.
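 * I40E_MAX_FD_PROGRAM_ERROR just below captures that accumulation bound.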
5959 */ 5960 #define I40E_MAX_FD_PROGRAM_ERROR 256 5961 5962 /** 5963 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 5964 * @pf: board private structure 5965 **/ 5966 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 5967 { 5968 5969 /* if interface is down do nothing */ 5970 if (test_bit(__I40E_DOWN, &pf->state)) 5971 return; 5972 5973 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) 5974 return; 5975 5976 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 5977 i40e_fdir_flush_and_replay(pf); 5978 5979 i40e_fdir_check_and_reenable(pf); 5980 5981 } 5982 5983 /** 5984 * i40e_vsi_link_event - notify VSI of a link event 5985 * @vsi: vsi to be notified 5986 * @link_up: link up or down 5987 **/ 5988 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 5989 { 5990 if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) 5991 return; 5992 5993 switch (vsi->type) { 5994 case I40E_VSI_MAIN: 5995 #ifdef I40E_FCOE 5996 case I40E_VSI_FCOE: 5997 #endif 5998 if (!vsi->netdev || !vsi->netdev_registered) 5999 break; 6000 6001 if (link_up) { 6002 netif_carrier_on(vsi->netdev); 6003 netif_tx_wake_all_queues(vsi->netdev); 6004 } else { 6005 netif_carrier_off(vsi->netdev); 6006 netif_tx_stop_all_queues(vsi->netdev); 6007 } 6008 break; 6009 6010 case I40E_VSI_SRIOV: 6011 case I40E_VSI_VMDQ2: 6012 case I40E_VSI_CTRL: 6013 case I40E_VSI_IWARP: 6014 case I40E_VSI_MIRROR: 6015 default: 6016 /* there is no notification for other VSIs */ 6017 break; 6018 } 6019 } 6020 6021 /** 6022 * i40e_veb_link_event - notify elements on the veb of a link event 6023 * @veb: veb to be notified 6024 * @link_up: link up or down 6025 **/ 6026 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 6027 { 6028 struct i40e_pf *pf; 6029 int i; 6030 6031 if (!veb || !veb->pf) 6032 return; 6033 pf = veb->pf; 6034 6035 /* depth first... */ 6036 for (i = 0; i < I40E_MAX_VEB; i++) 6037 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 6038 i40e_veb_link_event(pf->veb[i], link_up); 6039 6040 /* ... now the local VSIs */ 6041 for (i = 0; i < pf->num_alloc_vsi; i++) 6042 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 6043 i40e_vsi_link_event(pf->vsi[i], link_up); 6044 } 6045 6046 /** 6047 * i40e_link_event - Update netif_carrier status 6048 * @pf: board private structure 6049 **/ 6050 static void i40e_link_event(struct i40e_pf *pf) 6051 { 6052 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6053 u8 new_link_speed, old_link_speed; 6054 i40e_status status; 6055 bool new_link, old_link; 6056 6057 /* save off old link status information */ 6058 pf->hw.phy.link_info_old = pf->hw.phy.link_info; 6059 6060 /* set this to force the get_link_status call to refresh state */ 6061 pf->hw.phy.get_link_info = true; 6062 6063 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 6064 6065 status = i40e_get_link_status(&pf->hw, &new_link); 6066 if (status) { 6067 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", 6068 status); 6069 return; 6070 } 6071 6072 old_link_speed = pf->hw.phy.link_info_old.link_speed; 6073 new_link_speed = pf->hw.phy.link_info.link_speed; 6074 6075 if (new_link == old_link && 6076 new_link_speed == old_link_speed && 6077 (test_bit(__I40E_DOWN, &vsi->state) || 6078 new_link == netif_carrier_ok(vsi->netdev))) 6079 return; 6080 6081 if (!test_bit(__I40E_DOWN, &vsi->state)) 6082 i40e_print_link_message(vsi, new_link); 6083 6084 /* Notify the base of the switch tree connected to 6085 * the link. 
Floating VEBs are not notified. 6086 */ 6087 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 6088 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 6089 else 6090 i40e_vsi_link_event(vsi, new_link); 6091 6092 if (pf->vf) 6093 i40e_vc_notify_link_state(pf); 6094 6095 if (pf->flags & I40E_FLAG_PTP) 6096 i40e_ptp_set_increment(pf); 6097 } 6098 6099 /** 6100 * i40e_watchdog_subtask - periodic checks not using event driven response 6101 * @pf: board private structure 6102 **/ 6103 static void i40e_watchdog_subtask(struct i40e_pf *pf) 6104 { 6105 int i; 6106 6107 /* if interface is down do nothing */ 6108 if (test_bit(__I40E_DOWN, &pf->state) || 6109 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6110 return; 6111 6112 /* make sure we don't do these things too often */ 6113 if (time_before(jiffies, (pf->service_timer_previous + 6114 pf->service_timer_period))) 6115 return; 6116 pf->service_timer_previous = jiffies; 6117 6118 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) 6119 i40e_link_event(pf); 6120 6121 /* Update the stats for active netdevs so the network stack 6122 * can look at updated numbers whenever it cares to 6123 */ 6124 for (i = 0; i < pf->num_alloc_vsi; i++) 6125 if (pf->vsi[i] && pf->vsi[i]->netdev) 6126 i40e_update_stats(pf->vsi[i]); 6127 6128 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { 6129 /* Update the stats for the active switching components */ 6130 for (i = 0; i < I40E_MAX_VEB; i++) 6131 if (pf->veb[i]) 6132 i40e_update_veb_stats(pf->veb[i]); 6133 } 6134 6135 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 6136 } 6137 6138 /** 6139 * i40e_reset_subtask - Set up for resetting the device and driver 6140 * @pf: board private structure 6141 **/ 6142 static void i40e_reset_subtask(struct i40e_pf *pf) 6143 { 6144 u32 reset_flags = 0; 6145 6146 rtnl_lock(); 6147 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 6148 reset_flags |= BIT(__I40E_REINIT_REQUESTED); 6149 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 6150 } 6151 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 6152 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); 6153 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 6154 } 6155 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 6156 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); 6157 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 6158 } 6159 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 6160 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 6161 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 6162 } 6163 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { 6164 reset_flags |= BIT(__I40E_DOWN_REQUESTED); 6165 clear_bit(__I40E_DOWN_REQUESTED, &pf->state); 6166 } 6167 6168 /* If there's a recovery already waiting, it takes 6169 * precedence before starting a new reset sequence. 
6170 */ 6171 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 6172 i40e_handle_reset_warning(pf); 6173 goto unlock; 6174 } 6175 6176 /* If we're already down or resetting, just bail */ 6177 if (reset_flags && 6178 !test_bit(__I40E_DOWN, &pf->state) && 6179 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6180 i40e_do_reset(pf, reset_flags); 6181 6182 unlock: 6183 rtnl_unlock(); 6184 } 6185 6186 /** 6187 * i40e_handle_link_event - Handle link event 6188 * @pf: board private structure 6189 * @e: event info posted on ARQ 6190 **/ 6191 static void i40e_handle_link_event(struct i40e_pf *pf, 6192 struct i40e_arq_event_info *e) 6193 { 6194 struct i40e_aqc_get_link_status *status = 6195 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 6196 6197 /* Do a new status request to re-enable LSE reporting 6198 * and load new status information into the hw struct 6199 * This completely ignores any state information 6200 * in the ARQ event info, instead choosing to always 6201 * issue the AQ update link status command. 6202 */ 6203 i40e_link_event(pf); 6204 6205 /* check for unqualified module, if link is down */ 6206 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 6207 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 6208 (!(status->link_info & I40E_AQ_LINK_UP))) 6209 dev_err(&pf->pdev->dev, 6210 "The driver failed to link because an unqualified module was detected.\n"); 6211 } 6212 6213 /** 6214 * i40e_clean_adminq_subtask - Clean the AdminQ rings 6215 * @pf: board private structure 6216 **/ 6217 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 6218 { 6219 struct i40e_arq_event_info event; 6220 struct i40e_hw *hw = &pf->hw; 6221 u16 pending, i = 0; 6222 i40e_status ret; 6223 u16 opcode; 6224 u32 oldval; 6225 u32 val; 6226 6227 /* Do not run clean AQ when PF reset fails */ 6228 if (test_bit(__I40E_RESET_FAILED, &pf->state)) 6229 return; 6230 6231 /* check for error indications */ 6232 val = rd32(&pf->hw, pf->hw.aq.arq.len); 6233 oldval = val; 6234 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 6235 if (hw->debug_mask & I40E_DEBUG_AQ) 6236 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 6237 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 6238 } 6239 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 6240 if (hw->debug_mask & I40E_DEBUG_AQ) 6241 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 6242 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 6243 pf->arq_overflows++; 6244 } 6245 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 6246 if (hw->debug_mask & I40E_DEBUG_AQ) 6247 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 6248 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 6249 } 6250 if (oldval != val) 6251 wr32(&pf->hw, pf->hw.aq.arq.len, val); 6252 6253 val = rd32(&pf->hw, pf->hw.aq.asq.len); 6254 oldval = val; 6255 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 6256 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6257 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 6258 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 6259 } 6260 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 6261 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6262 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 6263 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 6264 } 6265 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 6266 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6267 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 6268 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 6269 } 6270 if (oldval != val) 6271 wr32(&pf->hw, pf->hw.aq.asq.len, val); 6272 6273 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 6274 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 6275 if (!event.msg_buf) 6276 return; 
6277 6278 do { 6279 ret = i40e_clean_arq_element(hw, &event, &pending); 6280 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 6281 break; 6282 else if (ret) { 6283 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 6284 break; 6285 } 6286 6287 opcode = le16_to_cpu(event.desc.opcode); 6288 switch (opcode) { 6289 6290 case i40e_aqc_opc_get_link_status: 6291 i40e_handle_link_event(pf, &event); 6292 break; 6293 case i40e_aqc_opc_send_msg_to_pf: 6294 ret = i40e_vc_process_vf_msg(pf, 6295 le16_to_cpu(event.desc.retval), 6296 le32_to_cpu(event.desc.cookie_high), 6297 le32_to_cpu(event.desc.cookie_low), 6298 event.msg_buf, 6299 event.msg_len); 6300 break; 6301 case i40e_aqc_opc_lldp_update_mib: 6302 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 6303 #ifdef CONFIG_I40E_DCB 6304 rtnl_lock(); 6305 ret = i40e_handle_lldp_event(pf, &event); 6306 rtnl_unlock(); 6307 #endif /* CONFIG_I40E_DCB */ 6308 break; 6309 case i40e_aqc_opc_event_lan_overflow: 6310 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 6311 i40e_handle_lan_overflow_event(pf, &event); 6312 break; 6313 case i40e_aqc_opc_send_msg_to_peer: 6314 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 6315 break; 6316 case i40e_aqc_opc_nvm_erase: 6317 case i40e_aqc_opc_nvm_update: 6318 case i40e_aqc_opc_oem_post_update: 6319 i40e_debug(&pf->hw, I40E_DEBUG_NVM, 6320 "ARQ NVM operation 0x%04x completed\n", 6321 opcode); 6322 break; 6323 default: 6324 dev_info(&pf->pdev->dev, 6325 "ARQ: Unknown event 0x%04x ignored\n", 6326 opcode); 6327 break; 6328 } 6329 } while (pending && (i++ < pf->adminq_work_limit)); 6330 6331 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 6332 /* re-enable Admin queue interrupt cause */ 6333 val = rd32(hw, I40E_PFINT_ICR0_ENA); 6334 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 6335 wr32(hw, I40E_PFINT_ICR0_ENA, val); 6336 i40e_flush(hw); 6337 6338 kfree(event.msg_buf); 6339 } 6340 6341 /** 6342 * i40e_verify_eeprom - make sure eeprom is good to use 6343 * @pf: board private structure 6344 **/ 6345 static void i40e_verify_eeprom(struct i40e_pf *pf) 6346 { 6347 int err; 6348 6349 err = i40e_diag_eeprom_test(&pf->hw); 6350 if (err) { 6351 /* retry in case of garbage read */ 6352 err = i40e_diag_eeprom_test(&pf->hw); 6353 if (err) { 6354 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 6355 err); 6356 set_bit(__I40E_BAD_EEPROM, &pf->state); 6357 } 6358 } 6359 6360 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 6361 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 6362 clear_bit(__I40E_BAD_EEPROM, &pf->state); 6363 } 6364 } 6365 6366 /** 6367 * i40e_enable_pf_switch_lb 6368 * @pf: pointer to the PF structure 6369 * 6370 * enable switch loop back or die - no point in a return value 6371 **/ 6372 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 6373 { 6374 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6375 struct i40e_vsi_context ctxt; 6376 int ret; 6377 6378 ctxt.seid = pf->main_vsi_seid; 6379 ctxt.pf_num = pf->hw.pf_id; 6380 ctxt.vf_num = 0; 6381 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6382 if (ret) { 6383 dev_info(&pf->pdev->dev, 6384 "couldn't get PF vsi config, err %s aq_err %s\n", 6385 i40e_stat_str(&pf->hw, ret), 6386 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6387 return; 6388 } 6389 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6390 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6391 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6392 6393 ret = 
i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6394 if (ret) { 6395 dev_info(&pf->pdev->dev, 6396 "update vsi switch failed, err %s aq_err %s\n", 6397 i40e_stat_str(&pf->hw, ret), 6398 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6399 } 6400 } 6401 6402 /** 6403 * i40e_disable_pf_switch_lb 6404 * @pf: pointer to the PF structure 6405 * 6406 * disable switch loop back or die - no point in a return value 6407 **/ 6408 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 6409 { 6410 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6411 struct i40e_vsi_context ctxt; 6412 int ret; 6413 6414 ctxt.seid = pf->main_vsi_seid; 6415 ctxt.pf_num = pf->hw.pf_id; 6416 ctxt.vf_num = 0; 6417 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6418 if (ret) { 6419 dev_info(&pf->pdev->dev, 6420 "couldn't get PF vsi config, err %s aq_err %s\n", 6421 i40e_stat_str(&pf->hw, ret), 6422 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6423 return; 6424 } 6425 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6426 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6427 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6428 6429 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6430 if (ret) { 6431 dev_info(&pf->pdev->dev, 6432 "update vsi switch failed, err %s aq_err %s\n", 6433 i40e_stat_str(&pf->hw, ret), 6434 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6435 } 6436 } 6437 6438 /** 6439 * i40e_config_bridge_mode - Configure the HW bridge mode 6440 * @veb: pointer to the bridge instance 6441 * 6442 * Configure the loop back mode for the LAN VSI that is downlink to the 6443 * specified HW bridge instance. It is expected this function is called 6444 * when a new HW bridge is instantiated. 6445 **/ 6446 static void i40e_config_bridge_mode(struct i40e_veb *veb) 6447 { 6448 struct i40e_pf *pf = veb->pf; 6449 6450 if (pf->hw.debug_mask & I40E_DEBUG_LAN) 6451 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 6452 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 6453 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 6454 i40e_disable_pf_switch_lb(pf); 6455 else 6456 i40e_enable_pf_switch_lb(pf); 6457 } 6458 6459 /** 6460 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 6461 * @veb: pointer to the VEB instance 6462 * 6463 * This is a recursive function that first builds the attached VSIs then 6464 * recurses in to build the next layer of VEB. We track the connections 6465 * through our own index numbers because the seid's from the HW could 6466 * change across the reset. 
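 *
 * For example: the VSI that owns this VEB is re-added first, then the
 * remaining member VSIs, and finally any child VEBs, each of which
 * repeats the same steps one level further down the tree.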
6467 **/ 6468 static int i40e_reconstitute_veb(struct i40e_veb *veb) 6469 { 6470 struct i40e_vsi *ctl_vsi = NULL; 6471 struct i40e_pf *pf = veb->pf; 6472 int v, veb_idx; 6473 int ret; 6474 6475 /* build VSI that owns this VEB, temporarily attached to base VEB */ 6476 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 6477 if (pf->vsi[v] && 6478 pf->vsi[v]->veb_idx == veb->idx && 6479 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 6480 ctl_vsi = pf->vsi[v]; 6481 break; 6482 } 6483 } 6484 if (!ctl_vsi) { 6485 dev_info(&pf->pdev->dev, 6486 "missing owner VSI for veb_idx %d\n", veb->idx); 6487 ret = -ENOENT; 6488 goto end_reconstitute; 6489 } 6490 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 6491 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 6492 ret = i40e_add_vsi(ctl_vsi); 6493 if (ret) { 6494 dev_info(&pf->pdev->dev, 6495 "rebuild of veb_idx %d owner VSI failed: %d\n", 6496 veb->idx, ret); 6497 goto end_reconstitute; 6498 } 6499 i40e_vsi_reset_stats(ctl_vsi); 6500 6501 /* create the VEB in the switch and move the VSI onto the VEB */ 6502 ret = i40e_add_veb(veb, ctl_vsi); 6503 if (ret) 6504 goto end_reconstitute; 6505 6506 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 6507 veb->bridge_mode = BRIDGE_MODE_VEB; 6508 else 6509 veb->bridge_mode = BRIDGE_MODE_VEPA; 6510 i40e_config_bridge_mode(veb); 6511 6512 /* create the remaining VSIs attached to this VEB */ 6513 for (v = 0; v < pf->num_alloc_vsi; v++) { 6514 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 6515 continue; 6516 6517 if (pf->vsi[v]->veb_idx == veb->idx) { 6518 struct i40e_vsi *vsi = pf->vsi[v]; 6519 6520 vsi->uplink_seid = veb->seid; 6521 ret = i40e_add_vsi(vsi); 6522 if (ret) { 6523 dev_info(&pf->pdev->dev, 6524 "rebuild of vsi_idx %d failed: %d\n", 6525 v, ret); 6526 goto end_reconstitute; 6527 } 6528 i40e_vsi_reset_stats(vsi); 6529 } 6530 } 6531 6532 /* create any VEBs attached to this VEB - RECURSION */ 6533 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 6534 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 6535 pf->veb[veb_idx]->uplink_seid = veb->seid; 6536 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 6537 if (ret) 6538 break; 6539 } 6540 } 6541 6542 end_reconstitute: 6543 return ret; 6544 } 6545 6546 /** 6547 * i40e_get_capabilities - get info about the HW 6548 * @pf: the PF struct 6549 **/ 6550 static int i40e_get_capabilities(struct i40e_pf *pf) 6551 { 6552 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 6553 u16 data_size; 6554 int buf_len; 6555 int err; 6556 6557 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 6558 do { 6559 cap_buf = kzalloc(buf_len, GFP_KERNEL); 6560 if (!cap_buf) 6561 return -ENOMEM; 6562 6563 /* this loads the data into the hw struct for us */ 6564 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 6565 &data_size, 6566 i40e_aqc_opc_list_func_capabilities, 6567 NULL); 6568 /* data loaded, buffer no longer needed */ 6569 kfree(cap_buf); 6570 6571 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 6572 /* retry with a larger buffer */ 6573 buf_len = data_size; 6574 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 6575 dev_info(&pf->pdev->dev, 6576 "capability discovery failed, err %s aq_err %s\n", 6577 i40e_stat_str(&pf->hw, err), 6578 i40e_aq_str(&pf->hw, 6579 pf->hw.aq.asq_last_status)); 6580 return -ENODEV; 6581 } 6582 } while (err); 6583 6584 if (pf->hw.debug_mask & I40E_DEBUG_USER) 6585 dev_info(&pf->pdev->dev, 6586 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 6587 
pf->hw.pf_id, pf->hw.func_caps.num_vfs, 6588 pf->hw.func_caps.num_msix_vectors, 6589 pf->hw.func_caps.num_msix_vectors_vf, 6590 pf->hw.func_caps.fd_filters_guaranteed, 6591 pf->hw.func_caps.fd_filters_best_effort, 6592 pf->hw.func_caps.num_tx_qp, 6593 pf->hw.func_caps.num_vsis); 6594 6595 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 6596 + pf->hw.func_caps.num_vfs) 6597 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 6598 dev_info(&pf->pdev->dev, 6599 "got num_vsis %d, setting num_vsis to %d\n", 6600 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 6601 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 6602 } 6603 6604 return 0; 6605 } 6606 6607 static int i40e_vsi_clear(struct i40e_vsi *vsi); 6608 6609 /** 6610 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 6611 * @pf: board private structure 6612 **/ 6613 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 6614 { 6615 struct i40e_vsi *vsi; 6616 int i; 6617 6618 /* quick workaround for an NVM issue that leaves a critical register 6619 * uninitialized 6620 */ 6621 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 6622 static const u32 hkey[] = { 6623 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 6624 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 6625 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 6626 0x95b3a76d}; 6627 6628 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 6629 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 6630 } 6631 6632 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6633 return; 6634 6635 /* find existing VSI and see if it needs configuring */ 6636 vsi = NULL; 6637 for (i = 0; i < pf->num_alloc_vsi; i++) { 6638 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6639 vsi = pf->vsi[i]; 6640 break; 6641 } 6642 } 6643 6644 /* create a new VSI if none exists */ 6645 if (!vsi) { 6646 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 6647 pf->vsi[pf->lan_vsi]->seid, 0); 6648 if (!vsi) { 6649 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 6650 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6651 return; 6652 } 6653 } 6654 6655 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 6656 } 6657 6658 /** 6659 * i40e_fdir_teardown - release the Flow Director resources 6660 * @pf: board private structure 6661 **/ 6662 static void i40e_fdir_teardown(struct i40e_pf *pf) 6663 { 6664 int i; 6665 6666 i40e_fdir_filter_exit(pf); 6667 for (i = 0; i < pf->num_alloc_vsi; i++) { 6668 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6669 i40e_vsi_release(pf->vsi[i]); 6670 break; 6671 } 6672 } 6673 } 6674 6675 /** 6676 * i40e_prep_for_reset - prep for the core to reset 6677 * @pf: board private structure 6678 * 6679 * Close up the VFs and other things in prep for PF Reset. 
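 * The ordering below matters: the VFs are notified while the AdminQ is
 * still usable, the VSIs are quiesced next, and only then are the AdminQ
 * and the LAN HMC shut down.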
6680 **/ 6681 static void i40e_prep_for_reset(struct i40e_pf *pf) 6682 { 6683 struct i40e_hw *hw = &pf->hw; 6684 i40e_status ret = 0; 6685 u32 v; 6686 6687 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 6688 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 6689 return; 6690 if (i40e_check_asq_alive(&pf->hw)) 6691 i40e_vc_notify_reset(pf); 6692 6693 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 6694 6695 /* quiesce the VSIs and their queues that are not already DOWN */ 6696 i40e_pf_quiesce_all_vsi(pf); 6697 6698 for (v = 0; v < pf->num_alloc_vsi; v++) { 6699 if (pf->vsi[v]) 6700 pf->vsi[v]->seid = 0; 6701 } 6702 6703 i40e_shutdown_adminq(&pf->hw); 6704 6705 /* call shutdown HMC */ 6706 if (hw->hmc.hmc_obj) { 6707 ret = i40e_shutdown_lan_hmc(hw); 6708 if (ret) 6709 dev_warn(&pf->pdev->dev, 6710 "shutdown_lan_hmc failed: %d\n", ret); 6711 } 6712 } 6713 6714 /** 6715 * i40e_send_version - update firmware with driver version 6716 * @pf: PF struct 6717 */ 6718 static void i40e_send_version(struct i40e_pf *pf) 6719 { 6720 struct i40e_driver_version dv; 6721 6722 dv.major_version = DRV_VERSION_MAJOR; 6723 dv.minor_version = DRV_VERSION_MINOR; 6724 dv.build_version = DRV_VERSION_BUILD; 6725 dv.subbuild_version = 0; 6726 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); 6727 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 6728 } 6729 6730 /** 6731 * i40e_reset_and_rebuild - reset and rebuild using a saved config 6732 * @pf: board private structure 6733 * @reinit: if the Main VSI needs to re-initialized. 6734 **/ 6735 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) 6736 { 6737 struct i40e_hw *hw = &pf->hw; 6738 u8 set_fc_aq_fail = 0; 6739 i40e_status ret; 6740 u32 val; 6741 u32 v; 6742 6743 /* Now we wait for GRST to settle out. 6744 * We don't have to delete the VEBs or VSIs from the hw switch 6745 * because the reset will make them disappear. 
6746 */ 6747 ret = i40e_pf_reset(hw); 6748 if (ret) { 6749 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); 6750 set_bit(__I40E_RESET_FAILED, &pf->state); 6751 goto clear_recovery; 6752 } 6753 pf->pfr_count++; 6754 6755 if (test_bit(__I40E_DOWN, &pf->state)) 6756 goto clear_recovery; 6757 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); 6758 6759 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ 6760 ret = i40e_init_adminq(&pf->hw); 6761 if (ret) { 6762 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", 6763 i40e_stat_str(&pf->hw, ret), 6764 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6765 goto clear_recovery; 6766 } 6767 6768 /* re-verify the eeprom if we just had an EMP reset */ 6769 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) 6770 i40e_verify_eeprom(pf); 6771 6772 i40e_clear_pxe_mode(hw); 6773 ret = i40e_get_capabilities(pf); 6774 if (ret) 6775 goto end_core_reset; 6776 6777 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 6778 hw->func_caps.num_rx_qp, 6779 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 6780 if (ret) { 6781 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); 6782 goto end_core_reset; 6783 } 6784 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 6785 if (ret) { 6786 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); 6787 goto end_core_reset; 6788 } 6789 6790 #ifdef CONFIG_I40E_DCB 6791 ret = i40e_init_pf_dcb(pf); 6792 if (ret) { 6793 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); 6794 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 6795 /* Continue without DCB enabled */ 6796 } 6797 #endif /* CONFIG_I40E_DCB */ 6798 #ifdef I40E_FCOE 6799 i40e_init_pf_fcoe(pf); 6800 6801 #endif 6802 /* do basic switch setup */ 6803 ret = i40e_setup_pf_switch(pf, reinit); 6804 if (ret) 6805 goto end_core_reset; 6806 6807 /* The driver only wants link up/down and module qualification 6808 * reports from firmware. Note the negative logic. 6809 */ 6810 ret = i40e_aq_set_phy_int_mask(&pf->hw, 6811 ~(I40E_AQ_EVENT_LINK_UPDOWN | 6812 I40E_AQ_EVENT_MEDIA_NA | 6813 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); 6814 if (ret) 6815 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", 6816 i40e_stat_str(&pf->hw, ret), 6817 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6818 6819 /* make sure our flow control settings are restored */ 6820 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); 6821 if (ret) 6822 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", 6823 i40e_stat_str(&pf->hw, ret), 6824 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6825 6826 /* Rebuild the VSIs and VEBs that existed before reset. 6827 * They are still in our local switch element arrays, so only 6828 * need to rebuild the switch model in the HW. 6829 * 6830 * If there were VEBs but the reconstitution failed, we'll try 6831 * try to recover minimal use by getting the basic PF VSI working. 
6832 */ 6833 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 6834 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); 6835 /* find the one VEB connected to the MAC, and find orphans */ 6836 for (v = 0; v < I40E_MAX_VEB; v++) { 6837 if (!pf->veb[v]) 6838 continue; 6839 6840 if (pf->veb[v]->uplink_seid == pf->mac_seid || 6841 pf->veb[v]->uplink_seid == 0) { 6842 ret = i40e_reconstitute_veb(pf->veb[v]); 6843 6844 if (!ret) 6845 continue; 6846 6847 /* If Main VEB failed, we're in deep doodoo, 6848 * so give up rebuilding the switch and set up 6849 * for minimal rebuild of PF VSI. 6850 * If orphan failed, we'll report the error 6851 * but try to keep going. 6852 */ 6853 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 6854 dev_info(&pf->pdev->dev, 6855 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 6856 ret); 6857 pf->vsi[pf->lan_vsi]->uplink_seid 6858 = pf->mac_seid; 6859 break; 6860 } else if (pf->veb[v]->uplink_seid == 0) { 6861 dev_info(&pf->pdev->dev, 6862 "rebuild of orphan VEB failed: %d\n", 6863 ret); 6864 } 6865 } 6866 } 6867 } 6868 6869 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 6870 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 6871 /* no VEB, so rebuild only the Main VSI */ 6872 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 6873 if (ret) { 6874 dev_info(&pf->pdev->dev, 6875 "rebuild of Main VSI failed: %d\n", ret); 6876 goto end_core_reset; 6877 } 6878 } 6879 6880 /* Reconfigure hardware for allowing smaller MSS in the case 6881 * of TSO, so that we avoid the MDD being fired and causing 6882 * a reset in the case of small MSS+TSO. 6883 */ 6884 #define I40E_REG_MSS 0x000E64DC 6885 #define I40E_REG_MSS_MIN_MASK 0x3FF0000 6886 #define I40E_64BYTE_MSS 0x400000 6887 val = rd32(hw, I40E_REG_MSS); 6888 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 6889 val &= ~I40E_REG_MSS_MIN_MASK; 6890 val |= I40E_64BYTE_MSS; 6891 wr32(hw, I40E_REG_MSS, val); 6892 } 6893 6894 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { 6895 msleep(75); 6896 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 6897 if (ret) 6898 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 6899 i40e_stat_str(&pf->hw, ret), 6900 i40e_aq_str(&pf->hw, 6901 pf->hw.aq.asq_last_status)); 6902 } 6903 /* reinit the misc interrupt */ 6904 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6905 ret = i40e_setup_misc_vector(pf); 6906 6907 /* Add a filter to drop all Flow control frames from any VSI from being 6908 * transmitted. By doing so we stop a malicious VF from sending out 6909 * PAUSE or PFC frames and potentially controlling traffic for other 6910 * PF/VF VSIs. 6911 * The FW can still send Flow control frames if enabled. 6912 */ 6913 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, 6914 pf->main_vsi_seid); 6915 6916 /* restart the VSIs that were rebuilt and running before the reset */ 6917 i40e_pf_unquiesce_all_vsi(pf); 6918 6919 if (pf->num_alloc_vfs) { 6920 for (v = 0; v < pf->num_alloc_vfs; v++) 6921 i40e_reset_vf(&pf->vf[v], true); 6922 } 6923 6924 /* tell the firmware that we're starting */ 6925 i40e_send_version(pf); 6926 6927 end_core_reset: 6928 clear_bit(__I40E_RESET_FAILED, &pf->state); 6929 clear_recovery: 6930 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 6931 } 6932 6933 /** 6934 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 6935 * @pf: board private structure 6936 * 6937 * Close up the VFs and other things in prep for a Core Reset, 6938 * then get ready to rebuild the world. 
6939 **/ 6940 static void i40e_handle_reset_warning(struct i40e_pf *pf) 6941 { 6942 i40e_prep_for_reset(pf); 6943 i40e_reset_and_rebuild(pf, false); 6944 } 6945 6946 /** 6947 * i40e_handle_mdd_event 6948 * @pf: pointer to the PF structure 6949 * 6950 * Called from the MDD irq handler to identify possibly malicious vfs 6951 **/ 6952 static void i40e_handle_mdd_event(struct i40e_pf *pf) 6953 { 6954 struct i40e_hw *hw = &pf->hw; 6955 bool mdd_detected = false; 6956 bool pf_mdd_detected = false; 6957 struct i40e_vf *vf; 6958 u32 reg; 6959 int i; 6960 6961 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 6962 return; 6963 6964 /* find what triggered the MDD event */ 6965 reg = rd32(hw, I40E_GL_MDET_TX); 6966 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 6967 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 6968 I40E_GL_MDET_TX_PF_NUM_SHIFT; 6969 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 6970 I40E_GL_MDET_TX_VF_NUM_SHIFT; 6971 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 6972 I40E_GL_MDET_TX_EVENT_SHIFT; 6973 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6974 I40E_GL_MDET_TX_QUEUE_SHIFT) - 6975 pf->hw.func_caps.base_queue; 6976 if (netif_msg_tx_err(pf)) 6977 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 6978 event, queue, pf_num, vf_num); 6979 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 6980 mdd_detected = true; 6981 } 6982 reg = rd32(hw, I40E_GL_MDET_RX); 6983 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 6984 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 6985 I40E_GL_MDET_RX_FUNCTION_SHIFT; 6986 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 6987 I40E_GL_MDET_RX_EVENT_SHIFT; 6988 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6989 I40E_GL_MDET_RX_QUEUE_SHIFT) - 6990 pf->hw.func_caps.base_queue; 6991 if (netif_msg_rx_err(pf)) 6992 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 6993 event, queue, func); 6994 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 6995 mdd_detected = true; 6996 } 6997 6998 if (mdd_detected) { 6999 reg = rd32(hw, I40E_PF_MDET_TX); 7000 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 7001 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 7002 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 7003 pf_mdd_detected = true; 7004 } 7005 reg = rd32(hw, I40E_PF_MDET_RX); 7006 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 7007 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 7008 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 7009 pf_mdd_detected = true; 7010 } 7011 /* Queue belongs to the PF, initiate a reset */ 7012 if (pf_mdd_detected) { 7013 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 7014 i40e_service_event_schedule(pf); 7015 } 7016 } 7017 7018 /* see if one of the VFs needs its hand slapped */ 7019 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 7020 vf = &(pf->vf[i]); 7021 reg = rd32(hw, I40E_VP_MDET_TX(i)); 7022 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 7023 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 7024 vf->num_mdd_events++; 7025 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 7026 i); 7027 } 7028 7029 reg = rd32(hw, I40E_VP_MDET_RX(i)); 7030 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 7031 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 7032 vf->num_mdd_events++; 7033 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 7034 i); 7035 } 7036 7037 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 7038 dev_info(&pf->pdev->dev, 7039 "Too many MDD events on VF %d, disabled\n", i); 7040 
dev_info(&pf->pdev->dev, 7041 "Use PF Control I/F to re-enable the VF\n"); 7042 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 7043 } 7044 } 7045 7046 /* re-enable mdd interrupt cause */ 7047 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 7048 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 7049 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 7050 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 7051 i40e_flush(hw); 7052 } 7053 7054 /** 7055 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW 7056 * @pf: board private structure 7057 **/ 7058 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) 7059 { 7060 #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) 7061 struct i40e_hw *hw = &pf->hw; 7062 i40e_status ret; 7063 __be16 port; 7064 int i; 7065 7066 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) 7067 return; 7068 7069 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; 7070 7071 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 7072 if (pf->pending_udp_bitmap & BIT_ULL(i)) { 7073 pf->pending_udp_bitmap &= ~BIT_ULL(i); 7074 port = pf->udp_ports[i].index; 7075 if (port) 7076 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), 7077 pf->udp_ports[i].type, 7078 NULL, NULL); 7079 else 7080 ret = i40e_aq_del_udp_tunnel(hw, i, NULL); 7081 7082 if (ret) { 7083 dev_dbg(&pf->pdev->dev, 7084 "%s %s port %d, index %d failed, err %s aq_err %s\n", 7085 pf->udp_ports[i].type ? "vxlan" : "geneve", 7086 port ? "add" : "delete", 7087 ntohs(port), i, 7088 i40e_stat_str(&pf->hw, ret), 7089 i40e_aq_str(&pf->hw, 7090 pf->hw.aq.asq_last_status)); 7091 pf->udp_ports[i].index = 0; 7092 } 7093 } 7094 } 7095 #endif 7096 } 7097 7098 /** 7099 * i40e_service_task - Run the driver's async subtasks 7100 * @work: pointer to work_struct containing our data 7101 **/ 7102 static void i40e_service_task(struct work_struct *work) 7103 { 7104 struct i40e_pf *pf = container_of(work, 7105 struct i40e_pf, 7106 service_task); 7107 unsigned long start_time = jiffies; 7108 7109 /* don't bother with service tasks if a reset is in progress */ 7110 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7111 i40e_service_event_complete(pf); 7112 return; 7113 } 7114 7115 i40e_detect_recover_hung(pf); 7116 i40e_sync_filters_subtask(pf); 7117 i40e_reset_subtask(pf); 7118 i40e_handle_mdd_event(pf); 7119 i40e_vc_process_vflr_event(pf); 7120 i40e_watchdog_subtask(pf); 7121 i40e_fdir_reinit_subtask(pf); 7122 i40e_client_subtask(pf); 7123 i40e_sync_filters_subtask(pf); 7124 i40e_sync_udp_filters_subtask(pf); 7125 i40e_clean_adminq_subtask(pf); 7126 7127 i40e_service_event_complete(pf); 7128 7129 /* If the tasks have taken longer than one timer cycle or there 7130 * is more work to be done, reschedule the service task now 7131 * rather than wait for the timer to tick again. 
7132 */ 7133 if (time_after(jiffies, (start_time + pf->service_timer_period)) || 7134 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || 7135 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || 7136 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) 7137 i40e_service_event_schedule(pf); 7138 } 7139 7140 /** 7141 * i40e_service_timer - timer callback 7142 * @data: pointer to PF struct 7143 **/ 7144 static void i40e_service_timer(unsigned long data) 7145 { 7146 struct i40e_pf *pf = (struct i40e_pf *)data; 7147 7148 mod_timer(&pf->service_timer, 7149 round_jiffies(jiffies + pf->service_timer_period)); 7150 i40e_service_event_schedule(pf); 7151 } 7152 7153 /** 7154 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI 7155 * @vsi: the VSI being configured 7156 **/ 7157 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) 7158 { 7159 struct i40e_pf *pf = vsi->back; 7160 7161 switch (vsi->type) { 7162 case I40E_VSI_MAIN: 7163 vsi->alloc_queue_pairs = pf->num_lan_qps; 7164 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 7165 I40E_REQ_DESCRIPTOR_MULTIPLE); 7166 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7167 vsi->num_q_vectors = pf->num_lan_msix; 7168 else 7169 vsi->num_q_vectors = 1; 7170 7171 break; 7172 7173 case I40E_VSI_FDIR: 7174 vsi->alloc_queue_pairs = 1; 7175 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, 7176 I40E_REQ_DESCRIPTOR_MULTIPLE); 7177 vsi->num_q_vectors = 1; 7178 break; 7179 7180 case I40E_VSI_VMDQ2: 7181 vsi->alloc_queue_pairs = pf->num_vmdq_qps; 7182 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 7183 I40E_REQ_DESCRIPTOR_MULTIPLE); 7184 vsi->num_q_vectors = pf->num_vmdq_msix; 7185 break; 7186 7187 case I40E_VSI_SRIOV: 7188 vsi->alloc_queue_pairs = pf->num_vf_qps; 7189 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 7190 I40E_REQ_DESCRIPTOR_MULTIPLE); 7191 break; 7192 7193 #ifdef I40E_FCOE 7194 case I40E_VSI_FCOE: 7195 vsi->alloc_queue_pairs = pf->num_fcoe_qps; 7196 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 7197 I40E_REQ_DESCRIPTOR_MULTIPLE); 7198 vsi->num_q_vectors = pf->num_fcoe_msix; 7199 break; 7200 7201 #endif /* I40E_FCOE */ 7202 default: 7203 WARN_ON(1); 7204 return -ENODATA; 7205 } 7206 7207 return 0; 7208 } 7209 7210 /** 7211 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi 7212 * @type: VSI pointer 7213 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7334 * 7335 * On error: returns error code (negative) 7336 * On success: returns 0 7337 **/ 7338 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) 7339 { 7340 /* free the ring and vector containers */ 7341 if (free_qvectors) { 7342 kfree(vsi->q_vectors); 7343 vsi->q_vectors = NULL; 7344 } 7345 kfree(vsi->tx_rings); 7346 vsi->tx_rings = NULL; 7347 vsi->rx_rings = NULL; 7348 } 7349 7350 /** 7351 * i40e_clear_rss_config_user - clear the user configured RSS hash keys 7352 * and lookup table 7353 * @vsi: Pointer to VSI structure 7354 */ 7355 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) 7356 { 7357 if (!vsi) 7358 return; 7359 7360 kfree(vsi->rss_hkey_user); 7361 vsi->rss_hkey_user = NULL; 7362 7363 kfree(vsi->rss_lut_user); 7364 vsi->rss_lut_user = NULL; 7365 } 7366 7367 /** 7368 * i40e_vsi_clear - Deallocate the VSI provided 7369 * @vsi: the VSI being un-configured 7370 **/ 7371 static int i40e_vsi_clear(struct i40e_vsi *vsi) 7372 { 7373 struct i40e_pf *pf; 7374 7375 if (!vsi) 7376 return 0; 7377 7378 if (!vsi->back) 7379 goto free_vsi; 7380 pf = vsi->back; 7381 7382 mutex_lock(&pf->switch_mutex); 7383 if (!pf->vsi[vsi->idx]) { 7384 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", 7385 vsi->idx, vsi->idx, vsi, vsi->type); 7386 goto unlock_vsi; 7387 } 7388 7389 if (pf->vsi[vsi->idx] != vsi) { 7390 dev_err(&pf->pdev->dev, 7391 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", 7392 pf->vsi[vsi->idx]->idx, 7393 pf->vsi[vsi->idx], 7394 pf->vsi[vsi->idx]->type, 7395 vsi->idx, vsi, vsi->type); 7396 goto unlock_vsi; 7397 } 7398 7399 /* updates the PF for this cleared vsi */ 7400 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 7401 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 7402 7403 i40e_vsi_free_arrays(vsi, true); 7404 i40e_clear_rss_config_user(vsi); 7405 7406 pf->vsi[vsi->idx] = NULL; 7407 if (vsi->idx < pf->next_vsi) 7408 pf->next_vsi = vsi->idx; 7409 7410 unlock_vsi: 7411 mutex_unlock(&pf->switch_mutex); 7412 free_vsi: 7413 kfree(vsi); 7414 7415 return 0; 7416 } 7417 7418 /** 7419 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI 7420 * @vsi: the VSI being cleaned 7421 **/ 7422 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) 7423 { 7424 int i; 7425 7426 if (vsi->tx_rings && vsi->tx_rings[0]) { 7427 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7428 kfree_rcu(vsi->tx_rings[i], rcu); 7429 vsi->tx_rings[i] = NULL; 7430 vsi->rx_rings[i] = NULL; 7431 } 7432 } 7433 } 7434 7435 /** 7436 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 7437 * @vsi: the VSI being configured 7438 **/ 7439 static int i40e_alloc_rings(struct i40e_vsi *vsi) 7440 { 7441 struct i40e_ring *tx_ring, *rx_ring; 7442 struct i40e_pf *pf = vsi->back; 7443 int i; 7444 7445 /* Set basic values in the rings to be used later during open() */ 7446 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7447 /* allocate space for both Tx and Rx in one shot */ 7448 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 7449 if (!tx_ring) 7450 goto err_out; 7451 7452 tx_ring->queue_index = i; 7453 tx_ring->reg_idx = vsi->base_queue + i; 7454 tx_ring->ring_active = false; 7455 tx_ring->vsi = vsi; 7456 tx_ring->netdev = vsi->netdev; 7457 tx_ring->dev = &pf->pdev->dev; 7458 tx_ring->count = vsi->num_desc; 7459 tx_ring->size = 0; 7460 tx_ring->dcb_tc = 0; 7461 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) 7462 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; 7463 tx_ring->tx_itr_setting = 
pf->tx_itr_default; 7464 vsi->tx_rings[i] = tx_ring; 7465 7466 rx_ring = &tx_ring[1]; 7467 rx_ring->queue_index = i; 7468 rx_ring->reg_idx = vsi->base_queue + i; 7469 rx_ring->ring_active = false; 7470 rx_ring->vsi = vsi; 7471 rx_ring->netdev = vsi->netdev; 7472 rx_ring->dev = &pf->pdev->dev; 7473 rx_ring->count = vsi->num_desc; 7474 rx_ring->size = 0; 7475 rx_ring->dcb_tc = 0; 7476 rx_ring->rx_itr_setting = pf->rx_itr_default; 7477 vsi->rx_rings[i] = rx_ring; 7478 } 7479 7480 return 0; 7481 7482 err_out: 7483 i40e_vsi_clear_rings(vsi); 7484 return -ENOMEM; 7485 } 7486 7487 /** 7488 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 7489 * @pf: board private structure 7490 * @vectors: the number of MSI-X vectors to request 7491 * 7492 * Returns the number of vectors reserved, or error 7493 **/ 7494 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 7495 { 7496 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 7497 I40E_MIN_MSIX, vectors); 7498 if (vectors < 0) { 7499 dev_info(&pf->pdev->dev, 7500 "MSI-X vector reservation failed: %d\n", vectors); 7501 vectors = 0; 7502 } 7503 7504 return vectors; 7505 } 7506 7507 /** 7508 * i40e_init_msix - Setup the MSIX capability 7509 * @pf: board private structure 7510 * 7511 * Work with the OS to set up the MSIX vectors needed. 7512 * 7513 * Returns the number of vectors reserved or negative on failure 7514 **/ 7515 static int i40e_init_msix(struct i40e_pf *pf) 7516 { 7517 struct i40e_hw *hw = &pf->hw; 7518 int vectors_left; 7519 int v_budget, i; 7520 int v_actual; 7521 int iwarp_requested = 0; 7522 7523 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7524 return -ENODEV; 7525 7526 /* The number of vectors we'll request will be comprised of: 7527 * - Add 1 for "other" cause for Admin Queue events, etc. 7528 * - The number of LAN queue pairs 7529 * - Queues being used for RSS. 7530 * We don't need as many as max_rss_size vectors. 7531 * use rss_size instead in the calculation since that 7532 * is governed by number of cpus in the system. 7533 * - assumes symmetric Tx/Rx pairing 7534 * - The number of VMDq pairs 7535 * - The CPU count within the NUMA node if iWARP is enabled 7536 #ifdef I40E_FCOE 7537 * - The number of FCOE qps. 7538 #endif 7539 * Once we count this up, try the request. 7540 * 7541 * If we can't get what we want, we'll simplify to nearly nothing 7542 * and try again. If that still fails, we punt. 7543 */ 7544 vectors_left = hw->func_caps.num_msix_vectors; 7545 v_budget = 0; 7546 7547 /* reserve one vector for miscellaneous handler */ 7548 if (vectors_left) { 7549 v_budget++; 7550 vectors_left--; 7551 } 7552 7553 /* reserve vectors for the main PF traffic queues */ 7554 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7555 vectors_left -= pf->num_lan_msix; 7556 v_budget += pf->num_lan_msix; 7557 7558 /* reserve one vector for sideband flow director */ 7559 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7560 if (vectors_left) { 7561 v_budget++; 7562 vectors_left--; 7563 } else { 7564 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7565 } 7566 } 7567 7568 #ifdef I40E_FCOE 7569 /* can we reserve enough for FCoE? */ 7570 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7571 if (!vectors_left) 7572 pf->num_fcoe_msix = 0; 7573 else if (vectors_left >= pf->num_fcoe_qps) 7574 pf->num_fcoe_msix = pf->num_fcoe_qps; 7575 else 7576 pf->num_fcoe_msix = 1; 7577 v_budget += pf->num_fcoe_msix; 7578 vectors_left -= pf->num_fcoe_msix; 7579 } 7580 7581 #endif 7582 /* can we reserve enough for iWARP? 
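	 * Example budget: with 64 vectors and 8 online CPUs, the misc
	 * vector, 8 LAN vectors and 1 flow director sideband vector are
	 * claimed first, leaving 54 vectors here for iWARP and then VMDq.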
*/ 7583 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7584 if (!vectors_left) 7585 pf->num_iwarp_msix = 0; 7586 else if (vectors_left < pf->num_iwarp_msix) 7587 pf->num_iwarp_msix = 1; 7588 v_budget += pf->num_iwarp_msix; 7589 vectors_left -= pf->num_iwarp_msix; 7590 } 7591 7592 /* any vectors left over go for VMDq support */ 7593 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7594 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7595 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 7596 7597 /* if we're short on vectors for what's desired, we limit 7598 * the queues per vmdq. If this is still more than are 7599 * available, the user will need to change the number of 7600 * queues/vectors used by the PF later with the ethtool 7601 * channels command 7602 */ 7603 if (vmdq_vecs < vmdq_vecs_wanted) 7604 pf->num_vmdq_qps = 1; 7605 pf->num_vmdq_msix = pf->num_vmdq_qps; 7606 7607 v_budget += vmdq_vecs; 7608 vectors_left -= vmdq_vecs; 7609 } 7610 7611 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7612 GFP_KERNEL); 7613 if (!pf->msix_entries) 7614 return -ENOMEM; 7615 7616 for (i = 0; i < v_budget; i++) 7617 pf->msix_entries[i].entry = i; 7618 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 7619 7620 if (v_actual != v_budget) { 7621 /* If we have limited resources, we will start with no vectors 7622 * for the special features and then allocate vectors to some 7623 * of these features based on the policy and at the end disable 7624 * the features that did not get any vectors. 7625 */ 7626 iwarp_requested = pf->num_iwarp_msix; 7627 pf->num_iwarp_msix = 0; 7628 #ifdef I40E_FCOE 7629 pf->num_fcoe_qps = 0; 7630 pf->num_fcoe_msix = 0; 7631 #endif 7632 pf->num_vmdq_msix = 0; 7633 } 7634 7635 if (v_actual < I40E_MIN_MSIX) { 7636 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7637 kfree(pf->msix_entries); 7638 pf->msix_entries = NULL; 7639 return -ENODEV; 7640 7641 } else if (v_actual == I40E_MIN_MSIX) { 7642 /* Adjust for minimal MSIX use */ 7643 pf->num_vmdq_vsis = 0; 7644 pf->num_vmdq_qps = 0; 7645 pf->num_lan_qps = 1; 7646 pf->num_lan_msix = 1; 7647 7648 } else if (v_actual != v_budget) { 7649 int vec; 7650 7651 /* reserve the misc vector */ 7652 vec = v_actual - 1; 7653 7654 /* Scale vector usage down */ 7655 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 7656 pf->num_vmdq_vsis = 1; 7657 pf->num_vmdq_qps = 1; 7658 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7659 7660 /* partition out the remaining vectors */ 7661 switch (vec) { 7662 case 2: 7663 pf->num_lan_msix = 1; 7664 break; 7665 case 3: 7666 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7667 pf->num_lan_msix = 1; 7668 pf->num_iwarp_msix = 1; 7669 } else { 7670 pf->num_lan_msix = 2; 7671 } 7672 #ifdef I40E_FCOE 7673 /* give one vector to FCoE */ 7674 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7675 pf->num_lan_msix = 1; 7676 pf->num_fcoe_msix = 1; 7677 } 7678 #endif 7679 break; 7680 default: 7681 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7682 pf->num_iwarp_msix = min_t(int, (vec / 3), 7683 iwarp_requested); 7684 pf->num_vmdq_vsis = min_t(int, (vec / 3), 7685 I40E_DEFAULT_NUM_VMDQ_VSI); 7686 } else { 7687 pf->num_vmdq_vsis = min_t(int, (vec / 2), 7688 I40E_DEFAULT_NUM_VMDQ_VSI); 7689 } 7690 pf->num_lan_msix = min_t(int, 7691 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), 7692 pf->num_lan_msix); 7693 #ifdef I40E_FCOE 7694 /* give one vector to FCoE */ 7695 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7696 pf->num_fcoe_msix = 1; 7697 vec--; 7698 } 7699 #endif 7700 break; 7701 } 7702 } 7703 7704 if ((pf->flags & 
I40E_FLAG_VMDQ_ENABLED) && 7705 (pf->num_vmdq_msix == 0)) { 7706 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7707 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7708 } 7709 7710 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 7711 (pf->num_iwarp_msix == 0)) { 7712 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); 7713 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; 7714 } 7715 #ifdef I40E_FCOE 7716 7717 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7718 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 7719 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 7720 } 7721 #endif 7722 return v_actual; 7723 } 7724 7725 /** 7726 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 7727 * @vsi: the VSI being configured 7728 * @v_idx: index of the vector in the vsi struct 7729 * 7730 * We allocate one q_vector. If allocation fails we return -ENOMEM. 7731 **/ 7732 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 7733 { 7734 struct i40e_q_vector *q_vector; 7735 7736 /* allocate q_vector */ 7737 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 7738 if (!q_vector) 7739 return -ENOMEM; 7740 7741 q_vector->vsi = vsi; 7742 q_vector->v_idx = v_idx; 7743 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 7744 if (vsi->netdev) 7745 netif_napi_add(vsi->netdev, &q_vector->napi, 7746 i40e_napi_poll, NAPI_POLL_WEIGHT); 7747 7748 q_vector->rx.latency_range = I40E_LOW_LATENCY; 7749 q_vector->tx.latency_range = I40E_LOW_LATENCY; 7750 7751 /* tie q_vector and vsi together */ 7752 vsi->q_vectors[v_idx] = q_vector; 7753 7754 return 0; 7755 } 7756 7757 /** 7758 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 7759 * @vsi: the VSI being configured 7760 * 7761 * We allocate one q_vector per queue interrupt. If allocation fails we 7762 * return -ENOMEM. 
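 * Any q_vectors that were already allocated are freed again on the
 * error path before returning.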
7763 **/ 7764 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 7765 { 7766 struct i40e_pf *pf = vsi->back; 7767 int v_idx, num_q_vectors; 7768 int err; 7769 7770 /* if not MSIX, give the one vector only to the LAN VSI */ 7771 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7772 num_q_vectors = vsi->num_q_vectors; 7773 else if (vsi == pf->vsi[pf->lan_vsi]) 7774 num_q_vectors = 1; 7775 else 7776 return -EINVAL; 7777 7778 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 7779 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 7780 if (err) 7781 goto err_out; 7782 } 7783 7784 return 0; 7785 7786 err_out: 7787 while (v_idx--) 7788 i40e_free_q_vector(vsi, v_idx); 7789 7790 return err; 7791 } 7792 7793 /** 7794 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 7795 * @pf: board private structure to initialize 7796 **/ 7797 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 7798 { 7799 int vectors = 0; 7800 ssize_t size; 7801 7802 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 7803 vectors = i40e_init_msix(pf); 7804 if (vectors < 0) { 7805 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 7806 I40E_FLAG_IWARP_ENABLED | 7807 #ifdef I40E_FCOE 7808 I40E_FLAG_FCOE_ENABLED | 7809 #endif 7810 I40E_FLAG_RSS_ENABLED | 7811 I40E_FLAG_DCB_CAPABLE | 7812 I40E_FLAG_SRIOV_ENABLED | 7813 I40E_FLAG_FD_SB_ENABLED | 7814 I40E_FLAG_FD_ATR_ENABLED | 7815 I40E_FLAG_VMDQ_ENABLED); 7816 7817 /* rework the queue expectations without MSIX */ 7818 i40e_determine_queue_usage(pf); 7819 } 7820 } 7821 7822 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 7823 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 7824 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 7825 vectors = pci_enable_msi(pf->pdev); 7826 if (vectors < 0) { 7827 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 7828 vectors); 7829 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 7830 } 7831 vectors = 1; /* one MSI or Legacy vector */ 7832 } 7833 7834 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 7835 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 7836 7837 /* set up vector assignment tracking */ 7838 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 7839 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7840 if (!pf->irq_pile) { 7841 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); 7842 return -ENOMEM; 7843 } 7844 pf->irq_pile->num_entries = vectors; 7845 pf->irq_pile->search_hint = 0; 7846 7847 /* track first vector for misc interrupts, ignore return */ 7848 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); 7849 7850 return 0; 7851 } 7852 7853 /** 7854 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events 7855 * @pf: board private structure 7856 * 7857 * This sets up the handler for MSIX 0, which is used to manage the 7858 * non-queue interrupts, e.g. AdminQ and errors. This is not used 7859 * when in MSI or Legacy interrupt mode. 
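 *
 * Returns 0 on success, or -EFAULT if the IRQ could not be requested.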
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf, true);

	return err;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table (not consumed here; this version rebuilds the LUT
 *       in round-robin fashion below)
 * @lut_size: Size of the lookup table buffer (likewise unused here)
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_aqc_get_set_rss_key_data rss_key;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	bool pf_lut = false;
	u8 *rss_lut;
	int ret, i;

	memset(&rss_key, 0, sizeof(rss_key));
	memcpy(&rss_key, seed, sizeof(rss_key));

	/* size the scratch LUT to match the fill loop below */
	rss_lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!rss_lut)
		return -ENOMEM;

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0; i < vsi->rss_table_size; i++)
		rss_lut[i] = i % vsi->rss_size;

	ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS key, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto config_rss_aq_out;
	}

	if (vsi->type == I40E_VSI_MAIN)
		pf_lut = true;

	ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
				  vsi->rss_table_size);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

config_rss_aq_out:
	kfree(rss_lut);
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_pf *pf = vsi->back;
	u8 *lut;
	int ret;

	if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
		return 0;

	/* rss_size must be valid before it is used to fill the LUT */
	vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut,
u16 lut_size) 7982 { 7983 struct i40e_pf *pf = vsi->back; 7984 struct i40e_hw *hw = &pf->hw; 7985 int ret = 0; 7986 7987 if (seed) { 7988 ret = i40e_aq_get_rss_key(hw, vsi->id, 7989 (struct i40e_aqc_get_set_rss_key_data *)seed); 7990 if (ret) { 7991 dev_info(&pf->pdev->dev, 7992 "Cannot get RSS key, err %s aq_err %s\n", 7993 i40e_stat_str(&pf->hw, ret), 7994 i40e_aq_str(&pf->hw, 7995 pf->hw.aq.asq_last_status)); 7996 return ret; 7997 } 7998 } 7999 8000 if (lut) { 8001 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; 8002 8003 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); 8004 if (ret) { 8005 dev_info(&pf->pdev->dev, 8006 "Cannot get RSS lut, err %s aq_err %s\n", 8007 i40e_stat_str(&pf->hw, ret), 8008 i40e_aq_str(&pf->hw, 8009 pf->hw.aq.asq_last_status)); 8010 return ret; 8011 } 8012 } 8013 8014 return ret; 8015 } 8016 8017 /** 8018 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers 8019 * @vsi: Pointer to vsi structure 8020 * @seed: RSS hash seed 8021 * @lut: Lookup table 8022 * @lut_size: Lookup table size 8023 * 8024 * Returns 0 on success, negative on failure 8025 **/ 8026 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, 8027 const u8 *lut, u16 lut_size) 8028 { 8029 struct i40e_pf *pf = vsi->back; 8030 struct i40e_hw *hw = &pf->hw; 8031 u16 vf_id = vsi->vf_id; 8032 u8 i; 8033 8034 /* Fill out hash function seed */ 8035 if (seed) { 8036 u32 *seed_dw = (u32 *)seed; 8037 8038 if (vsi->type == I40E_VSI_MAIN) { 8039 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 8040 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), 8041 seed_dw[i]); 8042 } else if (vsi->type == I40E_VSI_SRIOV) { 8043 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) 8044 i40e_write_rx_ctl(hw, 8045 I40E_VFQF_HKEY1(i, vf_id), 8046 seed_dw[i]); 8047 } else { 8048 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); 8049 } 8050 } 8051 8052 if (lut) { 8053 u32 *lut_dw = (u32 *)lut; 8054 8055 if (vsi->type == I40E_VSI_MAIN) { 8056 if (lut_size != I40E_HLUT_ARRAY_SIZE) 8057 return -EINVAL; 8058 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) 8059 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); 8060 } else if (vsi->type == I40E_VSI_SRIOV) { 8061 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) 8062 return -EINVAL; 8063 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) 8064 i40e_write_rx_ctl(hw, 8065 I40E_VFQF_HLUT1(i, vf_id), 8066 lut_dw[i]); 8067 } else { 8068 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); 8069 } 8070 } 8071 i40e_flush(hw); 8072 8073 return 0; 8074 } 8075 8076 /** 8077 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers 8078 * @vsi: Pointer to VSI structure 8079 * @seed: Buffer to store the keys 8080 * @lut: Buffer to store the lookup table entries 8081 * @lut_size: Size of buffer to store the lookup table entries 8082 * 8083 * Returns 0 on success, negative on failure 8084 */ 8085 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, 8086 u8 *lut, u16 lut_size) 8087 { 8088 struct i40e_pf *pf = vsi->back; 8089 struct i40e_hw *hw = &pf->hw; 8090 u16 i; 8091 8092 if (seed) { 8093 u32 *seed_dw = (u32 *)seed; 8094 8095 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 8096 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 8097 } 8098 if (lut) { 8099 u32 *lut_dw = (u32 *)lut; 8100 8101 if (lut_size != I40E_HLUT_ARRAY_SIZE) 8102 return -EINVAL; 8103 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) 8104 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); 8105 } 8106 8107 return 0; 8108 } 8109 8110 /** 8111 * i40e_config_rss - Configure 
RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
			      u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
8210 */ 8211 if (vsi->rss_hkey_user) 8212 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); 8213 else 8214 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 8215 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); 8216 kfree(lut); 8217 8218 return ret; 8219 } 8220 8221 /** 8222 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 8223 * @pf: board private structure 8224 * @queue_count: the requested queue count for rss. 8225 * 8226 * returns 0 if rss is not enabled, if enabled returns the final rss queue 8227 * count which may be different from the requested queue count. 8228 **/ 8229 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 8230 { 8231 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 8232 int new_rss_size; 8233 8234 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 8235 return 0; 8236 8237 new_rss_size = min_t(int, queue_count, pf->rss_size_max); 8238 8239 if (queue_count != vsi->num_queue_pairs) { 8240 vsi->req_queue_pairs = queue_count; 8241 i40e_prep_for_reset(pf); 8242 8243 pf->alloc_rss_size = new_rss_size; 8244 8245 i40e_reset_and_rebuild(pf, true); 8246 8247 /* Discard the user configured hash keys and lut, if less 8248 * queues are enabled. 8249 */ 8250 if (queue_count < vsi->rss_size) { 8251 i40e_clear_rss_config_user(vsi); 8252 dev_dbg(&pf->pdev->dev, 8253 "discard user configured hash keys and lut\n"); 8254 } 8255 8256 /* Reset vsi->rss_size, as number of enabled queues changed */ 8257 vsi->rss_size = min_t(int, pf->alloc_rss_size, 8258 vsi->num_queue_pairs); 8259 8260 i40e_pf_config_rss(pf); 8261 } 8262 dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n", 8263 pf->alloc_rss_size, pf->rss_size_max); 8264 return pf->alloc_rss_size; 8265 } 8266 8267 /** 8268 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition 8269 * @pf: board private structure 8270 **/ 8271 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) 8272 { 8273 i40e_status status; 8274 bool min_valid, max_valid; 8275 u32 max_bw, min_bw; 8276 8277 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, 8278 &min_valid, &max_valid); 8279 8280 if (!status) { 8281 if (min_valid) 8282 pf->npar_min_bw = min_bw; 8283 if (max_valid) 8284 pf->npar_max_bw = max_bw; 8285 } 8286 8287 return status; 8288 } 8289 8290 /** 8291 * i40e_set_npar_bw_setting - Set BW settings for this PF partition 8292 * @pf: board private structure 8293 **/ 8294 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) 8295 { 8296 struct i40e_aqc_configure_partition_bw_data bw_data; 8297 i40e_status status; 8298 8299 /* Set the valid bit for this PF */ 8300 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); 8301 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; 8302 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; 8303 8304 /* Set the new bandwidths */ 8305 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); 8306 8307 return status; 8308 } 8309 8310 /** 8311 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition 8312 * @pf: board private structure 8313 **/ 8314 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) 8315 { 8316 /* Commit temporary BW setting to permanent NVM image */ 8317 enum i40e_admin_queue_err last_aq_status; 8318 i40e_status ret; 8319 u16 nvm_word; 8320 8321 if (pf->hw.partition_id != 1) { 8322 dev_info(&pf->pdev->dev, 8323 "Commit BW only works on partition 1! 
This is partition %d", 8324 pf->hw.partition_id); 8325 ret = I40E_NOT_SUPPORTED; 8326 goto bw_commit_out; 8327 } 8328 8329 /* Acquire NVM for read access */ 8330 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 8331 last_aq_status = pf->hw.aq.asq_last_status; 8332 if (ret) { 8333 dev_info(&pf->pdev->dev, 8334 "Cannot acquire NVM for read access, err %s aq_err %s\n", 8335 i40e_stat_str(&pf->hw, ret), 8336 i40e_aq_str(&pf->hw, last_aq_status)); 8337 goto bw_commit_out; 8338 } 8339 8340 /* Read word 0x10 of NVM - SW compatibility word 1 */ 8341 ret = i40e_aq_read_nvm(&pf->hw, 8342 I40E_SR_NVM_CONTROL_WORD, 8343 0x10, sizeof(nvm_word), &nvm_word, 8344 false, NULL); 8345 /* Save off last admin queue command status before releasing 8346 * the NVM 8347 */ 8348 last_aq_status = pf->hw.aq.asq_last_status; 8349 i40e_release_nvm(&pf->hw); 8350 if (ret) { 8351 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", 8352 i40e_stat_str(&pf->hw, ret), 8353 i40e_aq_str(&pf->hw, last_aq_status)); 8354 goto bw_commit_out; 8355 } 8356 8357 /* Wait a bit for NVM release to complete */ 8358 msleep(50); 8359 8360 /* Acquire NVM for write access */ 8361 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 8362 last_aq_status = pf->hw.aq.asq_last_status; 8363 if (ret) { 8364 dev_info(&pf->pdev->dev, 8365 "Cannot acquire NVM for write access, err %s aq_err %s\n", 8366 i40e_stat_str(&pf->hw, ret), 8367 i40e_aq_str(&pf->hw, last_aq_status)); 8368 goto bw_commit_out; 8369 } 8370 /* Write it back out unchanged to initiate update NVM, 8371 * which will force a write of the shadow (alt) RAM to 8372 * the NVM - thus storing the bandwidth values permanently. 8373 */ 8374 ret = i40e_aq_update_nvm(&pf->hw, 8375 I40E_SR_NVM_CONTROL_WORD, 8376 0x10, sizeof(nvm_word), 8377 &nvm_word, true, NULL); 8378 /* Save off last admin queue command status before releasing 8379 * the NVM 8380 */ 8381 last_aq_status = pf->hw.aq.asq_last_status; 8382 i40e_release_nvm(&pf->hw); 8383 if (ret) 8384 dev_info(&pf->pdev->dev, 8385 "BW settings NOT SAVED, err %s aq_err %s\n", 8386 i40e_stat_str(&pf->hw, ret), 8387 i40e_aq_str(&pf->hw, last_aq_status)); 8388 bw_commit_out: 8389 8390 return ret; 8391 } 8392 8393 /** 8394 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 8395 * @pf: board private structure to initialize 8396 * 8397 * i40e_sw_init initializes the Adapter private data structure. 8398 * Fields are initialized based on PCI device information and 8399 * OS network device settings (MTU size). 
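 *
 * Returns 0 on success, or -ENOMEM if the queue assignment tracking
 * pile cannot be allocated.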
8400 **/ 8401 static int i40e_sw_init(struct i40e_pf *pf) 8402 { 8403 int err = 0; 8404 int size; 8405 8406 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 8407 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 8408 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 8409 if (I40E_DEBUG_USER & debug) 8410 pf->hw.debug_mask = debug; 8411 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 8412 I40E_DEFAULT_MSG_ENABLE); 8413 } 8414 8415 /* Set default capability flags */ 8416 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 8417 I40E_FLAG_MSI_ENABLED | 8418 I40E_FLAG_MSIX_ENABLED; 8419 8420 /* Set default ITR */ 8421 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; 8422 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; 8423 8424 /* Depending on PF configurations, it is possible that the RSS 8425 * maximum might end up larger than the available queues 8426 */ 8427 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); 8428 pf->alloc_rss_size = 1; 8429 pf->rss_table_size = pf->hw.func_caps.rss_table_size; 8430 pf->rss_size_max = min_t(int, pf->rss_size_max, 8431 pf->hw.func_caps.num_tx_qp); 8432 if (pf->hw.func_caps.rss) { 8433 pf->flags |= I40E_FLAG_RSS_ENABLED; 8434 pf->alloc_rss_size = min_t(int, pf->rss_size_max, 8435 num_online_cpus()); 8436 } 8437 8438 /* MFP mode enabled */ 8439 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { 8440 pf->flags |= I40E_FLAG_MFP_ENABLED; 8441 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 8442 if (i40e_get_npar_bw_setting(pf)) 8443 dev_warn(&pf->pdev->dev, 8444 "Could not get NPAR bw settings\n"); 8445 else 8446 dev_info(&pf->pdev->dev, 8447 "Min BW = %8.8x, Max BW = %8.8x\n", 8448 pf->npar_min_bw, pf->npar_max_bw); 8449 } 8450 8451 /* FW/NVM is not yet fixed in this regard */ 8452 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 8453 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 8454 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8455 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 8456 if (pf->flags & I40E_FLAG_MFP_ENABLED && 8457 pf->hw.num_partitions > 1) 8458 dev_info(&pf->pdev->dev, 8459 "Flow Director Sideband mode Disabled in MFP mode\n"); 8460 else 8461 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8462 pf->fdir_pf_filter_count = 8463 pf->hw.func_caps.fd_filters_guaranteed; 8464 pf->hw.fdir_shared_filter_count = 8465 pf->hw.func_caps.fd_filters_best_effort; 8466 } 8467 8468 if (i40e_is_mac_710(&pf->hw) && 8469 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 8470 (pf->hw.aq.fw_maj_ver < 4))) { 8471 pf->flags |= I40E_FLAG_RESTART_AUTONEG; 8472 /* No DCB support for FW < v4.33 */ 8473 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; 8474 } 8475 8476 /* Disable FW LLDP if FW < v4.3 */ 8477 if (i40e_is_mac_710(&pf->hw) && 8478 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || 8479 (pf->hw.aq.fw_maj_ver < 4))) 8480 pf->flags |= I40E_FLAG_STOP_FW_LLDP; 8481 8482 /* Use the FW Set LLDP MIB API if FW > v4.40 */ 8483 if (i40e_is_mac_710(&pf->hw) && 8484 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || 8485 (pf->hw.aq.fw_maj_ver >= 5))) 8486 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; 8487 8488 if (pf->hw.func_caps.vmdq) { 8489 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 8490 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 8491 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); 8492 } 8493 8494 if (pf->hw.func_caps.iwarp) { 8495 pf->flags |= I40E_FLAG_IWARP_ENABLED; 8496 /* IWARP needs one extra vector for CQP just like MISC.*/ 8497 pf->num_iwarp_msix = (int)num_online_cpus() + 1; 
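		/* e.g. with 8 online CPUs this requests 9 iWARP vectors;
		 * i40e_init_msix() may trim the count later if MSI-X
		 * vectors run short (figures are illustrative)
		 */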
8498 } 8499 8500 #ifdef I40E_FCOE 8501 i40e_init_pf_fcoe(pf); 8502 8503 #endif /* I40E_FCOE */ 8504 #ifdef CONFIG_PCI_IOV 8505 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { 8506 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 8507 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 8508 pf->num_req_vfs = min_t(int, 8509 pf->hw.func_caps.num_vfs, 8510 I40E_MAX_VF_COUNT); 8511 } 8512 #endif /* CONFIG_PCI_IOV */ 8513 if (pf->hw.mac.type == I40E_MAC_X722) { 8514 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | 8515 I40E_FLAG_128_QP_RSS_CAPABLE | 8516 I40E_FLAG_HW_ATR_EVICT_CAPABLE | 8517 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8518 I40E_FLAG_WB_ON_ITR_CAPABLE | 8519 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8520 I40E_FLAG_NO_PCI_LINK_CHECK | 8521 I40E_FLAG_100M_SGMII_CAPABLE | 8522 I40E_FLAG_USE_SET_LLDP_MIB | 8523 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8524 } else if ((pf->hw.aq.api_maj_ver > 1) || 8525 ((pf->hw.aq.api_maj_ver == 1) && 8526 (pf->hw.aq.api_min_ver > 4))) { 8527 /* Supported in FW API version higher than 1.4 */ 8528 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8529 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8530 } else { 8531 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8532 } 8533 8534 pf->eeprom_version = 0xDEAD; 8535 pf->lan_veb = I40E_NO_VEB; 8536 pf->lan_vsi = I40E_NO_VSI; 8537 8538 /* By default FW has this off for performance reasons */ 8539 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; 8540 8541 /* set up queue assignment tracking */ 8542 size = sizeof(struct i40e_lump_tracking) 8543 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 8544 pf->qp_pile = kzalloc(size, GFP_KERNEL); 8545 if (!pf->qp_pile) { 8546 err = -ENOMEM; 8547 goto sw_init_done; 8548 } 8549 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 8550 pf->qp_pile->search_hint = 0; 8551 8552 pf->tx_timeout_recovery_level = 1; 8553 8554 mutex_init(&pf->switch_mutex); 8555 8556 /* If NPAR is enabled nudge the Tx scheduler */ 8557 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) 8558 i40e_set_npar_bw_setting(pf); 8559 8560 sw_init_done: 8561 return err; 8562 } 8563 8564 /** 8565 * i40e_set_ntuple - set the ntuple feature flag and take action 8566 * @pf: board private structure to initialize 8567 * @features: the feature set that the stack is suggesting 8568 * 8569 * returns a bool to indicate if reset needs to happen 8570 **/ 8571 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 8572 { 8573 bool need_reset = false; 8574 8575 /* Check if Flow Director n-tuple support was enabled or disabled. If 8576 * the state changed, we need to reset. 8577 */ 8578 if (features & NETIF_F_NTUPLE) { 8579 /* Enable filters and mark for reset */ 8580 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 8581 need_reset = true; 8582 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8583 } else { 8584 /* turn off filters, mark for reset and clear SW filter list */ 8585 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8586 need_reset = true; 8587 i40e_fdir_filter_exit(pf); 8588 } 8589 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 8590 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 8591 /* reset fd counters */ 8592 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; 8593 pf->fdir_pf_active_filters = 0; 8594 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8595 if (I40E_DEBUG_FD & pf->hw.debug_mask) 8596 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); 8597 /* if ATR was auto disabled it can be re-enabled. 
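	 * (the auto-disable bit is cleared below only while the ATR
	 * flag itself is set)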
*/ 8598 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 8599 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 8600 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 8601 } 8602 return need_reset; 8603 } 8604 8605 /** 8606 * i40e_set_features - set the netdev feature flags 8607 * @netdev: ptr to the netdev being adjusted 8608 * @features: the feature set that the stack is suggesting 8609 **/ 8610 static int i40e_set_features(struct net_device *netdev, 8611 netdev_features_t features) 8612 { 8613 struct i40e_netdev_priv *np = netdev_priv(netdev); 8614 struct i40e_vsi *vsi = np->vsi; 8615 struct i40e_pf *pf = vsi->back; 8616 bool need_reset; 8617 8618 if (features & NETIF_F_HW_VLAN_CTAG_RX) 8619 i40e_vlan_stripping_enable(vsi); 8620 else 8621 i40e_vlan_stripping_disable(vsi); 8622 8623 need_reset = i40e_set_ntuple(pf, features); 8624 8625 if (need_reset) 8626 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 8627 8628 return 0; 8629 } 8630 8631 #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) 8632 /** 8633 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port 8634 * @pf: board private structure 8635 * @port: The UDP port to look up 8636 * 8637 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found 8638 **/ 8639 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) 8640 { 8641 u8 i; 8642 8643 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 8644 if (pf->udp_ports[i].index == port) 8645 return i; 8646 } 8647 8648 return i; 8649 } 8650 8651 #endif 8652 8653 #if IS_ENABLED(CONFIG_VXLAN) 8654 /** 8655 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 8656 * @netdev: This physical port's netdev 8657 * @sa_family: Socket Family that VXLAN is notifying us about 8658 * @port: New UDP port number that VXLAN started listening to 8659 **/ 8660 static void i40e_add_vxlan_port(struct net_device *netdev, 8661 sa_family_t sa_family, __be16 port) 8662 { 8663 struct i40e_netdev_priv *np = netdev_priv(netdev); 8664 struct i40e_vsi *vsi = np->vsi; 8665 struct i40e_pf *pf = vsi->back; 8666 u8 next_idx; 8667 u8 idx; 8668 8669 idx = i40e_get_udp_port_idx(pf, port); 8670 8671 /* Check if port already exists */ 8672 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8673 netdev_info(netdev, "vxlan port %d already offloaded\n", 8674 ntohs(port)); 8675 return; 8676 } 8677 8678 /* Now check if there is space to add the new port */ 8679 next_idx = i40e_get_udp_port_idx(pf, 0); 8680 8681 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8682 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", 8683 ntohs(port)); 8684 return; 8685 } 8686 8687 /* New port: add it and mark its index in the bitmap */ 8688 pf->udp_ports[next_idx].index = port; 8689 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; 8690 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 8691 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8692 } 8693 8694 /** 8695 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away 8696 * @netdev: This physical port's netdev 8697 * @sa_family: Socket Family that VXLAN is notifying us about 8698 * @port: UDP port number that VXLAN stopped listening to 8699 **/ 8700 static void i40e_del_vxlan_port(struct net_device *netdev, 8701 sa_family_t sa_family, __be16 port) 8702 { 8703 struct i40e_netdev_priv *np = netdev_priv(netdev); 8704 struct i40e_vsi *vsi = np->vsi; 8705 struct i40e_pf *pf = vsi->back; 8706 u8 idx; 8707 8708 idx = i40e_get_udp_port_idx(pf, port); 8709 8710 /* Check if port already 
exists */ 8711 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8712 /* if port exists, set it to 0 (mark for deletion) 8713 * and make it pending 8714 */ 8715 pf->udp_ports[idx].index = 0; 8716 pf->pending_udp_bitmap |= BIT_ULL(idx); 8717 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8718 } else { 8719 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", 8720 ntohs(port)); 8721 } 8722 } 8723 #endif 8724 8725 #if IS_ENABLED(CONFIG_GENEVE) 8726 /** 8727 * i40e_add_geneve_port - Get notifications about GENEVE ports that come up 8728 * @netdev: This physical port's netdev 8729 * @sa_family: Socket Family that GENEVE is notifying us about 8730 * @port: New UDP port number that GENEVE started listening to 8731 **/ 8732 static void i40e_add_geneve_port(struct net_device *netdev, 8733 sa_family_t sa_family, __be16 port) 8734 { 8735 struct i40e_netdev_priv *np = netdev_priv(netdev); 8736 struct i40e_vsi *vsi = np->vsi; 8737 struct i40e_pf *pf = vsi->back; 8738 u8 next_idx; 8739 u8 idx; 8740 8741 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) 8742 return; 8743 8744 idx = i40e_get_udp_port_idx(pf, port); 8745 8746 /* Check if port already exists */ 8747 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8748 netdev_info(netdev, "udp port %d already offloaded\n", 8749 ntohs(port)); 8750 return; 8751 } 8752 8753 /* Now check if there is space to add the new port */ 8754 next_idx = i40e_get_udp_port_idx(pf, 0); 8755 8756 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8757 netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", 8758 ntohs(port)); 8759 return; 8760 } 8761 8762 /* New port: add it and mark its index in the bitmap */ 8763 pf->udp_ports[next_idx].index = port; 8764 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; 8765 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 8766 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8767 8768 dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); 8769 } 8770 8771 /** 8772 * i40e_del_geneve_port - Get notifications about GENEVE ports that go away 8773 * @netdev: This physical port's netdev 8774 * @sa_family: Socket Family that GENEVE is notifying us about 8775 * @port: UDP port number that GENEVE stopped listening to 8776 **/ 8777 static void i40e_del_geneve_port(struct net_device *netdev, 8778 sa_family_t sa_family, __be16 port) 8779 { 8780 struct i40e_netdev_priv *np = netdev_priv(netdev); 8781 struct i40e_vsi *vsi = np->vsi; 8782 struct i40e_pf *pf = vsi->back; 8783 u8 idx; 8784 8785 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) 8786 return; 8787 8788 idx = i40e_get_udp_port_idx(pf, port); 8789 8790 /* Check if port already exists */ 8791 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8792 /* if port exists, set it to 0 (mark for deletion) 8793 * and make it pending 8794 */ 8795 pf->udp_ports[idx].index = 0; 8796 pf->pending_udp_bitmap |= BIT_ULL(idx); 8797 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; 8798 8799 dev_info(&pf->pdev->dev, "deleting geneve port %d\n", 8800 ntohs(port)); 8801 } else { 8802 netdev_warn(netdev, "geneve port %d was not found, not deleting\n", 8803 ntohs(port)); 8804 } 8805 } 8806 #endif 8807 8808 static int i40e_get_phys_port_id(struct net_device *netdev, 8809 struct netdev_phys_item_id *ppid) 8810 { 8811 struct i40e_netdev_priv *np = netdev_priv(netdev); 8812 struct i40e_pf *pf = np->vsi->back; 8813 struct i40e_hw *hw = &pf->hw; 8814 8815 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) 8816 return -EOPNOTSUPP; 8817 8818 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); 8819 
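	/* id_len was clamped above, so this copy cannot overrun ppid->id */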
memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); 8820 8821 return 0; 8822 } 8823 8824 /** 8825 * i40e_ndo_fdb_add - add an entry to the hardware database 8826 * @ndm: the input from the stack 8827 * @tb: pointer to array of nladdr (unused) 8828 * @dev: the net device pointer 8829 * @addr: the MAC address entry being added 8830 * @flags: instructions from stack about fdb operation 8831 */ 8832 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 8833 struct net_device *dev, 8834 const unsigned char *addr, u16 vid, 8835 u16 flags) 8836 { 8837 struct i40e_netdev_priv *np = netdev_priv(dev); 8838 struct i40e_pf *pf = np->vsi->back; 8839 int err = 0; 8840 8841 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) 8842 return -EOPNOTSUPP; 8843 8844 if (vid) { 8845 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); 8846 return -EINVAL; 8847 } 8848 8849 /* Hardware does not support aging addresses so if a 8850 * ndm_state is given only allow permanent addresses 8851 */ 8852 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 8853 netdev_info(dev, "FDB only supports static addresses\n"); 8854 return -EINVAL; 8855 } 8856 8857 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 8858 err = dev_uc_add_excl(dev, addr); 8859 else if (is_multicast_ether_addr(addr)) 8860 err = dev_mc_add_excl(dev, addr); 8861 else 8862 err = -EINVAL; 8863 8864 /* Only return duplicate errors if NLM_F_EXCL is set */ 8865 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 8866 err = 0; 8867 8868 return err; 8869 } 8870 8871 /** 8872 * i40e_ndo_bridge_setlink - Set the hardware bridge mode 8873 * @dev: the netdev being configured 8874 * @nlh: RTNL message 8875 * 8876 * Inserts a new hardware bridge if not already created and 8877 * enables the bridging mode requested (VEB or VEPA). If the 8878 * hardware bridge has already been inserted and the request 8879 * is to change the mode then that requires a PF reset to 8880 * allow rebuild of the components with required hardware 8881 * bridge mode enabled. 
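 *
 * Returns 0 on success, -EOPNOTSUPP for a non-PF VSI, -EINVAL for an
 * unsupported bridge mode, or -ENOENT if no HW bridge could be created.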
8882 **/ 8883 static int i40e_ndo_bridge_setlink(struct net_device *dev, 8884 struct nlmsghdr *nlh, 8885 u16 flags) 8886 { 8887 struct i40e_netdev_priv *np = netdev_priv(dev); 8888 struct i40e_vsi *vsi = np->vsi; 8889 struct i40e_pf *pf = vsi->back; 8890 struct i40e_veb *veb = NULL; 8891 struct nlattr *attr, *br_spec; 8892 int i, rem; 8893 8894 /* Only for PF VSI for now */ 8895 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) 8896 return -EOPNOTSUPP; 8897 8898 /* Find the HW bridge for PF VSI */ 8899 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8900 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 8901 veb = pf->veb[i]; 8902 } 8903 8904 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 8905 8906 nla_for_each_nested(attr, br_spec, rem) { 8907 __u16 mode; 8908 8909 if (nla_type(attr) != IFLA_BRIDGE_MODE) 8910 continue; 8911 8912 mode = nla_get_u16(attr); 8913 if ((mode != BRIDGE_MODE_VEPA) && 8914 (mode != BRIDGE_MODE_VEB)) 8915 return -EINVAL; 8916 8917 /* Insert a new HW bridge */ 8918 if (!veb) { 8919 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 8920 vsi->tc_config.enabled_tc); 8921 if (veb) { 8922 veb->bridge_mode = mode; 8923 i40e_config_bridge_mode(veb); 8924 } else { 8925 /* No Bridge HW offload available */ 8926 return -ENOENT; 8927 } 8928 break; 8929 } else if (mode != veb->bridge_mode) { 8930 /* Existing HW bridge but different mode needs reset */ 8931 veb->bridge_mode = mode; 8932 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ 8933 if (mode == BRIDGE_MODE_VEB) 8934 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 8935 else 8936 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 8937 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 8938 break; 8939 } 8940 } 8941 8942 return 0; 8943 } 8944 8945 /** 8946 * i40e_ndo_bridge_getlink - Get the hardware bridge mode 8947 * @skb: skb buff 8948 * @pid: process id 8949 * @seq: RTNL message seq # 8950 * @dev: the netdev being configured 8951 * @filter_mask: unused 8952 * @nlflags: netlink flags passed in 8953 * 8954 * Return the mode in which the hardware bridge is operating in 8955 * i.e VEB or VEPA. 8956 **/ 8957 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8958 struct net_device *dev, 8959 u32 __always_unused filter_mask, 8960 int nlflags) 8961 { 8962 struct i40e_netdev_priv *np = netdev_priv(dev); 8963 struct i40e_vsi *vsi = np->vsi; 8964 struct i40e_pf *pf = vsi->back; 8965 struct i40e_veb *veb = NULL; 8966 int i; 8967 8968 /* Only for PF VSI for now */ 8969 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) 8970 return -EOPNOTSUPP; 8971 8972 /* Find the HW bridge for the PF VSI */ 8973 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8974 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 8975 veb = pf->veb[i]; 8976 } 8977 8978 if (!veb) 8979 return 0; 8980 8981 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, 8982 nlflags, 0, 0, filter_mask, NULL); 8983 } 8984 8985 /* Hardware supports L4 tunnel length of 128B (=2^7) which includes 8986 * inner mac plus all inner ethertypes. 
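 * Frames whose tunnel headers exceed this limit have their checksum
 * and GSO offloads stripped by i40e_features_check() below.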
8987 */ 8988 #define I40E_MAX_TUNNEL_HDR_LEN 128 8989 /** 8990 * i40e_features_check - Validate encapsulated packet conforms to limits 8991 * @skb: skb buff 8992 * @dev: This physical port's netdev 8993 * @features: Offload features that the stack believes apply 8994 **/ 8995 static netdev_features_t i40e_features_check(struct sk_buff *skb, 8996 struct net_device *dev, 8997 netdev_features_t features) 8998 { 8999 if (skb->encapsulation && 9000 ((skb_inner_network_header(skb) - skb_transport_header(skb)) > 9001 I40E_MAX_TUNNEL_HDR_LEN)) 9002 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 9003 9004 return features; 9005 } 9006 9007 static const struct net_device_ops i40e_netdev_ops = { 9008 .ndo_open = i40e_open, 9009 .ndo_stop = i40e_close, 9010 .ndo_start_xmit = i40e_lan_xmit_frame, 9011 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 9012 .ndo_set_rx_mode = i40e_set_rx_mode, 9013 .ndo_validate_addr = eth_validate_addr, 9014 .ndo_set_mac_address = i40e_set_mac, 9015 .ndo_change_mtu = i40e_change_mtu, 9016 .ndo_do_ioctl = i40e_ioctl, 9017 .ndo_tx_timeout = i40e_tx_timeout, 9018 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 9019 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 9020 #ifdef CONFIG_NET_POLL_CONTROLLER 9021 .ndo_poll_controller = i40e_netpoll, 9022 #endif 9023 .ndo_setup_tc = __i40e_setup_tc, 9024 #ifdef I40E_FCOE 9025 .ndo_fcoe_enable = i40e_fcoe_enable, 9026 .ndo_fcoe_disable = i40e_fcoe_disable, 9027 #endif 9028 .ndo_set_features = i40e_set_features, 9029 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 9030 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 9031 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 9032 .ndo_get_vf_config = i40e_ndo_get_vf_config, 9033 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 9034 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 9035 .ndo_set_vf_trust = i40e_ndo_set_vf_trust, 9036 #if IS_ENABLED(CONFIG_VXLAN) 9037 .ndo_add_vxlan_port = i40e_add_vxlan_port, 9038 .ndo_del_vxlan_port = i40e_del_vxlan_port, 9039 #endif 9040 #if IS_ENABLED(CONFIG_GENEVE) 9041 .ndo_add_geneve_port = i40e_add_geneve_port, 9042 .ndo_del_geneve_port = i40e_del_geneve_port, 9043 #endif 9044 .ndo_get_phys_port_id = i40e_get_phys_port_id, 9045 .ndo_fdb_add = i40e_ndo_fdb_add, 9046 .ndo_features_check = i40e_features_check, 9047 .ndo_bridge_getlink = i40e_ndo_bridge_getlink, 9048 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 9049 }; 9050 9051 /** 9052 * i40e_config_netdev - Setup the netdev flags 9053 * @vsi: the VSI being configured 9054 * 9055 * Returns 0 on success, negative value on failure 9056 **/ 9057 static int i40e_config_netdev(struct i40e_vsi *vsi) 9058 { 9059 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 9060 struct i40e_pf *pf = vsi->back; 9061 struct i40e_hw *hw = &pf->hw; 9062 struct i40e_netdev_priv *np; 9063 struct net_device *netdev; 9064 u8 mac_addr[ETH_ALEN]; 9065 int etherdev_size; 9066 9067 etherdev_size = sizeof(struct i40e_netdev_priv); 9068 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 9069 if (!netdev) 9070 return -ENOMEM; 9071 9072 vsi->netdev = netdev; 9073 np = netdev_priv(netdev); 9074 np->vsi = vsi; 9075 9076 netdev->hw_enc_features |= NETIF_F_SG | 9077 NETIF_F_IP_CSUM | 9078 NETIF_F_IPV6_CSUM | 9079 NETIF_F_HIGHDMA | 9080 NETIF_F_SOFT_FEATURES | 9081 NETIF_F_TSO | 9082 NETIF_F_TSO_ECN | 9083 NETIF_F_TSO6 | 9084 NETIF_F_GSO_GRE | 9085 NETIF_F_GSO_GRE_CSUM | 9086 NETIF_F_GSO_IPXIP4 | 9087 NETIF_F_GSO_IPXIP6 | 9088 NETIF_F_GSO_UDP_TUNNEL | 9089 NETIF_F_GSO_UDP_TUNNEL_CSUM | 9090 NETIF_F_GSO_PARTIAL | 9091 
				   NETIF_F_SCTP_CRC |
				   NETIF_F_RXHASH |
				   NETIF_F_RXCSUM |
				   0;

	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	/* record features VLANs can make use of */
	netdev->vlan_features |= netdev->hw_enc_features |
				 NETIF_F_TSO_MANGLEID;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->hw_features |= NETIF_F_NTUPLE;

	netdev->hw_features |= netdev->hw_enc_features |
			       NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
		}
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * This simply sends the delete-element request to the firmware;
 * it has no return value.
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB, 0 for VEPA mode, or -ENOENT
 * if no VEB is associated with the uplink.
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	u8 laa_macaddr[ETH_ALEN];
	bool found_laa_mac_filter = false;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f, *ftmp;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0,
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
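			 * (For example, an enabled_tc map of 0x3 would
			 * enable TC0 and TC1; the exact map comes from
			 * i40e_pf_get_tc_map() above.)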
9293 */ 9294 ret = i40e_vsi_config_tc(vsi, enabled_tc); 9295 if (ret) { 9296 dev_info(&pf->pdev->dev, 9297 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", 9298 enabled_tc, 9299 i40e_stat_str(&pf->hw, ret), 9300 i40e_aq_str(&pf->hw, 9301 pf->hw.aq.asq_last_status)); 9302 ret = -ENOENT; 9303 } 9304 } 9305 break; 9306 9307 case I40E_VSI_FDIR: 9308 ctxt.pf_num = hw->pf_id; 9309 ctxt.vf_num = 0; 9310 ctxt.uplink_seid = vsi->uplink_seid; 9311 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9312 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 9313 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && 9314 (i40e_is_vsi_uplink_mode_veb(vsi))) { 9315 ctxt.info.valid_sections |= 9316 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9317 ctxt.info.switch_id = 9318 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9319 } 9320 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9321 break; 9322 9323 case I40E_VSI_VMDQ2: 9324 ctxt.pf_num = hw->pf_id; 9325 ctxt.vf_num = 0; 9326 ctxt.uplink_seid = vsi->uplink_seid; 9327 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9328 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 9329 9330 /* This VSI is connected to VEB so the switch_id 9331 * should be set to zero by default. 9332 */ 9333 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9334 ctxt.info.valid_sections |= 9335 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9336 ctxt.info.switch_id = 9337 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9338 } 9339 9340 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9341 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9342 break; 9343 9344 case I40E_VSI_SRIOV: 9345 ctxt.pf_num = hw->pf_id; 9346 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 9347 ctxt.uplink_seid = vsi->uplink_seid; 9348 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9349 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 9350 9351 /* This VSI is connected to VEB so the switch_id 9352 * should be set to zero by default. 
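		 * (the ALLOW_LB flag set here additionally permits local
		 * loopback of traffic between VSIs on the same VEB, as the
		 * flag name suggests)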
9353 */ 9354 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9355 ctxt.info.valid_sections |= 9356 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9357 ctxt.info.switch_id = 9358 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9359 } 9360 9361 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { 9362 ctxt.info.valid_sections |= 9363 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); 9364 ctxt.info.queueing_opt_flags |= 9365 (I40E_AQ_VSI_QUE_OPT_TCP_ENA | 9366 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI); 9367 } 9368 9369 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 9370 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 9371 if (pf->vf[vsi->vf_id].spoofchk) { 9372 ctxt.info.valid_sections |= 9373 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 9374 ctxt.info.sec_flags |= 9375 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 9376 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 9377 } 9378 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9379 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9380 break; 9381 9382 #ifdef I40E_FCOE 9383 case I40E_VSI_FCOE: 9384 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 9385 if (ret) { 9386 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 9387 return ret; 9388 } 9389 break; 9390 9391 #endif /* I40E_FCOE */ 9392 case I40E_VSI_IWARP: 9393 /* send down message to iWARP */ 9394 break; 9395 9396 default: 9397 return -ENODEV; 9398 } 9399 9400 if (vsi->type != I40E_VSI_MAIN) { 9401 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 9402 if (ret) { 9403 dev_info(&vsi->back->pdev->dev, 9404 "add vsi failed, err %s aq_err %s\n", 9405 i40e_stat_str(&pf->hw, ret), 9406 i40e_aq_str(&pf->hw, 9407 pf->hw.aq.asq_last_status)); 9408 ret = -ENOENT; 9409 goto err; 9410 } 9411 vsi->info = ctxt.info; 9412 vsi->info.valid_sections = 0; 9413 vsi->seid = ctxt.seid; 9414 vsi->id = ctxt.vsi_number; 9415 } 9416 9417 spin_lock_bh(&vsi->mac_filter_list_lock); 9418 /* If macvlan filters already exist, force them to get loaded */ 9419 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 9420 f->changed = true; 9421 f_count++; 9422 9423 /* Expected to have only one MAC filter entry for LAA in list */ 9424 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 9425 ether_addr_copy(laa_macaddr, f->macaddr); 9426 found_laa_mac_filter = true; 9427 } 9428 } 9429 spin_unlock_bh(&vsi->mac_filter_list_lock); 9430 9431 if (found_laa_mac_filter) { 9432 struct i40e_aqc_remove_macvlan_element_data element; 9433 9434 memset(&element, 0, sizeof(element)); 9435 ether_addr_copy(element.mac_addr, laa_macaddr); 9436 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 9437 ret = i40e_aq_remove_macvlan(hw, vsi->seid, 9438 &element, 1, NULL); 9439 if (ret) { 9440 /* some older FW has a different default */ 9441 element.flags |= 9442 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 9443 i40e_aq_remove_macvlan(hw, vsi->seid, 9444 &element, 1, NULL); 9445 } 9446 9447 i40e_aq_mac_address_write(hw, 9448 I40E_AQC_WRITE_TYPE_LAA_WOL, 9449 laa_macaddr, NULL); 9450 } 9451 9452 if (f_count) { 9453 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 9454 pf->flags |= I40E_FLAG_FILTER_SYNC; 9455 } 9456 9457 /* Update VSI BW information */ 9458 ret = i40e_vsi_get_bw_info(vsi); 9459 if (ret) { 9460 dev_info(&pf->pdev->dev, 9461 "couldn't get vsi bw info, err %s aq_err %s\n", 9462 i40e_stat_str(&pf->hw, ret), 9463 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9464 /* VSI is already added so not tearing that up */ 9465 ret = 0; 9466 } 9467 9468 err: 9469 return ret; 9470 } 9471 9472 /** 9473 * i40e_vsi_release - Delete a VSI and free its resources 9474 * 
@vsi: the VSI being removed 9475 * 9476 * Returns 0 on success or < 0 on error 9477 **/ 9478 int i40e_vsi_release(struct i40e_vsi *vsi) 9479 { 9480 struct i40e_mac_filter *f, *ftmp; 9481 struct i40e_veb *veb = NULL; 9482 struct i40e_pf *pf; 9483 u16 uplink_seid; 9484 int i, n; 9485 9486 pf = vsi->back; 9487 9488 /* release of a VEB-owner or last VSI is not allowed */ 9489 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 9490 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 9491 vsi->seid, vsi->uplink_seid); 9492 return -ENODEV; 9493 } 9494 if (vsi == pf->vsi[pf->lan_vsi] && 9495 !test_bit(__I40E_DOWN, &pf->state)) { 9496 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9497 return -ENODEV; 9498 } 9499 9500 uplink_seid = vsi->uplink_seid; 9501 if (vsi->type != I40E_VSI_SRIOV) { 9502 if (vsi->netdev_registered) { 9503 vsi->netdev_registered = false; 9504 if (vsi->netdev) { 9505 /* results in a call to i40e_close() */ 9506 unregister_netdev(vsi->netdev); 9507 } 9508 } else { 9509 i40e_vsi_close(vsi); 9510 } 9511 i40e_vsi_disable_irq(vsi); 9512 } 9513 9514 spin_lock_bh(&vsi->mac_filter_list_lock); 9515 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 9516 i40e_del_filter(vsi, f->macaddr, f->vlan, 9517 f->is_vf, f->is_netdev); 9518 spin_unlock_bh(&vsi->mac_filter_list_lock); 9519 9520 i40e_sync_vsi_filters(vsi); 9521 9522 i40e_vsi_delete(vsi); 9523 i40e_vsi_free_q_vectors(vsi); 9524 if (vsi->netdev) { 9525 free_netdev(vsi->netdev); 9526 vsi->netdev = NULL; 9527 } 9528 i40e_vsi_clear_rings(vsi); 9529 i40e_vsi_clear(vsi); 9530 9531 /* If this was the last thing on the VEB, except for the 9532 * controlling VSI, remove the VEB, which puts the controlling 9533 * VSI onto the next level down in the switch. 9534 * 9535 * Well, okay, there's one more exception here: don't remove 9536 * the orphan VEBs yet. We'll wait for an explicit remove request 9537 * from up the network stack. 9538 */ 9539 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 9540 if (pf->vsi[i] && 9541 pf->vsi[i]->uplink_seid == uplink_seid && 9542 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 9543 n++; /* count the VSIs */ 9544 } 9545 } 9546 for (i = 0; i < I40E_MAX_VEB; i++) { 9547 if (!pf->veb[i]) 9548 continue; 9549 if (pf->veb[i]->uplink_seid == uplink_seid) 9550 n++; /* count the VEBs */ 9551 if (pf->veb[i]->seid == uplink_seid) 9552 veb = pf->veb[i]; 9553 } 9554 if (n == 0 && veb && veb->uplink_seid != 0) 9555 i40e_veb_release(veb); 9556 9557 return 0; 9558 } 9559 9560 /** 9561 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 9562 * @vsi: ptr to the VSI 9563 * 9564 * This should only be called after i40e_vsi_mem_alloc() which allocates the 9565 * corresponding SW VSI structure and initializes num_queue_pairs for the 9566 * newly allocated VSI. 
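 *
 * A minimal call sequence (illustrative sketch):
 *
 *	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_FDIR);
 *	if (v_idx >= 0)
 *		ret = i40e_vsi_setup_vectors(pf->vsi[v_idx]);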
9567 * 9568 * Returns 0 on success or negative on failure 9569 **/ 9570 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 9571 { 9572 int ret = -ENOENT; 9573 struct i40e_pf *pf = vsi->back; 9574 9575 if (vsi->q_vectors[0]) { 9576 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 9577 vsi->seid); 9578 return -EEXIST; 9579 } 9580 9581 if (vsi->base_vector) { 9582 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 9583 vsi->seid, vsi->base_vector); 9584 return -EEXIST; 9585 } 9586 9587 ret = i40e_vsi_alloc_q_vectors(vsi); 9588 if (ret) { 9589 dev_info(&pf->pdev->dev, 9590 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 9591 vsi->num_q_vectors, vsi->seid, ret); 9592 vsi->num_q_vectors = 0; 9593 goto vector_setup_out; 9594 } 9595 9596 /* In Legacy mode, we do not have to get any other vector since we 9597 * piggyback on the misc/ICR0 for queue interrupts. 9598 */ 9599 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 9600 return ret; 9601 if (vsi->num_q_vectors) 9602 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 9603 vsi->num_q_vectors, vsi->idx); 9604 if (vsi->base_vector < 0) { 9605 dev_info(&pf->pdev->dev, 9606 "failed to get tracking for %d vectors for VSI %d, err=%d\n", 9607 vsi->num_q_vectors, vsi->seid, vsi->base_vector); 9608 i40e_vsi_free_q_vectors(vsi); 9609 ret = -ENOENT; 9610 goto vector_setup_out; 9611 } 9612 9613 vector_setup_out: 9614 return ret; 9615 } 9616 9617 /** 9618 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 9619 * @vsi: pointer to the vsi. 9620 * 9621 * This re-allocates a vsi's queue resources. 9622 * 9623 * Returns pointer to the successfully allocated and configured VSI sw struct 9624 * on success, otherwise returns NULL on failure. 9625 **/ 9626 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 9627 { 9628 struct i40e_pf *pf; 9629 u8 enabled_tc; 9630 int ret; 9631 9632 if (!vsi) 9633 return NULL; 9634 9635 pf = vsi->back; 9636 9637 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 9638 i40e_vsi_clear_rings(vsi); 9639 9640 i40e_vsi_free_arrays(vsi, false); 9641 i40e_set_num_rings_in_vsi(vsi); 9642 ret = i40e_vsi_alloc_arrays(vsi, false); 9643 if (ret) 9644 goto err_vsi; 9645 9646 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); 9647 if (ret < 0) { 9648 dev_info(&pf->pdev->dev, 9649 "failed to get tracking for %d queues for VSI %d err %d\n", 9650 vsi->alloc_queue_pairs, vsi->seid, ret); 9651 goto err_vsi; 9652 } 9653 vsi->base_queue = ret; 9654 9655 /* Update the FW view of the VSI. Force a reset of TC and queue 9656 * layout configurations. 9657 */ 9658 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 9659 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 9660 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 9661 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 9662 9663 /* assign it some queues */ 9664 ret = i40e_alloc_rings(vsi); 9665 if (ret) 9666 goto err_rings; 9667 9668 /* map all of the rings to the q_vectors */ 9669 i40e_vsi_map_rings_to_vectors(vsi); 9670 return vsi; 9671 9672 err_rings: 9673 i40e_vsi_free_q_vectors(vsi); 9674 if (vsi->netdev_registered) { 9675 vsi->netdev_registered = false; 9676 unregister_netdev(vsi->netdev); 9677 free_netdev(vsi->netdev); 9678 vsi->netdev = NULL; 9679 } 9680 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 9681 err_vsi: 9682 i40e_vsi_clear(vsi); 9683 return NULL; 9684 } 9685 9686 /** 9687 * i40e_macaddr_init - explicitly write the mac address filters. 9688 * 9689 * @vsi: pointer to the vsi. 
@macaddr: the MAC address
 *
 * This is needed when the macaddr has been obtained by other
 * means than the default, e.g., from Open Firmware or IDPROM.
 * Returns 0 on success, negative on failure
 **/
static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
{
	int ret;
	struct i40e_aqc_add_macvlan_element_data element;

	ret = i40e_aq_mac_address_write(&vsi->back->hw,
					I40E_AQC_WRITE_TYPE_LAA_WOL,
					macaddr, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Addr change for VSI failed: %d\n", ret);
		return -EADDRNOTAVAIL;
	}

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add filter failed err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
	return ret;
}

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds
 * the VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw
 * struct on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid,
					     vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
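			 *
			 * In VEPA mode the relay reflects all VSI-to-VSI
			 * traffic out to the adjacent switch; in VEB mode it
			 * is switched locally. The chosen mode is pushed to
			 * firmware by i40e_config_bridge_mode() below.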
9794 */ 9795 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 9796 veb->bridge_mode = BRIDGE_MODE_VEPA; 9797 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 9798 } 9799 i40e_config_bridge_mode(veb); 9800 } 9801 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 9802 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 9803 veb = pf->veb[i]; 9804 } 9805 if (!veb) { 9806 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); 9807 return NULL; 9808 } 9809 9810 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 9811 uplink_seid = veb->seid; 9812 } 9813 9814 /* get vsi sw struct */ 9815 v_idx = i40e_vsi_mem_alloc(pf, type); 9816 if (v_idx < 0) 9817 goto err_alloc; 9818 vsi = pf->vsi[v_idx]; 9819 if (!vsi) 9820 goto err_alloc; 9821 vsi->type = type; 9822 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); 9823 9824 if (type == I40E_VSI_MAIN) 9825 pf->lan_vsi = v_idx; 9826 else if (type == I40E_VSI_SRIOV) 9827 vsi->vf_id = param1; 9828 /* assign it some queues */ 9829 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, 9830 vsi->idx); 9831 if (ret < 0) { 9832 dev_info(&pf->pdev->dev, 9833 "failed to get tracking for %d queues for VSI %d err=%d\n", 9834 vsi->alloc_queue_pairs, vsi->seid, ret); 9835 goto err_vsi; 9836 } 9837 vsi->base_queue = ret; 9838 9839 /* get a VSI from the hardware */ 9840 vsi->uplink_seid = uplink_seid; 9841 ret = i40e_add_vsi(vsi); 9842 if (ret) 9843 goto err_vsi; 9844 9845 switch (vsi->type) { 9846 /* setup the netdev if needed */ 9847 case I40E_VSI_MAIN: 9848 /* Apply relevant filters if a platform-specific mac 9849 * address was selected. 9850 */ 9851 if (!!(pf->flags & I40E_FLAG_PF_MAC)) { 9852 ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); 9853 if (ret) { 9854 dev_warn(&pf->pdev->dev, 9855 "could not set up macaddr; err %d\n", 9856 ret); 9857 } 9858 } 9859 case I40E_VSI_VMDQ2: 9860 case I40E_VSI_FCOE: 9861 ret = i40e_config_netdev(vsi); 9862 if (ret) 9863 goto err_netdev; 9864 ret = register_netdev(vsi->netdev); 9865 if (ret) 9866 goto err_netdev; 9867 vsi->netdev_registered = true; 9868 netif_carrier_off(vsi->netdev); 9869 #ifdef CONFIG_I40E_DCB 9870 /* Setup DCB netlink interface */ 9871 i40e_dcbnl_setup(vsi); 9872 #endif /* CONFIG_I40E_DCB */ 9873 /* fall through */ 9874 9875 case I40E_VSI_FDIR: 9876 /* set up vectors and rings if needed */ 9877 ret = i40e_vsi_setup_vectors(vsi); 9878 if (ret) 9879 goto err_msix; 9880 9881 ret = i40e_alloc_rings(vsi); 9882 if (ret) 9883 goto err_rings; 9884 9885 /* map all of the rings to the q_vectors */ 9886 i40e_vsi_map_rings_to_vectors(vsi); 9887 9888 i40e_vsi_reset_stats(vsi); 9889 break; 9890 9891 default: 9892 /* no netdev or rings for the other VSI types */ 9893 break; 9894 } 9895 9896 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && 9897 (vsi->type == I40E_VSI_VMDQ2)) { 9898 ret = i40e_vsi_config_rss(vsi); 9899 } 9900 return vsi; 9901 9902 err_rings: 9903 i40e_vsi_free_q_vectors(vsi); 9904 err_msix: 9905 if (vsi->netdev_registered) { 9906 vsi->netdev_registered = false; 9907 unregister_netdev(vsi->netdev); 9908 free_netdev(vsi->netdev); 9909 vsi->netdev = NULL; 9910 } 9911 err_netdev: 9912 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 9913 err_vsi: 9914 i40e_vsi_clear(vsi); 9915 err_alloc: 9916 return NULL; 9917 } 9918 9919 /** 9920 * i40e_veb_get_bw_info - Query VEB BW information 9921 * @veb: the veb to query 9922 * 9923 * Query the Tx scheduler BW configuration data for given VEB 9924 **/ 9925 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 9926 { 9927 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 9928 struct 
i40e_aqc_query_switching_comp_bw_config_resp bw_data; 9929 struct i40e_pf *pf = veb->pf; 9930 struct i40e_hw *hw = &pf->hw; 9931 u32 tc_bw_max; 9932 int ret = 0; 9933 int i; 9934 9935 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 9936 &bw_data, NULL); 9937 if (ret) { 9938 dev_info(&pf->pdev->dev, 9939 "query veb bw config failed, err %s aq_err %s\n", 9940 i40e_stat_str(&pf->hw, ret), 9941 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 9942 goto out; 9943 } 9944 9945 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 9946 &ets_data, NULL); 9947 if (ret) { 9948 dev_info(&pf->pdev->dev, 9949 "query veb bw ets config failed, err %s aq_err %s\n", 9950 i40e_stat_str(&pf->hw, ret), 9951 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 9952 goto out; 9953 } 9954 9955 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 9956 veb->bw_max_quanta = ets_data.tc_bw_max; 9957 veb->is_abs_credits = bw_data.absolute_credits_enable; 9958 veb->enabled_tc = ets_data.tc_valid_bits; 9959 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 9960 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 9961 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 9962 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; 9963 veb->bw_tc_limit_credits[i] = 9964 le16_to_cpu(bw_data.tc_bw_limits[i]); 9965 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); 9966 } 9967 9968 out: 9969 return ret; 9970 } 9971 9972 /** 9973 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF 9974 * @pf: board private structure 9975 * 9976 * On error: returns error code (negative) 9977 * On success: returns vsi index in PF (positive) 9978 **/ 9979 static int i40e_veb_mem_alloc(struct i40e_pf *pf) 9980 { 9981 int ret = -ENOENT; 9982 struct i40e_veb *veb; 9983 int i; 9984 9985 /* Need to protect the allocation of switch elements at the PF level */ 9986 mutex_lock(&pf->switch_mutex); 9987 9988 /* VEB list may be fragmented if VEB creation/destruction has 9989 * been happening. We can afford to do a quick scan to look 9990 * for any free slots in the list. 9991 * 9992 * find next empty veb slot, looping back around if necessary 9993 */ 9994 i = 0; 9995 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) 9996 i++; 9997 if (i >= I40E_MAX_VEB) { 9998 ret = -ENOMEM; 9999 goto err_alloc_veb; /* out of VEB slots! */ 10000 } 10001 10002 veb = kzalloc(sizeof(*veb), GFP_KERNEL); 10003 if (!veb) { 10004 ret = -ENOMEM; 10005 goto err_alloc_veb; 10006 } 10007 veb->pf = pf; 10008 veb->idx = i; 10009 veb->enabled_tc = 1; 10010 10011 pf->veb[i] = veb; 10012 ret = i; 10013 err_alloc_veb: 10014 mutex_unlock(&pf->switch_mutex); 10015 return ret; 10016 } 10017 10018 /** 10019 * i40e_switch_branch_release - Delete a branch of the switch tree 10020 * @branch: where to start deleting 10021 * 10022 * This uses recursion to find the tips of the branch to be 10023 * removed, deleting until we get back to and can delete this VEB. 10024 **/ 10025 static void i40e_switch_branch_release(struct i40e_veb *branch) 10026 { 10027 struct i40e_pf *pf = branch->pf; 10028 u16 branch_seid = branch->seid; 10029 u16 veb_idx = branch->idx; 10030 int i; 10031 10032 /* release any VEBs on this VEB - RECURSION */ 10033 for (i = 0; i < I40E_MAX_VEB; i++) { 10034 if (!pf->veb[i]) 10035 continue; 10036 if (pf->veb[i]->uplink_seid == branch->seid) 10037 i40e_switch_branch_release(pf->veb[i]); 10038 } 10039 10040 /* Release the VSIs on this VEB, but not the owner VSI. 
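	 * (The owner VSI is the one carrying I40E_VSI_FLAG_VEB_OWNER; the
	 * flag test in the loop below is what skips it.)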
10041 * 10042 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 10043 * the VEB itself, so don't use (*branch) after this loop. 10044 */ 10045 for (i = 0; i < pf->num_alloc_vsi; i++) { 10046 if (!pf->vsi[i]) 10047 continue; 10048 if (pf->vsi[i]->uplink_seid == branch_seid && 10049 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 10050 i40e_vsi_release(pf->vsi[i]); 10051 } 10052 } 10053 10054 /* There's one corner case where the VEB might not have been 10055 * removed, so double check it here and remove it if needed. 10056 * This case happens if the veb was created from the debugfs 10057 * commands and no VSIs were added to it. 10058 */ 10059 if (pf->veb[veb_idx]) 10060 i40e_veb_release(pf->veb[veb_idx]); 10061 } 10062 10063 /** 10064 * i40e_veb_clear - remove veb struct 10065 * @veb: the veb to remove 10066 **/ 10067 static void i40e_veb_clear(struct i40e_veb *veb) 10068 { 10069 if (!veb) 10070 return; 10071 10072 if (veb->pf) { 10073 struct i40e_pf *pf = veb->pf; 10074 10075 mutex_lock(&pf->switch_mutex); 10076 if (pf->veb[veb->idx] == veb) 10077 pf->veb[veb->idx] = NULL; 10078 mutex_unlock(&pf->switch_mutex); 10079 } 10080 10081 kfree(veb); 10082 } 10083 10084 /** 10085 * i40e_veb_release - Delete a VEB and free its resources 10086 * @veb: the VEB being removed 10087 **/ 10088 void i40e_veb_release(struct i40e_veb *veb) 10089 { 10090 struct i40e_vsi *vsi = NULL; 10091 struct i40e_pf *pf; 10092 int i, n = 0; 10093 10094 pf = veb->pf; 10095 10096 /* find the remaining VSI and check for extras */ 10097 for (i = 0; i < pf->num_alloc_vsi; i++) { 10098 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 10099 n++; 10100 vsi = pf->vsi[i]; 10101 } 10102 } 10103 if (n != 1) { 10104 dev_info(&pf->pdev->dev, 10105 "can't remove VEB %d with %d VSIs left\n", 10106 veb->seid, n); 10107 return; 10108 } 10109 10110 /* move the remaining VSI to uplink veb */ 10111 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 10112 if (veb->uplink_seid) { 10113 vsi->uplink_seid = veb->uplink_seid; 10114 if (veb->uplink_seid == pf->mac_seid) 10115 vsi->veb_idx = I40E_NO_VEB; 10116 else 10117 vsi->veb_idx = veb->veb_idx; 10118 } else { 10119 /* floating VEB */ 10120 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 10121 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 10122 } 10123 10124 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 10125 i40e_veb_clear(veb); 10126 } 10127 10128 /** 10129 * i40e_add_veb - create the VEB in the switch 10130 * @veb: the VEB to be instantiated 10131 * @vsi: the controlling VSI 10132 **/ 10133 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 10134 { 10135 struct i40e_pf *pf = veb->pf; 10136 bool is_default = veb->pf->cur_promisc; 10137 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); 10138 int ret; 10139 10140 /* get a VEB from the hardware */ 10141 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 10142 veb->enabled_tc, is_default, 10143 &veb->seid, enable_stats, NULL); 10144 if (ret) { 10145 dev_info(&pf->pdev->dev, 10146 "couldn't add VEB, err %s aq_err %s\n", 10147 i40e_stat_str(&pf->hw, ret), 10148 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10149 return -EPERM; 10150 } 10151 10152 /* get statistics counter */ 10153 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, 10154 &veb->stats_idx, NULL, NULL, NULL); 10155 if (ret) { 10156 dev_info(&pf->pdev->dev, 10157 "couldn't get VEB statistics idx, err %s aq_err %s\n", 10158 i40e_stat_str(&pf->hw, ret), 10159 i40e_aq_str(&pf->hw, 
				     pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB. It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seids are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}

/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
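 *
 * For example (illustrative): immediately after a reset the switch
 * typically reports a single VSI element whose uplink is the MAC SEID;
 * the I40E_SWITCH_ELEMENT_TYPE_VSI case below uses that to seed
 * pf->mac_seid, pf->pf_seid and pf->main_vsi_seid.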
10270 **/ 10271 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 10272 struct i40e_aqc_switch_config_element_resp *ele, 10273 u16 num_reported, bool printconfig) 10274 { 10275 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 10276 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 10277 u8 element_type = ele->element_type; 10278 u16 seid = le16_to_cpu(ele->seid); 10279 10280 if (printconfig) 10281 dev_info(&pf->pdev->dev, 10282 "type=%d seid=%d uplink=%d downlink=%d\n", 10283 element_type, seid, uplink_seid, downlink_seid); 10284 10285 switch (element_type) { 10286 case I40E_SWITCH_ELEMENT_TYPE_MAC: 10287 pf->mac_seid = seid; 10288 break; 10289 case I40E_SWITCH_ELEMENT_TYPE_VEB: 10290 /* Main VEB? */ 10291 if (uplink_seid != pf->mac_seid) 10292 break; 10293 if (pf->lan_veb == I40E_NO_VEB) { 10294 int v; 10295 10296 /* find existing or else empty VEB */ 10297 for (v = 0; v < I40E_MAX_VEB; v++) { 10298 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 10299 pf->lan_veb = v; 10300 break; 10301 } 10302 } 10303 if (pf->lan_veb == I40E_NO_VEB) { 10304 v = i40e_veb_mem_alloc(pf); 10305 if (v < 0) 10306 break; 10307 pf->lan_veb = v; 10308 } 10309 } 10310 10311 pf->veb[pf->lan_veb]->seid = seid; 10312 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 10313 pf->veb[pf->lan_veb]->pf = pf; 10314 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 10315 break; 10316 case I40E_SWITCH_ELEMENT_TYPE_VSI: 10317 if (num_reported != 1) 10318 break; 10319 /* This is immediately after a reset so we can assume this is 10320 * the PF's VSI 10321 */ 10322 pf->mac_seid = uplink_seid; 10323 pf->pf_seid = downlink_seid; 10324 pf->main_vsi_seid = seid; 10325 if (printconfig) 10326 dev_info(&pf->pdev->dev, 10327 "pf_seid=%d main_vsi_seid=%d\n", 10328 pf->pf_seid, pf->main_vsi_seid); 10329 break; 10330 case I40E_SWITCH_ELEMENT_TYPE_PF: 10331 case I40E_SWITCH_ELEMENT_TYPE_VF: 10332 case I40E_SWITCH_ELEMENT_TYPE_EMP: 10333 case I40E_SWITCH_ELEMENT_TYPE_BMC: 10334 case I40E_SWITCH_ELEMENT_TYPE_PE: 10335 case I40E_SWITCH_ELEMENT_TYPE_PA: 10336 /* ignore these for now */ 10337 break; 10338 default: 10339 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 10340 element_type, seid); 10341 break; 10342 } 10343 } 10344 10345 /** 10346 * i40e_fetch_switch_configuration - Get switch config from firmware 10347 * @pf: board private structure 10348 * @printconfig: should we print the contents 10349 * 10350 * Get the current switch configuration from the device and 10351 * extract a few useful SEID values. 
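 *
 * The AQ returns the configuration in pages; next_seid carries the
 * continuation point and reads back as 0 once the last page has been
 * fetched, so the fetch loop below follows this pattern (sketch):
 *
 *	u16 next_seid = 0;
 *	do {
 *		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
 *						I40E_AQ_LARGE_BUF,
 *						&next_seid, NULL);
 *	} while (!ret && next_seid != 0);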
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}

/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
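	 *
	 * Only PF 0 owns this device-global setting, hence the pf_id == 0
	 * checks below; the other PFs leave it untouched.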
10430 */ 10431 10432 if ((pf->hw.pf_id == 0) && 10433 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) 10434 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; 10435 10436 if (pf->hw.pf_id == 0) { 10437 u16 valid_flags; 10438 10439 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; 10440 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 10441 NULL); 10442 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { 10443 dev_info(&pf->pdev->dev, 10444 "couldn't set switch config bits, err %s aq_err %s\n", 10445 i40e_stat_str(&pf->hw, ret), 10446 i40e_aq_str(&pf->hw, 10447 pf->hw.aq.asq_last_status)); 10448 /* not a fatal problem, just keep going */ 10449 } 10450 } 10451 10452 /* first time setup */ 10453 if (pf->lan_vsi == I40E_NO_VSI || reinit) { 10454 struct i40e_vsi *vsi = NULL; 10455 u16 uplink_seid; 10456 10457 /* Set up the PF VSI associated with the PF's main VSI 10458 * that is already in the HW switch 10459 */ 10460 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 10461 uplink_seid = pf->veb[pf->lan_veb]->seid; 10462 else 10463 uplink_seid = pf->mac_seid; 10464 if (pf->lan_vsi == I40E_NO_VSI) 10465 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); 10466 else if (reinit) 10467 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); 10468 if (!vsi) { 10469 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); 10470 i40e_fdir_teardown(pf); 10471 return -EAGAIN; 10472 } 10473 } else { 10474 /* force a reset of TC and queue layout configurations */ 10475 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 10476 10477 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 10478 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 10479 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 10480 } 10481 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); 10482 10483 i40e_fdir_sb_setup(pf); 10484 10485 /* Setup static PF queue filter control settings */ 10486 ret = i40e_setup_pf_filter_control(pf); 10487 if (ret) { 10488 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", 10489 ret); 10490 /* Failure here should not stop continuing other steps */ 10491 } 10492 10493 /* enable RSS in the HW, even for only one queue, as the stack can use 10494 * the hash 10495 */ 10496 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) 10497 i40e_pf_config_rss(pf); 10498 10499 /* fill in link information and enable LSE reporting */ 10500 i40e_update_link_info(&pf->hw); 10501 i40e_link_event(pf); 10502 10503 /* Initialize user-specific link properties */ 10504 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & 10505 I40E_AQ_AN_COMPLETED) ? true : false); 10506 10507 i40e_ptp_init(pf); 10508 10509 return ret; 10510 } 10511 10512 /** 10513 * i40e_determine_queue_usage - Work out queue distribution 10514 * @pf: board private structure 10515 **/ 10516 static void i40e_determine_queue_usage(struct i40e_pf *pf) 10517 { 10518 int queues_left; 10519 10520 pf->num_lan_qps = 0; 10521 #ifdef I40E_FCOE 10522 pf->num_fcoe_qps = 0; 10523 #endif 10524 10525 /* Find the max queues to be put into basic use. We'll always be 10526 * using TC0, whether or not DCB is running, and TC0 will get the 10527 * big RSS set. 
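	 *
	 * In the fully featured case the LAN queue count below works out
	 * to (illustrative restatement of the code):
	 *
	 *	num_lan_qps = min(max(rss_size_max, num_online_cpus()),
	 *			  num_tx_qp)
	 *
	 * with FD, VF and VMDq allocations then carved from the remainder.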
10528 */ 10529 queues_left = pf->hw.func_caps.num_tx_qp; 10530 10531 if ((queues_left == 1) || 10532 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 10533 /* one qp for PF, no queues for anything else */ 10534 queues_left = 0; 10535 pf->alloc_rss_size = pf->num_lan_qps = 1; 10536 10537 /* make sure all the fancies are disabled */ 10538 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10539 I40E_FLAG_IWARP_ENABLED | 10540 #ifdef I40E_FCOE 10541 I40E_FLAG_FCOE_ENABLED | 10542 #endif 10543 I40E_FLAG_FD_SB_ENABLED | 10544 I40E_FLAG_FD_ATR_ENABLED | 10545 I40E_FLAG_DCB_CAPABLE | 10546 I40E_FLAG_SRIOV_ENABLED | 10547 I40E_FLAG_VMDQ_ENABLED); 10548 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 10549 I40E_FLAG_FD_SB_ENABLED | 10550 I40E_FLAG_FD_ATR_ENABLED | 10551 I40E_FLAG_DCB_CAPABLE))) { 10552 /* one qp for PF */ 10553 pf->alloc_rss_size = pf->num_lan_qps = 1; 10554 queues_left -= pf->num_lan_qps; 10555 10556 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10557 I40E_FLAG_IWARP_ENABLED | 10558 #ifdef I40E_FCOE 10559 I40E_FLAG_FCOE_ENABLED | 10560 #endif 10561 I40E_FLAG_FD_SB_ENABLED | 10562 I40E_FLAG_FD_ATR_ENABLED | 10563 I40E_FLAG_DCB_ENABLED | 10564 I40E_FLAG_VMDQ_ENABLED); 10565 } else { 10566 /* Not enough queues for all TCs */ 10567 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 10568 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 10569 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10570 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 10571 } 10572 pf->num_lan_qps = max_t(int, pf->rss_size_max, 10573 num_online_cpus()); 10574 pf->num_lan_qps = min_t(int, pf->num_lan_qps, 10575 pf->hw.func_caps.num_tx_qp); 10576 10577 queues_left -= pf->num_lan_qps; 10578 } 10579 10580 #ifdef I40E_FCOE 10581 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 10582 if (I40E_DEFAULT_FCOE <= queues_left) { 10583 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; 10584 } else if (I40E_MINIMUM_FCOE <= queues_left) { 10585 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; 10586 } else { 10587 pf->num_fcoe_qps = 0; 10588 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 10589 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); 10590 } 10591 10592 queues_left -= pf->num_fcoe_qps; 10593 } 10594 10595 #endif 10596 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10597 if (queues_left > 1) { 10598 queues_left -= 1; /* save 1 queue for FD */ 10599 } else { 10600 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 10601 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n"); 10602 } 10603 } 10604 10605 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 10606 pf->num_vf_qps && pf->num_req_vfs && queues_left) { 10607 pf->num_req_vfs = min_t(int, pf->num_req_vfs, 10608 (queues_left / pf->num_vf_qps)); 10609 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); 10610 } 10611 10612 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 10613 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { 10614 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, 10615 (queues_left / pf->num_vmdq_qps)); 10616 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); 10617 } 10618 10619 pf->queues_left = queues_left; 10620 dev_dbg(&pf->pdev->dev, 10621 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", 10622 pf->hw.func_caps.num_tx_qp, 10623 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), 10624 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, 10625 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, 10626 queues_left); 10627 #ifdef I40E_FCOE 10628 dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); 10629 #endif 10630 } 10631 10632 /** 10633 * i40e_setup_pf_filter_control - Setup PF static filter control 10634 * @pf: PF to be setup 10635 * 10636 * i40e_setup_pf_filter_control sets up a PF's initial filter control 10637 * settings. If PE/FCoE are enabled then it will also set the per PF 10638 * based filter sizes required for them. It also enables Flow director, 10639 * ethertype and macvlan type filter settings for the pf. 10640 * 10641 * Returns 0 on success, negative on failure 10642 **/ 10643 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) 10644 { 10645 struct i40e_filter_control_settings *settings = &pf->filter_settings; 10646 10647 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; 10648 10649 /* Flow Director is enabled */ 10650 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) 10651 settings->enable_fdir = true; 10652 10653 /* Ethtype and MACVLAN filters enabled for PF */ 10654 settings->enable_ethtype = true; 10655 settings->enable_macvlan = true; 10656 10657 if (i40e_set_filter_control(&pf->hw, settings)) 10658 return -ENOENT; 10659 10660 return 0; 10661 } 10662 10663 #define INFO_STRING_LEN 255 10664 #define REMAIN(__x) (INFO_STRING_LEN - (__x)) 10665 static void i40e_print_features(struct i40e_pf *pf) 10666 { 10667 struct i40e_hw *hw = &pf->hw; 10668 char *buf; 10669 int i; 10670 10671 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); 10672 if (!buf) 10673 return; 10674 10675 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); 10676 #ifdef CONFIG_PCI_IOV 10677 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); 10678 #endif 10679 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", 10680 pf->hw.func_caps.num_vsis, 10681 pf->vsi[pf->lan_vsi]->num_queue_pairs); 10682 if (pf->flags & I40E_FLAG_RSS_ENABLED) 10683 i += snprintf(&buf[i], REMAIN(i), " RSS"); 10684 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 10685 i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); 10686 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10687 i += snprintf(&buf[i], REMAIN(i), " FD_SB"); 10688 i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); 10689 } 10690 if (pf->flags & I40E_FLAG_DCB_CAPABLE) 10691 i += snprintf(&buf[i], REMAIN(i), " DCB"); 10692 #if IS_ENABLED(CONFIG_VXLAN) 10693 i += snprintf(&buf[i], REMAIN(i), " VxLAN"); 10694 #endif 10695 #if IS_ENABLED(CONFIG_GENEVE) 10696 i += snprintf(&buf[i], REMAIN(i), " Geneve"); 10697 #endif 10698 if (pf->flags & I40E_FLAG_PTP) 10699 i += 
snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FCOE");
#endif
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 *
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address in Open Firmware on systems that support it,
 * and use IDPROM on SPARC if no OF address is found. On return, the
 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
 * has been selected.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	pf->flags &= ~I40E_FLAG_PF_MAC;
	if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		pf->flags |= I40E_FLAG_PF_MAC;
}

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
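	 *
	 * The ordering below matters: map BAR 0 and init the AQ mutexes,
	 * clear any lingering PXE state with a CORER, PF-reset, and only
	 * then bring up the admin queue and HMC.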
10787 */ 10788 pf = kzalloc(sizeof(*pf), GFP_KERNEL); 10789 if (!pf) { 10790 err = -ENOMEM; 10791 goto err_pf_alloc; 10792 } 10793 pf->next_vsi = 0; 10794 pf->pdev = pdev; 10795 set_bit(__I40E_DOWN, &pf->state); 10796 10797 hw = &pf->hw; 10798 hw->back = pf; 10799 10800 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), 10801 I40E_MAX_CSR_SPACE); 10802 10803 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); 10804 if (!hw->hw_addr) { 10805 err = -EIO; 10806 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", 10807 (unsigned int)pci_resource_start(pdev, 0), 10808 pf->ioremap_len, err); 10809 goto err_ioremap; 10810 } 10811 hw->vendor_id = pdev->vendor; 10812 hw->device_id = pdev->device; 10813 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 10814 hw->subsystem_vendor_id = pdev->subsystem_vendor; 10815 hw->subsystem_device_id = pdev->subsystem_device; 10816 hw->bus.device = PCI_SLOT(pdev->devfn); 10817 hw->bus.func = PCI_FUNC(pdev->devfn); 10818 pf->instance = pfs_found; 10819 10820 /* set up the locks for the AQ, do this only once in probe 10821 * and destroy them only once in remove 10822 */ 10823 mutex_init(&hw->aq.asq_mutex); 10824 mutex_init(&hw->aq.arq_mutex); 10825 10826 if (debug != -1) { 10827 pf->msg_enable = pf->hw.debug_mask; 10828 pf->msg_enable = debug; 10829 } 10830 10831 /* do a special CORER for clearing PXE mode once at init */ 10832 if (hw->revision_id == 0 && 10833 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { 10834 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 10835 i40e_flush(hw); 10836 msleep(200); 10837 pf->corer_count++; 10838 10839 i40e_clear_pxe_mode(hw); 10840 } 10841 10842 /* Reset here to make sure all is clean and to define PF 'n' */ 10843 i40e_clear_hw(hw); 10844 err = i40e_pf_reset(hw); 10845 if (err) { 10846 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 10847 goto err_pf_reset; 10848 } 10849 pf->pfr_count++; 10850 10851 hw->aq.num_arq_entries = I40E_AQ_LEN; 10852 hw->aq.num_asq_entries = I40E_AQ_LEN; 10853 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; 10854 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 10855 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 10856 10857 snprintf(pf->int_name, sizeof(pf->int_name) - 1, 10858 "%s-%s:misc", 10859 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); 10860 10861 err = i40e_init_shared_code(hw); 10862 if (err) { 10863 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", 10864 err); 10865 goto err_pf_reset; 10866 } 10867 10868 /* set up a default setting for link flow control */ 10869 pf->hw.fc.requested_mode = I40E_FC_NONE; 10870 10871 err = i40e_init_adminq(hw); 10872 if (err) { 10873 if (err == I40E_ERR_FIRMWARE_API_VERSION) 10874 dev_info(&pdev->dev, 10875 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); 10876 else 10877 dev_info(&pdev->dev, 10878 "The driver for the device stopped because the device firmware failed to init. 
Try updating your NVM image.\n"); 10879 10880 goto err_pf_reset; 10881 } 10882 10883 /* provide nvm, fw, api versions */ 10884 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", 10885 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, 10886 hw->aq.api_maj_ver, hw->aq.api_min_ver, 10887 i40e_nvm_version_str(hw)); 10888 10889 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 10890 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) 10891 dev_info(&pdev->dev, 10892 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 10893 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 10894 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) 10895 dev_info(&pdev->dev, 10896 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 10897 10898 i40e_verify_eeprom(pf); 10899 10900 /* Rev 0 hardware was never productized */ 10901 if (hw->revision_id < 1) 10902 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 10903 10904 i40e_clear_pxe_mode(hw); 10905 err = i40e_get_capabilities(pf); 10906 if (err) 10907 goto err_adminq_setup; 10908 10909 err = i40e_sw_init(pf); 10910 if (err) { 10911 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 10912 goto err_sw_init; 10913 } 10914 10915 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 10916 hw->func_caps.num_rx_qp, 10917 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 10918 if (err) { 10919 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 10920 goto err_init_lan_hmc; 10921 } 10922 10923 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 10924 if (err) { 10925 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 10926 err = -ENOENT; 10927 goto err_configure_lan_hmc; 10928 } 10929 10930 /* Disable LLDP for NICs that have firmware versions lower than v4.3. 
10931 * Ignore error return codes because if it was already disabled via 10932 * hardware settings this will fail 10933 */ 10934 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { 10935 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); 10936 i40e_aq_stop_lldp(hw, true, NULL); 10937 } 10938 10939 i40e_get_mac_addr(hw, hw->mac.addr); 10940 /* allow a platform config to override the HW addr */ 10941 i40e_get_platform_mac_addr(pdev, pf); 10942 if (!is_valid_ether_addr(hw->mac.addr)) { 10943 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 10944 err = -EIO; 10945 goto err_mac_addr; 10946 } 10947 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 10948 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); 10949 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 10950 if (is_valid_ether_addr(hw->mac.port_addr)) 10951 pf->flags |= I40E_FLAG_PORT_ID_VALID; 10952 #ifdef I40E_FCOE 10953 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); 10954 if (err) 10955 dev_info(&pdev->dev, 10956 "(non-fatal) SAN MAC retrieval failed: %d\n", err); 10957 if (!is_valid_ether_addr(hw->mac.san_addr)) { 10958 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", 10959 hw->mac.san_addr); 10960 ether_addr_copy(hw->mac.san_addr, hw->mac.addr); 10961 } 10962 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); 10963 #endif /* I40E_FCOE */ 10964 10965 pci_set_drvdata(pdev, pf); 10966 pci_save_state(pdev); 10967 #ifdef CONFIG_I40E_DCB 10968 err = i40e_init_pf_dcb(pf); 10969 if (err) { 10970 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); 10971 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10972 /* Continue without DCB enabled */ 10973 } 10974 #endif /* CONFIG_I40E_DCB */ 10975 10976 /* set up periodic task facility */ 10977 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 10978 pf->service_timer_period = HZ; 10979 10980 INIT_WORK(&pf->service_task, i40e_service_task); 10981 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 10982 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 10983 10984 /* NVM bit on means WoL disabled for the port */ 10985 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 10986 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) 10987 pf->wol_en = false; 10988 else 10989 pf->wol_en = true; 10990 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 10991 10992 /* set up the main switch operations */ 10993 i40e_determine_queue_usage(pf); 10994 err = i40e_init_interrupt_scheme(pf); 10995 if (err) 10996 goto err_switch_setup; 10997 10998 /* The number of VSIs reported by the FW is the minimum guaranteed 10999 * to us; HW supports far more and we share the remaining pool with 11000 * the other PFs. We allocate space for more than the guarantee with 11001 * the understanding that we might not get them all later. 11002 */ 11003 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) 11004 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; 11005 else 11006 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; 11007 11008 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
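	 * (The array holds num_alloc_vsi pointers, all NULL until
	 * i40e_vsi_mem_alloc() claims a slot.)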
*/ 11009 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), 11010 GFP_KERNEL); 11011 if (!pf->vsi) { 11012 err = -ENOMEM; 11013 goto err_switch_setup; 11014 } 11015 11016 #ifdef CONFIG_PCI_IOV 11017 /* prep for VF support */ 11018 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 11019 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 11020 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 11021 if (pci_num_vf(pdev)) 11022 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 11023 } 11024 #endif 11025 err = i40e_setup_pf_switch(pf, false); 11026 if (err) { 11027 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 11028 goto err_vsis; 11029 } 11030 11031 /* Make sure flow control is set according to current settings */ 11032 err = i40e_set_fc(hw, &set_fc_aq_fail, true); 11033 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) 11034 dev_dbg(&pf->pdev->dev, 11035 "Set fc with err %s aq_err %s on get_phy_cap\n", 11036 i40e_stat_str(hw, err), 11037 i40e_aq_str(hw, hw->aq.asq_last_status)); 11038 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) 11039 dev_dbg(&pf->pdev->dev, 11040 "Set fc with err %s aq_err %s on set_phy_config\n", 11041 i40e_stat_str(hw, err), 11042 i40e_aq_str(hw, hw->aq.asq_last_status)); 11043 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) 11044 dev_dbg(&pf->pdev->dev, 11045 "Set fc with err %s aq_err %s on get_link_info\n", 11046 i40e_stat_str(hw, err), 11047 i40e_aq_str(hw, hw->aq.asq_last_status)); 11048 11049 /* if FDIR VSI was set up, start it now */ 11050 for (i = 0; i < pf->num_alloc_vsi; i++) { 11051 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 11052 i40e_vsi_open(pf->vsi[i]); 11053 break; 11054 } 11055 } 11056 11057 /* The driver only wants link up/down and module qualification 11058 * reports from firmware. Note the negative logic. 11059 */ 11060 err = i40e_aq_set_phy_int_mask(&pf->hw, 11061 ~(I40E_AQ_EVENT_LINK_UPDOWN | 11062 I40E_AQ_EVENT_MEDIA_NA | 11063 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); 11064 if (err) 11065 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", 11066 i40e_stat_str(&pf->hw, err), 11067 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 11068 11069 /* Reconfigure hardware for allowing smaller MSS in the case 11070 * of TSO, so that we avoid the MDD being fired and causing 11071 * a reset in the case of small MSS+TSO. 11072 */ 11073 val = rd32(hw, I40E_REG_MSS); 11074 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 11075 val &= ~I40E_REG_MSS_MIN_MASK; 11076 val |= I40E_64BYTE_MSS; 11077 wr32(hw, I40E_REG_MSS, val); 11078 } 11079 11080 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { 11081 msleep(75); 11082 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 11083 if (err) 11084 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 11085 i40e_stat_str(&pf->hw, err), 11086 i40e_aq_str(&pf->hw, 11087 pf->hw.aq.asq_last_status)); 11088 } 11089 /* The main driver is (mostly) up and happy. We need to set this state 11090 * before setting up the misc vector or we get a race and the vector 11091 * ends up disabled forever. 11092 */ 11093 clear_bit(__I40E_DOWN, &pf->state); 11094 11095 /* In case of MSIX we are going to setup the misc vector right here 11096 * to handle admin queue events etc. In case of legacy and MSI 11097 * the misc functionality and queue processing is combined in 11098 * the same vector and that gets setup at open. 
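	 * (With MSI or legacy interrupts the single shared vector is
	 * requested later, when the netdev is opened.)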
11099 */ 11100 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 11101 err = i40e_setup_misc_vector(pf); 11102 if (err) { 11103 dev_info(&pdev->dev, 11104 "setup of misc vector failed: %d\n", err); 11105 goto err_vsis; 11106 } 11107 } 11108 11109 #ifdef CONFIG_PCI_IOV 11110 /* prep for VF support */ 11111 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 11112 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 11113 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 11114 /* disable link interrupts for VFs */ 11115 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); 11116 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 11117 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 11118 i40e_flush(hw); 11119 11120 if (pci_num_vf(pdev)) { 11121 dev_info(&pdev->dev, 11122 "Active VFs found, allocating resources.\n"); 11123 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); 11124 if (err) 11125 dev_info(&pdev->dev, 11126 "Error %d allocating resources for existing VFs\n", 11127 err); 11128 } 11129 } 11130 #endif /* CONFIG_PCI_IOV */ 11131 11132 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 11133 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, 11134 pf->num_iwarp_msix, 11135 I40E_IWARP_IRQ_PILE_ID); 11136 if (pf->iwarp_base_vector < 0) { 11137 dev_info(&pdev->dev, 11138 "failed to get tracking for %d vectors for IWARP err=%d\n", 11139 pf->num_iwarp_msix, pf->iwarp_base_vector); 11140 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; 11141 } 11142 } 11143 11144 i40e_dbg_pf_init(pf); 11145 11146 /* tell the firmware that we're starting */ 11147 i40e_send_version(pf); 11148 11149 /* since everything's happy, start the service_task timer */ 11150 mod_timer(&pf->service_timer, 11151 round_jiffies(jiffies + pf->service_timer_period)); 11152 11153 /* add this PF to client device list and launch a client service task */ 11154 err = i40e_lan_add_device(pf); 11155 if (err) 11156 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", 11157 err); 11158 11159 #ifdef I40E_FCOE 11160 /* create FCoE interface */ 11161 i40e_fcoe_vsi_setup(pf); 11162 11163 #endif 11164 #define PCI_SPEED_SIZE 8 11165 #define PCI_WIDTH_SIZE 8 11166 /* Devices on the IOSF bus do not have this information 11167 * and will report PCI Gen 1 x 1 by default so don't bother 11168 * checking them. 
11169 */ 11170 if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { 11171 char speed[PCI_SPEED_SIZE] = "Unknown"; 11172 char width[PCI_WIDTH_SIZE] = "Unknown"; 11173 11174 /* Get the negotiated link width and speed from PCI config 11175 * space 11176 */ 11177 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, 11178 &link_status); 11179 11180 i40e_set_pci_config_data(hw, link_status); 11181 11182 switch (hw->bus.speed) { 11183 case i40e_bus_speed_8000: 11184 strncpy(speed, "8.0", PCI_SPEED_SIZE); break; 11185 case i40e_bus_speed_5000: 11186 strncpy(speed, "5.0", PCI_SPEED_SIZE); break; 11187 case i40e_bus_speed_2500: 11188 strncpy(speed, "2.5", PCI_SPEED_SIZE); break; 11189 default: 11190 break; 11191 } 11192 switch (hw->bus.width) { 11193 case i40e_bus_width_pcie_x8: 11194 strncpy(width, "8", PCI_WIDTH_SIZE); break; 11195 case i40e_bus_width_pcie_x4: 11196 strncpy(width, "4", PCI_WIDTH_SIZE); break; 11197 case i40e_bus_width_pcie_x2: 11198 strncpy(width, "2", PCI_WIDTH_SIZE); break; 11199 case i40e_bus_width_pcie_x1: 11200 strncpy(width, "1", PCI_WIDTH_SIZE); break; 11201 default: 11202 break; 11203 } 11204 11205 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", 11206 speed, width); 11207 11208 if (hw->bus.width < i40e_bus_width_pcie_x8 || 11209 hw->bus.speed < i40e_bus_speed_8000) { 11210 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 11211 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 11212 } 11213 } 11214 11215 /* get the requested speeds from the fw */ 11216 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); 11217 if (err) 11218 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", 11219 i40e_stat_str(&pf->hw, err), 11220 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 11221 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; 11222 11223 /* get the supported phy types from the fw */ 11224 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); 11225 if (err) 11226 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", 11227 i40e_stat_str(&pf->hw, err), 11228 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 11229 pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type); 11230 11231 /* Add a filter to drop all Flow control frames from any VSI from being 11232 * transmitted. By doing so we stop a malicious VF from sending out 11233 * PAUSE or PFC frames and potentially controlling traffic for other 11234 * PF/VF VSIs. 11235 * The FW can still send Flow control frames if enabled. 
	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;

	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	if (pf->service_timer.data)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);
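	/* From this point the service task and timer can no longer run,
	 * so the teardown below cannot race with the periodic
	 * maintenance work.
	 */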
	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* remove attached clients */
	ret_code = i40e_lan_del_device(pf);
	if (ret_code) {
		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
			 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things and be ready
 * for remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
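/* AER recovery, as driven by the PCI core, proceeds through the
 * callbacks below in order: error_detected() quiesces the device and
 * always requests a slot reset, slot_reset() sanity-checks register
 * access after the reset, and resume() rebuilds the device through
 * the normal reset path.
 */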
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
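/* The PM suspend path below programs Wake-on-LAN the same way as
 * i40e_shutdown() above, but always drops to D3hot since the system
 * is expected to return through i40e_resume().
 */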
#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* we will see if single thread per module is enough for now,
	 * it can't be any worse than using the system workqueue which
	 * was already single threaded
	 */
	i40e_wq = create_singlethread_workqueue(i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);
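/* Module lifecycle sketch (illustrative, not part of the driver):
 * insmod/modprobe runs i40e_init_module(), and pci_register_driver()
 * then invokes i40e_probe() once for every device matching
 * i40e_pci_tbl; rmmod runs i40e_exit_module(), which unregisters the
 * driver (calling i40e_remove() per device) before tearing down the
 * workqueue and debugfs state.
 *
 *   # modprobe i40e debug=16    - load with maximum debug verbosity
 *   # rmmod i40e                - unbind all devices and unload
 */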