/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
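/* The two DMA helpers above are used as a pair by the shared code. A
 * minimal, hypothetical sketch of that pairing (the function name and
 * 'ring_size' are illustrative only; the admin queue, for example,
 * requests 4096-byte alignment for its descriptor rings):
 */
static inline int i40e_example_ring_mem(struct i40e_hw *hw,
					struct i40e_dma_mem *mem,
					u64 ring_size)
{
	int err = i40e_allocate_dma_mem_d(hw, mem, ring_size, 4096);

	if (err)
		return err;	/* mem->va/mem->pa are not valid */

	/* ... hand mem->pa to hardware, use mem->va from the CPU ... */

	return i40e_free_dma_mem_d(hw, mem);
}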
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
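/* A minimal, hypothetical sketch of the get/put pairing above (error
 * handling shortened; pf->qp_pile is the driver's queue-pair pile, and
 * vsi->idx serves as the owner id elsewhere in this file):
 */
static inline int i40e_example_reserve_queues(struct i40e_pf *pf,
					      struct i40e_vsi *vsi,
					      u16 needed)
{
	int base = i40e_get_lump(pf, pf->qp_pile, needed, vsi->idx);

	if (base < 0)
		return base;	/* no contiguous run of 'needed' entries */

	vsi->base_queue = base;
	/* ... queues in [base, base + needed) now belong to this VSI ... */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	return 0;
}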
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: buffer into which the statistics are summed
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;

	return stats;
}
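/* A note on the '&tx_ring[1]' indexing above: each queue pair is
 * allocated as a single block of two i40e_ring structs, Tx first and Rx
 * immediately behind it, so the Rx ring is reachable by pointer
 * arithmetic from its Tx sibling. A condensed sketch of that allocation
 * (mirroring what i40e_alloc_rings() does elsewhere in this file;
 * 'ring' is illustrative):
 *
 *	struct i40e_ring *ring = kzalloc(2 * sizeof(*ring), GFP_KERNEL);
 *
 *	vsi->tx_rings[i] = &ring[0];
 *	vsi->rx_rings[i] = &ring[1];
 */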
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
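/* Worked example of the 48-bit roll-over handling above, with
 * illustrative values: if *offset == 0xFFFFFFFFFF00 and the counter has
 * wrapped so that new_data == 0x000000000100, then new_data < *offset
 * and the stat becomes
 *
 *	(0x000000000100 + BIT_ULL(48)) - 0xFFFFFFFFFF00 = 0x200
 *
 * i.e. the 512 events that actually occurred across the wrap.
 */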
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	struct i40e_mac_filter *f, *add_head;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(f, tmp_add_list, hlist) {
		if (vsi->info.pvid && f->vlan != vsi->info.pvid)
			f->vlan = vsi->info.pvid;
		else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
			f->vlan = 0;
		else if (!vlan_filters && f->vlan == 0)
			f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((vsi->info.pvid && f->vlan != vsi->info.pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (vsi->info.pvid)
				new_vlan = vsi->info.pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Put the replacement filter into the add list */
			hash_del(&add_head->hlist);
			hlist_add_head(&add_head->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
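/* The PVID/VLAN precedence applied twice above can be read as a single
 * decision function. A hypothetical distillation (not used by the
 * driver; shown only to make rules (a)-(c) concrete):
 */
static inline s16 i40e_example_pick_vlan(struct i40e_vsi *vsi,
					 int vlan_filters)
{
	if (vsi->info.pvid)		/* (a) PVID wins outright */
		return le16_to_cpu(vsi->info.pvid);
	if (vlan_filters)		/* (b) match untagged only */
		return 0;
	return I40E_VLAN_ANY;		/* (c) match tagged and untagged */
}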
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		/* this one never got added by the FW. Just remove it,
		 * no need to sync anything.
		 */
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
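/* Summary of the filter state machine used above and in i40e_add_filter
 * (illustrative; the authoritative states live in i40e.h):
 *
 *	NEW     - in the hash, not yet sent to firmware
 *	ACTIVE  - accepted by firmware
 *	FAILED  - never sent (e.g. added under overflow promiscuous)
 *	REMOVE  - still in the hash, queued for an AdminQ delete
 *
 * NEW and FAILED entries are simply freed on delete since firmware never
 * saw them; a REMOVE entry that is re-added before the sync task runs
 * flips back to ACTIVE without touching the hardware.
 */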
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * Goes through all the macvlan filters and adds a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
					     const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI, regardless of VLAN
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
	i40e_put_mac_in_vlan(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
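/* Worked example of the qmap encoding above (illustrative values): with
 * offset = 8 and qcount = 4, the power-of-2 loop yields pow = 2, since
 * BIT_ULL(2) == 4 is the first power of two not below qcount, so
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the TC starts at queue 8 and spans 2^2 = 4 queues.
 */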
1785 */ 1786 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) 1787 { 1788 struct i40e_netdev_priv *np = netdev_priv(netdev); 1789 struct i40e_vsi *vsi = np->vsi; 1790 1791 if (i40e_is_vsi_in_vlan(vsi)) 1792 i40e_del_mac_all_vlan(vsi, addr); 1793 else 1794 i40e_del_filter(vsi, addr, I40E_VLAN_ANY); 1795 1796 return 0; 1797 } 1798 1799 /** 1800 * i40e_set_rx_mode - NDO callback to set the netdev filters 1801 * @netdev: network interface device structure 1802 **/ 1803 #ifdef I40E_FCOE 1804 void i40e_set_rx_mode(struct net_device *netdev) 1805 #else 1806 static void i40e_set_rx_mode(struct net_device *netdev) 1807 #endif 1808 { 1809 struct i40e_netdev_priv *np = netdev_priv(netdev); 1810 struct i40e_vsi *vsi = np->vsi; 1811 1812 spin_lock_bh(&vsi->mac_filter_hash_lock); 1813 1814 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync); 1815 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync); 1816 1817 spin_unlock_bh(&vsi->mac_filter_hash_lock); 1818 1819 /* check for other flag changes */ 1820 if (vsi->current_netdev_flags != vsi->netdev->flags) { 1821 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 1822 vsi->back->flags |= I40E_FLAG_FILTER_SYNC; 1823 } 1824 1825 /* schedule our worker thread which will take care of 1826 * applying the new filter changes 1827 */ 1828 i40e_service_event_schedule(vsi->back); 1829 } 1830 1831 /** 1832 * i40e_undo_filter_entries - Undo the changes made to MAC filter entries 1833 * @vsi: Pointer to VSI struct 1834 * @from: Pointer to list which contains MAC filter entries - changes to 1835 * those entries need to be undone. 1836 * 1837 * MAC filter entries from the list were slated to be sent to firmware, either 1838 * for addition or deletion. 1839 **/ 1840 static void i40e_undo_filter_entries(struct i40e_vsi *vsi, 1841 struct hlist_head *from) 1842 { 1843 struct i40e_mac_filter *f; 1844 struct hlist_node *h; 1845 1846 hlist_for_each_entry_safe(f, h, from, hlist) { 1847 u64 key = i40e_addr_to_hkey(f->macaddr); 1848 1849 /* Move the element back into the MAC filter list */ 1850 hlist_del(&f->hlist); 1851 hash_add(vsi->mac_filter_hash, &f->hlist, key); 1852 } 1853 } 1854 1855 /** 1856 * i40e_update_filter_state - Update filter state based on return data 1857 * from firmware 1858 * @count: Number of filters in add_list 1859 * @add_list: return data from fw 1860 * @add_head: pointer to first filter in current batch 1861 * 1862 * MAC filter entries from the list were slated to be added to the device. 1863 * Returns the number of successfully added filters; 0 means every filter in the batch failed. 1864 **/ 1865 static int 1866 i40e_update_filter_state(int count, 1867 struct i40e_aqc_add_macvlan_element_data *add_list, 1868 struct i40e_mac_filter *add_head) 1869 { 1870 int retval = 0; 1871 int i; 1872 1873 for (i = 0; i < count; i++) { 1874 /* Always check status of each filter. We don't need to check 1875 * the firmware return status because we pre-set the filter 1876 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter 1877 * request to the adminq. Thus, if it no longer matches then 1878 * we know the filter is active.
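 * (Editor's reading of this trick: on a successful add the firmware
 * overwrites match_method with the match type it actually used, so any
 * value other than the pre-set I40E_AQC_MM_ERR_NO_RES means the filter
 * was accepted.)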
1879 */ 1880 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) { 1881 add_head->state = I40E_FILTER_FAILED; 1882 } else { 1883 add_head->state = I40E_FILTER_ACTIVE; 1884 retval++; 1885 } 1886 1887 add_head = hlist_entry(add_head->hlist.next, 1888 typeof(struct i40e_mac_filter), 1889 hlist); 1890 } 1891 1892 return retval; 1893 } 1894 1895 /** 1896 * i40e_aqc_del_filters - Request firmware to delete a set of filters 1897 * @vsi: ptr to the VSI 1898 * @vsi_name: name to display in messages 1899 * @list: the list of filters to send to firmware 1900 * @num_del: the number of filters to delete 1901 * @retval: Set to -EIO on failure to delete 1902 * 1903 * Send a request to firmware via AdminQ to delete a set of filters. Uses 1904 * *retval instead of a return value so that success does not force *retval to 1905 * be set to 0. This ensures that a sequence of calls to this function 1906 * preserves the previous value of *retval on successful delete. 1907 */ 1908 static 1909 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, 1910 struct i40e_aqc_remove_macvlan_element_data *list, 1911 int num_del, int *retval) 1912 { 1913 struct i40e_hw *hw = &vsi->back->hw; 1914 i40e_status aq_ret; 1915 int aq_err; 1916 1917 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL); 1918 aq_err = hw->aq.asq_last_status; 1919 1920 /* Explicitly ignore and do not report when firmware returns ENOENT */ 1921 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) { 1922 *retval = -EIO; 1923 dev_info(&vsi->back->pdev->dev, 1924 "ignoring delete macvlan error on %s, err %s, aq_err %s\n", 1925 vsi_name, i40e_stat_str(hw, aq_ret), 1926 i40e_aq_str(hw, aq_err)); 1927 } 1928 } 1929 1930 /** 1931 * i40e_aqc_add_filters - Request firmware to add a set of filters 1932 * @vsi: ptr to the VSI 1933 * @vsi_name: name to display in messages 1934 * @list: the list of filters to send to firmware 1935 * @add_head: Position in the add hlist 1936 * @num_add: the number of filters to add 1937 * @promisc_changed: set to true on exit if promiscuous mode was forced on 1938 * 1939 * Send a request to firmware via AdminQ to add a chunk of filters. Will set 1940 * promisc_changed to true if the firmware has run out of space for more 1941 * filters. 1942 */ 1943 static 1944 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, 1945 struct i40e_aqc_add_macvlan_element_data *list, 1946 struct i40e_mac_filter *add_head, 1947 int num_add, bool *promisc_changed) 1948 { 1949 struct i40e_hw *hw = &vsi->back->hw; 1950 int aq_err, fcnt; 1951 1952 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL); 1953 aq_err = hw->aq.asq_last_status; 1954 fcnt = i40e_update_filter_state(num_add, list, add_head); 1955 1956 if (fcnt != num_add) { 1957 *promisc_changed = true; 1958 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); 1959 dev_warn(&vsi->back->pdev->dev, 1960 "Error %s adding RX filters on %s, promiscuous mode forced on\n", 1961 i40e_aq_str(hw, aq_err), 1962 vsi_name); 1963 } 1964 } 1965 1966 /** 1967 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags 1968 * @vsi: pointer to the VSI * @vsi_name: the VSI name, for use in log messages 1969 * @f: filter data 1970 * 1971 * This function sets or clears the promiscuous broadcast flags for VLAN 1972 * filters in order to properly receive broadcast frames. Assumes that only 1973 * broadcast filters are passed.
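 * (Editor's note: a broadcast address is not programmed as an ordinary
 * perfect-match MAC filter; the add and delete paths in
 * i40e_sync_vsi_filters() route broadcast entries here instead.)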
1974 **/ 1975 static 1976 void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, 1977 struct i40e_mac_filter *f) 1978 { 1979 bool enable = f->state == I40E_FILTER_NEW; 1980 struct i40e_hw *hw = &vsi->back->hw; 1981 i40e_status aq_ret; 1982 1983 if (f->vlan == I40E_VLAN_ANY) { 1984 aq_ret = i40e_aq_set_vsi_broadcast(hw, 1985 vsi->seid, 1986 enable, 1987 NULL); 1988 } else { 1989 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw, 1990 vsi->seid, 1991 enable, 1992 f->vlan, 1993 NULL); 1994 } 1995 1996 if (aq_ret) { 1997 dev_warn(&vsi->back->pdev->dev, 1998 "Error %s setting broadcast promiscuous mode on %s\n", 1999 i40e_aq_str(hw, hw->aq.asq_last_status), 2000 vsi_name); 2001 f->state = I40E_FILTER_FAILED; 2002 } else if (enable) { 2003 f->state = I40E_FILTER_ACTIVE; 2004 } 2005 } 2006 2007 /** 2008 * i40e_sync_vsi_filters - Update the VSI filter list to the HW 2009 * @vsi: ptr to the VSI 2010 * 2011 * Push any outstanding VSI filter changes through the AdminQ. 2012 * 2013 * Returns 0 or error value 2014 **/ 2015 int i40e_sync_vsi_filters(struct i40e_vsi *vsi) 2016 { 2017 struct hlist_head tmp_add_list, tmp_del_list; 2018 struct i40e_mac_filter *f, *add_head = NULL; 2019 struct i40e_hw *hw = &vsi->back->hw; 2020 unsigned int failed_filters = 0; 2021 unsigned int vlan_filters = 0; 2022 bool promisc_changed = false; 2023 char vsi_name[16] = "PF"; 2024 int filter_list_len = 0; 2025 i40e_status aq_ret = 0; 2026 u32 changed_flags = 0; 2027 struct hlist_node *h; 2028 struct i40e_pf *pf; 2029 int num_add = 0; 2030 int num_del = 0; 2031 int retval = 0; 2032 u16 cmd_flags; 2033 int list_size; 2034 int bkt; 2035 2036 /* empty array typed pointers, kcalloc later */ 2037 struct i40e_aqc_add_macvlan_element_data *add_list; 2038 struct i40e_aqc_remove_macvlan_element_data *del_list; 2039 2040 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) 2041 usleep_range(1000, 2000); 2042 pf = vsi->back; 2043 2044 if (vsi->netdev) { 2045 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 2046 vsi->current_netdev_flags = vsi->netdev->flags; 2047 } 2048 2049 INIT_HLIST_HEAD(&tmp_add_list); 2050 INIT_HLIST_HEAD(&tmp_del_list); 2051 2052 if (vsi->type == I40E_VSI_SRIOV) 2053 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); 2054 else if (vsi->type != I40E_VSI_MAIN) 2055 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); 2056 2057 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 2058 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 2059 2060 spin_lock_bh(&vsi->mac_filter_hash_lock); 2061 /* Create a list of filters to delete. */ 2062 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2063 if (f->state == I40E_FILTER_REMOVE) { 2064 /* Move the element into temporary del_list */ 2065 hash_del(&f->hlist); 2066 hlist_add_head(&f->hlist, &tmp_del_list); 2067 2068 /* Avoid counting removed filters */ 2069 continue; 2070 } 2071 if (f->state == I40E_FILTER_NEW) { 2072 hash_del(&f->hlist); 2073 hlist_add_head(&f->hlist, &tmp_add_list); 2074 } 2075 2076 /* Count the number of active (current and new) VLAN 2077 * filters we have now. Does not count filters which 2078 * are marked for deletion. 
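 * (Editor's note: the running vlan_filters count is handed to
 * i40e_correct_mac_vlan_filters() just below; our assumption is that
 * the helper uses it to reconcile untagged and any-VLAN filters with
 * the VSI's current VLAN mode.)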
2079 */ 2080 if (f->vlan > 0) 2081 vlan_filters++; 2082 } 2083 2084 retval = i40e_correct_mac_vlan_filters(vsi, 2085 &tmp_add_list, 2086 &tmp_del_list, 2087 vlan_filters); 2088 if (retval) 2089 goto err_no_memory_locked; 2090 2091 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2092 } 2093 2094 /* Now process 'del_list' outside the lock */ 2095 if (!hlist_empty(&tmp_del_list)) { 2096 filter_list_len = hw->aq.asq_buf_size / 2097 sizeof(struct i40e_aqc_remove_macvlan_element_data); 2098 list_size = filter_list_len * 2099 sizeof(struct i40e_aqc_remove_macvlan_element_data); 2100 del_list = kzalloc(list_size, GFP_ATOMIC); 2101 if (!del_list) 2102 goto err_no_memory; 2103 2104 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) { 2105 cmd_flags = 0; 2106 2107 /* handle broadcast filters by updating the broadcast 2108 * promiscuous flag instead of deleting a MAC filter. 2109 */ 2110 if (is_broadcast_ether_addr(f->macaddr)) { 2111 i40e_aqc_broadcast_filter(vsi, vsi_name, f); 2112 2113 hlist_del(&f->hlist); 2114 kfree(f); 2115 continue; 2116 } 2117 2118 /* add to delete list */ 2119 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); 2120 if (f->vlan == I40E_VLAN_ANY) { 2121 del_list[num_del].vlan_tag = 0; 2122 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 2123 } else { 2124 del_list[num_del].vlan_tag = 2125 cpu_to_le16((u16)(f->vlan)); 2126 } 2127 2128 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 2129 del_list[num_del].flags = cmd_flags; 2130 num_del++; 2131 2132 /* flush a full buffer */ 2133 if (num_del == filter_list_len) { 2134 i40e_aqc_del_filters(vsi, vsi_name, del_list, 2135 num_del, &retval); 2136 memset(del_list, 0, list_size); 2137 num_del = 0; 2138 } 2139 /* Release memory for MAC filter entries which were 2140 * synced up with HW. 2141 */ 2142 hlist_del(&f->hlist); 2143 kfree(f); 2144 } 2145 2146 if (num_del) { 2147 i40e_aqc_del_filters(vsi, vsi_name, del_list, 2148 num_del, &retval); 2149 } 2150 2151 kfree(del_list); 2152 del_list = NULL; 2153 } 2154 2155 if (!hlist_empty(&tmp_add_list)) { 2156 /* Do all the adds now. */ 2157 filter_list_len = hw->aq.asq_buf_size / 2158 sizeof(struct i40e_aqc_add_macvlan_element_data); 2159 list_size = filter_list_len * 2160 sizeof(struct i40e_aqc_add_macvlan_element_data); 2161 add_list = kzalloc(list_size, GFP_ATOMIC); 2162 if (!add_list) 2163 goto err_no_memory; 2164 2165 num_add = 0; 2166 hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) { 2167 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2168 &vsi->state)) { 2169 f->state = I40E_FILTER_FAILED; 2170 continue; 2171 } 2172 2173 /* handle broadcast filters by updating the broadcast 2174 * promiscuous flag instead of adding a MAC filter. 
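 * (Editor's note: unlike the delete path above, which frees the
 * broadcast entry outright, this path re-hashes the entry into the
 * VSI's filter table below so its state keeps tracking the broadcast
 * promiscuous setting.)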
2175 */ 2176 if (is_broadcast_ether_addr(f->macaddr)) { 2177 u64 key = i40e_addr_to_hkey(f->macaddr); 2178 i40e_aqc_broadcast_filter(vsi, vsi_name, f); 2179 2180 hlist_del(&f->hlist); 2181 hash_add(vsi->mac_filter_hash, &f->hlist, key); 2182 continue; 2183 } 2184 2185 /* add to add array */ 2186 if (num_add == 0) 2187 add_head = f; 2188 cmd_flags = 0; 2189 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); 2190 if (f->vlan == I40E_VLAN_ANY) { 2191 add_list[num_add].vlan_tag = 0; 2192 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; 2193 } else { 2194 add_list[num_add].vlan_tag = 2195 cpu_to_le16((u16)(f->vlan)); 2196 } 2197 add_list[num_add].queue_number = 0; 2198 /* set invalid match method for later detection */ 2199 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES; 2200 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 2201 add_list[num_add].flags = cpu_to_le16(cmd_flags); 2202 num_add++; 2203 2204 /* flush a full buffer */ 2205 if (num_add == filter_list_len) { 2206 i40e_aqc_add_filters(vsi, vsi_name, add_list, 2207 add_head, num_add, 2208 &promisc_changed); 2209 memset(add_list, 0, list_size); 2210 num_add = 0; 2211 } 2212 } 2213 if (num_add) { 2214 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head, 2215 num_add, &promisc_changed); 2216 } 2217 /* Now move all of the filters from the temp add list back to 2218 * the VSI's list. 2219 */ 2220 spin_lock_bh(&vsi->mac_filter_hash_lock); 2221 hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) { 2222 u64 key = i40e_addr_to_hkey(f->macaddr); 2223 2224 hlist_del(&f->hlist); 2225 hash_add(vsi->mac_filter_hash, &f->hlist, key); 2226 } 2227 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2228 kfree(add_list); 2229 add_list = NULL; 2230 } 2231 2232 /* Determine the number of active and failed filters. */ 2233 spin_lock_bh(&vsi->mac_filter_hash_lock); 2234 vsi->active_filters = 0; 2235 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { 2236 if (f->state == I40E_FILTER_ACTIVE) 2237 vsi->active_filters++; 2238 else if (f->state == I40E_FILTER_FAILED) 2239 failed_filters++; 2240 } 2241 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2242 2243 /* If promiscuous mode has changed, we need to calculate a new 2244 * threshold for when we are safe to exit 2245 */ 2246 if (promisc_changed) 2247 vsi->promisc_threshold = (vsi->active_filters * 3) / 4; 2248 2249 /* Check if we are able to exit overflow promiscuous mode. We can 2250 * safely exit if we didn't just enter, we no longer have any failed 2251 * filters, and we have reduced filters below the threshold value. 
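 * For example, if overflow promiscuous was forced on while 100 filters
 * were active, promisc_threshold was set to 75 above, so the VSI must
 * fall below 75 active filters (with none failed) before the flag is
 * cleared.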
2252 */ 2253 if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) && 2254 !promisc_changed && !failed_filters && 2255 (vsi->active_filters < vsi->promisc_threshold)) { 2256 dev_info(&pf->pdev->dev, 2257 "filter logjam cleared on %s, leaving overflow promiscuous mode\n", 2258 vsi_name); 2259 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); 2260 promisc_changed = true; 2261 vsi->promisc_threshold = 0; 2262 } 2263 2264 /* if the VF is not trusted do not do promisc */ 2265 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { 2266 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); 2267 goto out; 2268 } 2269 2270 /* check for changes in promiscuous modes */ 2271 if (changed_flags & IFF_ALLMULTI) { 2272 bool cur_multipromisc; 2273 2274 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 2275 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 2276 vsi->seid, 2277 cur_multipromisc, 2278 NULL); 2279 if (aq_ret) { 2280 retval = i40e_aq_rc_to_posix(aq_ret, 2281 hw->aq.asq_last_status); 2282 dev_info(&pf->pdev->dev, 2283 "set multi promisc failed on %s, err %s aq_err %s\n", 2284 vsi_name, 2285 i40e_stat_str(hw, aq_ret), 2286 i40e_aq_str(hw, hw->aq.asq_last_status)); 2287 } 2288 } 2289 if ((changed_flags & IFF_PROMISC) || 2290 (promisc_changed && 2291 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) { 2292 bool cur_promisc; 2293 2294 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 2295 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2296 &vsi->state)); 2297 if ((vsi->type == I40E_VSI_MAIN) && 2298 (pf->lan_veb != I40E_NO_VEB) && 2299 !(pf->flags & I40E_FLAG_MFP_ENABLED)) { 2300 /* set defport ON for Main VSI instead of true promisc 2301 * this way we will get all unicast/multicast and VLAN 2302 * promisc behavior but will not get VF or VMDq traffic 2303 * replicated on the Main VSI. 
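 * (Editor's note: a "default port" VSI receives any switch traffic
 * that no other filter claims, which is what approximates promiscuous
 * behavior here without the replication described above.)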
2304 */ 2305 if (pf->cur_promisc != cur_promisc) { 2306 pf->cur_promisc = cur_promisc; 2307 if (cur_promisc) 2308 aq_ret = 2309 i40e_aq_set_default_vsi(hw, 2310 vsi->seid, 2311 NULL); 2312 else 2313 aq_ret = 2314 i40e_aq_clear_default_vsi(hw, 2315 vsi->seid, 2316 NULL); 2317 if (aq_ret) { 2318 retval = i40e_aq_rc_to_posix(aq_ret, 2319 hw->aq.asq_last_status); 2320 dev_info(&pf->pdev->dev, 2321 "Set default VSI failed on %s, err %s, aq_err %s\n", 2322 vsi_name, 2323 i40e_stat_str(hw, aq_ret), 2324 i40e_aq_str(hw, 2325 hw->aq.asq_last_status)); 2326 } 2327 } 2328 } else { 2329 aq_ret = i40e_aq_set_vsi_unicast_promiscuous( 2330 hw, 2331 vsi->seid, 2332 cur_promisc, NULL, 2333 true); 2334 if (aq_ret) { 2335 retval = 2336 i40e_aq_rc_to_posix(aq_ret, 2337 hw->aq.asq_last_status); 2338 dev_info(&pf->pdev->dev, 2339 "set unicast promisc failed on %s, err %s, aq_err %s\n", 2340 vsi_name, 2341 i40e_stat_str(hw, aq_ret), 2342 i40e_aq_str(hw, 2343 hw->aq.asq_last_status)); 2344 } 2345 aq_ret = i40e_aq_set_vsi_multicast_promiscuous( 2346 hw, 2347 vsi->seid, 2348 cur_promisc, NULL); 2349 if (aq_ret) { 2350 retval = 2351 i40e_aq_rc_to_posix(aq_ret, 2352 hw->aq.asq_last_status); 2353 dev_info(&pf->pdev->dev, 2354 "set multicast promisc failed on %s, err %s, aq_err %s\n", 2355 vsi_name, 2356 i40e_stat_str(hw, aq_ret), 2357 i40e_aq_str(hw, 2358 hw->aq.asq_last_status)); 2359 } 2360 } 2361 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 2362 vsi->seid, 2363 cur_promisc, NULL); 2364 if (aq_ret) { 2365 retval = i40e_aq_rc_to_posix(aq_ret, 2366 pf->hw.aq.asq_last_status); 2367 dev_info(&pf->pdev->dev, 2368 "set brdcast promisc failed, err %s, aq_err %s\n", 2369 i40e_stat_str(hw, aq_ret), 2370 i40e_aq_str(hw, 2371 hw->aq.asq_last_status)); 2372 } 2373 } 2374 out: 2375 /* if something went wrong then set the changed flag so we try again */ 2376 if (retval) 2377 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 2378 2379 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 2380 return retval; 2381 2382 err_no_memory: 2383 /* Restore elements on the temporary add and delete lists */ 2384 spin_lock_bh(&vsi->mac_filter_hash_lock); 2385 err_no_memory_locked: 2386 i40e_undo_filter_entries(vsi, &tmp_del_list); 2387 i40e_undo_filter_entries(vsi, &tmp_add_list); 2388 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2389 2390 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 2391 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 2392 return -ENOMEM; 2393 } 2394 2395 /** 2396 * i40e_sync_filters_subtask - Sync the VSI filter list with HW 2397 * @pf: board private structure 2398 **/ 2399 static void i40e_sync_filters_subtask(struct i40e_pf *pf) 2400 { 2401 int v; 2402 2403 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) 2404 return; 2405 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 2406 2407 for (v = 0; v < pf->num_alloc_vsi; v++) { 2408 if (pf->vsi[v] && 2409 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { 2410 int ret = i40e_sync_vsi_filters(pf->vsi[v]); 2411 2412 if (ret) { 2413 /* come back and try again later */ 2414 pf->flags |= I40E_FLAG_FILTER_SYNC; 2415 break; 2416 } 2417 } 2418 } 2419 } 2420 2421 /** 2422 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit 2423 * @netdev: network interface device structure 2424 * @new_mtu: new value for maximum frame size 2425 * 2426 * Returns 0 on success, negative on failure 2427 **/ 2428 static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 2429 { 2430 struct i40e_netdev_priv *np = netdev_priv(netdev); 2431 struct i40e_vsi *vsi = np->vsi; 2432 2433 netdev_info(netdev, "changing 
MTU from %d to %d\n", 2434 netdev->mtu, new_mtu); 2435 netdev->mtu = new_mtu; 2436 if (netif_running(netdev)) 2437 i40e_vsi_reinit_locked(vsi); 2438 i40e_notify_client_of_l2_param_changes(vsi); 2439 return 0; 2440 } 2441 2442 /** 2443 * i40e_ioctl - Access the hwtstamp interface 2444 * @netdev: network interface device structure 2445 * @ifr: interface request data 2446 * @cmd: ioctl command 2447 **/ 2448 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2449 { 2450 struct i40e_netdev_priv *np = netdev_priv(netdev); 2451 struct i40e_pf *pf = np->vsi->back; 2452 2453 switch (cmd) { 2454 case SIOCGHWTSTAMP: 2455 return i40e_ptp_get_ts_config(pf, ifr); 2456 case SIOCSHWTSTAMP: 2457 return i40e_ptp_set_ts_config(pf, ifr); 2458 default: 2459 return -EOPNOTSUPP; 2460 } 2461 } 2462 2463 /** 2464 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 2465 * @vsi: the vsi being adjusted 2466 **/ 2467 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 2468 { 2469 struct i40e_vsi_context ctxt; 2470 i40e_status ret; 2471 2472 if ((vsi->info.valid_sections & 2473 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2474 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 2475 return; /* already enabled */ 2476 2477 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2478 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2479 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 2480 2481 ctxt.seid = vsi->seid; 2482 ctxt.info = vsi->info; 2483 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2484 if (ret) { 2485 dev_info(&vsi->back->pdev->dev, 2486 "update vlan stripping failed, err %s aq_err %s\n", 2487 i40e_stat_str(&vsi->back->hw, ret), 2488 i40e_aq_str(&vsi->back->hw, 2489 vsi->back->hw.aq.asq_last_status)); 2490 } 2491 } 2492 2493 /** 2494 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI 2495 * @vsi: the vsi being adjusted 2496 **/ 2497 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) 2498 { 2499 struct i40e_vsi_context ctxt; 2500 i40e_status ret; 2501 2502 if ((vsi->info.valid_sections & 2503 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2504 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == 2505 I40E_AQ_VSI_PVLAN_EMOD_MASK)) 2506 return; /* already disabled */ 2507 2508 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2509 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2510 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2511 2512 ctxt.seid = vsi->seid; 2513 ctxt.info = vsi->info; 2514 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2515 if (ret) { 2516 dev_info(&vsi->back->pdev->dev, 2517 "update vlan stripping failed, err %s aq_err %s\n", 2518 i40e_stat_str(&vsi->back->hw, ret), 2519 i40e_aq_str(&vsi->back->hw, 2520 vsi->back->hw.aq.asq_last_status)); 2521 } 2522 } 2523 2524 /** 2525 * i40e_vlan_rx_register - Setup or shutdown vlan offload 2526 * @netdev: network interface to be adjusted 2527 * @features: netdev features to test if VLAN offload is enabled or not 2528 **/ 2529 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) 2530 { 2531 struct i40e_netdev_priv *np = netdev_priv(netdev); 2532 struct i40e_vsi *vsi = np->vsi; 2533 2534 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2535 i40e_vlan_stripping_enable(vsi); 2536 else 2537 i40e_vlan_stripping_disable(vsi); 2538 } 2539 2540 /** 2541 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address 2542 * @vsi: the vsi being configured 2543 * @vid: vlan id to be added (0 = untagged only , -1 
= any) 2544 * 2545 * This is a helper function for adding a new MAC/VLAN filter with the 2546 * specified VLAN for each existing MAC address already in the hash table. 2547 * This function does *not* perform any accounting to update filters based on 2548 * VLAN mode. 2549 * 2550 * NOTE: this function expects to be called while under the 2551 * mac_filter_hash_lock 2552 **/ 2553 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) 2554 { 2555 struct i40e_mac_filter *f, *add_f; 2556 struct hlist_node *h; 2557 int bkt; 2558 2559 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2560 if (f->state == I40E_FILTER_REMOVE) 2561 continue; 2562 add_f = i40e_add_filter(vsi, f->macaddr, vid); 2563 if (!add_f) { 2564 dev_info(&vsi->back->pdev->dev, 2565 "Could not add vlan filter %d for %pM\n", 2566 vid, f->macaddr); 2567 return -ENOMEM; 2568 } 2569 } 2570 2571 return 0; 2572 } 2573 2574 /** 2575 * i40e_vsi_add_vlan - Add VSI membership for given VLAN 2576 * @vsi: the VSI being configured 2577 * @vid: VLAN id to be added (0 = untagged only, -1 = any) 2578 **/ 2579 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) 2580 { 2581 int err; 2582 2583 /* Lock once here, because all the functions invoked below iterate the list */ 2584 spin_lock_bh(&vsi->mac_filter_hash_lock); 2585 err = i40e_add_vlan_all_mac(vsi, vid); 2586 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2587 if (err) 2588 return err; 2589 2590 /* schedule our worker thread which will take care of 2591 * applying the new filter changes 2592 */ 2593 i40e_service_event_schedule(vsi->back); 2594 return 0; 2595 } 2596 2597 /** 2598 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN 2599 * @vsi: the vsi being configured 2600 * @vid: vlan id to be removed (0 = untagged only, -1 = any) 2601 * 2602 * This function should be used to remove all VLAN filters which match the 2603 * given VID. It does not schedule the service event and does not take the 2604 * mac_filter_hash_lock so it may be combined with other operations under 2605 * a single invocation of the mac_filter_hash_lock.
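 *
 * A minimal usage sketch (editor's illustration, assuming a valid @vsi
 * and that VID 100 is the VLAN being replaced):
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	i40e_rm_vlan_all_mac(vsi, 100);
 *	i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 *	i40e_service_event_schedule(vsi->back);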
2606 * 2607 * NOTE: this function expects to be called while under the 2608 * mac_filter_hash_lock 2609 */ 2610 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid) 2611 { 2612 struct i40e_mac_filter *f; 2613 struct hlist_node *h; 2614 int bkt; 2615 2616 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 2617 if (f->vlan == vid) 2618 __i40e_del_filter(vsi, f); 2619 } 2620 } 2621 2622 /** 2623 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN 2624 * @vsi: the VSI being configured 2625 * @vid: VLAN id to be removed (0 = untagged only , -1 = any) 2626 **/ 2627 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 2628 { 2629 spin_lock_bh(&vsi->mac_filter_hash_lock); 2630 i40e_rm_vlan_all_mac(vsi, vid); 2631 spin_unlock_bh(&vsi->mac_filter_hash_lock); 2632 2633 /* schedule our worker thread which will take care of 2634 * applying the new filter changes 2635 */ 2636 i40e_service_event_schedule(vsi->back); 2637 } 2638 2639 /** 2640 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2641 * @netdev: network interface to be adjusted 2642 * @vid: vlan id to be added 2643 * 2644 * net_device_ops implementation for adding vlan ids 2645 **/ 2646 #ifdef I40E_FCOE 2647 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2648 __always_unused __be16 proto, u16 vid) 2649 #else 2650 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2651 __always_unused __be16 proto, u16 vid) 2652 #endif 2653 { 2654 struct i40e_netdev_priv *np = netdev_priv(netdev); 2655 struct i40e_vsi *vsi = np->vsi; 2656 int ret = 0; 2657 2658 if (vid >= VLAN_N_VID) 2659 return -EINVAL; 2660 2661 /* If the network stack called us with vid = 0 then 2662 * it is asking to receive priority tagged packets with 2663 * vlan id 0. Our HW receives them by default when configured 2664 * to receive untagged packets so there is no need to add an 2665 * extra filter for vlan 0 tagged packets. 2666 */ 2667 if (vid) 2668 ret = i40e_vsi_add_vlan(vsi, vid); 2669 2670 if (!ret) 2671 set_bit(vid, vsi->active_vlans); 2672 2673 return ret; 2674 } 2675 2676 /** 2677 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 2678 * @netdev: network interface to be adjusted 2679 * @vid: vlan id to be removed 2680 * 2681 * net_device_ops implementation for removing vlan ids 2682 **/ 2683 #ifdef I40E_FCOE 2684 int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2685 __always_unused __be16 proto, u16 vid) 2686 #else 2687 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2688 __always_unused __be16 proto, u16 vid) 2689 #endif 2690 { 2691 struct i40e_netdev_priv *np = netdev_priv(netdev); 2692 struct i40e_vsi *vsi = np->vsi; 2693 2694 /* return code is ignored as there is nothing a user 2695 * can do about failure to remove and a log message was 2696 * already printed from the other function 2697 */ 2698 i40e_vsi_kill_vlan(vsi, vid); 2699 2700 clear_bit(vid, vsi->active_vlans); 2701 2702 return 0; 2703 } 2704 2705 /** 2706 * i40e_macaddr_init - explicitly write the mac address filters 2707 * 2708 * @vsi: pointer to the vsi 2709 * @macaddr: the MAC address 2710 * 2711 * This is needed when the macaddr has been obtained by other 2712 * means than the default, e.g., from Open Firmware or IDPROM. 
2713 * Returns 0 on success, negative on failure 2714 **/ 2715 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) 2716 { 2717 int ret; 2718 struct i40e_aqc_add_macvlan_element_data element; 2719 2720 ret = i40e_aq_mac_address_write(&vsi->back->hw, 2721 I40E_AQC_WRITE_TYPE_LAA_WOL, 2722 macaddr, NULL); 2723 if (ret) { 2724 dev_info(&vsi->back->pdev->dev, 2725 "Addr change for VSI failed: %d\n", ret); 2726 return -EADDRNOTAVAIL; 2727 } 2728 2729 memset(&element, 0, sizeof(element)); 2730 ether_addr_copy(element.mac_addr, macaddr); 2731 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); 2732 ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); 2733 if (ret) { 2734 dev_info(&vsi->back->pdev->dev, 2735 "add filter failed err %s aq_err %s\n", 2736 i40e_stat_str(&vsi->back->hw, ret), 2737 i40e_aq_str(&vsi->back->hw, 2738 vsi->back->hw.aq.asq_last_status)); 2739 } 2740 return ret; 2741 } 2742 2743 /** 2744 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 2745 * @vsi: the vsi being brought back up 2746 **/ 2747 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2748 { 2749 u16 vid; 2750 2751 if (!vsi->netdev) 2752 return; 2753 2754 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2755 2756 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 2757 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2758 vid); 2759 } 2760 2761 /** 2762 * i40e_vsi_add_pvid - Add pvid for the VSI 2763 * @vsi: the vsi being adjusted 2764 * @vid: the vlan id to set as a PVID 2765 **/ 2766 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2767 { 2768 struct i40e_vsi_context ctxt; 2769 i40e_status ret; 2770 2771 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2772 vsi->info.pvid = cpu_to_le16(vid); 2773 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2774 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2775 I40E_AQ_VSI_PVLAN_EMOD_STR; 2776 2777 ctxt.seid = vsi->seid; 2778 ctxt.info = vsi->info; 2779 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2780 if (ret) { 2781 dev_info(&vsi->back->pdev->dev, 2782 "add pvid failed, err %s aq_err %s\n", 2783 i40e_stat_str(&vsi->back->hw, ret), 2784 i40e_aq_str(&vsi->back->hw, 2785 vsi->back->hw.aq.asq_last_status)); 2786 return -ENOENT; 2787 } 2788 2789 return 0; 2790 } 2791 2792 /** 2793 * i40e_vsi_remove_pvid - Remove the pvid from the VSI 2794 * @vsi: the vsi being adjusted 2795 * 2796 * Just use i40e_vlan_stripping_disable() to put the VSI back to normal 2797 **/ 2798 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2799 { 2800 i40e_vlan_stripping_disable(vsi); 2801 2802 vsi->info.pvid = 0; 2803 } 2804 2805 /** 2806 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources 2807 * @vsi: ptr to the VSI 2808 * 2809 * If this function returns with an error, then it's possible one or 2810 * more of the rings is populated (while the rest are not). It is the 2811 * caller's duty to clean those orphaned rings.
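 * (In practice the matching free routine below,
 * i40e_vsi_free_tx_resources(), checks each ring and its descriptor
 * pointer before freeing, so it is safe to call on a partially built
 * set.)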
2812 * 2813 * Return 0 on success, negative on failure 2814 **/ 2815 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) 2816 { 2817 int i, err = 0; 2818 2819 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2820 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); 2821 2822 return err; 2823 } 2824 2825 /** 2826 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues 2827 * @vsi: ptr to the VSI 2828 * 2829 * Free VSI's transmit software resources 2830 **/ 2831 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) 2832 { 2833 int i; 2834 2835 if (!vsi->tx_rings) 2836 return; 2837 2838 for (i = 0; i < vsi->num_queue_pairs; i++) 2839 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2840 i40e_free_tx_resources(vsi->tx_rings[i]); 2841 } 2842 2843 /** 2844 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources 2845 * @vsi: ptr to the VSI 2846 * 2847 * If this function returns with an error, then it's possible one or 2848 * more of the rings is populated (while the rest are not). It is the 2849 * caller's duty to clean those orphaned rings. 2850 * 2851 * Return 0 on success, negative on failure 2852 **/ 2853 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) 2854 { 2855 int i, err = 0; 2856 2857 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2858 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); 2859 #ifdef I40E_FCOE 2860 i40e_fcoe_setup_ddp_resources(vsi); 2861 #endif 2862 return err; 2863 } 2864 2865 /** 2866 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues 2867 * @vsi: ptr to the VSI 2868 * 2869 * Free all receive software resources 2870 **/ 2871 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) 2872 { 2873 int i; 2874 2875 if (!vsi->rx_rings) 2876 return; 2877 2878 for (i = 0; i < vsi->num_queue_pairs; i++) 2879 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2880 i40e_free_rx_resources(vsi->rx_rings[i]); 2881 #ifdef I40E_FCOE 2882 i40e_fcoe_free_ddp_resources(vsi); 2883 #endif 2884 } 2885 2886 /** 2887 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring 2888 * @ring: The Tx ring to configure 2889 * 2890 * This enables/disables XPS for a given Tx descriptor ring 2891 * based on the TCs enabled for the VSI that ring belongs to. 2892 **/ 2893 static void i40e_config_xps_tx_ring(struct i40e_ring *ring) 2894 { 2895 struct i40e_vsi *vsi = ring->vsi; 2896 cpumask_var_t mask; 2897 2898 if (!ring->q_vector || !ring->netdev) 2899 return; 2900 2901 /* In single TC mode, enable XPS */ 2902 if (vsi->tc_config.numtc <= 1) { 2903 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) 2904 netif_set_xps_queue(ring->netdev, 2905 &ring->q_vector->affinity_mask, 2906 ring->queue_index); 2907 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { 2908 /* Disable XPS to allow selection based on TC */ 2909 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); 2910 netif_set_xps_queue(ring->netdev, mask, ring->queue_index); 2911 free_cpumask_var(mask); 2912 } 2913 2918 } 2919 2920 /** 2921 * i40e_configure_tx_ring - Configure a transmit ring context 2922 * @ring: The Tx ring to configure 2923 * 2924 * Configure the Tx descriptor ring in the HMC context.
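 * (Editor's note: when head writeback is enabled below, the writeback
 * address is placed immediately past the descriptor ring, at
 * ring->dma + ring->count * sizeof(struct i40e_tx_desc).)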
2925 **/ 2926 static int i40e_configure_tx_ring(struct i40e_ring *ring) 2927 { 2928 struct i40e_vsi *vsi = ring->vsi; 2929 u16 pf_q = vsi->base_queue + ring->queue_index; 2930 struct i40e_hw *hw = &vsi->back->hw; 2931 struct i40e_hmc_obj_txq tx_ctx; 2932 i40e_status err = 0; 2933 u32 qtx_ctl = 0; 2934 2935 /* some ATR related tx ring init */ 2936 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { 2937 ring->atr_sample_rate = vsi->back->atr_sample_rate; 2938 ring->atr_count = 0; 2939 } else { 2940 ring->atr_sample_rate = 0; 2941 } 2942 2943 /* configure XPS */ 2944 i40e_config_xps_tx_ring(ring); 2945 2946 /* clear the context structure first */ 2947 memset(&tx_ctx, 0, sizeof(tx_ctx)); 2948 2949 tx_ctx.new_context = 1; 2950 tx_ctx.base = (ring->dma / 128); 2951 tx_ctx.qlen = ring->count; 2952 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 2953 I40E_FLAG_FD_ATR_ENABLED)); 2954 #ifdef I40E_FCOE 2955 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2956 #endif 2957 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); 2958 /* FDIR VSI tx ring can still use RS bit and writebacks */ 2959 if (vsi->type != I40E_VSI_FDIR) 2960 tx_ctx.head_wb_ena = 1; 2961 tx_ctx.head_wb_addr = ring->dma + 2962 (ring->count * sizeof(struct i40e_tx_desc)); 2963 2964 /* As part of VSI creation/update, FW allocates certain 2965 * Tx arbitration queue sets for each TC enabled for 2966 * the VSI. The FW returns the handles to these queue 2967 * sets as part of the response buffer to Add VSI, 2968 * Update VSI, etc. AQ commands. It is expected that 2969 * these queue set handles be associated with the Tx 2970 * queues by the driver as part of the TX queue context 2971 * initialization. This has to be done regardless of 2972 * DCB as by default everything is mapped to TC0. 2973 */ 2974 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); 2975 tx_ctx.rdylist_act = 0; 2976 2977 /* clear the context in the HMC */ 2978 err = i40e_clear_lan_tx_queue_context(hw, pf_q); 2979 if (err) { 2980 dev_info(&vsi->back->pdev->dev, 2981 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", 2982 ring->queue_index, pf_q, err); 2983 return -ENOMEM; 2984 } 2985 2986 /* set the context in the HMC */ 2987 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); 2988 if (err) { 2989 dev_info(&vsi->back->pdev->dev, 2990 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n", 2991 ring->queue_index, pf_q, err); 2992 return -ENOMEM; 2993 } 2994 2995 /* Now associate this queue with this PCI function */ 2996 if (vsi->type == I40E_VSI_VMDQ2) { 2997 qtx_ctl = I40E_QTX_CTL_VM_QUEUE; 2998 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & 2999 I40E_QTX_CTL_VFVM_INDX_MASK; 3000 } else { 3001 qtx_ctl = I40E_QTX_CTL_PF_QUEUE; 3002 } 3003 3004 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & 3005 I40E_QTX_CTL_PF_INDX_MASK); 3006 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); 3007 i40e_flush(hw); 3008 3009 /* cache tail off for easier writes later */ 3010 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); 3011 3012 return 0; 3013 } 3014 3015 /** 3016 * i40e_configure_rx_ring - Configure a receive ring context 3017 * @ring: The Rx ring to configure 3018 * 3019 * Configure the Rx descriptor ring in the HMC context. 
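 * (Editor's note: rx_buf_len is programmed via the dbuff field in
 * units of BIT(I40E_RXQ_CTX_DBUFF_SHIFT) bytes; the ALIGN() in
 * i40e_vsi_configure_rx() guarantees the value is such a multiple.)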
3020 **/ 3021 static int i40e_configure_rx_ring(struct i40e_ring *ring) 3022 { 3023 struct i40e_vsi *vsi = ring->vsi; 3024 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 3025 u16 pf_q = vsi->base_queue + ring->queue_index; 3026 struct i40e_hw *hw = &vsi->back->hw; 3027 struct i40e_hmc_obj_rxq rx_ctx; 3028 i40e_status err = 0; 3029 3030 ring->state = 0; 3031 3032 /* clear the context structure first */ 3033 memset(&rx_ctx, 0, sizeof(rx_ctx)); 3034 3035 ring->rx_buf_len = vsi->rx_buf_len; 3036 3037 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 3038 3039 rx_ctx.base = (ring->dma / 128); 3040 rx_ctx.qlen = ring->count; 3041 3042 /* use 32 byte descriptors */ 3043 rx_ctx.dsize = 1; 3044 3045 /* descriptor type is always zero 3046 * rx_ctx.dtype = 0; 3047 */ 3048 rx_ctx.hsplit_0 = 0; 3049 3050 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); 3051 if (hw->revision_id == 0) 3052 rx_ctx.lrxqthresh = 0; 3053 else 3054 rx_ctx.lrxqthresh = 2; 3055 rx_ctx.crcstrip = 1; 3056 rx_ctx.l2tsel = 1; 3057 /* this controls whether VLAN is stripped from inner headers */ 3058 rx_ctx.showiv = 0; 3059 #ifdef I40E_FCOE 3060 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 3061 #endif 3062 /* set the prefena field to 1 because the manual says to */ 3063 rx_ctx.prefena = 1; 3064 3065 /* clear the context in the HMC */ 3066 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 3067 if (err) { 3068 dev_info(&vsi->back->pdev->dev, 3069 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 3070 ring->queue_index, pf_q, err); 3071 return -ENOMEM; 3072 } 3073 3074 /* set the context in the HMC */ 3075 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 3076 if (err) { 3077 dev_info(&vsi->back->pdev->dev, 3078 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 3079 ring->queue_index, pf_q, err); 3080 return -ENOMEM; 3081 } 3082 3083 /* cache tail for quicker writes, and clear the reg before use */ 3084 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 3085 writel(0, ring->tail); 3086 3087 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 3088 3089 return 0; 3090 } 3091 3092 /** 3093 * i40e_vsi_configure_tx - Configure the VSI for Tx 3094 * @vsi: VSI structure describing this set of rings and resources 3095 * 3096 * Configure the Tx VSI for operation. 3097 **/ 3098 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 3099 { 3100 int err = 0; 3101 u16 i; 3102 3103 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 3104 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 3105 3106 return err; 3107 } 3108 3109 /** 3110 * i40e_vsi_configure_rx - Configure the VSI for Rx 3111 * @vsi: the VSI being configured 3112 * 3113 * Configure the Rx VSI for operation. 
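 * For example, a 9000 byte MTU yields max_frame = 9000 + ETH_HLEN +
 * ETH_FCS_LEN + VLAN_HLEN = 9022 bytes, while the default 1500 byte
 * MTU leaves max_frame at I40E_RXBUFFER_2048.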
3114 **/ 3115 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 3116 { 3117 int err = 0; 3118 u16 i; 3119 3120 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 3121 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 3122 + ETH_FCS_LEN + VLAN_HLEN; 3123 else 3124 vsi->max_frame = I40E_RXBUFFER_2048; 3125 3126 vsi->rx_buf_len = I40E_RXBUFFER_2048; 3127 3128 #ifdef I40E_FCOE 3129 /* setup rx buffer for FCoE */ 3130 if ((vsi->type == I40E_VSI_FCOE) && 3131 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 3132 vsi->rx_buf_len = I40E_RXBUFFER_3072; 3133 vsi->max_frame = I40E_RXBUFFER_3072; 3134 } 3135 3136 #endif /* I40E_FCOE */ 3137 /* round up for the chip's needs */ 3138 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 3139 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); 3140 3141 /* set up individual rings */ 3142 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 3143 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 3144 3145 return err; 3146 } 3147 3148 /** 3149 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 3150 * @vsi: ptr to the VSI 3151 **/ 3152 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 3153 { 3154 struct i40e_ring *tx_ring, *rx_ring; 3155 u16 qoffset, qcount; 3156 int i, n; 3157 3158 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 3159 /* Reset the TC information */ 3160 for (i = 0; i < vsi->num_queue_pairs; i++) { 3161 rx_ring = vsi->rx_rings[i]; 3162 tx_ring = vsi->tx_rings[i]; 3163 rx_ring->dcb_tc = 0; 3164 tx_ring->dcb_tc = 0; 3165 } 3166 } 3167 3168 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 3169 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) 3170 continue; 3171 3172 qoffset = vsi->tc_config.tc_info[n].qoffset; 3173 qcount = vsi->tc_config.tc_info[n].qcount; 3174 for (i = qoffset; i < (qoffset + qcount); i++) { 3175 rx_ring = vsi->rx_rings[i]; 3176 tx_ring = vsi->tx_rings[i]; 3177 rx_ring->dcb_tc = n; 3178 tx_ring->dcb_tc = n; 3179 } 3180 } 3181 } 3182 3183 /** 3184 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 3185 * @vsi: ptr to the VSI 3186 **/ 3187 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 3188 { 3189 struct i40e_pf *pf = vsi->back; 3190 int err; 3191 3192 if (vsi->netdev) 3193 i40e_set_rx_mode(vsi->netdev); 3194 3195 if (!!(pf->flags & I40E_FLAG_PF_MAC)) { 3196 err = i40e_macaddr_init(vsi, pf->hw.mac.addr); 3197 if (err) { 3198 dev_warn(&pf->pdev->dev, 3199 "could not set up macaddr; err %d\n", err); 3200 } 3201 } 3202 } 3203 3204 /** 3205 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 3206 * @vsi: Pointer to the targeted VSI 3207 * 3208 * This function replays the hlist on the hw where all the SB Flow Director 3209 * filters were saved. 
3210 **/ 3211 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) 3212 { 3213 struct i40e_fdir_filter *filter; 3214 struct i40e_pf *pf = vsi->back; 3215 struct hlist_node *node; 3216 3217 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 3218 return; 3219 3220 hlist_for_each_entry_safe(filter, node, 3221 &pf->fdir_filter_list, fdir_node) { 3222 i40e_add_del_fdir(vsi, filter, true); 3223 } 3224 } 3225 3226 /** 3227 * i40e_vsi_configure - Set up the VSI for action 3228 * @vsi: the VSI being configured 3229 **/ 3230 static int i40e_vsi_configure(struct i40e_vsi *vsi) 3231 { 3232 int err; 3233 3234 i40e_set_vsi_rx_mode(vsi); 3235 i40e_restore_vlan(vsi); 3236 i40e_vsi_config_dcb_rings(vsi); 3237 err = i40e_vsi_configure_tx(vsi); 3238 if (!err) 3239 err = i40e_vsi_configure_rx(vsi); 3240 3241 return err; 3242 } 3243 3244 /** 3245 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW 3246 * @vsi: the VSI being configured 3247 **/ 3248 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) 3249 { 3250 struct i40e_pf *pf = vsi->back; 3251 struct i40e_hw *hw = &pf->hw; 3252 u16 vector; 3253 int i, q; 3254 u32 qp; 3255 3256 /* The interrupt indexing is offset by 1 in the PFINT_ITRn 3257 * and PFINT_LNKLSTn registers, e.g.: 3258 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) 3259 */ 3260 qp = vsi->base_queue; 3261 vector = vsi->base_vector; 3262 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 3263 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; 3264 3265 q_vector->itr_countdown = ITR_COUNTDOWN_START; 3266 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting); 3267 q_vector->rx.latency_range = I40E_LOW_LATENCY; 3268 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 3269 q_vector->rx.itr); 3270 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting); 3271 q_vector->tx.latency_range = I40E_LOW_LATENCY; 3272 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), 3273 q_vector->tx.itr); 3274 wr32(hw, I40E_PFINT_RATEN(vector - 1), 3275 INTRL_USEC_TO_REG(vsi->int_rate_limit)); 3276 3277 /* Linked list for the queuepairs assigned to this vector */ 3278 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); 3279 for (q = 0; q < q_vector->num_ringpairs; q++) { 3280 u32 val; 3281 3282 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3283 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3284 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 3285 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| 3286 (I40E_QUEUE_TYPE_TX 3287 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); 3288 3289 wr32(hw, I40E_QINT_RQCTL(qp), val); 3290 3291 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3292 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3293 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 3294 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| 3295 (I40E_QUEUE_TYPE_RX 3296 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3297 3298 /* Terminate the linked list */ 3299 if (q == (q_vector->num_ringpairs - 1)) 3300 val |= (I40E_QUEUE_END_OF_LIST 3301 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3302 3303 wr32(hw, I40E_QINT_TQCTL(qp), val); 3304 qp++; 3305 } 3306 } 3307 3308 i40e_flush(hw); 3309 } 3310 3311 /** 3312 * i40e_enable_misc_int_causes - enable the non-queue interrupts 3313 * @hw: ptr to the hardware info 3314 **/ 3315 static void i40e_enable_misc_int_causes(struct i40e_pf *pf) 3316 { 3317 struct i40e_hw *hw = &pf->hw; 3318 u32 val; 3319 3320 /* clear things first */ 3321 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 3322 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 3323 3324 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 3325 
I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 3326 I40E_PFINT_ICR0_ENA_GRST_MASK | 3327 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 3328 I40E_PFINT_ICR0_ENA_GPIO_MASK | 3329 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 3330 I40E_PFINT_ICR0_ENA_VFLR_MASK | 3331 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3332 3333 if (pf->flags & I40E_FLAG_IWARP_ENABLED) 3334 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3335 3336 if (pf->flags & I40E_FLAG_PTP) 3337 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3338 3339 wr32(hw, I40E_PFINT_ICR0_ENA, val); 3340 3341 /* SW_ITR_IDX = 0, but don't change INTENA */ 3342 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 3343 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 3344 3345 /* OTHER_ITR_IDX = 0 */ 3346 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 3347 } 3348 3349 /** 3350 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 3351 * @vsi: the VSI being configured 3352 **/ 3353 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 3354 { 3355 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3356 struct i40e_pf *pf = vsi->back; 3357 struct i40e_hw *hw = &pf->hw; 3358 u32 val; 3359 3360 /* set the ITR configuration */ 3361 q_vector->itr_countdown = ITR_COUNTDOWN_START; 3362 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting); 3363 q_vector->rx.latency_range = I40E_LOW_LATENCY; 3364 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 3365 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting); 3366 q_vector->tx.latency_range = I40E_LOW_LATENCY; 3367 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 3368 3369 i40e_enable_misc_int_causes(pf); 3370 3371 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 3372 wr32(hw, I40E_PFINT_LNKLST0, 0); 3373 3374 /* Associate the queue pair to the vector and enable the queue int */ 3375 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3376 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3377 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3378 3379 wr32(hw, I40E_QINT_RQCTL(0), val); 3380 3381 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3382 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3383 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3384 3385 wr32(hw, I40E_QINT_TQCTL(0), val); 3386 i40e_flush(hw); 3387 } 3388 3389 /** 3390 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 3391 * @pf: board private structure 3392 **/ 3393 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 3394 { 3395 struct i40e_hw *hw = &pf->hw; 3396 3397 wr32(hw, I40E_PFINT_DYN_CTL0, 3398 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3399 i40e_flush(hw); 3400 } 3401 3402 /** 3403 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 3404 * @pf: board private structure 3405 * @clearpba: true when all pending interrupt events should be cleared 3406 **/ 3407 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba) 3408 { 3409 struct i40e_hw *hw = &pf->hw; 3410 u32 val; 3411 3412 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 3413 (clearpba ? 
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) | 3414 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 3415 3416 wr32(hw, I40E_PFINT_DYN_CTL0, val); 3417 i40e_flush(hw); 3418 } 3419 3420 /** 3421 * i40e_msix_clean_rings - MSIX mode Interrupt Handler 3422 * @irq: interrupt number 3423 * @data: pointer to a q_vector 3424 **/ 3425 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 3426 { 3427 struct i40e_q_vector *q_vector = data; 3428 3429 if (!q_vector->tx.ring && !q_vector->rx.ring) 3430 return IRQ_HANDLED; 3431 3432 napi_schedule_irqoff(&q_vector->napi); 3433 3434 return IRQ_HANDLED; 3435 } 3436 3437 /** 3438 * i40e_irq_affinity_notify - Callback for affinity changes 3439 * @notify: context as to what irq was changed 3440 * @mask: the new affinity mask 3441 * 3442 * This is a callback function used by the irq_set_affinity_notifier function 3443 * so that we may register to receive changes to the irq affinity masks. 3444 **/ 3445 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, 3446 const cpumask_t *mask) 3447 { 3448 struct i40e_q_vector *q_vector = 3449 container_of(notify, struct i40e_q_vector, affinity_notify); 3450 3451 q_vector->affinity_mask = *mask; 3452 } 3453 3454 /** 3455 * i40e_irq_affinity_release - Callback for affinity notifier release 3456 * @ref: internal core kernel usage 3457 * 3458 * This is a callback function used by the irq_set_affinity_notifier function 3459 * to inform the current notification subscriber that they will no longer 3460 * receive notifications. 3461 **/ 3462 static void i40e_irq_affinity_release(struct kref *ref) {} 3463 3464 /** 3465 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts 3466 * @vsi: the VSI being configured 3467 * @basename: name for the vector 3468 * 3469 * Allocates MSI-X vectors and requests interrupts from the kernel. 
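 * For example, with a basename of "i40e-eth0" (an assumption about the
 * caller-supplied format) a paired Tx/Rx vector is registered as
 * "i40e-eth0-TxRx-0", the next as "i40e-eth0-TxRx-1", and so on.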
3470 **/ 3471 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) 3472 { 3473 int q_vectors = vsi->num_q_vectors; 3474 struct i40e_pf *pf = vsi->back; 3475 int base = vsi->base_vector; 3476 int rx_int_idx = 0; 3477 int tx_int_idx = 0; 3478 int vector, err; 3479 int irq_num; 3480 3481 for (vector = 0; vector < q_vectors; vector++) { 3482 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; 3483 3484 irq_num = pf->msix_entries[base + vector].vector; 3485 3486 if (q_vector->tx.ring && q_vector->rx.ring) { 3487 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3488 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 3489 tx_int_idx++; 3490 } else if (q_vector->rx.ring) { 3491 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3492 "%s-%s-%d", basename, "rx", rx_int_idx++); 3493 } else if (q_vector->tx.ring) { 3494 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 3495 "%s-%s-%d", basename, "tx", tx_int_idx++); 3496 } else { 3497 /* skip this unused q_vector */ 3498 continue; 3499 } 3500 err = request_irq(irq_num, 3501 vsi->irq_handler, 3502 0, 3503 q_vector->name, 3504 q_vector); 3505 if (err) { 3506 dev_info(&pf->pdev->dev, 3507 "MSIX request_irq failed, error: %d\n", err); 3508 goto free_queue_irqs; 3509 } 3510 3511 /* register for affinity change notifications */ 3512 q_vector->affinity_notify.notify = i40e_irq_affinity_notify; 3513 q_vector->affinity_notify.release = i40e_irq_affinity_release; 3514 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 3515 /* assign the mask for this irq */ 3516 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 3517 } 3518 3519 vsi->irqs_ready = true; 3520 return 0; 3521 3522 free_queue_irqs: 3523 while (vector) { 3524 vector--; 3525 irq_num = pf->msix_entries[base + vector].vector; 3526 irq_set_affinity_notifier(irq_num, NULL); 3527 irq_set_affinity_hint(irq_num, NULL); 3528 free_irq(irq_num, vsi->q_vectors[vector]); /* must match the request_irq() cookie above */ 3529 } 3530 return err; 3531 } 3532 3533 /** 3534 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI 3535 * @vsi: the VSI being un-configured 3536 **/ 3537 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) 3538 { 3539 struct i40e_pf *pf = vsi->back; 3540 struct i40e_hw *hw = &pf->hw; 3541 int base = vsi->base_vector; 3542 int i; 3543 3544 for (i = 0; i < vsi->num_queue_pairs; i++) { 3545 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); 3546 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); 3547 } 3548 3549 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3550 for (i = vsi->base_vector; 3551 i < (vsi->num_q_vectors + vsi->base_vector); i++) 3552 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); 3553 3554 i40e_flush(hw); 3555 for (i = 0; i < vsi->num_q_vectors; i++) 3556 synchronize_irq(pf->msix_entries[i + base].vector); 3557 } else { 3558 /* Legacy and MSI mode - this stops all interrupt handling */ 3559 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 3560 wr32(hw, I40E_PFINT_DYN_CTL0, 0); 3561 i40e_flush(hw); 3562 synchronize_irq(pf->pdev->irq); 3563 } 3564 } 3565 3566 /** 3567 * i40e_vsi_enable_irq - Enable IRQ for the given VSI 3568 * @vsi: the VSI being configured 3569 **/ 3570 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) 3571 { 3572 struct i40e_pf *pf = vsi->back; 3573 int i; 3574 3575 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3576 for (i = 0; i < vsi->num_q_vectors; i++) 3577 i40e_irq_dynamic_enable(vsi, i); 3578 } else { 3579 i40e_irq_dynamic_enable_icr0(pf, true); 3580 } 3581 3582 i40e_flush(&pf->hw); 3583 return 0; 3584 } 3585 3586 /** 3587 * i40e_stop_misc_vector -
Stop the vector that handles non-queue events 3588 * @pf: board private structure 3589 **/ 3590 static void i40e_stop_misc_vector(struct i40e_pf *pf) 3591 { 3592 /* Disable ICR 0 */ 3593 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 3594 i40e_flush(&pf->hw); 3595 } 3596 3597 /** 3598 * i40e_intr - MSI/Legacy and non-queue interrupt handler 3599 * @irq: interrupt number 3600 * @data: pointer to the PF structure 3601 * 3602 * This is the handler used for all MSI/Legacy interrupts, and deals 3603 * with both queue and non-queue interrupts. This is also used in 3604 * MSIX mode to handle the non-queue interrupts. 3605 **/ 3606 static irqreturn_t i40e_intr(int irq, void *data) 3607 { 3608 struct i40e_pf *pf = (struct i40e_pf *)data; 3609 struct i40e_hw *hw = &pf->hw; 3610 irqreturn_t ret = IRQ_NONE; 3611 u32 icr0, icr0_remaining; 3612 u32 val, ena_mask; 3613 3614 icr0 = rd32(hw, I40E_PFINT_ICR0); 3615 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 3616 3617 /* if sharing a legacy IRQ, we might get called w/o an intr pending */ 3618 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 3619 goto enable_intr; 3620 3621 /* if interrupt but no bits showing, must be SWINT */ 3622 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || 3623 (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) 3624 pf->sw_int_count++; 3625 3626 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 3627 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { 3628 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3629 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 3630 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); 3631 } 3632 3633 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 3634 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 3635 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 3636 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 3637 3638 /* We have no way to disarm the queue causes while leaving 3639 * the interrupt enabled for all other causes. Ideally the 3640 * interrupt would be disabled while we are in NAPI, but 3641 * this is not a performance path and napi_schedule() 3642 * can deal with rescheduling.
3643 */ 3644 if (!test_bit(__I40E_DOWN, &pf->state)) 3645 napi_schedule_irqoff(&q_vector->napi); 3646 } 3647 3648 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 3649 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 3650 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 3651 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); 3652 } 3653 3654 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 3655 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 3656 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 3657 } 3658 3659 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 3660 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 3661 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 3662 } 3663 3664 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 3665 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 3666 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 3667 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 3668 val = rd32(hw, I40E_GLGEN_RSTAT); 3669 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 3670 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 3671 if (val == I40E_RESET_CORER) { 3672 pf->corer_count++; 3673 } else if (val == I40E_RESET_GLOBR) { 3674 pf->globr_count++; 3675 } else if (val == I40E_RESET_EMPR) { 3676 pf->empr_count++; 3677 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); 3678 } 3679 } 3680 3681 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3682 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3683 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3684 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", 3685 rd32(hw, I40E_PFHMC_ERRORINFO), 3686 rd32(hw, I40E_PFHMC_ERRORDATA)); 3687 } 3688 3689 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3690 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 3691 3692 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 3693 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 3694 i40e_ptp_tx_hwtstamp(pf); 3695 } 3696 } 3697 3698 /* If a critical error is pending we have no choice but to reset the 3699 * device. 3700 * Report and mask out any remaining unexpected interrupts. 3701 */ 3702 icr0_remaining = icr0 & ena_mask; 3703 if (icr0_remaining) { 3704 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 3705 icr0_remaining); 3706 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 3707 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 3708 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 3709 dev_info(&pf->pdev->dev, "device will be reset\n"); 3710 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 3711 i40e_service_event_schedule(pf); 3712 } 3713 ena_mask &= ~icr0_remaining; 3714 } 3715 ret = IRQ_HANDLED; 3716 3717 enable_intr: 3718 /* re-enable interrupt causes */ 3719 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3720 if (!test_bit(__I40E_DOWN, &pf->state)) { 3721 i40e_service_event_schedule(pf); 3722 i40e_irq_dynamic_enable_icr0(pf, false); 3723 } 3724 3725 return ret; 3726 } 3727 3728 /** 3729 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 3730 * @tx_ring: tx ring to clean 3731 * @budget: how many cleans we're allowed 3732 * 3733 * Returns true if there's any budget left (e.g. 
the clean is finished) 3734 **/ 3735 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3736 { 3737 struct i40e_vsi *vsi = tx_ring->vsi; 3738 u16 i = tx_ring->next_to_clean; 3739 struct i40e_tx_buffer *tx_buf; 3740 struct i40e_tx_desc *tx_desc; 3741 3742 tx_buf = &tx_ring->tx_bi[i]; 3743 tx_desc = I40E_TX_DESC(tx_ring, i); 3744 i -= tx_ring->count; 3745 3746 do { 3747 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3748 3749 /* if next_to_watch is not set then there is no work pending */ 3750 if (!eop_desc) 3751 break; 3752 3753 /* prevent any other reads prior to eop_desc */ 3754 read_barrier_depends(); 3755 3756 /* if the descriptor isn't done, no work yet to do */ 3757 if (!(eop_desc->cmd_type_offset_bsz & 3758 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3759 break; 3760 3761 /* clear next_to_watch to prevent false hangs */ 3762 tx_buf->next_to_watch = NULL; 3763 3764 tx_desc->buffer_addr = 0; 3765 tx_desc->cmd_type_offset_bsz = 0; 3766 /* move past filter desc */ 3767 tx_buf++; 3768 tx_desc++; 3769 i++; 3770 if (unlikely(!i)) { 3771 i -= tx_ring->count; 3772 tx_buf = tx_ring->tx_bi; 3773 tx_desc = I40E_TX_DESC(tx_ring, 0); 3774 } 3775 /* unmap skb header data */ 3776 dma_unmap_single(tx_ring->dev, 3777 dma_unmap_addr(tx_buf, dma), 3778 dma_unmap_len(tx_buf, len), 3779 DMA_TO_DEVICE); 3780 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3781 kfree(tx_buf->raw_buf); 3782 3783 tx_buf->raw_buf = NULL; 3784 tx_buf->tx_flags = 0; 3785 tx_buf->next_to_watch = NULL; 3786 dma_unmap_len_set(tx_buf, len, 0); 3787 tx_desc->buffer_addr = 0; 3788 tx_desc->cmd_type_offset_bsz = 0; 3789 3790 /* move us past the eop_desc for start of next FD desc */ 3791 tx_buf++; 3792 tx_desc++; 3793 i++; 3794 if (unlikely(!i)) { 3795 i -= tx_ring->count; 3796 tx_buf = tx_ring->tx_bi; 3797 tx_desc = I40E_TX_DESC(tx_ring, 0); 3798 } 3799 3800 /* update budget accounting */ 3801 budget--; 3802 } while (likely(budget)); 3803 3804 i += tx_ring->count; 3805 tx_ring->next_to_clean = i; 3806 3807 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) 3808 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); 3809 3810 return budget > 0; 3811 } 3812 3813 /** 3814 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3815 * @irq: interrupt number 3816 * @data: pointer to a q_vector 3817 **/ 3818 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3819 { 3820 struct i40e_q_vector *q_vector = data; 3821 struct i40e_vsi *vsi; 3822 3823 if (!q_vector->tx.ring) 3824 return IRQ_HANDLED; 3825 3826 vsi = q_vector->tx.ring->vsi; 3827 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3828 3829 return IRQ_HANDLED; 3830 } 3831 3832 /** 3833 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3834 * @vsi: the VSI being configured 3835 * @v_idx: vector index 3836 * @qp_idx: queue pair index 3837 **/ 3838 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3839 { 3840 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3841 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3842 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3843 3844 tx_ring->q_vector = q_vector; 3845 tx_ring->next = q_vector->tx.ring; 3846 q_vector->tx.ring = tx_ring; 3847 q_vector->tx.count++; 3848 3849 rx_ring->q_vector = q_vector; 3850 rx_ring->next = q_vector->rx.ring; 3851 q_vector->rx.ring = rx_ring; 3852 q_vector->rx.count++; 3853 } 3854 3855 /** 3856 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3857 * @vsi: the VSI being configured 3858 * 3859 * This 
function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have
	 * to group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to
	 * be sure that if we don't use all of them, the remaining ones are
	 * cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}

/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* i40e_intr() expects the PF as its dev_id cookie, matching
		 * what i40e_vsi_request_irq() registered above
		 */
		i40e_intr(pf->pdev->irq, pf);
	}
}
#endif

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
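 *
 * A hedged usage sketch (mirrors i40e_vsi_control_tx() below; names as in
 * this file, illustrative only):
 *
 *	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
 *	ret = i40e_pf_txq_wait(pf, pf_q, enable);
 *	if (ret == -ETIMEDOUT)
 *		dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
 *			 pf_q, enable ? "en" : "dis");
 *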
3964 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3965 * multiple retries; else will return 0 in case of success. 3966 **/ 3967 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3968 { 3969 int i; 3970 u32 tx_reg; 3971 3972 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3973 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3974 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3975 break; 3976 3977 usleep_range(10, 20); 3978 } 3979 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3980 return -ETIMEDOUT; 3981 3982 return 0; 3983 } 3984 3985 /** 3986 * i40e_vsi_control_tx - Start or stop a VSI's rings 3987 * @vsi: the VSI being configured 3988 * @enable: start or stop the rings 3989 **/ 3990 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3991 { 3992 struct i40e_pf *pf = vsi->back; 3993 struct i40e_hw *hw = &pf->hw; 3994 int i, j, pf_q, ret = 0; 3995 u32 tx_reg; 3996 3997 pf_q = vsi->base_queue; 3998 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3999 4000 /* warn the TX unit of coming changes */ 4001 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 4002 if (!enable) 4003 usleep_range(10, 20); 4004 4005 for (j = 0; j < 50; j++) { 4006 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 4007 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 4008 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 4009 break; 4010 usleep_range(1000, 2000); 4011 } 4012 /* Skip if the queue is already in the requested state */ 4013 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 4014 continue; 4015 4016 /* turn on/off the queue */ 4017 if (enable) { 4018 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 4019 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 4020 } else { 4021 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 4022 } 4023 4024 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 4025 /* No waiting for the Tx queue to disable */ 4026 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) 4027 continue; 4028 4029 /* wait for the change to finish */ 4030 ret = i40e_pf_txq_wait(pf, pf_q, enable); 4031 if (ret) { 4032 dev_info(&pf->pdev->dev, 4033 "VSI seid %d Tx ring %d %sable timeout\n", 4034 vsi->seid, pf_q, (enable ? "en" : "dis")); 4035 break; 4036 } 4037 } 4038 4039 if (hw->revision_id == 0) 4040 mdelay(50); 4041 return ret; 4042 } 4043 4044 /** 4045 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 4046 * @pf: the PF being configured 4047 * @pf_q: the PF queue 4048 * @enable: enable or disable state of the queue 4049 * 4050 * This routine will wait for the given Rx queue of the PF to reach the 4051 * enabled or disabled state. 4052 * Returns -ETIMEDOUT in case of failing to reach the requested state after 4053 * multiple retries; else will return 0 in case of success. 
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
		/* No waiting for the Rx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ?
"en" : "dis")); 4115 break; 4116 } 4117 } 4118 4119 return ret; 4120 } 4121 4122 /** 4123 * i40e_vsi_start_rings - Start a VSI's rings 4124 * @vsi: the VSI being configured 4125 **/ 4126 int i40e_vsi_start_rings(struct i40e_vsi *vsi) 4127 { 4128 int ret = 0; 4129 4130 /* do rx first for enable and last for disable */ 4131 ret = i40e_vsi_control_rx(vsi, true); 4132 if (ret) 4133 return ret; 4134 ret = i40e_vsi_control_tx(vsi, true); 4135 4136 return ret; 4137 } 4138 4139 /** 4140 * i40e_vsi_stop_rings - Stop a VSI's rings 4141 * @vsi: the VSI being configured 4142 **/ 4143 void i40e_vsi_stop_rings(struct i40e_vsi *vsi) 4144 { 4145 /* do rx first for enable and last for disable 4146 * Ignore return value, we need to shutdown whatever we can 4147 */ 4148 i40e_vsi_control_tx(vsi, false); 4149 i40e_vsi_control_rx(vsi, false); 4150 } 4151 4152 /** 4153 * i40e_vsi_free_irq - Free the irq association with the OS 4154 * @vsi: the VSI being configured 4155 **/ 4156 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) 4157 { 4158 struct i40e_pf *pf = vsi->back; 4159 struct i40e_hw *hw = &pf->hw; 4160 int base = vsi->base_vector; 4161 u32 val, qp; 4162 int i; 4163 4164 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4165 if (!vsi->q_vectors) 4166 return; 4167 4168 if (!vsi->irqs_ready) 4169 return; 4170 4171 vsi->irqs_ready = false; 4172 for (i = 0; i < vsi->num_q_vectors; i++) { 4173 int irq_num; 4174 u16 vector; 4175 4176 vector = i + base; 4177 irq_num = pf->msix_entries[vector].vector; 4178 4179 /* free only the irqs that were actually requested */ 4180 if (!vsi->q_vectors[i] || 4181 !vsi->q_vectors[i]->num_ringpairs) 4182 continue; 4183 4184 /* clear the affinity notifier in the IRQ descriptor */ 4185 irq_set_affinity_notifier(irq_num, NULL); 4186 /* clear the affinity_mask in the IRQ descriptor */ 4187 irq_set_affinity_hint(irq_num, NULL); 4188 synchronize_irq(irq_num); 4189 free_irq(irq_num, vsi->q_vectors[i]); 4190 4191 /* Tear down the interrupt queue link list 4192 * 4193 * We know that they come in pairs and always 4194 * the Rx first, then the Tx. To clear the 4195 * link list, stick the EOL value into the 4196 * next_q field of the registers. 
4197 */ 4198 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 4199 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4200 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4201 val |= I40E_QUEUE_END_OF_LIST 4202 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4203 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 4204 4205 while (qp != I40E_QUEUE_END_OF_LIST) { 4206 u32 next; 4207 4208 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4209 4210 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4211 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4212 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4213 I40E_QINT_RQCTL_INTEVENT_MASK); 4214 4215 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4216 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4217 4218 wr32(hw, I40E_QINT_RQCTL(qp), val); 4219 4220 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4221 4222 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 4223 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 4224 4225 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4226 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4227 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4228 I40E_QINT_TQCTL_INTEVENT_MASK); 4229 4230 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4231 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4232 4233 wr32(hw, I40E_QINT_TQCTL(qp), val); 4234 qp = next; 4235 } 4236 } 4237 } else { 4238 free_irq(pf->pdev->irq, pf); 4239 4240 val = rd32(hw, I40E_PFINT_LNKLST0); 4241 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 4242 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 4243 val |= I40E_QUEUE_END_OF_LIST 4244 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 4245 wr32(hw, I40E_PFINT_LNKLST0, val); 4246 4247 val = rd32(hw, I40E_QINT_RQCTL(qp)); 4248 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 4249 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 4250 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 4251 I40E_QINT_RQCTL_INTEVENT_MASK); 4252 4253 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 4254 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 4255 4256 wr32(hw, I40E_QINT_RQCTL(qp), val); 4257 4258 val = rd32(hw, I40E_QINT_TQCTL(qp)); 4259 4260 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 4261 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 4262 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 4263 I40E_QINT_TQCTL_INTEVENT_MASK); 4264 4265 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 4266 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 4267 4268 wr32(hw, I40E_QINT_TQCTL(qp), val); 4269 } 4270 } 4271 4272 /** 4273 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 4274 * @vsi: the VSI being configured 4275 * @v_idx: Index of vector to be freed 4276 * 4277 * This function frees the memory allocated to the q_vector. In addition if 4278 * NAPI is enabled it will delete any references to the NAPI struct prior 4279 * to freeing the q_vector. 4280 **/ 4281 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 4282 { 4283 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 4284 struct i40e_ring *ring; 4285 4286 if (!q_vector) 4287 return; 4288 4289 /* disassociate q_vector from rings */ 4290 i40e_for_each_ring(ring, q_vector->tx) 4291 ring->q_vector = NULL; 4292 4293 i40e_for_each_ring(ring, q_vector->rx) 4294 ring->q_vector = NULL; 4295 4296 /* only VSI w/ an associated netdev is set up w/ NAPI */ 4297 if (vsi->netdev) 4298 netif_napi_del(&q_vector->napi); 4299 4300 vsi->q_vectors[v_idx] = NULL; 4301 4302 kfree_rcu(q_vector, rcu); 4303 } 4304 4305 /** 4306 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 4307 * @vsi: the VSI being un-configured 4308 * 4309 * This frees the memory allocated to the q_vectors and 4310 * deletes references to the NAPI struct. 
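 *
 * (An assumption worth noting: the q_vector is released with kfree_rcu()
 *  rather than kfree(), presumably because readers may still dereference
 *  a ring's q_vector pointer under RCU, so the free is deferred past a
 *  grace period.)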
4311 **/ 4312 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 4313 { 4314 int v_idx; 4315 4316 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 4317 i40e_free_q_vector(vsi, v_idx); 4318 } 4319 4320 /** 4321 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 4322 * @pf: board private structure 4323 **/ 4324 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 4325 { 4326 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 4327 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 4328 pci_disable_msix(pf->pdev); 4329 kfree(pf->msix_entries); 4330 pf->msix_entries = NULL; 4331 kfree(pf->irq_pile); 4332 pf->irq_pile = NULL; 4333 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 4334 pci_disable_msi(pf->pdev); 4335 } 4336 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 4337 } 4338 4339 /** 4340 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 4341 * @pf: board private structure 4342 * 4343 * We go through and clear interrupt specific resources and reset the structure 4344 * to pre-load conditions 4345 **/ 4346 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 4347 { 4348 int i; 4349 4350 i40e_stop_misc_vector(pf); 4351 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { 4352 synchronize_irq(pf->msix_entries[0].vector); 4353 free_irq(pf->msix_entries[0].vector, pf); 4354 } 4355 4356 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, 4357 I40E_IWARP_IRQ_PILE_ID); 4358 4359 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 4360 for (i = 0; i < pf->num_alloc_vsi; i++) 4361 if (pf->vsi[i]) 4362 i40e_vsi_free_q_vectors(pf->vsi[i]); 4363 i40e_reset_interrupt_capability(pf); 4364 } 4365 4366 /** 4367 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 4368 * @vsi: the VSI being configured 4369 **/ 4370 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 4371 { 4372 int q_idx; 4373 4374 if (!vsi->netdev) 4375 return; 4376 4377 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4378 napi_enable(&vsi->q_vectors[q_idx]->napi); 4379 } 4380 4381 /** 4382 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4383 * @vsi: the VSI being configured 4384 **/ 4385 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 4386 { 4387 int q_idx; 4388 4389 if (!vsi->netdev) 4390 return; 4391 4392 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4393 napi_disable(&vsi->q_vectors[q_idx]->napi); 4394 } 4395 4396 /** 4397 * i40e_vsi_close - Shut down a VSI 4398 * @vsi: the vsi to be quelled 4399 **/ 4400 static void i40e_vsi_close(struct i40e_vsi *vsi) 4401 { 4402 bool reset = false; 4403 4404 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 4405 i40e_down(vsi); 4406 i40e_vsi_free_irq(vsi); 4407 i40e_vsi_free_tx_resources(vsi); 4408 i40e_vsi_free_rx_resources(vsi); 4409 vsi->current_netdev_flags = 0; 4410 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 4411 reset = true; 4412 i40e_notify_client_of_netdev_close(vsi, reset); 4413 } 4414 4415 /** 4416 * i40e_quiesce_vsi - Pause a given VSI 4417 * @vsi: the VSI being paused 4418 **/ 4419 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 4420 { 4421 if (test_bit(__I40E_DOWN, &vsi->state)) 4422 return; 4423 4424 /* No need to disable FCoE VSI when Tx suspended */ 4425 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && 4426 vsi->type == I40E_VSI_FCOE) { 4427 dev_dbg(&vsi->back->pdev->dev, 4428 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); 4429 return; 4430 } 4431 4432 
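	/* mark the VSI so i40e_unquiesce_vsi() knows to bring it back up */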
set_bit(__I40E_NEEDS_RESTART, &vsi->state); 4433 if (vsi->netdev && netif_running(vsi->netdev)) 4434 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 4435 else 4436 i40e_vsi_close(vsi); 4437 } 4438 4439 /** 4440 * i40e_unquiesce_vsi - Resume a given VSI 4441 * @vsi: the VSI being resumed 4442 **/ 4443 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 4444 { 4445 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 4446 return; 4447 4448 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 4449 if (vsi->netdev && netif_running(vsi->netdev)) 4450 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 4451 else 4452 i40e_vsi_open(vsi); /* this clears the DOWN bit */ 4453 } 4454 4455 /** 4456 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 4457 * @pf: the PF 4458 **/ 4459 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 4460 { 4461 int v; 4462 4463 for (v = 0; v < pf->num_alloc_vsi; v++) { 4464 if (pf->vsi[v]) 4465 i40e_quiesce_vsi(pf->vsi[v]); 4466 } 4467 } 4468 4469 /** 4470 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 4471 * @pf: the PF 4472 **/ 4473 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 4474 { 4475 int v; 4476 4477 for (v = 0; v < pf->num_alloc_vsi; v++) { 4478 if (pf->vsi[v]) 4479 i40e_unquiesce_vsi(pf->vsi[v]); 4480 } 4481 } 4482 4483 #ifdef CONFIG_I40E_DCB 4484 /** 4485 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled 4486 * @vsi: the VSI being configured 4487 * 4488 * This function waits for the given VSI's queues to be disabled. 4489 **/ 4490 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) 4491 { 4492 struct i40e_pf *pf = vsi->back; 4493 int i, pf_q, ret; 4494 4495 pf_q = vsi->base_queue; 4496 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4497 /* Check and wait for the disable status of the queue */ 4498 ret = i40e_pf_txq_wait(pf, pf_q, false); 4499 if (ret) { 4500 dev_info(&pf->pdev->dev, 4501 "VSI seid %d Tx ring %d disable timeout\n", 4502 vsi->seid, pf_q); 4503 return ret; 4504 } 4505 } 4506 4507 pf_q = vsi->base_queue; 4508 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4509 /* Check and wait for the disable status of the queue */ 4510 ret = i40e_pf_rxq_wait(pf, pf_q, false); 4511 if (ret) { 4512 dev_info(&pf->pdev->dev, 4513 "VSI seid %d Rx ring %d disable timeout\n", 4514 vsi->seid, pf_q); 4515 return ret; 4516 } 4517 } 4518 4519 return 0; 4520 } 4521 4522 /** 4523 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled 4524 * @pf: the PF 4525 * 4526 * This function waits for the queues to be in disabled state for all the 4527 * VSIs that are managed by this PF. 4528 **/ 4529 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) 4530 { 4531 int v, ret = 0; 4532 4533 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4534 /* No need to wait for FCoE VSI queues */ 4535 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { 4536 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); 4537 if (ret) 4538 break; 4539 } 4540 } 4541 4542 return ret; 4543 } 4544 4545 #endif 4546 4547 /** 4548 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue 4549 * @q_idx: TX queue number 4550 * @vsi: Pointer to VSI struct 4551 * 4552 * This function checks specified queue for given VSI. Detects hung condition. 4553 * Sets hung bit since it is two step process. Before next run of service task 4554 * if napi_poll runs, it reset 'hung' bit for respective q_vector. 
If not, 4555 * hung condition remain unchanged and during subsequent run, this function 4556 * issues SW interrupt to recover from hung condition. 4557 **/ 4558 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) 4559 { 4560 struct i40e_ring *tx_ring = NULL; 4561 struct i40e_pf *pf; 4562 u32 head, val, tx_pending_hw; 4563 int i; 4564 4565 pf = vsi->back; 4566 4567 /* now that we have an index, find the tx_ring struct */ 4568 for (i = 0; i < vsi->num_queue_pairs; i++) { 4569 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { 4570 if (q_idx == vsi->tx_rings[i]->queue_index) { 4571 tx_ring = vsi->tx_rings[i]; 4572 break; 4573 } 4574 } 4575 } 4576 4577 if (!tx_ring) 4578 return; 4579 4580 /* Read interrupt register */ 4581 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 4582 val = rd32(&pf->hw, 4583 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + 4584 tx_ring->vsi->base_vector - 1)); 4585 else 4586 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); 4587 4588 head = i40e_get_head(tx_ring); 4589 4590 tx_pending_hw = i40e_get_tx_pending(tx_ring, false); 4591 4592 /* HW is done executing descriptors, updated HEAD write back, 4593 * but SW hasn't processed those descriptors. If interrupt is 4594 * not generated from this point ON, it could result into 4595 * dev_watchdog detecting timeout on those netdev_queue, 4596 * hence proactively trigger SW interrupt. 4597 */ 4598 if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { 4599 /* NAPI Poll didn't run and clear since it was set */ 4600 if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, 4601 &tx_ring->q_vector->hung_detected)) { 4602 netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", 4603 vsi->seid, q_idx, tx_pending_hw, 4604 tx_ring->next_to_clean, head, 4605 tx_ring->next_to_use, 4606 readl(tx_ring->tail)); 4607 netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n", 4608 vsi->seid, q_idx, val); 4609 i40e_force_wb(vsi, tx_ring->q_vector); 4610 } else { 4611 /* First Chance - detected possible hung */ 4612 set_bit(I40E_Q_VECTOR_HUNG_DETECT, 4613 &tx_ring->q_vector->hung_detected); 4614 } 4615 } 4616 4617 /* This is the case where we have interrupts missing, 4618 * so the tx_pending in HW will most likely be 0, but we 4619 * will have tx_pending in SW since the WB happened but the 4620 * interrupt got lost. 4621 */ 4622 if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && 4623 (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { 4624 if (napi_reschedule(&tx_ring->q_vector->napi)) 4625 tx_ring->tx_stats.tx_lost_interrupt++; 4626 } 4627 } 4628 4629 /** 4630 * i40e_detect_recover_hung - Function to detect and recover hung_queues 4631 * @pf: pointer to PF struct 4632 * 4633 * LAN VSI has netdev and netdev has TX queues. This function is to check 4634 * each of those TX queues if they are hung, trigger recovery by issuing 4635 * SW interrupt. 
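 *
 * The per-queue helper above is deliberately two-pass; a hedged summary
 * (pseudo-code, not driver code):
 *
 *	if (tx_pending && interrupt not armed) {
 *		if (test_and_clear_bit(HUNG))	second pass: recover
 *			i40e_force_wb(vsi, q_vector);
 *		else				first pass: arm detector
 *			set_bit(HUNG);
 *	}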
4636 **/ 4637 static void i40e_detect_recover_hung(struct i40e_pf *pf) 4638 { 4639 struct net_device *netdev; 4640 struct i40e_vsi *vsi; 4641 int i; 4642 4643 /* Only for LAN VSI */ 4644 vsi = pf->vsi[pf->lan_vsi]; 4645 4646 if (!vsi) 4647 return; 4648 4649 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */ 4650 if (test_bit(__I40E_DOWN, &vsi->back->state) || 4651 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 4652 return; 4653 4654 /* Make sure type is MAIN VSI */ 4655 if (vsi->type != I40E_VSI_MAIN) 4656 return; 4657 4658 netdev = vsi->netdev; 4659 if (!netdev) 4660 return; 4661 4662 /* Bail out if netif_carrier is not OK */ 4663 if (!netif_carrier_ok(netdev)) 4664 return; 4665 4666 /* Go thru' TX queues for netdev */ 4667 for (i = 0; i < netdev->num_tx_queues; i++) { 4668 struct netdev_queue *q; 4669 4670 q = netdev_get_tx_queue(netdev, i); 4671 if (q) 4672 i40e_detect_recover_hung_queue(i, vsi); 4673 } 4674 } 4675 4676 /** 4677 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP 4678 * @pf: pointer to PF 4679 * 4680 * Get TC map for ISCSI PF type that will include iSCSI TC 4681 * and LAN TC. 4682 **/ 4683 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) 4684 { 4685 struct i40e_dcb_app_priority_table app; 4686 struct i40e_hw *hw = &pf->hw; 4687 u8 enabled_tc = 1; /* TC0 is always enabled */ 4688 u8 tc, i; 4689 /* Get the iSCSI APP TLV */ 4690 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4691 4692 for (i = 0; i < dcbcfg->numapps; i++) { 4693 app = dcbcfg->app[i]; 4694 if (app.selector == I40E_APP_SEL_TCPIP && 4695 app.protocolid == I40E_APP_PROTOID_ISCSI) { 4696 tc = dcbcfg->etscfg.prioritytable[app.priority]; 4697 enabled_tc |= BIT(tc); 4698 break; 4699 } 4700 } 4701 4702 return enabled_tc; 4703 } 4704 4705 /** 4706 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 4707 * @dcbcfg: the corresponding DCBx configuration structure 4708 * 4709 * Return the number of TCs from given DCBx configuration 4710 **/ 4711 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 4712 { 4713 int i, tc_unused = 0; 4714 u8 num_tc = 0; 4715 u8 ret = 0; 4716 4717 /* Scan the ETS Config Priority Table to find 4718 * traffic class enabled for a given priority 4719 * and create a bitmask of enabled TCs 4720 */ 4721 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) 4722 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); 4723 4724 /* Now scan the bitmask to check for 4725 * contiguous TCs starting with TC0 4726 */ 4727 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4728 if (num_tc & BIT(i)) { 4729 if (!tc_unused) { 4730 ret++; 4731 } else { 4732 pr_err("Non-contiguous TC - Disabling DCB\n"); 4733 return 1; 4734 } 4735 } else { 4736 tc_unused = 1; 4737 } 4738 } 4739 4740 /* There is always at least TC0 */ 4741 if (!ret) 4742 ret = 1; 4743 4744 return ret; 4745 } 4746 4747 /** 4748 * i40e_dcb_get_enabled_tc - Get enabled traffic classes 4749 * @dcbcfg: the corresponding DCBx configuration structure 4750 * 4751 * Query the current DCB configuration and return the number of 4752 * traffic classes enabled from the given DCBX config 4753 **/ 4754 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) 4755 { 4756 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); 4757 u8 enabled_tc = 1; 4758 u8 i; 4759 4760 for (i = 0; i < num_tc; i++) 4761 enabled_tc |= BIT(i); 4762 4763 return enabled_tc; 4764 } 4765 4766 /** 4767 * i40e_pf_get_num_tc - Get enabled traffic classes for PF 4768 * @pf: PF being queried 4769 * 4770 * Return number of traffic classes enabled for the given 
PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always in single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] =
			(u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x9 (binary 1001); the
		 * driver will set the numtc for netdev as 2, which will
		 * be referenced by the netdev layer as TC 0 and 1.
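		 *
		 * A worked illustration (hypothetical numbers): with 8
		 * queues per TC, TC0 becomes netdev_tc 0 with qcount 8 at
		 * offset 0, and TC3 becomes netdev_tc 1 with qcount 8 at
		 * offset 8, so netdev_set_tc_queue() is called twice.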
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_config.tc_info[i].netdev_tc,
					    vsi->tc_config.tc_info[i].qcount,
					    vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched, not the entire info,
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
			cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
vsi->info.valid_sections = 0; 5054 5055 /* Update current VSI BW information */ 5056 ret = i40e_vsi_get_bw_info(vsi); 5057 if (ret) { 5058 dev_info(&vsi->back->pdev->dev, 5059 "Failed updating vsi bw info, err %s aq_err %s\n", 5060 i40e_stat_str(&vsi->back->hw, ret), 5061 i40e_aq_str(&vsi->back->hw, 5062 vsi->back->hw.aq.asq_last_status)); 5063 goto out; 5064 } 5065 5066 /* Update the netdev TC setup */ 5067 i40e_vsi_config_netdev_tc(vsi, enabled_tc); 5068 out: 5069 return ret; 5070 } 5071 5072 /** 5073 * i40e_veb_config_tc - Configure TCs for given VEB 5074 * @veb: given VEB 5075 * @enabled_tc: TC bitmap 5076 * 5077 * Configures given TC bitmap for VEB (switching) element 5078 **/ 5079 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) 5080 { 5081 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; 5082 struct i40e_pf *pf = veb->pf; 5083 int ret = 0; 5084 int i; 5085 5086 /* No TCs or already enabled TCs just return */ 5087 if (!enabled_tc || veb->enabled_tc == enabled_tc) 5088 return ret; 5089 5090 bw_data.tc_valid_bits = enabled_tc; 5091 /* bw_data.absolute_credits is not set (relative) */ 5092 5093 /* Enable ETS TCs with equal BW Share for now */ 5094 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 5095 if (enabled_tc & BIT(i)) 5096 bw_data.tc_bw_share_credits[i] = 1; 5097 } 5098 5099 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, 5100 &bw_data, NULL); 5101 if (ret) { 5102 dev_info(&pf->pdev->dev, 5103 "VEB bw config failed, err %s aq_err %s\n", 5104 i40e_stat_str(&pf->hw, ret), 5105 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5106 goto out; 5107 } 5108 5109 /* Update the BW information */ 5110 ret = i40e_veb_get_bw_info(veb); 5111 if (ret) { 5112 dev_info(&pf->pdev->dev, 5113 "Failed getting veb bw config, err %s aq_err %s\n", 5114 i40e_stat_str(&pf->hw, ret), 5115 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5116 } 5117 5118 out: 5119 return ret; 5120 } 5121 5122 #ifdef CONFIG_I40E_DCB 5123 /** 5124 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs 5125 * @pf: PF struct 5126 * 5127 * Reconfigure VEB/VSIs on a given PF; it is assumed that 5128 * the caller would've quiesce all the VSIs before calling 5129 * this function 5130 **/ 5131 static void i40e_dcb_reconfigure(struct i40e_pf *pf) 5132 { 5133 u8 tc_map = 0; 5134 int ret; 5135 u8 v; 5136 5137 /* Enable the TCs available on PF to all VEBs */ 5138 tc_map = i40e_pf_get_tc_map(pf); 5139 for (v = 0; v < I40E_MAX_VEB; v++) { 5140 if (!pf->veb[v]) 5141 continue; 5142 ret = i40e_veb_config_tc(pf->veb[v], tc_map); 5143 if (ret) { 5144 dev_info(&pf->pdev->dev, 5145 "Failed configuring TC for VEB seid=%d\n", 5146 pf->veb[v]->seid); 5147 /* Will try to configure as many components */ 5148 } 5149 } 5150 5151 /* Update each VSI */ 5152 for (v = 0; v < pf->num_alloc_vsi; v++) { 5153 if (!pf->vsi[v]) 5154 continue; 5155 5156 /* - Enable all TCs for the LAN VSI 5157 #ifdef I40E_FCOE 5158 * - For FCoE VSI only enable the TC configured 5159 * as per the APP TLV 5160 #endif 5161 * - For all others keep them at TC0 for now 5162 */ 5163 if (v == pf->lan_vsi) 5164 tc_map = i40e_pf_get_tc_map(pf); 5165 else 5166 tc_map = I40E_DEFAULT_TRAFFIC_CLASS; 5167 #ifdef I40E_FCOE 5168 if (pf->vsi[v]->type == I40E_VSI_FCOE) 5169 tc_map = i40e_get_fcoe_tc_map(pf); 5170 #endif /* #ifdef I40E_FCOE */ 5171 5172 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); 5173 if (ret) { 5174 dev_info(&pf->pdev->dev, 5175 "Failed configuring TC for VSI seid=%d\n", 5176 pf->vsi[v]->seid); 5177 /* Will try to configure as many 
components */ 5178 } else { 5179 /* Re-configure VSI vectors based on updated TC map */ 5180 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); 5181 if (pf->vsi[v]->netdev) 5182 i40e_dcbnl_set_all(pf->vsi[v]); 5183 } 5184 } 5185 } 5186 5187 /** 5188 * i40e_resume_port_tx - Resume port Tx 5189 * @pf: PF struct 5190 * 5191 * Resume a port's Tx and issue a PF reset in case of failure to 5192 * resume. 5193 **/ 5194 static int i40e_resume_port_tx(struct i40e_pf *pf) 5195 { 5196 struct i40e_hw *hw = &pf->hw; 5197 int ret; 5198 5199 ret = i40e_aq_resume_port_tx(hw, NULL); 5200 if (ret) { 5201 dev_info(&pf->pdev->dev, 5202 "Resume Port Tx failed, err %s aq_err %s\n", 5203 i40e_stat_str(&pf->hw, ret), 5204 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5205 /* Schedule PF reset to recover */ 5206 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 5207 i40e_service_event_schedule(pf); 5208 } 5209 5210 return ret; 5211 } 5212 5213 /** 5214 * i40e_init_pf_dcb - Initialize DCB configuration 5215 * @pf: PF being configured 5216 * 5217 * Query the current DCB configuration and cache it 5218 * in the hardware structure 5219 **/ 5220 static int i40e_init_pf_dcb(struct i40e_pf *pf) 5221 { 5222 struct i40e_hw *hw = &pf->hw; 5223 int err = 0; 5224 5225 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ 5226 if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT) 5227 goto out; 5228 5229 /* Get the initial DCB configuration */ 5230 err = i40e_init_dcb(hw); 5231 if (!err) { 5232 /* Device/Function is not DCBX capable */ 5233 if ((!hw->func_caps.dcb) || 5234 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { 5235 dev_info(&pf->pdev->dev, 5236 "DCBX offload is not supported or is disabled for this PF.\n"); 5237 5238 if (pf->flags & I40E_FLAG_MFP_ENABLED) 5239 goto out; 5240 5241 } else { 5242 /* When status is not DISABLED then DCBX in FW */ 5243 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | 5244 DCB_CAP_DCBX_VER_IEEE; 5245 5246 pf->flags |= I40E_FLAG_DCB_CAPABLE; 5247 /* Enable DCB tagging only when more than one TC 5248 * or explicitly disable if only one TC 5249 */ 5250 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 5251 pf->flags |= I40E_FLAG_DCB_ENABLED; 5252 else 5253 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 5254 dev_dbg(&pf->pdev->dev, 5255 "DCBX offload is supported for this PF.\n"); 5256 } 5257 } else { 5258 dev_info(&pf->pdev->dev, 5259 "Query for DCB configuration failed, err %s aq_err %s\n", 5260 i40e_stat_str(&pf->hw, err), 5261 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5262 } 5263 5264 out: 5265 return err; 5266 } 5267 #endif /* CONFIG_I40E_DCB */ 5268 #define SPEED_SIZE 14 5269 #define FC_SIZE 8 5270 /** 5271 * i40e_print_link_message - print link up or down 5272 * @vsi: the VSI for which link needs a message 5273 */ 5274 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) 5275 { 5276 enum i40e_aq_link_speed new_speed; 5277 char *speed = "Unknown"; 5278 char *fc = "Unknown"; 5279 5280 new_speed = vsi->back->hw.phy.link_info.link_speed; 5281 5282 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) 5283 return; 5284 vsi->current_isup = isup; 5285 vsi->current_speed = new_speed; 5286 if (!isup) { 5287 netdev_info(vsi->netdev, "NIC Link is Down\n"); 5288 return; 5289 } 5290 5291 /* Warn user if link speed on NPAR enabled partition is not at 5292 * least 10GB 5293 */ 5294 if (vsi->back->hw.func_caps.npar_enable && 5295 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || 5296 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) 5297 
netdev_warn(vsi->netdev, 5298 "The partition detected link speed that is less than 10Gbps\n"); 5299 5300 switch (vsi->back->hw.phy.link_info.link_speed) { 5301 case I40E_LINK_SPEED_40GB: 5302 speed = "40 G"; 5303 break; 5304 case I40E_LINK_SPEED_20GB: 5305 speed = "20 G"; 5306 break; 5307 case I40E_LINK_SPEED_25GB: 5308 speed = "25 G"; 5309 break; 5310 case I40E_LINK_SPEED_10GB: 5311 speed = "10 G"; 5312 break; 5313 case I40E_LINK_SPEED_1GB: 5314 speed = "1000 M"; 5315 break; 5316 case I40E_LINK_SPEED_100MB: 5317 speed = "100 M"; 5318 break; 5319 default: 5320 break; 5321 } 5322 5323 switch (vsi->back->hw.fc.current_mode) { 5324 case I40E_FC_FULL: 5325 fc = "RX/TX"; 5326 break; 5327 case I40E_FC_TX_PAUSE: 5328 fc = "TX"; 5329 break; 5330 case I40E_FC_RX_PAUSE: 5331 fc = "RX"; 5332 break; 5333 default: 5334 fc = "None"; 5335 break; 5336 } 5337 5338 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", 5339 speed, fc); 5340 } 5341 5342 /** 5343 * i40e_up_complete - Finish the last steps of bringing up a connection 5344 * @vsi: the VSI being configured 5345 **/ 5346 static int i40e_up_complete(struct i40e_vsi *vsi) 5347 { 5348 struct i40e_pf *pf = vsi->back; 5349 int err; 5350 5351 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 5352 i40e_vsi_configure_msix(vsi); 5353 else 5354 i40e_configure_msi_and_legacy(vsi); 5355 5356 /* start rings */ 5357 err = i40e_vsi_start_rings(vsi); 5358 if (err) 5359 return err; 5360 5361 clear_bit(__I40E_DOWN, &vsi->state); 5362 i40e_napi_enable_all(vsi); 5363 i40e_vsi_enable_irq(vsi); 5364 5365 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && 5366 (vsi->netdev)) { 5367 i40e_print_link_message(vsi, true); 5368 netif_tx_start_all_queues(vsi->netdev); 5369 netif_carrier_on(vsi->netdev); 5370 } else if (vsi->netdev) { 5371 i40e_print_link_message(vsi, false); 5372 /* need to check for qualified module here*/ 5373 if ((pf->hw.phy.link_info.link_info & 5374 I40E_AQ_MEDIA_AVAILABLE) && 5375 (!(pf->hw.phy.link_info.an_info & 5376 I40E_AQ_QUALIFIED_MODULE))) 5377 netdev_err(vsi->netdev, 5378 "the driver failed to link because an unqualified module was detected."); 5379 } 5380 5381 /* replay FDIR SB filters */ 5382 if (vsi->type == I40E_VSI_FDIR) { 5383 /* reset fd counters */ 5384 pf->fd_add_err = pf->fd_atr_cnt = 0; 5385 if (pf->fd_tcp_rule > 0) { 5386 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; 5387 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5388 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); 5389 pf->fd_tcp_rule = 0; 5390 } 5391 i40e_fdir_filter_restore(vsi); 5392 } 5393 5394 /* On the next run of the service_task, notify any clients of the new 5395 * opened netdev 5396 */ 5397 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; 5398 i40e_service_event_schedule(pf); 5399 5400 return 0; 5401 } 5402 5403 /** 5404 * i40e_vsi_reinit_locked - Reset the VSI 5405 * @vsi: the VSI being configured 5406 * 5407 * Rebuild the ring structs after some configuration 5408 * has changed, e.g. MTU size. 
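 *
 * A hedged caller sketch (this mirrors how an MTU change is handled
 * elsewhere in the driver; illustrative only):
 *
 *	netdev->mtu = new_mtu;
 *	if (netif_running(netdev))
 *		i40e_vsi_reinit_locked(vsi);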
5409 **/ 5410 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) 5411 { 5412 struct i40e_pf *pf = vsi->back; 5413 5414 WARN_ON(in_interrupt()); 5415 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 5416 usleep_range(1000, 2000); 5417 i40e_down(vsi); 5418 5419 i40e_up(vsi); 5420 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 5421 } 5422 5423 /** 5424 * i40e_up - Bring the connection back up after being down 5425 * @vsi: the VSI being configured 5426 **/ 5427 int i40e_up(struct i40e_vsi *vsi) 5428 { 5429 int err; 5430 5431 err = i40e_vsi_configure(vsi); 5432 if (!err) 5433 err = i40e_up_complete(vsi); 5434 5435 return err; 5436 } 5437 5438 /** 5439 * i40e_down - Shutdown the connection processing 5440 * @vsi: the VSI being stopped 5441 **/ 5442 void i40e_down(struct i40e_vsi *vsi) 5443 { 5444 int i; 5445 5446 /* It is assumed that the caller of this function 5447 * sets the vsi->state __I40E_DOWN bit. 5448 */ 5449 if (vsi->netdev) { 5450 netif_carrier_off(vsi->netdev); 5451 netif_tx_disable(vsi->netdev); 5452 } 5453 i40e_vsi_disable_irq(vsi); 5454 i40e_vsi_stop_rings(vsi); 5455 i40e_napi_disable_all(vsi); 5456 5457 for (i = 0; i < vsi->num_queue_pairs; i++) { 5458 i40e_clean_tx_ring(vsi->tx_rings[i]); 5459 i40e_clean_rx_ring(vsi->rx_rings[i]); 5460 } 5461 5462 i40e_notify_client_of_netdev_close(vsi, false); 5463 5464 } 5465 5466 /** 5467 * i40e_setup_tc - configure multiple traffic classes 5468 * @netdev: net device to configure 5469 * @tc: number of traffic classes to enable 5470 **/ 5471 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 5472 { 5473 struct i40e_netdev_priv *np = netdev_priv(netdev); 5474 struct i40e_vsi *vsi = np->vsi; 5475 struct i40e_pf *pf = vsi->back; 5476 u8 enabled_tc = 0; 5477 int ret = -EINVAL; 5478 int i; 5479 5480 /* Check if DCB enabled to continue */ 5481 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 5482 netdev_info(netdev, "DCB is not enabled for adapter\n"); 5483 goto exit; 5484 } 5485 5486 /* Check if MFP enabled */ 5487 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 5488 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 5489 goto exit; 5490 } 5491 5492 /* Check whether tc count is within enabled limit */ 5493 if (tc > i40e_pf_get_num_tc(pf)) { 5494 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 5495 goto exit; 5496 } 5497 5498 /* Generate TC map for number of tc requested */ 5499 for (i = 0; i < tc; i++) 5500 enabled_tc |= BIT(i); 5501 5502 /* Requesting same TC configuration as already enabled */ 5503 if (enabled_tc == vsi->tc_config.enabled_tc) 5504 return 0; 5505 5506 /* Quiesce VSI queues */ 5507 i40e_quiesce_vsi(vsi); 5508 5509 /* Configure VSI for enabled TCs */ 5510 ret = i40e_vsi_config_tc(vsi, enabled_tc); 5511 if (ret) { 5512 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 5513 vsi->seid); 5514 goto exit; 5515 } 5516 5517 /* Unquiesce VSI */ 5518 i40e_unquiesce_vsi(vsi); 5519 5520 exit: 5521 return ret; 5522 } 5523 5524 #ifdef I40E_FCOE 5525 int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, 5526 struct tc_to_netdev *tc) 5527 #else 5528 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, 5529 struct tc_to_netdev *tc) 5530 #endif 5531 { 5532 if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) 5533 return -EINVAL; 5534 return i40e_setup_tc(netdev, tc->tc); 5535 } 5536 5537 /** 5538 * i40e_open - Called when a network interface is made active 5539 * @netdev: network interface device structure 5540 * 5541 * The open entry point 
is called when a network interface is made 5542 * active by the system (IFF_UP). At this point all resources needed 5543 * for transmit and receive operations are allocated, the interrupt 5544 * handler is registered with the OS, the netdev watchdog subtask is 5545 * enabled, and the stack is notified that the interface is ready. 5546 * 5547 * Returns 0 on success, negative value on failure 5548 **/ 5549 int i40e_open(struct net_device *netdev) 5550 { 5551 struct i40e_netdev_priv *np = netdev_priv(netdev); 5552 struct i40e_vsi *vsi = np->vsi; 5553 struct i40e_pf *pf = vsi->back; 5554 int err; 5555 5556 /* disallow open during test or if eeprom is broken */ 5557 if (test_bit(__I40E_TESTING, &pf->state) || 5558 test_bit(__I40E_BAD_EEPROM, &pf->state)) 5559 return -EBUSY; 5560 5561 netif_carrier_off(netdev); 5562 5563 err = i40e_vsi_open(vsi); 5564 if (err) 5565 return err; 5566 5567 /* configure global TSO hardware offload settings */ 5568 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | 5569 TCP_FLAG_FIN) >> 16); 5570 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | 5571 TCP_FLAG_FIN | 5572 TCP_FLAG_CWR) >> 16); 5573 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 5574 5575 udp_tunnel_get_rx_info(netdev); 5576 5577 return 0; 5578 } 5579 5580 /** 5581 * i40e_vsi_open - 5582 * @vsi: the VSI to open 5583 * 5584 * Finish initialization of the VSI. 5585 * 5586 * Returns 0 on success, negative value on failure 5587 **/ 5588 int i40e_vsi_open(struct i40e_vsi *vsi) 5589 { 5590 struct i40e_pf *pf = vsi->back; 5591 char int_name[I40E_INT_NAME_STR_LEN]; 5592 int err; 5593 5594 /* allocate descriptors */ 5595 err = i40e_vsi_setup_tx_resources(vsi); 5596 if (err) 5597 goto err_setup_tx; 5598 err = i40e_vsi_setup_rx_resources(vsi); 5599 if (err) 5600 goto err_setup_rx; 5601 5602 err = i40e_vsi_configure(vsi); 5603 if (err) 5604 goto err_setup_rx; 5605 5606 if (vsi->netdev) { 5607 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 5608 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 5609 err = i40e_vsi_request_irq(vsi, int_name); 5610 if (err) 5611 goto err_setup_rx; 5612 5613 /* Notify the stack of the actual queue counts. */ 5614 err = netif_set_real_num_tx_queues(vsi->netdev, 5615 vsi->num_queue_pairs); 5616 if (err) 5617 goto err_set_queues; 5618 5619 err = netif_set_real_num_rx_queues(vsi->netdev, 5620 vsi->num_queue_pairs); 5621 if (err) 5622 goto err_set_queues; 5623 5624 } else if (vsi->type == I40E_VSI_FDIR) { 5625 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", 5626 dev_driver_string(&pf->pdev->dev), 5627 dev_name(&pf->pdev->dev)); 5628 err = i40e_vsi_request_irq(vsi, int_name); 5629 5630 } else { 5631 err = -EINVAL; 5632 goto err_setup_rx; 5633 } 5634 5635 err = i40e_up_complete(vsi); 5636 if (err) 5637 goto err_up_complete; 5638 5639 return 0; 5640 5641 err_up_complete: 5642 i40e_down(vsi); 5643 err_set_queues: 5644 i40e_vsi_free_irq(vsi); 5645 err_setup_rx: 5646 i40e_vsi_free_rx_resources(vsi); 5647 err_setup_tx: 5648 i40e_vsi_free_tx_resources(vsi); 5649 if (vsi == pf->vsi[pf->lan_vsi]) 5650 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 5651 5652 return err; 5653 } 5654 5655 /** 5656 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting 5657 * @pf: Pointer to PF 5658 * 5659 * This function destroys the hlist where all the Flow Director 5660 * filters were saved. 
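 *
 * (hlist_for_each_entry_safe() is what makes deleting inside the loop
 *  legal: it caches the next pointer before the current entry is freed.)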
5661 **/ 5662 static void i40e_fdir_filter_exit(struct i40e_pf *pf) 5663 { 5664 struct i40e_fdir_filter *filter; 5665 struct hlist_node *node2; 5666 5667 hlist_for_each_entry_safe(filter, node2, 5668 &pf->fdir_filter_list, fdir_node) { 5669 hlist_del(&filter->fdir_node); 5670 kfree(filter); 5671 } 5672 pf->fdir_pf_active_filters = 0; 5673 } 5674 5675 /** 5676 * i40e_close - Disables a network interface 5677 * @netdev: network interface device structure 5678 * 5679 * The close entry point is called when an interface is de-activated 5680 * by the OS. The hardware is still under the driver's control, but 5681 * this netdev interface is disabled. 5682 * 5683 * Returns 0, this is not allowed to fail 5684 **/ 5685 int i40e_close(struct net_device *netdev) 5686 { 5687 struct i40e_netdev_priv *np = netdev_priv(netdev); 5688 struct i40e_vsi *vsi = np->vsi; 5689 5690 i40e_vsi_close(vsi); 5691 5692 return 0; 5693 } 5694 5695 /** 5696 * i40e_do_reset - Start a PF or Core Reset sequence 5697 * @pf: board private structure 5698 * @reset_flags: which reset is requested 5699 * 5700 * The essential difference in resets is that the PF Reset 5701 * doesn't clear the packet buffers, doesn't reset the PE 5702 * firmware, and doesn't bother the other PFs on the chip. 5703 **/ 5704 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 5705 { 5706 u32 val; 5707 5708 WARN_ON(in_interrupt()); 5709 5710 5711 /* do the biggest reset indicated */ 5712 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { 5713 5714 /* Request a Global Reset 5715 * 5716 * This will start the chip's countdown to the actual full 5717 * chip reset event, and a warning interrupt to be sent 5718 * to all PFs, including the requestor. Our handler 5719 * for the warning interrupt will deal with the shutdown 5720 * and recovery of the switch setup. 5721 */ 5722 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 5723 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5724 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 5725 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5726 5727 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { 5728 5729 /* Request a Core Reset 5730 * 5731 * Same as Global Reset, except does *not* include the MAC/PHY 5732 */ 5733 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 5734 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5735 val |= I40E_GLGEN_RTRIG_CORER_MASK; 5736 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5737 i40e_flush(&pf->hw); 5738 5739 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { 5740 5741 /* Request a PF Reset 5742 * 5743 * Resets only the PF-specific registers 5744 * 5745 * This goes directly to the tear-down and rebuild of 5746 * the switch, since we need to do all the recovery as 5747 * for the Core Reset. 
5748 */ 5749 dev_dbg(&pf->pdev->dev, "PFR requested\n"); 5750 i40e_handle_reset_warning(pf); 5751 5752 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { 5753 int v; 5754 5755 /* Find the VSI(s) that requested a re-init */ 5756 dev_info(&pf->pdev->dev, 5757 "VSI reinit requested\n"); 5758 for (v = 0; v < pf->num_alloc_vsi; v++) { 5759 struct i40e_vsi *vsi = pf->vsi[v]; 5760 5761 if (vsi != NULL && 5762 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 5763 i40e_vsi_reinit_locked(pf->vsi[v]); 5764 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); 5765 } 5766 } 5767 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { 5768 int v; 5769 5770 /* Find the VSI(s) that needs to be brought down */ 5771 dev_info(&pf->pdev->dev, "VSI down requested\n"); 5772 for (v = 0; v < pf->num_alloc_vsi; v++) { 5773 struct i40e_vsi *vsi = pf->vsi[v]; 5774 5775 if (vsi != NULL && 5776 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { 5777 set_bit(__I40E_DOWN, &vsi->state); 5778 i40e_down(vsi); 5779 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); 5780 } 5781 } 5782 } else { 5783 dev_info(&pf->pdev->dev, 5784 "bad reset request 0x%08x\n", reset_flags); 5785 } 5786 } 5787 5788 #ifdef CONFIG_I40E_DCB 5789 /** 5790 * i40e_dcb_need_reconfig - Check if DCB needs reconfig 5791 * @pf: board private structure 5792 * @old_cfg: current DCB config 5793 * @new_cfg: new DCB config 5794 **/ 5795 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 5796 struct i40e_dcbx_config *old_cfg, 5797 struct i40e_dcbx_config *new_cfg) 5798 { 5799 bool need_reconfig = false; 5800 5801 /* Check if ETS configuration has changed */ 5802 if (memcmp(&new_cfg->etscfg, 5803 &old_cfg->etscfg, 5804 sizeof(new_cfg->etscfg))) { 5805 /* If Priority Table has changed reconfig is needed */ 5806 if (memcmp(&new_cfg->etscfg.prioritytable, 5807 &old_cfg->etscfg.prioritytable, 5808 sizeof(new_cfg->etscfg.prioritytable))) { 5809 need_reconfig = true; 5810 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); 5811 } 5812 5813 if (memcmp(&new_cfg->etscfg.tcbwtable, 5814 &old_cfg->etscfg.tcbwtable, 5815 sizeof(new_cfg->etscfg.tcbwtable))) 5816 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 5817 5818 if (memcmp(&new_cfg->etscfg.tsatable, 5819 &old_cfg->etscfg.tsatable, 5820 sizeof(new_cfg->etscfg.tsatable))) 5821 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); 5822 } 5823 5824 /* Check if PFC configuration has changed */ 5825 if (memcmp(&new_cfg->pfc, 5826 &old_cfg->pfc, 5827 sizeof(new_cfg->pfc))) { 5828 need_reconfig = true; 5829 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); 5830 } 5831 5832 /* Check if APP Table has changed */ 5833 if (memcmp(&new_cfg->app, 5834 &old_cfg->app, 5835 sizeof(new_cfg->app))) { 5836 need_reconfig = true; 5837 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); 5838 } 5839 5840 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); 5841 return need_reconfig; 5842 } 5843 5844 /** 5845 * i40e_handle_lldp_event - Handle LLDP Change MIB event 5846 * @pf: board private structure 5847 * @e: event info posted on ARQ 5848 **/ 5849 static int i40e_handle_lldp_event(struct i40e_pf *pf, 5850 struct i40e_arq_event_info *e) 5851 { 5852 struct i40e_aqc_lldp_get_mib *mib = 5853 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; 5854 struct i40e_hw *hw = &pf->hw; 5855 struct i40e_dcbx_config tmp_dcbx_cfg; 5856 bool need_reconfig = false; 5857 int ret = 0; 5858 u8 type; 5859 5860 /* Not DCB capable or capability disabled */ 5861 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) 5862 return ret; 5863 5864 /* 
Ignore if event is not for Nearest Bridge */ 5865 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) 5866 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 5867 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); 5868 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) 5869 return ret; 5870 5871 /* Check MIB Type and return if event for Remote MIB update */ 5872 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; 5873 dev_dbg(&pf->pdev->dev, 5874 "LLDP event mib type %s\n", type ? "remote" : "local"); 5875 if (type == I40E_AQ_LLDP_MIB_REMOTE) { 5876 /* Update the remote cached instance and return */ 5877 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, 5878 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, 5879 &hw->remote_dcbx_config); 5880 goto exit; 5881 } 5882 5883 /* Store the old configuration */ 5884 tmp_dcbx_cfg = hw->local_dcbx_config; 5885 5886 /* Reset the old DCBx configuration data */ 5887 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); 5888 /* Get updated DCBX data from firmware */ 5889 ret = i40e_get_dcb_config(&pf->hw); 5890 if (ret) { 5891 dev_info(&pf->pdev->dev, 5892 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", 5893 i40e_stat_str(&pf->hw, ret), 5894 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5895 goto exit; 5896 } 5897 5898 /* No change detected in DCBX configs */ 5899 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, 5900 sizeof(tmp_dcbx_cfg))) { 5901 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 5902 goto exit; 5903 } 5904 5905 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, 5906 &hw->local_dcbx_config); 5907 5908 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); 5909 5910 if (!need_reconfig) 5911 goto exit; 5912 5913 /* Enable DCB tagging only when more than one TC */ 5914 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 5915 pf->flags |= I40E_FLAG_DCB_ENABLED; 5916 else 5917 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 5918 5919 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); 5920 /* Reconfiguration needed quiesce all VSIs */ 5921 i40e_pf_quiesce_all_vsi(pf); 5922 5923 /* Changes in configuration update VEB/VSI */ 5924 i40e_dcb_reconfigure(pf); 5925 5926 ret = i40e_resume_port_tx(pf); 5927 5928 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); 5929 /* In case of error no point in resuming VSIs */ 5930 if (ret) 5931 goto exit; 5932 5933 /* Wait for the PF's queues to be disabled */ 5934 ret = i40e_pf_wait_queues_disabled(pf); 5935 if (ret) { 5936 /* Schedule PF reset to recover */ 5937 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 5938 i40e_service_event_schedule(pf); 5939 } else { 5940 i40e_pf_unquiesce_all_vsi(pf); 5941 /* Notify the client for the DCB changes */ 5942 i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); 5943 } 5944 5945 exit: 5946 return ret; 5947 } 5948 #endif /* CONFIG_I40E_DCB */ 5949 5950 /** 5951 * i40e_do_reset_safe - Protected reset path for userland calls. 
5952 * @pf: board private structure
5953 * @reset_flags: which reset is requested
5954 *
5955 **/
5956 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5957 {
5958 rtnl_lock();
5959 i40e_do_reset(pf, reset_flags);
5960 rtnl_unlock();
5961 }
5962
5963 /**
5964 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5965 * @pf: board private structure
5966 * @e: event info posted on ARQ
5967 *
5968 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5969 * and VF queues
5970 **/
5971 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5972 struct i40e_arq_event_info *e)
5973 {
5974 struct i40e_aqc_lan_overflow *data =
5975 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5976 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5977 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5978 struct i40e_hw *hw = &pf->hw;
5979 struct i40e_vf *vf;
5980 u16 vf_id;
5981
5982 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5983 queue, qtx_ctl);
5984
5985 /* Queue belongs to VF, find the VF and issue VF reset */
5986 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5987 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5988 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5989 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5990 vf_id -= hw->func_caps.vf_base_id;
5991 vf = &pf->vf[vf_id];
5992 i40e_vc_notify_vf_reset(vf);
5993 /* Allow VF to process pending reset notification */
5994 msleep(20);
5995 i40e_reset_vf(vf, false);
5996 }
5997 }
5998
5999 /**
6000 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
6001 * @pf: board private structure
6002 **/
6003 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
6004 {
6005 u32 val, fcnt_prog;
6006
6007 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6008 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
6009 return fcnt_prog;
6010 }
6011
6012 /**
6013 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
6014 * @pf: board private structure
6015 **/
6016 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
6017 {
6018 u32 val, fcnt_prog;
6019
6020 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6021 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
6022 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
6023 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
6024 return fcnt_prog;
6025 }
6026
6027 /**
6028 * i40e_get_global_fd_count - Get total FD filters programmed on device
6029 * @pf: board private structure
6030 **/
6031 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
6032 {
6033 u32 val, fcnt_prog;
6034
6035 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
6036 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
6037 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
6038 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
6039 return fcnt_prog;
6040 }
6041
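/* Worked example (illustrative only; the exact field positions are an
 * assumption inferred from the mask/shift names above): if GUARANT_CNT
 * occupies the low bits of I40E_PFQF_FDSTAT and BEST_CNT sits at a
 * shift of 16, a raw register read of 0x00050020 would decode as
 * 0x20 (32) guaranteed filters plus 0x5 (5) best-effort filters, so
 * i40e_get_current_fd_count() would return 37.
 */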
6042 /**
6043 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
6044 * @pf: board private structure
6045 **/
6046 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
6047 {
6048 struct i40e_fdir_filter *filter;
6049 u32 fcnt_prog, fcnt_avail;
6050 struct hlist_node *node;
6051
6052 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
6053 return;
6054
6055 /* Check if FD SB or ATR was auto disabled and if there is enough room
6056 * to re-enable
6057 */
6058 fcnt_prog = i40e_get_global_fd_count(pf);
6059 fcnt_avail = pf->fdir_pf_filter_count;
6060 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
6061 (pf->fd_add_err == 0) ||
6062 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
6063 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
6064 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
6065 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
6066 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6067 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
6068 }
6069 }
6070
6071 /* Wait for some more space to be available to turn on ATR. We also
6072 * must check that no existing ntuple rules for TCP are in effect
6073 */
6074 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
6075 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
6076 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
6077 (pf->fd_tcp_rule == 0)) {
6078 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6079 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6080 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
6081 }
6082 }
6083
6084 /* if hw had a problem adding a filter, delete it */
6085 if (pf->fd_inv > 0) {
6086 hlist_for_each_entry_safe(filter, node,
6087 &pf->fdir_filter_list, fdir_node) {
6088 if (filter->fd_id == pf->fd_inv) {
6089 hlist_del(&filter->fdir_node);
6090 kfree(filter);
6091 pf->fdir_pf_active_filters--;
6092 }
6093 }
6094 }
6095 }
6096
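/* Background sketch (not part of the driver): the flush logic below
 * rate-limits itself with standard jiffies arithmetic, where HZ
 * converts seconds to jiffies and last_flush / do_flush() are
 * hypothetical names:
 *
 *	unsigned long next_allowed = last_flush + 10 * HZ;
 *
 *	if (time_after(jiffies, next_allowed))
 *		do_flush();
 *
 * which runs do_flush() at most once every 10 seconds. time_after()
 * is used rather than a plain ">" comparison because it remains
 * correct when the jiffies counter wraps around.
 */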
6097 #define I40E_MIN_FD_FLUSH_INTERVAL 10
6098 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
6099 /**
6100 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
6101 * @pf: board private structure
6102 **/
6103 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
6104 {
6105 unsigned long min_flush_time;
6106 int flush_wait_retry = 50;
6107 bool disable_atr = false;
6108 int fd_room;
6109 int reg;
6110
6111 if (!time_after(jiffies, pf->fd_flush_timestamp +
6112 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
6113 return;
6114
6115 /* If the flush is happening too quickly and we have mostly SB rules we
6116 * should not re-enable ATR for some time.
6117 */
6118 min_flush_time = pf->fd_flush_timestamp +
6119 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
6120 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
6121
6122 if (!(time_after(jiffies, min_flush_time)) &&
6123 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
6124 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6125 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
6126 disable_atr = true;
6127 }
6128
6129 pf->fd_flush_timestamp = jiffies;
6130 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
6131 /* flush all filters */
6132 wr32(&pf->hw, I40E_PFQF_CTL_1,
6133 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
6134 i40e_flush(&pf->hw);
6135 pf->fd_flush_cnt++;
6136 pf->fd_add_err = 0;
6137 do {
6138 /* Check FD flush status every 5-6msec */
6139 usleep_range(5000, 6000);
6140 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
6141 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
6142 break;
6143 } while (flush_wait_retry--);
6144 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
6145 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
6146 } else {
6147 /* replay sideband filters */
6148 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
6149 if (!disable_atr)
6150 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6151 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
6152 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6153 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6154 }
6155 }
6156
6157 /**
6158 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
6159 * @pf: board private structure
6160 **/
6161 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6162 {
6163 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6164 }
6165
6166 /* We can see up to 256 filter programming descriptors in transit if the
6167 * filters are being applied very quickly, before the first filter miss
6168 * error shows up on Rx queue 0. Accumulating enough error messages before
6169 * reacting ensures we don't trigger the flush too often.
6170 */ 6171 #define I40E_MAX_FD_PROGRAM_ERROR 256 6172 6173 /** 6174 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 6175 * @pf: board private structure 6176 **/ 6177 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 6178 { 6179 6180 /* if interface is down do nothing */ 6181 if (test_bit(__I40E_DOWN, &pf->state)) 6182 return; 6183 6184 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 6185 i40e_fdir_flush_and_replay(pf); 6186 6187 i40e_fdir_check_and_reenable(pf); 6188 6189 } 6190 6191 /** 6192 * i40e_vsi_link_event - notify VSI of a link event 6193 * @vsi: vsi to be notified 6194 * @link_up: link up or down 6195 **/ 6196 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 6197 { 6198 if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) 6199 return; 6200 6201 switch (vsi->type) { 6202 case I40E_VSI_MAIN: 6203 #ifdef I40E_FCOE 6204 case I40E_VSI_FCOE: 6205 #endif 6206 if (!vsi->netdev || !vsi->netdev_registered) 6207 break; 6208 6209 if (link_up) { 6210 netif_carrier_on(vsi->netdev); 6211 netif_tx_wake_all_queues(vsi->netdev); 6212 } else { 6213 netif_carrier_off(vsi->netdev); 6214 netif_tx_stop_all_queues(vsi->netdev); 6215 } 6216 break; 6217 6218 case I40E_VSI_SRIOV: 6219 case I40E_VSI_VMDQ2: 6220 case I40E_VSI_CTRL: 6221 case I40E_VSI_IWARP: 6222 case I40E_VSI_MIRROR: 6223 default: 6224 /* there is no notification for other VSIs */ 6225 break; 6226 } 6227 } 6228 6229 /** 6230 * i40e_veb_link_event - notify elements on the veb of a link event 6231 * @veb: veb to be notified 6232 * @link_up: link up or down 6233 **/ 6234 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 6235 { 6236 struct i40e_pf *pf; 6237 int i; 6238 6239 if (!veb || !veb->pf) 6240 return; 6241 pf = veb->pf; 6242 6243 /* depth first... */ 6244 for (i = 0; i < I40E_MAX_VEB; i++) 6245 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 6246 i40e_veb_link_event(pf->veb[i], link_up); 6247 6248 /* ... now the local VSIs */ 6249 for (i = 0; i < pf->num_alloc_vsi; i++) 6250 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 6251 i40e_vsi_link_event(pf->vsi[i], link_up); 6252 } 6253 6254 /** 6255 * i40e_link_event - Update netif_carrier status 6256 * @pf: board private structure 6257 **/ 6258 static void i40e_link_event(struct i40e_pf *pf) 6259 { 6260 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6261 u8 new_link_speed, old_link_speed; 6262 i40e_status status; 6263 bool new_link, old_link; 6264 6265 /* save off old link status information */ 6266 pf->hw.phy.link_info_old = pf->hw.phy.link_info; 6267 6268 /* set this to force the get_link_status call to refresh state */ 6269 pf->hw.phy.get_link_info = true; 6270 6271 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 6272 6273 status = i40e_get_link_status(&pf->hw, &new_link); 6274 if (status) { 6275 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", 6276 status); 6277 return; 6278 } 6279 6280 old_link_speed = pf->hw.phy.link_info_old.link_speed; 6281 new_link_speed = pf->hw.phy.link_info.link_speed; 6282 6283 if (new_link == old_link && 6284 new_link_speed == old_link_speed && 6285 (test_bit(__I40E_DOWN, &vsi->state) || 6286 new_link == netif_carrier_ok(vsi->netdev))) 6287 return; 6288 6289 if (!test_bit(__I40E_DOWN, &vsi->state)) 6290 i40e_print_link_message(vsi, new_link); 6291 6292 /* Notify the base of the switch tree connected to 6293 * the link. Floating VEBs are not notified. 
6294 */ 6295 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 6296 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 6297 else 6298 i40e_vsi_link_event(vsi, new_link); 6299 6300 if (pf->vf) 6301 i40e_vc_notify_link_state(pf); 6302 6303 if (pf->flags & I40E_FLAG_PTP) 6304 i40e_ptp_set_increment(pf); 6305 } 6306 6307 /** 6308 * i40e_watchdog_subtask - periodic checks not using event driven response 6309 * @pf: board private structure 6310 **/ 6311 static void i40e_watchdog_subtask(struct i40e_pf *pf) 6312 { 6313 int i; 6314 6315 /* if interface is down do nothing */ 6316 if (test_bit(__I40E_DOWN, &pf->state) || 6317 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6318 return; 6319 6320 /* make sure we don't do these things too often */ 6321 if (time_before(jiffies, (pf->service_timer_previous + 6322 pf->service_timer_period))) 6323 return; 6324 pf->service_timer_previous = jiffies; 6325 6326 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) 6327 i40e_link_event(pf); 6328 6329 /* Update the stats for active netdevs so the network stack 6330 * can look at updated numbers whenever it cares to 6331 */ 6332 for (i = 0; i < pf->num_alloc_vsi; i++) 6333 if (pf->vsi[i] && pf->vsi[i]->netdev) 6334 i40e_update_stats(pf->vsi[i]); 6335 6336 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { 6337 /* Update the stats for the active switching components */ 6338 for (i = 0; i < I40E_MAX_VEB; i++) 6339 if (pf->veb[i]) 6340 i40e_update_veb_stats(pf->veb[i]); 6341 } 6342 6343 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 6344 } 6345 6346 /** 6347 * i40e_reset_subtask - Set up for resetting the device and driver 6348 * @pf: board private structure 6349 **/ 6350 static void i40e_reset_subtask(struct i40e_pf *pf) 6351 { 6352 u32 reset_flags = 0; 6353 6354 rtnl_lock(); 6355 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 6356 reset_flags |= BIT(__I40E_REINIT_REQUESTED); 6357 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 6358 } 6359 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 6360 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); 6361 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 6362 } 6363 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 6364 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); 6365 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 6366 } 6367 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 6368 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 6369 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 6370 } 6371 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { 6372 reset_flags |= BIT(__I40E_DOWN_REQUESTED); 6373 clear_bit(__I40E_DOWN_REQUESTED, &pf->state); 6374 } 6375 6376 /* If there's a recovery already waiting, it takes 6377 * precedence before starting a new reset sequence. 
6378 */ 6379 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 6380 i40e_handle_reset_warning(pf); 6381 goto unlock; 6382 } 6383 6384 /* If we're already down or resetting, just bail */ 6385 if (reset_flags && 6386 !test_bit(__I40E_DOWN, &pf->state) && 6387 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 6388 i40e_do_reset(pf, reset_flags); 6389 6390 unlock: 6391 rtnl_unlock(); 6392 } 6393 6394 /** 6395 * i40e_handle_link_event - Handle link event 6396 * @pf: board private structure 6397 * @e: event info posted on ARQ 6398 **/ 6399 static void i40e_handle_link_event(struct i40e_pf *pf, 6400 struct i40e_arq_event_info *e) 6401 { 6402 struct i40e_aqc_get_link_status *status = 6403 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 6404 6405 /* Do a new status request to re-enable LSE reporting 6406 * and load new status information into the hw struct 6407 * This completely ignores any state information 6408 * in the ARQ event info, instead choosing to always 6409 * issue the AQ update link status command. 6410 */ 6411 i40e_link_event(pf); 6412 6413 /* check for unqualified module, if link is down */ 6414 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 6415 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 6416 (!(status->link_info & I40E_AQ_LINK_UP))) 6417 dev_err(&pf->pdev->dev, 6418 "The driver failed to link because an unqualified module was detected.\n"); 6419 } 6420 6421 /** 6422 * i40e_clean_adminq_subtask - Clean the AdminQ rings 6423 * @pf: board private structure 6424 **/ 6425 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 6426 { 6427 struct i40e_arq_event_info event; 6428 struct i40e_hw *hw = &pf->hw; 6429 u16 pending, i = 0; 6430 i40e_status ret; 6431 u16 opcode; 6432 u32 oldval; 6433 u32 val; 6434 6435 /* Do not run clean AQ when PF reset fails */ 6436 if (test_bit(__I40E_RESET_FAILED, &pf->state)) 6437 return; 6438 6439 /* check for error indications */ 6440 val = rd32(&pf->hw, pf->hw.aq.arq.len); 6441 oldval = val; 6442 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 6443 if (hw->debug_mask & I40E_DEBUG_AQ) 6444 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 6445 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 6446 } 6447 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 6448 if (hw->debug_mask & I40E_DEBUG_AQ) 6449 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 6450 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 6451 pf->arq_overflows++; 6452 } 6453 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 6454 if (hw->debug_mask & I40E_DEBUG_AQ) 6455 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 6456 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 6457 } 6458 if (oldval != val) 6459 wr32(&pf->hw, pf->hw.aq.arq.len, val); 6460 6461 val = rd32(&pf->hw, pf->hw.aq.asq.len); 6462 oldval = val; 6463 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 6464 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6465 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 6466 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 6467 } 6468 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 6469 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6470 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 6471 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 6472 } 6473 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 6474 if (pf->hw.debug_mask & I40E_DEBUG_AQ) 6475 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 6476 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 6477 } 6478 if (oldval != val) 6479 wr32(&pf->hw, pf->hw.aq.asq.len, val); 6480 6481 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 6482 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 6483 if (!event.msg_buf) 6484 return; 
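/* Drain the AdminQ receive ring one event at a time, dispatching each
 * opcode to its handler. The loop is bounded by pf->adminq_work_limit
 * so a steady stream of firmware events cannot monopolize the service
 * task.
 */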
6485 6486 do { 6487 ret = i40e_clean_arq_element(hw, &event, &pending); 6488 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 6489 break; 6490 else if (ret) { 6491 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 6492 break; 6493 } 6494 6495 opcode = le16_to_cpu(event.desc.opcode); 6496 switch (opcode) { 6497 6498 case i40e_aqc_opc_get_link_status: 6499 i40e_handle_link_event(pf, &event); 6500 break; 6501 case i40e_aqc_opc_send_msg_to_pf: 6502 ret = i40e_vc_process_vf_msg(pf, 6503 le16_to_cpu(event.desc.retval), 6504 le32_to_cpu(event.desc.cookie_high), 6505 le32_to_cpu(event.desc.cookie_low), 6506 event.msg_buf, 6507 event.msg_len); 6508 break; 6509 case i40e_aqc_opc_lldp_update_mib: 6510 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 6511 #ifdef CONFIG_I40E_DCB 6512 rtnl_lock(); 6513 ret = i40e_handle_lldp_event(pf, &event); 6514 rtnl_unlock(); 6515 #endif /* CONFIG_I40E_DCB */ 6516 break; 6517 case i40e_aqc_opc_event_lan_overflow: 6518 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 6519 i40e_handle_lan_overflow_event(pf, &event); 6520 break; 6521 case i40e_aqc_opc_send_msg_to_peer: 6522 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 6523 break; 6524 case i40e_aqc_opc_nvm_erase: 6525 case i40e_aqc_opc_nvm_update: 6526 case i40e_aqc_opc_oem_post_update: 6527 i40e_debug(&pf->hw, I40E_DEBUG_NVM, 6528 "ARQ NVM operation 0x%04x completed\n", 6529 opcode); 6530 break; 6531 default: 6532 dev_info(&pf->pdev->dev, 6533 "ARQ: Unknown event 0x%04x ignored\n", 6534 opcode); 6535 break; 6536 } 6537 } while (pending && (i++ < pf->adminq_work_limit)); 6538 6539 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 6540 /* re-enable Admin queue interrupt cause */ 6541 val = rd32(hw, I40E_PFINT_ICR0_ENA); 6542 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 6543 wr32(hw, I40E_PFINT_ICR0_ENA, val); 6544 i40e_flush(hw); 6545 6546 kfree(event.msg_buf); 6547 } 6548 6549 /** 6550 * i40e_verify_eeprom - make sure eeprom is good to use 6551 * @pf: board private structure 6552 **/ 6553 static void i40e_verify_eeprom(struct i40e_pf *pf) 6554 { 6555 int err; 6556 6557 err = i40e_diag_eeprom_test(&pf->hw); 6558 if (err) { 6559 /* retry in case of garbage read */ 6560 err = i40e_diag_eeprom_test(&pf->hw); 6561 if (err) { 6562 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 6563 err); 6564 set_bit(__I40E_BAD_EEPROM, &pf->state); 6565 } 6566 } 6567 6568 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 6569 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 6570 clear_bit(__I40E_BAD_EEPROM, &pf->state); 6571 } 6572 } 6573 6574 /** 6575 * i40e_enable_pf_switch_lb 6576 * @pf: pointer to the PF structure 6577 * 6578 * enable switch loop back or die - no point in a return value 6579 **/ 6580 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 6581 { 6582 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6583 struct i40e_vsi_context ctxt; 6584 int ret; 6585 6586 ctxt.seid = pf->main_vsi_seid; 6587 ctxt.pf_num = pf->hw.pf_id; 6588 ctxt.vf_num = 0; 6589 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6590 if (ret) { 6591 dev_info(&pf->pdev->dev, 6592 "couldn't get PF vsi config, err %s aq_err %s\n", 6593 i40e_stat_str(&pf->hw, ret), 6594 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6595 return; 6596 } 6597 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6598 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6599 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6600 6601 ret = 
i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6602 if (ret) { 6603 dev_info(&pf->pdev->dev, 6604 "update vsi switch failed, err %s aq_err %s\n", 6605 i40e_stat_str(&pf->hw, ret), 6606 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6607 } 6608 } 6609 6610 /** 6611 * i40e_disable_pf_switch_lb 6612 * @pf: pointer to the PF structure 6613 * 6614 * disable switch loop back or die - no point in a return value 6615 **/ 6616 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 6617 { 6618 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6619 struct i40e_vsi_context ctxt; 6620 int ret; 6621 6622 ctxt.seid = pf->main_vsi_seid; 6623 ctxt.pf_num = pf->hw.pf_id; 6624 ctxt.vf_num = 0; 6625 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6626 if (ret) { 6627 dev_info(&pf->pdev->dev, 6628 "couldn't get PF vsi config, err %s aq_err %s\n", 6629 i40e_stat_str(&pf->hw, ret), 6630 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6631 return; 6632 } 6633 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6634 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6635 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6636 6637 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6638 if (ret) { 6639 dev_info(&pf->pdev->dev, 6640 "update vsi switch failed, err %s aq_err %s\n", 6641 i40e_stat_str(&pf->hw, ret), 6642 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6643 } 6644 } 6645 6646 /** 6647 * i40e_config_bridge_mode - Configure the HW bridge mode 6648 * @veb: pointer to the bridge instance 6649 * 6650 * Configure the loop back mode for the LAN VSI that is downlink to the 6651 * specified HW bridge instance. It is expected this function is called 6652 * when a new HW bridge is instantiated. 6653 **/ 6654 static void i40e_config_bridge_mode(struct i40e_veb *veb) 6655 { 6656 struct i40e_pf *pf = veb->pf; 6657 6658 if (pf->hw.debug_mask & I40E_DEBUG_LAN) 6659 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 6660 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 6661 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 6662 i40e_disable_pf_switch_lb(pf); 6663 else 6664 i40e_enable_pf_switch_lb(pf); 6665 } 6666 6667 /** 6668 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 6669 * @veb: pointer to the VEB instance 6670 * 6671 * This is a recursive function that first builds the attached VSIs then 6672 * recurses in to build the next layer of VEB. We track the connections 6673 * through our own index numbers because the seid's from the HW could 6674 * change across the reset. 
6675 **/ 6676 static int i40e_reconstitute_veb(struct i40e_veb *veb) 6677 { 6678 struct i40e_vsi *ctl_vsi = NULL; 6679 struct i40e_pf *pf = veb->pf; 6680 int v, veb_idx; 6681 int ret; 6682 6683 /* build VSI that owns this VEB, temporarily attached to base VEB */ 6684 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 6685 if (pf->vsi[v] && 6686 pf->vsi[v]->veb_idx == veb->idx && 6687 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 6688 ctl_vsi = pf->vsi[v]; 6689 break; 6690 } 6691 } 6692 if (!ctl_vsi) { 6693 dev_info(&pf->pdev->dev, 6694 "missing owner VSI for veb_idx %d\n", veb->idx); 6695 ret = -ENOENT; 6696 goto end_reconstitute; 6697 } 6698 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 6699 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 6700 ret = i40e_add_vsi(ctl_vsi); 6701 if (ret) { 6702 dev_info(&pf->pdev->dev, 6703 "rebuild of veb_idx %d owner VSI failed: %d\n", 6704 veb->idx, ret); 6705 goto end_reconstitute; 6706 } 6707 i40e_vsi_reset_stats(ctl_vsi); 6708 6709 /* create the VEB in the switch and move the VSI onto the VEB */ 6710 ret = i40e_add_veb(veb, ctl_vsi); 6711 if (ret) 6712 goto end_reconstitute; 6713 6714 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 6715 veb->bridge_mode = BRIDGE_MODE_VEB; 6716 else 6717 veb->bridge_mode = BRIDGE_MODE_VEPA; 6718 i40e_config_bridge_mode(veb); 6719 6720 /* create the remaining VSIs attached to this VEB */ 6721 for (v = 0; v < pf->num_alloc_vsi; v++) { 6722 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 6723 continue; 6724 6725 if (pf->vsi[v]->veb_idx == veb->idx) { 6726 struct i40e_vsi *vsi = pf->vsi[v]; 6727 6728 vsi->uplink_seid = veb->seid; 6729 ret = i40e_add_vsi(vsi); 6730 if (ret) { 6731 dev_info(&pf->pdev->dev, 6732 "rebuild of vsi_idx %d failed: %d\n", 6733 v, ret); 6734 goto end_reconstitute; 6735 } 6736 i40e_vsi_reset_stats(vsi); 6737 } 6738 } 6739 6740 /* create any VEBs attached to this VEB - RECURSION */ 6741 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 6742 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 6743 pf->veb[veb_idx]->uplink_seid = veb->seid; 6744 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 6745 if (ret) 6746 break; 6747 } 6748 } 6749 6750 end_reconstitute: 6751 return ret; 6752 } 6753 6754 /** 6755 * i40e_get_capabilities - get info about the HW 6756 * @pf: the PF struct 6757 **/ 6758 static int i40e_get_capabilities(struct i40e_pf *pf) 6759 { 6760 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 6761 u16 data_size; 6762 int buf_len; 6763 int err; 6764 6765 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 6766 do { 6767 cap_buf = kzalloc(buf_len, GFP_KERNEL); 6768 if (!cap_buf) 6769 return -ENOMEM; 6770 6771 /* this loads the data into the hw struct for us */ 6772 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 6773 &data_size, 6774 i40e_aqc_opc_list_func_capabilities, 6775 NULL); 6776 /* data loaded, buffer no longer needed */ 6777 kfree(cap_buf); 6778 6779 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 6780 /* retry with a larger buffer */ 6781 buf_len = data_size; 6782 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 6783 dev_info(&pf->pdev->dev, 6784 "capability discovery failed, err %s aq_err %s\n", 6785 i40e_stat_str(&pf->hw, err), 6786 i40e_aq_str(&pf->hw, 6787 pf->hw.aq.asq_last_status)); 6788 return -ENODEV; 6789 } 6790 } while (err); 6791 6792 if (pf->hw.debug_mask & I40E_DEBUG_USER) 6793 dev_info(&pf->pdev->dev, 6794 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 6795 
pf->hw.pf_id, pf->hw.func_caps.num_vfs, 6796 pf->hw.func_caps.num_msix_vectors, 6797 pf->hw.func_caps.num_msix_vectors_vf, 6798 pf->hw.func_caps.fd_filters_guaranteed, 6799 pf->hw.func_caps.fd_filters_best_effort, 6800 pf->hw.func_caps.num_tx_qp, 6801 pf->hw.func_caps.num_vsis); 6802 6803 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 6804 + pf->hw.func_caps.num_vfs) 6805 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 6806 dev_info(&pf->pdev->dev, 6807 "got num_vsis %d, setting num_vsis to %d\n", 6808 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 6809 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 6810 } 6811 6812 return 0; 6813 } 6814 6815 static int i40e_vsi_clear(struct i40e_vsi *vsi); 6816 6817 /** 6818 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 6819 * @pf: board private structure 6820 **/ 6821 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 6822 { 6823 struct i40e_vsi *vsi; 6824 6825 /* quick workaround for an NVM issue that leaves a critical register 6826 * uninitialized 6827 */ 6828 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 6829 static const u32 hkey[] = { 6830 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 6831 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 6832 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 6833 0x95b3a76d}; 6834 int i; 6835 6836 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 6837 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 6838 } 6839 6840 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6841 return; 6842 6843 /* find existing VSI and see if it needs configuring */ 6844 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); 6845 6846 /* create a new VSI if none exists */ 6847 if (!vsi) { 6848 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 6849 pf->vsi[pf->lan_vsi]->seid, 0); 6850 if (!vsi) { 6851 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 6852 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6853 return; 6854 } 6855 } 6856 6857 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 6858 } 6859 6860 /** 6861 * i40e_fdir_teardown - release the Flow Director resources 6862 * @pf: board private structure 6863 **/ 6864 static void i40e_fdir_teardown(struct i40e_pf *pf) 6865 { 6866 struct i40e_vsi *vsi; 6867 6868 i40e_fdir_filter_exit(pf); 6869 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); 6870 if (vsi) 6871 i40e_vsi_release(vsi); 6872 } 6873 6874 /** 6875 * i40e_prep_for_reset - prep for the core to reset 6876 * @pf: board private structure 6877 * 6878 * Close up the VFs and other things in prep for PF Reset. 
6879 **/
6880 static void i40e_prep_for_reset(struct i40e_pf *pf)
6881 {
6882 struct i40e_hw *hw = &pf->hw;
6883 i40e_status ret = 0;
6884 u32 v;
6885
6886 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6887 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6888 return;
6889 if (i40e_check_asq_alive(&pf->hw))
6890 i40e_vc_notify_reset(pf);
6891
6892 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6893
6894 /* quiesce the VSIs and their queues that are not already DOWN */
6895 i40e_pf_quiesce_all_vsi(pf);
6896
6897 for (v = 0; v < pf->num_alloc_vsi; v++) {
6898 if (pf->vsi[v])
6899 pf->vsi[v]->seid = 0;
6900 }
6901
6902 i40e_shutdown_adminq(&pf->hw);
6903
6904 /* call shutdown HMC */
6905 if (hw->hmc.hmc_obj) {
6906 ret = i40e_shutdown_lan_hmc(hw);
6907 if (ret)
6908 dev_warn(&pf->pdev->dev,
6909 "shutdown_lan_hmc failed: %d\n", ret);
6910 }
6911 }
6912
6913 /**
6914 * i40e_send_version - update firmware with driver version
6915 * @pf: PF struct
6916 */
6917 static void i40e_send_version(struct i40e_pf *pf)
6918 {
6919 struct i40e_driver_version dv;
6920
6921 dv.major_version = DRV_VERSION_MAJOR;
6922 dv.minor_version = DRV_VERSION_MINOR;
6923 dv.build_version = DRV_VERSION_BUILD;
6924 dv.subbuild_version = 0;
6925 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6926 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6927 }
6928
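/* The rebuild path below restores state strictly in dependency order:
 * PF reset first, then the AdminQ (so we can talk to firmware again),
 * then capabilities and the LAN HMC, then DCB and the PF switch, and
 * only then the VEB/VSI tree and the VFs. i40e_send_version() above
 * is deliberately the last call in that sequence, telling firmware
 * that the driver is up again.
 */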
6929 /**
6930 * i40e_reset_and_rebuild - reset and rebuild using a saved config
6931 * @pf: board private structure
6932 * @reinit: if the Main VSI needs to be re-initialized.
6933 **/
6934 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6935 {
6936 struct i40e_hw *hw = &pf->hw;
6937 u8 set_fc_aq_fail = 0;
6938 i40e_status ret;
6939 u32 val;
6940 u32 v;
6941
6942 /* Now we wait for GRST to settle out.
6943 * We don't have to delete the VEBs or VSIs from the hw switch
6944 * because the reset will make them disappear.
6945 */
6946 ret = i40e_pf_reset(hw);
6947 if (ret) {
6948 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6949 set_bit(__I40E_RESET_FAILED, &pf->state);
6950 goto clear_recovery;
6951 }
6952 pf->pfr_count++;
6953
6954 if (test_bit(__I40E_DOWN, &pf->state))
6955 goto clear_recovery;
6956 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6957
6958 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6959 ret = i40e_init_adminq(&pf->hw);
6960 if (ret) {
6961 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6962 i40e_stat_str(&pf->hw, ret),
6963 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6964 goto clear_recovery;
6965 }
6966
6967 /* re-verify the eeprom if we just had an EMP reset */
6968 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6969 i40e_verify_eeprom(pf);
6970
6971 i40e_clear_pxe_mode(hw);
6972 ret = i40e_get_capabilities(pf);
6973 if (ret)
6974 goto end_core_reset;
6975
6976 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6977 hw->func_caps.num_rx_qp,
6978 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6979 if (ret) {
6980 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6981 goto end_core_reset;
6982 }
6983 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6984 if (ret) {
6985 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6986 goto end_core_reset;
6987 }
6988
6989 #ifdef CONFIG_I40E_DCB
6990 ret = i40e_init_pf_dcb(pf);
6991 if (ret) {
6992 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6993 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6994 /* Continue without DCB enabled */
6995 }
6996 #endif /* CONFIG_I40E_DCB */
6997 #ifdef I40E_FCOE
6998 i40e_init_pf_fcoe(pf);
6999
7000 #endif
7001 /* do basic switch setup */
7002 ret = i40e_setup_pf_switch(pf, reinit);
7003 if (ret)
7004 goto end_core_reset;
7005
7006 /* The driver only wants link up/down and module qualification
7007 * reports from firmware. Note the negative logic.
7008 */
7009 ret = i40e_aq_set_phy_int_mask(&pf->hw,
7010 ~(I40E_AQ_EVENT_LINK_UPDOWN |
7011 I40E_AQ_EVENT_MEDIA_NA |
7012 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
7013 if (ret)
7014 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
7015 i40e_stat_str(&pf->hw, ret),
7016 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7017
7018 /* make sure our flow control settings are restored */
7019 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
7020 if (ret)
7021 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
7022 i40e_stat_str(&pf->hw, ret),
7023 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7024
7025 /* Rebuild the VSIs and VEBs that existed before reset.
7026 * They are still in our local switch element arrays, so only
7027 * need to rebuild the switch model in the HW.
7028 *
7029 * If there were VEBs but the reconstitution failed, we'll try
7030 * to recover minimal use by getting the basic PF VSI working.
7031 */ 7032 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 7033 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); 7034 /* find the one VEB connected to the MAC, and find orphans */ 7035 for (v = 0; v < I40E_MAX_VEB; v++) { 7036 if (!pf->veb[v]) 7037 continue; 7038 7039 if (pf->veb[v]->uplink_seid == pf->mac_seid || 7040 pf->veb[v]->uplink_seid == 0) { 7041 ret = i40e_reconstitute_veb(pf->veb[v]); 7042 7043 if (!ret) 7044 continue; 7045 7046 /* If Main VEB failed, we're in deep doodoo, 7047 * so give up rebuilding the switch and set up 7048 * for minimal rebuild of PF VSI. 7049 * If orphan failed, we'll report the error 7050 * but try to keep going. 7051 */ 7052 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 7053 dev_info(&pf->pdev->dev, 7054 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 7055 ret); 7056 pf->vsi[pf->lan_vsi]->uplink_seid 7057 = pf->mac_seid; 7058 break; 7059 } else if (pf->veb[v]->uplink_seid == 0) { 7060 dev_info(&pf->pdev->dev, 7061 "rebuild of orphan VEB failed: %d\n", 7062 ret); 7063 } 7064 } 7065 } 7066 } 7067 7068 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 7069 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 7070 /* no VEB, so rebuild only the Main VSI */ 7071 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 7072 if (ret) { 7073 dev_info(&pf->pdev->dev, 7074 "rebuild of Main VSI failed: %d\n", ret); 7075 goto end_core_reset; 7076 } 7077 } 7078 7079 /* Reconfigure hardware for allowing smaller MSS in the case 7080 * of TSO, so that we avoid the MDD being fired and causing 7081 * a reset in the case of small MSS+TSO. 7082 */ 7083 #define I40E_REG_MSS 0x000E64DC 7084 #define I40E_REG_MSS_MIN_MASK 0x3FF0000 7085 #define I40E_64BYTE_MSS 0x400000 7086 val = rd32(hw, I40E_REG_MSS); 7087 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 7088 val &= ~I40E_REG_MSS_MIN_MASK; 7089 val |= I40E_64BYTE_MSS; 7090 wr32(hw, I40E_REG_MSS, val); 7091 } 7092 7093 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { 7094 msleep(75); 7095 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 7096 if (ret) 7097 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 7098 i40e_stat_str(&pf->hw, ret), 7099 i40e_aq_str(&pf->hw, 7100 pf->hw.aq.asq_last_status)); 7101 } 7102 /* reinit the misc interrupt */ 7103 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7104 ret = i40e_setup_misc_vector(pf); 7105 7106 /* Add a filter to drop all Flow control frames from any VSI from being 7107 * transmitted. By doing so we stop a malicious VF from sending out 7108 * PAUSE or PFC frames and potentially controlling traffic for other 7109 * PF/VF VSIs. 7110 * The FW can still send Flow control frames if enabled. 7111 */ 7112 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, 7113 pf->main_vsi_seid); 7114 7115 /* restart the VSIs that were rebuilt and running before the reset */ 7116 i40e_pf_unquiesce_all_vsi(pf); 7117 7118 if (pf->num_alloc_vfs) { 7119 for (v = 0; v < pf->num_alloc_vfs; v++) 7120 i40e_reset_vf(&pf->vf[v], true); 7121 } 7122 7123 /* tell the firmware that we're starting */ 7124 i40e_send_version(pf); 7125 7126 end_core_reset: 7127 clear_bit(__I40E_RESET_FAILED, &pf->state); 7128 clear_recovery: 7129 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 7130 } 7131 7132 /** 7133 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 7134 * @pf: board private structure 7135 * 7136 * Close up the VFs and other things in prep for a Core Reset, 7137 * then get ready to rebuild the world. 
7138 **/ 7139 static void i40e_handle_reset_warning(struct i40e_pf *pf) 7140 { 7141 i40e_prep_for_reset(pf); 7142 i40e_reset_and_rebuild(pf, false); 7143 } 7144 7145 /** 7146 * i40e_handle_mdd_event 7147 * @pf: pointer to the PF structure 7148 * 7149 * Called from the MDD irq handler to identify possibly malicious vfs 7150 **/ 7151 static void i40e_handle_mdd_event(struct i40e_pf *pf) 7152 { 7153 struct i40e_hw *hw = &pf->hw; 7154 bool mdd_detected = false; 7155 bool pf_mdd_detected = false; 7156 struct i40e_vf *vf; 7157 u32 reg; 7158 int i; 7159 7160 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 7161 return; 7162 7163 /* find what triggered the MDD event */ 7164 reg = rd32(hw, I40E_GL_MDET_TX); 7165 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 7166 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 7167 I40E_GL_MDET_TX_PF_NUM_SHIFT; 7168 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 7169 I40E_GL_MDET_TX_VF_NUM_SHIFT; 7170 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 7171 I40E_GL_MDET_TX_EVENT_SHIFT; 7172 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 7173 I40E_GL_MDET_TX_QUEUE_SHIFT) - 7174 pf->hw.func_caps.base_queue; 7175 if (netif_msg_tx_err(pf)) 7176 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 7177 event, queue, pf_num, vf_num); 7178 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 7179 mdd_detected = true; 7180 } 7181 reg = rd32(hw, I40E_GL_MDET_RX); 7182 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 7183 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 7184 I40E_GL_MDET_RX_FUNCTION_SHIFT; 7185 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 7186 I40E_GL_MDET_RX_EVENT_SHIFT; 7187 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 7188 I40E_GL_MDET_RX_QUEUE_SHIFT) - 7189 pf->hw.func_caps.base_queue; 7190 if (netif_msg_rx_err(pf)) 7191 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 7192 event, queue, func); 7193 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 7194 mdd_detected = true; 7195 } 7196 7197 if (mdd_detected) { 7198 reg = rd32(hw, I40E_PF_MDET_TX); 7199 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 7200 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 7201 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 7202 pf_mdd_detected = true; 7203 } 7204 reg = rd32(hw, I40E_PF_MDET_RX); 7205 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 7206 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 7207 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 7208 pf_mdd_detected = true; 7209 } 7210 /* Queue belongs to the PF, initiate a reset */ 7211 if (pf_mdd_detected) { 7212 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 7213 i40e_service_event_schedule(pf); 7214 } 7215 } 7216 7217 /* see if one of the VFs needs its hand slapped */ 7218 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 7219 vf = &(pf->vf[i]); 7220 reg = rd32(hw, I40E_VP_MDET_TX(i)); 7221 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 7222 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 7223 vf->num_mdd_events++; 7224 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 7225 i); 7226 } 7227 7228 reg = rd32(hw, I40E_VP_MDET_RX(i)); 7229 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 7230 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 7231 vf->num_mdd_events++; 7232 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 7233 i); 7234 } 7235 7236 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 7237 dev_info(&pf->pdev->dev, 7238 "Too many MDD events on VF %d, disabled\n", i); 7239 
dev_info(&pf->pdev->dev, 7240 "Use PF Control I/F to re-enable the VF\n"); 7241 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 7242 } 7243 } 7244 7245 /* re-enable mdd interrupt cause */ 7246 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 7247 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 7248 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 7249 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 7250 i40e_flush(hw); 7251 } 7252 7253 /** 7254 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW 7255 * @pf: board private structure 7256 **/ 7257 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) 7258 { 7259 struct i40e_hw *hw = &pf->hw; 7260 i40e_status ret; 7261 __be16 port; 7262 int i; 7263 7264 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) 7265 return; 7266 7267 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; 7268 7269 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 7270 if (pf->pending_udp_bitmap & BIT_ULL(i)) { 7271 pf->pending_udp_bitmap &= ~BIT_ULL(i); 7272 port = pf->udp_ports[i].index; 7273 if (port) 7274 ret = i40e_aq_add_udp_tunnel(hw, port, 7275 pf->udp_ports[i].type, 7276 NULL, NULL); 7277 else 7278 ret = i40e_aq_del_udp_tunnel(hw, i, NULL); 7279 7280 if (ret) { 7281 dev_dbg(&pf->pdev->dev, 7282 "%s %s port %d, index %d failed, err %s aq_err %s\n", 7283 pf->udp_ports[i].type ? "vxlan" : "geneve", 7284 port ? "add" : "delete", 7285 ntohs(port), i, 7286 i40e_stat_str(&pf->hw, ret), 7287 i40e_aq_str(&pf->hw, 7288 pf->hw.aq.asq_last_status)); 7289 pf->udp_ports[i].index = 0; 7290 } 7291 } 7292 } 7293 } 7294 7295 /** 7296 * i40e_service_task - Run the driver's async subtasks 7297 * @work: pointer to work_struct containing our data 7298 **/ 7299 static void i40e_service_task(struct work_struct *work) 7300 { 7301 struct i40e_pf *pf = container_of(work, 7302 struct i40e_pf, 7303 service_task); 7304 unsigned long start_time = jiffies; 7305 7306 /* don't bother with service tasks if a reset is in progress */ 7307 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7308 return; 7309 } 7310 7311 if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) 7312 return; 7313 7314 i40e_detect_recover_hung(pf); 7315 i40e_sync_filters_subtask(pf); 7316 i40e_reset_subtask(pf); 7317 i40e_handle_mdd_event(pf); 7318 i40e_vc_process_vflr_event(pf); 7319 i40e_watchdog_subtask(pf); 7320 i40e_fdir_reinit_subtask(pf); 7321 i40e_client_subtask(pf); 7322 i40e_sync_filters_subtask(pf); 7323 i40e_sync_udp_filters_subtask(pf); 7324 i40e_clean_adminq_subtask(pf); 7325 7326 /* flush memory to make sure state is correct before next watchdog */ 7327 smp_mb__before_atomic(); 7328 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 7329 7330 /* If the tasks have taken longer than one timer cycle or there 7331 * is more work to be done, reschedule the service task now 7332 * rather than wait for the timer to tick again. 
7333 */
7334 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7335 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
7336 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
7337 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7338 i40e_service_event_schedule(pf);
7339 }
7340
7341 /**
7342 * i40e_service_timer - timer callback
7343 * @data: pointer to PF struct
7344 **/
7345 static void i40e_service_timer(unsigned long data)
7346 {
7347 struct i40e_pf *pf = (struct i40e_pf *)data;
7348
7349 mod_timer(&pf->service_timer,
7350 round_jiffies(jiffies + pf->service_timer_period));
7351 i40e_service_event_schedule(pf);
7352 }
7353
7354 /**
7355 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7356 * @vsi: the VSI being configured
7357 **/
7358 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7359 {
7360 struct i40e_pf *pf = vsi->back;
7361
7362 switch (vsi->type) {
7363 case I40E_VSI_MAIN:
7364 vsi->alloc_queue_pairs = pf->num_lan_qps;
7365 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7366 I40E_REQ_DESCRIPTOR_MULTIPLE);
7367 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7368 vsi->num_q_vectors = pf->num_lan_msix;
7369 else
7370 vsi->num_q_vectors = 1;
7371
7372 break;
7373
7374 case I40E_VSI_FDIR:
7375 vsi->alloc_queue_pairs = 1;
7376 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7377 I40E_REQ_DESCRIPTOR_MULTIPLE);
7378 vsi->num_q_vectors = pf->num_fdsb_msix;
7379 break;
7380
7381 case I40E_VSI_VMDQ2:
7382 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7383 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7384 I40E_REQ_DESCRIPTOR_MULTIPLE);
7385 vsi->num_q_vectors = pf->num_vmdq_msix;
7386 break;
7387
7388 case I40E_VSI_SRIOV:
7389 vsi->alloc_queue_pairs = pf->num_vf_qps;
7390 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7391 I40E_REQ_DESCRIPTOR_MULTIPLE);
7392 break;
7393
7394 #ifdef I40E_FCOE
7395 case I40E_VSI_FCOE:
7396 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7397 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7398 I40E_REQ_DESCRIPTOR_MULTIPLE);
7399 vsi->num_q_vectors = pf->num_fcoe_msix;
7400 break;
7401
7402 #endif /* I40E_FCOE */
7403 default:
7404 WARN_ON(1);
7405 return -ENODATA;
7406 }
7407
7408 return 0;
7409 }
7410
7411 /**
7412 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7413 * @vsi: pointer to the VSI being configured
7414 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7415 * 7416 * On error: returns error code (negative) 7417 * On success: returns 0 7418 **/ 7419 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) 7420 { 7421 int size; 7422 int ret = 0; 7423 7424 /* allocate memory for both Tx and Rx ring pointers */ 7425 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; 7426 vsi->tx_rings = kzalloc(size, GFP_KERNEL); 7427 if (!vsi->tx_rings) 7428 return -ENOMEM; 7429 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; 7430 7431 if (alloc_qvectors) { 7432 /* allocate memory for q_vector pointers */ 7433 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; 7434 vsi->q_vectors = kzalloc(size, GFP_KERNEL); 7435 if (!vsi->q_vectors) { 7436 ret = -ENOMEM; 7437 goto err_vectors; 7438 } 7439 } 7440 return ret; 7441 7442 err_vectors: 7443 kfree(vsi->tx_rings); 7444 return ret; 7445 } 7446 7447 /** 7448 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF 7449 * @pf: board private structure 7450 * @type: type of VSI 7451 * 7452 * On error: returns error code (negative) 7453 * On success: returns vsi index in PF (positive) 7454 **/ 7455 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) 7456 { 7457 int ret = -ENODEV; 7458 struct i40e_vsi *vsi; 7459 int vsi_idx; 7460 int i; 7461 7462 /* Need to protect the allocation of the VSIs at the PF level */ 7463 mutex_lock(&pf->switch_mutex); 7464 7465 /* VSI list may be fragmented if VSI creation/destruction has 7466 * been happening. We can afford to do a quick scan to look 7467 * for any free VSIs in the list. 7468 * 7469 * find next empty vsi slot, looping back around if necessary 7470 */ 7471 i = pf->next_vsi; 7472 while (i < pf->num_alloc_vsi && pf->vsi[i]) 7473 i++; 7474 if (i >= pf->num_alloc_vsi) { 7475 i = 0; 7476 while (i < pf->next_vsi && pf->vsi[i]) 7477 i++; 7478 } 7479 7480 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { 7481 vsi_idx = i; /* Found one! */ 7482 } else { 7483 ret = -ENODEV; 7484 goto unlock_pf; /* out of VSI slots! */ 7485 } 7486 pf->next_vsi = ++i; 7487 7488 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); 7489 if (!vsi) { 7490 ret = -ENOMEM; 7491 goto unlock_pf; 7492 } 7493 vsi->type = type; 7494 vsi->back = pf; 7495 set_bit(__I40E_DOWN, &vsi->state); 7496 vsi->flags = 0; 7497 vsi->idx = vsi_idx; 7498 vsi->int_rate_limit = 0; 7499 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 7500 pf->rss_table_size : 64; 7501 vsi->netdev_registered = false; 7502 vsi->work_limit = I40E_DEFAULT_IRQ_WORK; 7503 hash_init(vsi->mac_filter_hash); 7504 vsi->irqs_ready = false; 7505 7506 ret = i40e_set_num_rings_in_vsi(vsi); 7507 if (ret) 7508 goto err_rings; 7509 7510 ret = i40e_vsi_alloc_arrays(vsi, true); 7511 if (ret) 7512 goto err_rings; 7513 7514 /* Setup default MSIX irq handler for VSI */ 7515 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); 7516 7517 /* Initialize VSI lock */ 7518 spin_lock_init(&vsi->mac_filter_hash_lock); 7519 pf->vsi[vsi_idx] = vsi; 7520 ret = vsi_idx; 7521 goto unlock_pf; 7522 7523 err_rings: 7524 pf->next_vsi = i - 1; 7525 kfree(vsi); 7526 unlock_pf: 7527 mutex_unlock(&pf->switch_mutex); 7528 return ret; 7529 } 7530 7531 /** 7532 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI 7533 * @type: VSI pointer 7534 * @free_qvectors: a bool to specify if q_vectors need to be freed. 
7535 * 7536 * On error: returns error code (negative) 7537 * On success: returns 0 7538 **/ 7539 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) 7540 { 7541 /* free the ring and vector containers */ 7542 if (free_qvectors) { 7543 kfree(vsi->q_vectors); 7544 vsi->q_vectors = NULL; 7545 } 7546 kfree(vsi->tx_rings); 7547 vsi->tx_rings = NULL; 7548 vsi->rx_rings = NULL; 7549 } 7550 7551 /** 7552 * i40e_clear_rss_config_user - clear the user configured RSS hash keys 7553 * and lookup table 7554 * @vsi: Pointer to VSI structure 7555 */ 7556 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) 7557 { 7558 if (!vsi) 7559 return; 7560 7561 kfree(vsi->rss_hkey_user); 7562 vsi->rss_hkey_user = NULL; 7563 7564 kfree(vsi->rss_lut_user); 7565 vsi->rss_lut_user = NULL; 7566 } 7567 7568 /** 7569 * i40e_vsi_clear - Deallocate the VSI provided 7570 * @vsi: the VSI being un-configured 7571 **/ 7572 static int i40e_vsi_clear(struct i40e_vsi *vsi) 7573 { 7574 struct i40e_pf *pf; 7575 7576 if (!vsi) 7577 return 0; 7578 7579 if (!vsi->back) 7580 goto free_vsi; 7581 pf = vsi->back; 7582 7583 mutex_lock(&pf->switch_mutex); 7584 if (!pf->vsi[vsi->idx]) { 7585 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", 7586 vsi->idx, vsi->idx, vsi, vsi->type); 7587 goto unlock_vsi; 7588 } 7589 7590 if (pf->vsi[vsi->idx] != vsi) { 7591 dev_err(&pf->pdev->dev, 7592 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", 7593 pf->vsi[vsi->idx]->idx, 7594 pf->vsi[vsi->idx], 7595 pf->vsi[vsi->idx]->type, 7596 vsi->idx, vsi, vsi->type); 7597 goto unlock_vsi; 7598 } 7599 7600 /* updates the PF for this cleared vsi */ 7601 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 7602 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 7603 7604 i40e_vsi_free_arrays(vsi, true); 7605 i40e_clear_rss_config_user(vsi); 7606 7607 pf->vsi[vsi->idx] = NULL; 7608 if (vsi->idx < pf->next_vsi) 7609 pf->next_vsi = vsi->idx; 7610 7611 unlock_vsi: 7612 mutex_unlock(&pf->switch_mutex); 7613 free_vsi: 7614 kfree(vsi); 7615 7616 return 0; 7617 } 7618 7619 /** 7620 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI 7621 * @vsi: the VSI being cleaned 7622 **/ 7623 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) 7624 { 7625 int i; 7626 7627 if (vsi->tx_rings && vsi->tx_rings[0]) { 7628 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7629 kfree_rcu(vsi->tx_rings[i], rcu); 7630 vsi->tx_rings[i] = NULL; 7631 vsi->rx_rings[i] = NULL; 7632 } 7633 } 7634 } 7635 7636 /** 7637 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 7638 * @vsi: the VSI being configured 7639 **/ 7640 static int i40e_alloc_rings(struct i40e_vsi *vsi) 7641 { 7642 struct i40e_ring *tx_ring, *rx_ring; 7643 struct i40e_pf *pf = vsi->back; 7644 int i; 7645 7646 /* Set basic values in the rings to be used later during open() */ 7647 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7648 /* allocate space for both Tx and Rx in one shot */ 7649 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 7650 if (!tx_ring) 7651 goto err_out; 7652 7653 tx_ring->queue_index = i; 7654 tx_ring->reg_idx = vsi->base_queue + i; 7655 tx_ring->ring_active = false; 7656 tx_ring->vsi = vsi; 7657 tx_ring->netdev = vsi->netdev; 7658 tx_ring->dev = &pf->pdev->dev; 7659 tx_ring->count = vsi->num_desc; 7660 tx_ring->size = 0; 7661 tx_ring->dcb_tc = 0; 7662 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) 7663 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; 7664 tx_ring->tx_itr_setting = 
pf->tx_itr_default; 7665 vsi->tx_rings[i] = tx_ring; 7666 7667 rx_ring = &tx_ring[1]; 7668 rx_ring->queue_index = i; 7669 rx_ring->reg_idx = vsi->base_queue + i; 7670 rx_ring->ring_active = false; 7671 rx_ring->vsi = vsi; 7672 rx_ring->netdev = vsi->netdev; 7673 rx_ring->dev = &pf->pdev->dev; 7674 rx_ring->count = vsi->num_desc; 7675 rx_ring->size = 0; 7676 rx_ring->dcb_tc = 0; 7677 rx_ring->rx_itr_setting = pf->rx_itr_default; 7678 vsi->rx_rings[i] = rx_ring; 7679 } 7680 7681 return 0; 7682 7683 err_out: 7684 i40e_vsi_clear_rings(vsi); 7685 return -ENOMEM; 7686 } 7687 7688 /** 7689 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 7690 * @pf: board private structure 7691 * @vectors: the number of MSI-X vectors to request 7692 * 7693 * Returns the number of vectors reserved, or error 7694 **/ 7695 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 7696 { 7697 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 7698 I40E_MIN_MSIX, vectors); 7699 if (vectors < 0) { 7700 dev_info(&pf->pdev->dev, 7701 "MSI-X vector reservation failed: %d\n", vectors); 7702 vectors = 0; 7703 } 7704 7705 return vectors; 7706 } 7707 7708 /** 7709 * i40e_init_msix - Setup the MSIX capability 7710 * @pf: board private structure 7711 * 7712 * Work with the OS to set up the MSIX vectors needed. 7713 * 7714 * Returns the number of vectors reserved or negative on failure 7715 **/ 7716 static int i40e_init_msix(struct i40e_pf *pf) 7717 { 7718 struct i40e_hw *hw = &pf->hw; 7719 int vectors_left; 7720 int v_budget, i; 7721 int v_actual; 7722 int iwarp_requested = 0; 7723 7724 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7725 return -ENODEV; 7726 7727 /* The number of vectors we'll request will be comprised of: 7728 * - Add 1 for "other" cause for Admin Queue events, etc. 7729 * - The number of LAN queue pairs 7730 * - Queues being used for RSS. 7731 * We don't need as many as max_rss_size vectors. 7732 * use rss_size instead in the calculation since that 7733 * is governed by number of cpus in the system. 7734 * - assumes symmetric Tx/Rx pairing 7735 * - The number of VMDq pairs 7736 * - The CPU count within the NUMA node if iWARP is enabled 7737 #ifdef I40E_FCOE 7738 * - The number of FCOE qps. 7739 #endif 7740 * Once we count this up, try the request. 7741 * 7742 * If we can't get what we want, we'll simplify to nearly nothing 7743 * and try again. If that still fails, we punt. 7744 */ 7745 vectors_left = hw->func_caps.num_msix_vectors; 7746 v_budget = 0; 7747 7748 /* reserve one vector for miscellaneous handler */ 7749 if (vectors_left) { 7750 v_budget++; 7751 vectors_left--; 7752 } 7753 7754 /* reserve vectors for the main PF traffic queues */ 7755 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7756 vectors_left -= pf->num_lan_msix; 7757 v_budget += pf->num_lan_msix; 7758 7759 /* reserve one vector for sideband flow director */ 7760 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7761 if (vectors_left) { 7762 pf->num_fdsb_msix = 1; 7763 v_budget++; 7764 vectors_left--; 7765 } else { 7766 pf->num_fdsb_msix = 0; 7767 } 7768 } 7769 7770 #ifdef I40E_FCOE 7771 /* can we reserve enough for FCoE? */ 7772 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7773 if (!vectors_left) 7774 pf->num_fcoe_msix = 0; 7775 else if (vectors_left >= pf->num_fcoe_qps) 7776 pf->num_fcoe_msix = pf->num_fcoe_qps; 7777 else 7778 pf->num_fcoe_msix = 1; 7779 v_budget += pf->num_fcoe_msix; 7780 vectors_left -= pf->num_fcoe_msix; 7781 } 7782 7783 #endif 7784 /* can we reserve enough for iWARP? 
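 * If no vectors remain iWARP gets none; on a short budget it is dropped
 * to a single vector. Whatever is left after this step goes to VMDq
 * below.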
*/ 7785 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7786 iwarp_requested = pf->num_iwarp_msix; 7787 7788 if (!vectors_left) 7789 pf->num_iwarp_msix = 0; 7790 else if (vectors_left < pf->num_iwarp_msix) 7791 pf->num_iwarp_msix = 1; 7792 v_budget += pf->num_iwarp_msix; 7793 vectors_left -= pf->num_iwarp_msix; 7794 } 7795 7796 /* any vectors left over go for VMDq support */ 7797 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7798 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7799 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 7800 7801 if (!vectors_left) { 7802 pf->num_vmdq_msix = 0; 7803 pf->num_vmdq_qps = 0; 7804 } else { 7805 /* if we're short on vectors for what's desired, we limit 7806 * the queues per vmdq. If this is still more than are 7807 * available, the user will need to change the number of 7808 * queues/vectors used by the PF later with the ethtool 7809 * channels command 7810 */ 7811 if (vmdq_vecs < vmdq_vecs_wanted) 7812 pf->num_vmdq_qps = 1; 7813 pf->num_vmdq_msix = pf->num_vmdq_qps; 7814 7815 v_budget += vmdq_vecs; 7816 vectors_left -= vmdq_vecs; 7817 } 7818 } 7819 7820 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7821 GFP_KERNEL); 7822 if (!pf->msix_entries) 7823 return -ENOMEM; 7824 7825 for (i = 0; i < v_budget; i++) 7826 pf->msix_entries[i].entry = i; 7827 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 7828 7829 if (v_actual < I40E_MIN_MSIX) { 7830 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7831 kfree(pf->msix_entries); 7832 pf->msix_entries = NULL; 7833 pci_disable_msix(pf->pdev); 7834 return -ENODEV; 7835 7836 } else if (v_actual == I40E_MIN_MSIX) { 7837 /* Adjust for minimal MSIX use */ 7838 pf->num_vmdq_vsis = 0; 7839 pf->num_vmdq_qps = 0; 7840 pf->num_lan_qps = 1; 7841 pf->num_lan_msix = 1; 7842 7843 } else if (!vectors_left) { 7844 /* If we have limited resources, we will start with no vectors 7845 * for the special features and then allocate vectors to some 7846 * of these features based on the policy and at the end disable 7847 * the features that did not get any vectors. 
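 *
 * For example: if only 4 vectors were granted, vec is 3 once the misc
 * vector is set aside; with iWARP enabled, LAN and iWARP then get one
 * vector each, otherwise LAN gets two (see the switch below).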
7848 */ 7849 int vec; 7850 7851 dev_info(&pf->pdev->dev, 7852 "MSI-X vector limit reached, attempting to redistribute vectors\n"); 7853 /* reserve the misc vector */ 7854 vec = v_actual - 1; 7855 7856 /* Scale vector usage down */ 7857 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 7858 pf->num_vmdq_vsis = 1; 7859 pf->num_vmdq_qps = 1; 7860 #ifdef I40E_FCOE 7861 pf->num_fcoe_qps = 0; 7862 pf->num_fcoe_msix = 0; 7863 #endif 7864 7865 /* partition out the remaining vectors */ 7866 switch (vec) { 7867 case 2: 7868 pf->num_lan_msix = 1; 7869 break; 7870 case 3: 7871 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7872 pf->num_lan_msix = 1; 7873 pf->num_iwarp_msix = 1; 7874 } else { 7875 pf->num_lan_msix = 2; 7876 } 7877 #ifdef I40E_FCOE 7878 /* give one vector to FCoE */ 7879 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7880 pf->num_lan_msix = 1; 7881 pf->num_fcoe_msix = 1; 7882 } 7883 #endif 7884 break; 7885 default: 7886 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { 7887 pf->num_iwarp_msix = min_t(int, (vec / 3), 7888 iwarp_requested); 7889 pf->num_vmdq_vsis = min_t(int, (vec / 3), 7890 I40E_DEFAULT_NUM_VMDQ_VSI); 7891 } else { 7892 pf->num_vmdq_vsis = min_t(int, (vec / 2), 7893 I40E_DEFAULT_NUM_VMDQ_VSI); 7894 } 7895 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7896 pf->num_fdsb_msix = 1; 7897 vec--; 7898 } 7899 pf->num_lan_msix = min_t(int, 7900 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), 7901 pf->num_lan_msix); 7902 pf->num_lan_qps = pf->num_lan_msix; 7903 #ifdef I40E_FCOE 7904 /* give one vector to FCoE */ 7905 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7906 pf->num_fcoe_msix = 1; 7907 vec--; 7908 } 7909 #endif 7910 break; 7911 } 7912 } 7913 7914 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 7915 (pf->num_fdsb_msix == 0)) { 7916 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); 7917 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7918 } 7919 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 7920 (pf->num_vmdq_msix == 0)) { 7921 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7922 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7923 } 7924 7925 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && 7926 (pf->num_iwarp_msix == 0)) { 7927 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); 7928 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; 7929 } 7930 #ifdef I40E_FCOE 7931 7932 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7933 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 7934 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 7935 } 7936 #endif 7937 i40e_debug(&pf->hw, I40E_DEBUG_INIT, 7938 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", 7939 pf->num_lan_msix, 7940 pf->num_vmdq_msix * pf->num_vmdq_vsis, 7941 pf->num_fdsb_msix, 7942 pf->num_iwarp_msix); 7943 7944 return v_actual; 7945 } 7946 7947 /** 7948 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 7949 * @vsi: the VSI being configured 7950 * @v_idx: index of the vector in the vsi struct 7951 * @cpu: cpu to be used on affinity_mask 7952 * 7953 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
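 * The vector is also registered as a NAPI context on the VSI's netdev,
 * and @cpu seeds its affinity mask.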
7954 **/ 7955 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) 7956 { 7957 struct i40e_q_vector *q_vector; 7958 7959 /* allocate q_vector */ 7960 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 7961 if (!q_vector) 7962 return -ENOMEM; 7963 7964 q_vector->vsi = vsi; 7965 q_vector->v_idx = v_idx; 7966 cpumask_set_cpu(cpu, &q_vector->affinity_mask); 7967 7968 if (vsi->netdev) 7969 netif_napi_add(vsi->netdev, &q_vector->napi, 7970 i40e_napi_poll, NAPI_POLL_WEIGHT); 7971 7972 q_vector->rx.latency_range = I40E_LOW_LATENCY; 7973 q_vector->tx.latency_range = I40E_LOW_LATENCY; 7974 7975 /* tie q_vector and vsi together */ 7976 vsi->q_vectors[v_idx] = q_vector; 7977 7978 return 0; 7979 } 7980 7981 /** 7982 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 7983 * @vsi: the VSI being configured 7984 * 7985 * We allocate one q_vector per queue interrupt. If allocation fails we 7986 * return -ENOMEM. 7987 **/ 7988 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 7989 { 7990 struct i40e_pf *pf = vsi->back; 7991 int err, v_idx, num_q_vectors, current_cpu; 7992 7993 /* if not MSIX, give the one vector only to the LAN VSI */ 7994 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7995 num_q_vectors = vsi->num_q_vectors; 7996 else if (vsi == pf->vsi[pf->lan_vsi]) 7997 num_q_vectors = 1; 7998 else 7999 return -EINVAL; 8000 8001 current_cpu = cpumask_first(cpu_online_mask); 8002 8003 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 8004 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu); 8005 if (err) 8006 goto err_out; 8007 current_cpu = cpumask_next(current_cpu, cpu_online_mask); 8008 if (unlikely(current_cpu >= nr_cpu_ids)) 8009 current_cpu = cpumask_first(cpu_online_mask); 8010 } 8011 8012 return 0; 8013 8014 err_out: 8015 while (v_idx--) 8016 i40e_free_q_vector(vsi, v_idx); 8017 8018 return err; 8019 } 8020 8021 /** 8022 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 8023 * @pf: board private structure to initialize 8024 **/ 8025 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 8026 { 8027 int vectors = 0; 8028 ssize_t size; 8029 8030 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 8031 vectors = i40e_init_msix(pf); 8032 if (vectors < 0) { 8033 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 8034 I40E_FLAG_IWARP_ENABLED | 8035 #ifdef I40E_FCOE 8036 I40E_FLAG_FCOE_ENABLED | 8037 #endif 8038 I40E_FLAG_RSS_ENABLED | 8039 I40E_FLAG_DCB_CAPABLE | 8040 I40E_FLAG_DCB_ENABLED | 8041 I40E_FLAG_SRIOV_ENABLED | 8042 I40E_FLAG_FD_SB_ENABLED | 8043 I40E_FLAG_FD_ATR_ENABLED | 8044 I40E_FLAG_VMDQ_ENABLED); 8045 8046 /* rework the queue expectations without MSIX */ 8047 i40e_determine_queue_usage(pf); 8048 } 8049 } 8050 8051 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 8052 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 8053 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 8054 vectors = pci_enable_msi(pf->pdev); 8055 if (vectors < 0) { 8056 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 8057 vectors); 8058 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 8059 } 8060 vectors = 1; /* one MSI or Legacy vector */ 8061 } 8062 8063 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 8064 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 8065 8066 /* set up vector assignment tracking */ 8067 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 8068 pf->irq_pile = kzalloc(size, GFP_KERNEL); 8069 if (!pf->irq_pile) { 8070 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); 8071 
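/* no vector bookkeeping is possible without the pile */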
return -ENOMEM;
8072 }
8073 pf->irq_pile->num_entries = vectors;
8074 pf->irq_pile->search_hint = 0;
8075
8076 /* track first vector for misc interrupts, ignore return */
8077 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
8078
8079 return 0;
8080 }
8081
8082 /**
8083 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
8084 * @pf: board private structure
8085 *
8086 * This sets up the handler for MSIX 0, which is used to manage the
8087 * non-queue interrupts, e.g. AdminQ and errors. This is not used
8088 * when in MSI or Legacy interrupt mode.
8089 **/
8090 static int i40e_setup_misc_vector(struct i40e_pf *pf)
8091 {
8092 struct i40e_hw *hw = &pf->hw;
8093 int err = 0;
8094
8095 /* Only request the irq if this is the first time through, and
8096 * not when we're rebuilding after a Reset
8097 */
8098 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
8099 err = request_irq(pf->msix_entries[0].vector,
8100 i40e_intr, 0, pf->int_name, pf);
8101 if (err) {
8102 dev_info(&pf->pdev->dev,
8103 "request_irq for %s failed: %d\n",
8104 pf->int_name, err);
8105 return -EFAULT;
8106 }
8107 }
8108
8109 i40e_enable_misc_int_causes(pf);
8110
8111 /* associate no queues to the misc vector */
8112 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
8113 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
8114
8115 i40e_flush(hw);
8116
8117 i40e_irq_dynamic_enable_icr0(pf, true);
8118
8119 return err;
8120 }
8121
8122 /**
8123 * i40e_config_rss_aq - Prepare for RSS using AQ commands
8124 * @vsi: vsi structure
8125 * @seed: RSS hash seed
 * @lut: pointer to lookup table
 * @lut_size: size of the lookup table
8126 **/
8127 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8128 u8 *lut, u16 lut_size)
8129 {
8130 struct i40e_pf *pf = vsi->back;
8131 struct i40e_hw *hw = &pf->hw;
8132 int ret = 0;
8133
8134 if (seed) {
8135 struct i40e_aqc_get_set_rss_key_data *seed_dw =
8136 (struct i40e_aqc_get_set_rss_key_data *)seed;
8137 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8138 if (ret) {
8139 dev_info(&pf->pdev->dev,
8140 "Cannot set RSS key, err %s aq_err %s\n",
8141 i40e_stat_str(hw, ret),
8142 i40e_aq_str(hw, hw->aq.asq_last_status));
8143 return ret;
8144 }
8145 }
8146 if (lut) {
8147 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
8148
8149 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8150 if (ret) {
8151 dev_info(&pf->pdev->dev,
8152 "Cannot set RSS lut, err %s aq_err %s\n",
8153 i40e_stat_str(hw, ret),
8154 i40e_aq_str(hw, hw->aq.asq_last_status));
8155 return ret;
8156 }
8157 }
8158 return ret;
8159 }
8160
8161 /**
8162 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
8163 * @vsi: Pointer to vsi structure
8164 * @seed: Buffer to store the hash keys
8165 * @lut: Buffer to store the lookup table entries
8166 * @lut_size: Size of buffer to store the lookup table entries
8167 *
8168 * Return 0 on success, negative on failure
8169 */
8170 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8171 u8 *lut, u16 lut_size)
8172 {
8173 struct i40e_pf *pf = vsi->back;
8174 struct i40e_hw *hw = &pf->hw;
8175 int ret = 0;
8176
8177 if (seed) {
8178 ret = i40e_aq_get_rss_key(hw, vsi->id,
8179 (struct i40e_aqc_get_set_rss_key_data *)seed);
8180 if (ret) {
8181 dev_info(&pf->pdev->dev,
8182 "Cannot get RSS key, err %s aq_err %s\n",
8183 i40e_stat_str(&pf->hw, ret),
8184 i40e_aq_str(&pf->hw,
8185 pf->hw.aq.asq_last_status));
8186 return ret;
8187 }
8188 }
8189
8190 if (lut) {
8191 bool pf_lut = vsi->type == I40E_VSI_MAIN ?
true : false; 8192 8193 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); 8194 if (ret) { 8195 dev_info(&pf->pdev->dev, 8196 "Cannot get RSS lut, err %s aq_err %s\n", 8197 i40e_stat_str(&pf->hw, ret), 8198 i40e_aq_str(&pf->hw, 8199 pf->hw.aq.asq_last_status)); 8200 return ret; 8201 } 8202 } 8203 8204 return ret; 8205 } 8206 8207 /** 8208 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used 8209 * @vsi: VSI structure 8210 **/ 8211 static int i40e_vsi_config_rss(struct i40e_vsi *vsi) 8212 { 8213 u8 seed[I40E_HKEY_ARRAY_SIZE]; 8214 struct i40e_pf *pf = vsi->back; 8215 u8 *lut; 8216 int ret; 8217 8218 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) 8219 return 0; 8220 8221 if (!vsi->rss_size) 8222 vsi->rss_size = min_t(int, pf->alloc_rss_size, 8223 vsi->num_queue_pairs); 8224 if (!vsi->rss_size) 8225 return -EINVAL; 8226 8227 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 8228 if (!lut) 8229 return -ENOMEM; 8230 /* Use the user configured hash keys and lookup table if there is one, 8231 * otherwise use default 8232 */ 8233 if (vsi->rss_lut_user) 8234 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); 8235 else 8236 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); 8237 if (vsi->rss_hkey_user) 8238 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); 8239 else 8240 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); 8241 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); 8242 kfree(lut); 8243 8244 return ret; 8245 } 8246 8247 /** 8248 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers 8249 * @vsi: Pointer to vsi structure 8250 * @seed: RSS hash seed 8251 * @lut: Lookup table 8252 * @lut_size: Lookup table size 8253 * 8254 * Returns 0 on success, negative on failure 8255 **/ 8256 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, 8257 const u8 *lut, u16 lut_size) 8258 { 8259 struct i40e_pf *pf = vsi->back; 8260 struct i40e_hw *hw = &pf->hw; 8261 u16 vf_id = vsi->vf_id; 8262 u8 i; 8263 8264 /* Fill out hash function seed */ 8265 if (seed) { 8266 u32 *seed_dw = (u32 *)seed; 8267 8268 if (vsi->type == I40E_VSI_MAIN) { 8269 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 8270 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), 8271 seed_dw[i]); 8272 } else if (vsi->type == I40E_VSI_SRIOV) { 8273 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) 8274 i40e_write_rx_ctl(hw, 8275 I40E_VFQF_HKEY1(i, vf_id), 8276 seed_dw[i]); 8277 } else { 8278 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); 8279 } 8280 } 8281 8282 if (lut) { 8283 u32 *lut_dw = (u32 *)lut; 8284 8285 if (vsi->type == I40E_VSI_MAIN) { 8286 if (lut_size != I40E_HLUT_ARRAY_SIZE) 8287 return -EINVAL; 8288 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) 8289 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); 8290 } else if (vsi->type == I40E_VSI_SRIOV) { 8291 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) 8292 return -EINVAL; 8293 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) 8294 i40e_write_rx_ctl(hw, 8295 I40E_VFQF_HLUT1(i, vf_id), 8296 lut_dw[i]); 8297 } else { 8298 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); 8299 } 8300 } 8301 i40e_flush(hw); 8302 8303 return 0; 8304 } 8305 8306 /** 8307 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers 8308 * @vsi: Pointer to VSI structure 8309 * @seed: Buffer to store the keys 8310 * @lut: Buffer to store the lookup table entries 8311 * @lut_size: Size of buffer to store the lookup table entries 8312 * 8313 * Returns 0 on success, negative on failure 8314 */ 8315 static int 
i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8316 u8 *lut, u16 lut_size)
8317 {
8318 struct i40e_pf *pf = vsi->back;
8319 struct i40e_hw *hw = &pf->hw;
8320 u16 i;
8321
8322 if (seed) {
8323 u32 *seed_dw = (u32 *)seed;
8324
8325 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8326 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8327 }
8328 if (lut) {
8329 u32 *lut_dw = (u32 *)lut;
8330
8331 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8332 return -EINVAL;
8333 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8334 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8335 }
8336
8337 return 0;
8338 }
8339
8340 /**
8341 * i40e_config_rss - Configure RSS keys and lut
8342 * @vsi: Pointer to VSI structure
8343 * @seed: RSS hash seed
8344 * @lut: Lookup table
8345 * @lut_size: Lookup table size
8346 *
8347 * Returns 0 on success, negative on failure
8348 */
8349 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8350 {
8351 struct i40e_pf *pf = vsi->back;
8352
8353 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8354 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8355 else
8356 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8357 }
8358
8359 /**
8360 * i40e_get_rss - Get RSS keys and lut
8361 * @vsi: Pointer to VSI structure
8362 * @seed: Buffer to store the keys
8363 * @lut: Buffer to store the lookup table entries
8364 * @lut_size: Size of buffer to store the lookup table entries
8365 *
8366 * Returns 0 on success, negative on failure
8367 */
8368 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8369 {
8370 struct i40e_pf *pf = vsi->back;
8371
8372 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8373 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8374 else
8375 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8376 }
8377
8378 /**
8379 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8380 * @pf: Pointer to board private structure
8381 * @lut: Lookup table
8382 * @rss_table_size: Lookup table size
8383 * @rss_size: Range of queue number for hashing
8384 */
8385 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8386 u16 rss_table_size, u16 rss_size)
8387 {
8388 u16 i;
8389
8390 for (i = 0; i < rss_table_size; i++)
8391 lut[i] = i % rss_size;
8392 }
8393
8394 /**
8395 * i40e_pf_config_rss - Prepare for RSS if used
8396 * @pf: board private structure
8397 **/
8398 static int i40e_pf_config_rss(struct i40e_pf *pf)
8399 {
8400 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8401 u8 seed[I40E_HKEY_ARRAY_SIZE];
8402 u8 *lut;
8403 struct i40e_hw *hw = &pf->hw;
8404 u32 reg_val;
8405 u64 hena;
8406 int ret;
8407
8408 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8409 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8410 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8411 hena |= i40e_pf_get_default_rss_hena(pf);
8412
8413 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8414 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8415
8416 /* Determine the RSS table size based on the hardware capabilities */
8417 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8418 reg_val = (pf->rss_table_size == 512) ?
8419 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8420 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8421 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8422
8423 /* Determine the RSS size of the VSI */
8424 if (!vsi->rss_size)
8425 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8426 vsi->num_queue_pairs);
8427 if (!vsi->rss_size)
8428 return -EINVAL;
8429
8430 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8431 if (!lut)
8432 return -ENOMEM;
8433
8434 /* Use user configured lut if there is one, otherwise use default */
8435 if (vsi->rss_lut_user)
8436 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8437 else
8438 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8439
8440 /* Use user configured hash key if there is one, otherwise
8441 * use default.
8442 */
8443 if (vsi->rss_hkey_user)
8444 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8445 else
8446 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8447 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8448 kfree(lut);
8449
8450 return ret;
8451 }
8452
8453 /**
8454 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8455 * @pf: board private structure
8456 * @queue_count: the requested queue count for rss.
8457 *
8458 * returns 0 if rss is not enabled, if enabled returns the final rss queue
8459 * count which may be different from the requested queue count.
8460 **/
8461 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8462 {
8463 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8464 int new_rss_size;
8465
8466 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8467 return 0;
8468
8469 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8470
8471 if (queue_count != vsi->num_queue_pairs) {
8472 vsi->req_queue_pairs = queue_count;
8473 i40e_prep_for_reset(pf);
8474
8475 pf->alloc_rss_size = new_rss_size;
8476
8477 i40e_reset_and_rebuild(pf, true);
8478
8479 /* Discard the user configured hash keys and lut, if fewer
8480 * queues are enabled.
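 * (a stale user LUT could otherwise steer hash results to queues
 * that no longer exist)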
8481 */ 8482 if (queue_count < vsi->rss_size) { 8483 i40e_clear_rss_config_user(vsi); 8484 dev_dbg(&pf->pdev->dev, 8485 "discard user configured hash keys and lut\n"); 8486 } 8487 8488 /* Reset vsi->rss_size, as number of enabled queues changed */ 8489 vsi->rss_size = min_t(int, pf->alloc_rss_size, 8490 vsi->num_queue_pairs); 8491 8492 i40e_pf_config_rss(pf); 8493 } 8494 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", 8495 vsi->req_queue_pairs, pf->rss_size_max); 8496 return pf->alloc_rss_size; 8497 } 8498 8499 /** 8500 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition 8501 * @pf: board private structure 8502 **/ 8503 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) 8504 { 8505 i40e_status status; 8506 bool min_valid, max_valid; 8507 u32 max_bw, min_bw; 8508 8509 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, 8510 &min_valid, &max_valid); 8511 8512 if (!status) { 8513 if (min_valid) 8514 pf->npar_min_bw = min_bw; 8515 if (max_valid) 8516 pf->npar_max_bw = max_bw; 8517 } 8518 8519 return status; 8520 } 8521 8522 /** 8523 * i40e_set_npar_bw_setting - Set BW settings for this PF partition 8524 * @pf: board private structure 8525 **/ 8526 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) 8527 { 8528 struct i40e_aqc_configure_partition_bw_data bw_data; 8529 i40e_status status; 8530 8531 /* Set the valid bit for this PF */ 8532 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); 8533 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; 8534 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; 8535 8536 /* Set the new bandwidths */ 8537 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); 8538 8539 return status; 8540 } 8541 8542 /** 8543 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition 8544 * @pf: board private structure 8545 **/ 8546 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) 8547 { 8548 /* Commit temporary BW setting to permanent NVM image */ 8549 enum i40e_admin_queue_err last_aq_status; 8550 i40e_status ret; 8551 u16 nvm_word; 8552 8553 if (pf->hw.partition_id != 1) { 8554 dev_info(&pf->pdev->dev, 8555 "Commit BW only works on partition 1! 
This is partition %d", 8556 pf->hw.partition_id); 8557 ret = I40E_NOT_SUPPORTED; 8558 goto bw_commit_out; 8559 } 8560 8561 /* Acquire NVM for read access */ 8562 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 8563 last_aq_status = pf->hw.aq.asq_last_status; 8564 if (ret) { 8565 dev_info(&pf->pdev->dev, 8566 "Cannot acquire NVM for read access, err %s aq_err %s\n", 8567 i40e_stat_str(&pf->hw, ret), 8568 i40e_aq_str(&pf->hw, last_aq_status)); 8569 goto bw_commit_out; 8570 } 8571 8572 /* Read word 0x10 of NVM - SW compatibility word 1 */ 8573 ret = i40e_aq_read_nvm(&pf->hw, 8574 I40E_SR_NVM_CONTROL_WORD, 8575 0x10, sizeof(nvm_word), &nvm_word, 8576 false, NULL); 8577 /* Save off last admin queue command status before releasing 8578 * the NVM 8579 */ 8580 last_aq_status = pf->hw.aq.asq_last_status; 8581 i40e_release_nvm(&pf->hw); 8582 if (ret) { 8583 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", 8584 i40e_stat_str(&pf->hw, ret), 8585 i40e_aq_str(&pf->hw, last_aq_status)); 8586 goto bw_commit_out; 8587 } 8588 8589 /* Wait a bit for NVM release to complete */ 8590 msleep(50); 8591 8592 /* Acquire NVM for write access */ 8593 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 8594 last_aq_status = pf->hw.aq.asq_last_status; 8595 if (ret) { 8596 dev_info(&pf->pdev->dev, 8597 "Cannot acquire NVM for write access, err %s aq_err %s\n", 8598 i40e_stat_str(&pf->hw, ret), 8599 i40e_aq_str(&pf->hw, last_aq_status)); 8600 goto bw_commit_out; 8601 } 8602 /* Write it back out unchanged to initiate update NVM, 8603 * which will force a write of the shadow (alt) RAM to 8604 * the NVM - thus storing the bandwidth values permanently. 8605 */ 8606 ret = i40e_aq_update_nvm(&pf->hw, 8607 I40E_SR_NVM_CONTROL_WORD, 8608 0x10, sizeof(nvm_word), 8609 &nvm_word, true, NULL); 8610 /* Save off last admin queue command status before releasing 8611 * the NVM 8612 */ 8613 last_aq_status = pf->hw.aq.asq_last_status; 8614 i40e_release_nvm(&pf->hw); 8615 if (ret) 8616 dev_info(&pf->pdev->dev, 8617 "BW settings NOT SAVED, err %s aq_err %s\n", 8618 i40e_stat_str(&pf->hw, ret), 8619 i40e_aq_str(&pf->hw, last_aq_status)); 8620 bw_commit_out: 8621 8622 return ret; 8623 } 8624 8625 /** 8626 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 8627 * @pf: board private structure to initialize 8628 * 8629 * i40e_sw_init initializes the Adapter private data structure. 8630 * Fields are initialized based on PCI device information and 8631 * OS network device settings (MTU size). 
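 * Returns 0 on success, negative on failure.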
8632 **/ 8633 static int i40e_sw_init(struct i40e_pf *pf) 8634 { 8635 int err = 0; 8636 int size; 8637 8638 /* Set default capability flags */ 8639 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 8640 I40E_FLAG_MSI_ENABLED | 8641 I40E_FLAG_MSIX_ENABLED; 8642 8643 /* Set default ITR */ 8644 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; 8645 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; 8646 8647 /* Depending on PF configurations, it is possible that the RSS 8648 * maximum might end up larger than the available queues 8649 */ 8650 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); 8651 pf->alloc_rss_size = 1; 8652 pf->rss_table_size = pf->hw.func_caps.rss_table_size; 8653 pf->rss_size_max = min_t(int, pf->rss_size_max, 8654 pf->hw.func_caps.num_tx_qp); 8655 if (pf->hw.func_caps.rss) { 8656 pf->flags |= I40E_FLAG_RSS_ENABLED; 8657 pf->alloc_rss_size = min_t(int, pf->rss_size_max, 8658 num_online_cpus()); 8659 } 8660 8661 /* MFP mode enabled */ 8662 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { 8663 pf->flags |= I40E_FLAG_MFP_ENABLED; 8664 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 8665 if (i40e_get_npar_bw_setting(pf)) 8666 dev_warn(&pf->pdev->dev, 8667 "Could not get NPAR bw settings\n"); 8668 else 8669 dev_info(&pf->pdev->dev, 8670 "Min BW = %8.8x, Max BW = %8.8x\n", 8671 pf->npar_min_bw, pf->npar_max_bw); 8672 } 8673 8674 /* FW/NVM is not yet fixed in this regard */ 8675 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 8676 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 8677 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 8678 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 8679 if (pf->flags & I40E_FLAG_MFP_ENABLED && 8680 pf->hw.num_partitions > 1) 8681 dev_info(&pf->pdev->dev, 8682 "Flow Director Sideband mode Disabled in MFP mode\n"); 8683 else 8684 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8685 pf->fdir_pf_filter_count = 8686 pf->hw.func_caps.fd_filters_guaranteed; 8687 pf->hw.fdir_shared_filter_count = 8688 pf->hw.func_caps.fd_filters_best_effort; 8689 } 8690 8691 if (i40e_is_mac_710(&pf->hw) && 8692 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 8693 (pf->hw.aq.fw_maj_ver < 4))) { 8694 pf->flags |= I40E_FLAG_RESTART_AUTONEG; 8695 /* No DCB support for FW < v4.33 */ 8696 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; 8697 } 8698 8699 /* Disable FW LLDP if FW < v4.3 */ 8700 if (i40e_is_mac_710(&pf->hw) && 8701 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || 8702 (pf->hw.aq.fw_maj_ver < 4))) 8703 pf->flags |= I40E_FLAG_STOP_FW_LLDP; 8704 8705 /* Use the FW Set LLDP MIB API if FW > v4.40 */ 8706 if (i40e_is_mac_710(&pf->hw) && 8707 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || 8708 (pf->hw.aq.fw_maj_ver >= 5))) 8709 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; 8710 8711 if (pf->hw.func_caps.vmdq) { 8712 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 8713 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 8714 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); 8715 } 8716 8717 if (pf->hw.func_caps.iwarp) { 8718 pf->flags |= I40E_FLAG_IWARP_ENABLED; 8719 /* IWARP needs one extra vector for CQP just like MISC.*/ 8720 pf->num_iwarp_msix = (int)num_online_cpus() + 1; 8721 } 8722 8723 #ifdef I40E_FCOE 8724 i40e_init_pf_fcoe(pf); 8725 8726 #endif /* I40E_FCOE */ 8727 #ifdef CONFIG_PCI_IOV 8728 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { 8729 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 8730 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 8731 pf->num_req_vfs = min_t(int, 8732 pf->hw.func_caps.num_vfs, 8733 
I40E_MAX_VF_COUNT); 8734 } 8735 #endif /* CONFIG_PCI_IOV */ 8736 if (pf->hw.mac.type == I40E_MAC_X722) { 8737 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | 8738 I40E_FLAG_128_QP_RSS_CAPABLE | 8739 I40E_FLAG_HW_ATR_EVICT_CAPABLE | 8740 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8741 I40E_FLAG_WB_ON_ITR_CAPABLE | 8742 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8743 I40E_FLAG_NO_PCI_LINK_CHECK | 8744 I40E_FLAG_USE_SET_LLDP_MIB | 8745 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE | 8746 I40E_FLAG_PTP_L4_CAPABLE; 8747 } else if ((pf->hw.aq.api_maj_ver > 1) || 8748 ((pf->hw.aq.api_maj_ver == 1) && 8749 (pf->hw.aq.api_min_ver > 4))) { 8750 /* Supported in FW API version higher than 1.4 */ 8751 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8752 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8753 } else { 8754 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8755 } 8756 8757 pf->eeprom_version = 0xDEAD; 8758 pf->lan_veb = I40E_NO_VEB; 8759 pf->lan_vsi = I40E_NO_VSI; 8760 8761 /* By default FW has this off for performance reasons */ 8762 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; 8763 8764 /* set up queue assignment tracking */ 8765 size = sizeof(struct i40e_lump_tracking) 8766 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 8767 pf->qp_pile = kzalloc(size, GFP_KERNEL); 8768 if (!pf->qp_pile) { 8769 err = -ENOMEM; 8770 goto sw_init_done; 8771 } 8772 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 8773 pf->qp_pile->search_hint = 0; 8774 8775 pf->tx_timeout_recovery_level = 1; 8776 8777 mutex_init(&pf->switch_mutex); 8778 8779 /* If NPAR is enabled nudge the Tx scheduler */ 8780 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) 8781 i40e_set_npar_bw_setting(pf); 8782 8783 sw_init_done: 8784 return err; 8785 } 8786 8787 /** 8788 * i40e_set_ntuple - set the ntuple feature flag and take action 8789 * @pf: board private structure to initialize 8790 * @features: the feature set that the stack is suggesting 8791 * 8792 * returns a bool to indicate if reset needs to happen 8793 **/ 8794 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 8795 { 8796 bool need_reset = false; 8797 8798 /* Check if Flow Director n-tuple support was enabled or disabled. If 8799 * the state changed, we need to reset. 8800 */ 8801 if (features & NETIF_F_NTUPLE) { 8802 /* Enable filters and mark for reset */ 8803 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 8804 need_reset = true; 8805 /* enable FD_SB only if there is MSI-X vector */ 8806 if (pf->num_fdsb_msix > 0) 8807 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8808 } else { 8809 /* turn off filters, mark for reset and clear SW filter list */ 8810 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8811 need_reset = true; 8812 i40e_fdir_filter_exit(pf); 8813 } 8814 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 8815 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 8816 /* reset fd counters */ 8817 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; 8818 pf->fdir_pf_active_filters = 0; 8819 /* if ATR was auto disabled it can be re-enabled. 
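 * (clearing the auto-disable flag below allows ATR to be turned
 * back on later)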
*/
8820 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8821 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
8822 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8823 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8824 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8825 }
8826 }
8827 return need_reset;
8828 }
8829
8830 /**
8831 * i40e_clear_rss_lut - clear the rx hash lookup table
8832 * @vsi: the VSI being configured
8833 **/
8834 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
8835 {
8836 struct i40e_pf *pf = vsi->back;
8837 struct i40e_hw *hw = &pf->hw;
8838 u16 vf_id = vsi->vf_id;
8839 u8 i;
8840
8841 if (vsi->type == I40E_VSI_MAIN) {
8842 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8843 wr32(hw, I40E_PFQF_HLUT(i), 0);
8844 } else if (vsi->type == I40E_VSI_SRIOV) {
8845 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8846 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
8847 } else {
8848 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8849 }
8850 }
8851
8852 /**
8853 * i40e_set_features - set the netdev feature flags
8854 * @netdev: ptr to the netdev being adjusted
8855 * @features: the feature set that the stack is suggesting
8856 **/
8857 static int i40e_set_features(struct net_device *netdev,
8858 netdev_features_t features)
8859 {
8860 struct i40e_netdev_priv *np = netdev_priv(netdev);
8861 struct i40e_vsi *vsi = np->vsi;
8862 struct i40e_pf *pf = vsi->back;
8863 bool need_reset;
8864
8865 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
8866 i40e_pf_config_rss(pf);
8867 else if (!(features & NETIF_F_RXHASH) &&
8868 netdev->features & NETIF_F_RXHASH)
8869 i40e_clear_rss_lut(vsi);
8870
8871 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8872 i40e_vlan_stripping_enable(vsi);
8873 else
8874 i40e_vlan_stripping_disable(vsi);
8875
8876 need_reset = i40e_set_ntuple(pf, features);
8877
8878 if (need_reset)
8879 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8880
8881 return 0;
8882 }
8883
8884 /**
8885 * i40e_get_udp_port_idx - Look up a possibly offloaded UDP port for Rx
8886 * @pf: board private structure
8887 * @port: The UDP port to look up
8888 *
8889 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8890 **/
8891 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8892 {
8893 u8 i;
8894
8895 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8896 if (pf->udp_ports[i].index == port)
8897 return i;
8898 }
8899
8900 return i;
8901 }
8902
8903 /**
8904 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
8905 * @netdev: This physical port's netdev
8906 * @ti: Tunnel endpoint information
8907 **/
8908 static void i40e_udp_tunnel_add(struct net_device *netdev,
8909 struct udp_tunnel_info *ti)
8910 {
8911 struct i40e_netdev_priv *np = netdev_priv(netdev);
8912 struct i40e_vsi *vsi = np->vsi;
8913 struct i40e_pf *pf = vsi->back;
8914 __be16 port = ti->port;
8915 u8 next_idx;
8916 u8 idx;
8917
8918 idx = i40e_get_udp_port_idx(pf, port);
8919
8920 /* Check if port already exists */
8921 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8922 netdev_info(netdev, "port %d already offloaded\n",
8923 ntohs(port));
8924 return;
8925 }
8926
8927 /* Now check if there is space to add the new port */
8928 next_idx = i40e_get_udp_port_idx(pf, 0);
8929
8930 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8931 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
8932 ntohs(port));
8933 return;
8934 }
8935
8936 switch (ti->type) {
8937 case
UDP_TUNNEL_TYPE_VXLAN:
8938 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8939 break;
8940 case UDP_TUNNEL_TYPE_GENEVE:
8941 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8942 return;
8943 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8944 break;
8945 default:
8946 return;
8947 }
8948
8949 /* New port: add it and mark its index in the bitmap */
8950 pf->udp_ports[next_idx].index = port;
8951 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8952 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8953 }
8954
8955 /**
8956 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
8957 * @netdev: This physical port's netdev
8958 * @ti: Tunnel endpoint information
8959 **/
8960 static void i40e_udp_tunnel_del(struct net_device *netdev,
8961 struct udp_tunnel_info *ti)
8962 {
8963 struct i40e_netdev_priv *np = netdev_priv(netdev);
8964 struct i40e_vsi *vsi = np->vsi;
8965 struct i40e_pf *pf = vsi->back;
8966 __be16 port = ti->port;
8967 u8 idx;
8968
8969 idx = i40e_get_udp_port_idx(pf, port);
8970
8971 /* Check if port already exists */
8972 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8973 goto not_found;
8974
8975 switch (ti->type) {
8976 case UDP_TUNNEL_TYPE_VXLAN:
8977 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8978 goto not_found;
8979 break;
8980 case UDP_TUNNEL_TYPE_GENEVE:
8981 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8982 goto not_found;
8983 break;
8984 default:
8985 goto not_found;
8986 }
8987
8988 /* if port exists, set it to 0 (mark for deletion)
8989 * and make it pending
8990 */
8991 pf->udp_ports[idx].index = 0;
8992 pf->pending_udp_bitmap |= BIT_ULL(idx);
8993 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8994
8995 return;
8996 not_found:
8997 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8998 ntohs(port));
8999 }
9000
9001 static int i40e_get_phys_port_id(struct net_device *netdev,
9002 struct netdev_phys_item_id *ppid)
9003 {
9004 struct i40e_netdev_priv *np = netdev_priv(netdev);
9005 struct i40e_pf *pf = np->vsi->back;
9006 struct i40e_hw *hw = &pf->hw;
9007
9008 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
9009 return -EOPNOTSUPP;
9010
9011 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
9012 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
9013
9014 return 0;
9015 }
9016
9017 /**
9018 * i40e_ndo_fdb_add - add an entry to the hardware database
9019 * @ndm: the input from the stack
9020 * @tb: pointer to array of nladdr (unused)
9021 * @dev: the net device pointer
9022 * @addr: the MAC address entry being added
 * @vid: VLAN ID
9023 * @flags: instructions from stack about fdb operation
9024 */
9025 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9026 struct net_device *dev,
9027 const unsigned char *addr, u16 vid,
9028 u16 flags)
9029 {
9030 struct i40e_netdev_priv *np = netdev_priv(dev);
9031 struct i40e_pf *pf = np->vsi->back;
9032 int err = 0;
9033
9034 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
9035 return -EOPNOTSUPP;
9036
9037 if (vid) {
9038 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
9039 return -EINVAL;
9040 }
9041
9042 /* Hardware does not support aging addresses so if a
9043 * ndm_state is given only allow permanent addresses
9044 */
9045 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
9046 netdev_info(dev, "FDB only supports static addresses\n");
9047 return -EINVAL;
9048 }
9049
9050 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
9051 err = dev_uc_add_excl(dev, addr);
9052 else if
(is_multicast_ether_addr(addr))
9053 err = dev_mc_add_excl(dev, addr);
9054 else
9055 err = -EINVAL;
9056
9057 /* Only return duplicate errors if NLM_F_EXCL is set */
9058 if (err == -EEXIST && !(flags & NLM_F_EXCL))
9059 err = 0;
9060
9061 return err;
9062 }
9063
9064 /**
9065 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
9066 * @dev: the netdev being configured
9067 * @nlh: RTNL message
9068 *
9069 * Inserts a new hardware bridge if not already created and
9070 * enables the bridging mode requested (VEB or VEPA). If the
9071 * hardware bridge has already been inserted and the request
9072 * is to change the mode then that requires a PF reset to
9073 * allow rebuild of the components with required hardware
9074 * bridge mode enabled.
9075 **/
9076 static int i40e_ndo_bridge_setlink(struct net_device *dev,
9077 struct nlmsghdr *nlh,
9078 u16 flags)
9079 {
9080 struct i40e_netdev_priv *np = netdev_priv(dev);
9081 struct i40e_vsi *vsi = np->vsi;
9082 struct i40e_pf *pf = vsi->back;
9083 struct i40e_veb *veb = NULL;
9084 struct nlattr *attr, *br_spec;
9085 int i, rem;
9086
9087 /* Only for PF VSI for now */
9088 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9089 return -EOPNOTSUPP;
9090
9091 /* Find the HW bridge for PF VSI */
9092 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9093 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9094 veb = pf->veb[i];
9095 }
9096
9097 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9098 if (!br_spec)
 return -EINVAL;
9099 nla_for_each_nested(attr, br_spec, rem) {
9100 __u16 mode;
9101
9102 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9103 continue;
9104
9105 mode = nla_get_u16(attr);
9106 if ((mode != BRIDGE_MODE_VEPA) &&
9107 (mode != BRIDGE_MODE_VEB))
9108 return -EINVAL;
9109
9110 /* Insert a new HW bridge */
9111 if (!veb) {
9112 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9113 vsi->tc_config.enabled_tc);
9114 if (veb) {
9115 veb->bridge_mode = mode;
9116 i40e_config_bridge_mode(veb);
9117 } else {
9118 /* No Bridge HW offload available */
9119 return -ENOENT;
9120 }
9121 break;
9122 } else if (mode != veb->bridge_mode) {
9123 /* Existing HW bridge but different mode needs reset */
9124 veb->bridge_mode = mode;
9125 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
9126 if (mode == BRIDGE_MODE_VEB)
9127 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9128 else
9129 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9130 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
9131 break;
9132 }
9133 }
9134
9135 return 0;
9136 }
9137
9138 /**
9139 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
9140 * @skb: skb buff
9141 * @pid: process id
9142 * @seq: RTNL message seq #
9143 * @dev: the netdev being configured
9144 * @filter_mask: unused
9145 * @nlflags: netlink flags passed in
9146 *
9147 * Return the mode in which the hardware bridge is operating,
9148 * i.e. VEB or VEPA.
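 * If no HW bridge has been created yet, nothing is reported and 0 is
 * returned.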
9149 **/
9150 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9151 struct net_device *dev,
9152 u32 __always_unused filter_mask,
9153 int nlflags)
9154 {
9155 struct i40e_netdev_priv *np = netdev_priv(dev);
9156 struct i40e_vsi *vsi = np->vsi;
9157 struct i40e_pf *pf = vsi->back;
9158 struct i40e_veb *veb = NULL;
9159 int i;
9160
9161 /* Only for PF VSI for now */
9162 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9163 return -EOPNOTSUPP;
9164
9165 /* Find the HW bridge for the PF VSI */
9166 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9167 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9168 veb = pf->veb[i];
9169 }
9170
9171 if (!veb)
9172 return 0;
9173
9174 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9175 0, 0, nlflags, filter_mask, NULL);
9176 }
9177
9178 /**
9179 * i40e_features_check - Validate encapsulated packet conforms to limits
9180 * @skb: skb buff
9181 * @dev: This physical port's netdev
9182 * @features: Offload features that the stack believes apply
9183 **/
9184 static netdev_features_t i40e_features_check(struct sk_buff *skb,
9185 struct net_device *dev,
9186 netdev_features_t features)
9187 {
9188 size_t len;
9189
9190 /* No point in doing any of this if neither checksum nor GSO are
9191 * being requested for this frame. We can rule out both by just
9192 * checking for CHECKSUM_PARTIAL
9193 */
9194 if (skb->ip_summed != CHECKSUM_PARTIAL)
9195 return features;
9196
9197 /* We cannot support GSO if the MSS is going to be less than
9198 * 64 bytes. If it is then we need to drop support for GSO.
9199 */
9200 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
9201 features &= ~NETIF_F_GSO_MASK;
9202
9203 /* MACLEN can support at most 63 words */
9204 len = skb_network_header(skb) - skb->data;
9205 if (len & ~(63 * 2))
9206 goto out_err;
9207
9208 /* IPLEN and EIPLEN can support at most 127 dwords */
9209 len = skb_transport_header(skb) - skb_network_header(skb);
9210 if (len & ~(127 * 4))
9211 goto out_err;
9212
9213 if (skb->encapsulation) {
9214 /* L4TUNLEN can support 127 words */
9215 len = skb_inner_network_header(skb) - skb_transport_header(skb);
9216 if (len & ~(127 * 2))
9217 goto out_err;
9218
9219 /* IPLEN can support at most 127 dwords */
9220 len = skb_inner_transport_header(skb) -
9221 skb_inner_network_header(skb);
9222 if (len & ~(127 * 4))
9223 goto out_err;
9224 }
9225
9226 /* No need to validate L4LEN as TCP is the only protocol with a
9227 * flexible value and we support all possible values supported
9228 * by TCP, which is at most 15 dwords
9229 */
9230
9231 return features;
9232 out_err:
9233 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9234 }
9235
9236 static const struct net_device_ops i40e_netdev_ops = {
9237 .ndo_open = i40e_open,
9238 .ndo_stop = i40e_close,
9239 .ndo_start_xmit = i40e_lan_xmit_frame,
9240 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9241 .ndo_set_rx_mode = i40e_set_rx_mode,
9242 .ndo_validate_addr = eth_validate_addr,
9243 .ndo_set_mac_address = i40e_set_mac,
9244 .ndo_change_mtu = i40e_change_mtu,
9245 .ndo_do_ioctl = i40e_ioctl,
9246 .ndo_tx_timeout = i40e_tx_timeout,
9247 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9248 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9249 #ifdef CONFIG_NET_POLL_CONTROLLER
9250 .ndo_poll_controller = i40e_netpoll,
9251 #endif
9252 .ndo_setup_tc = __i40e_setup_tc,
9253 #ifdef I40E_FCOE
9254 .ndo_fcoe_enable = i40e_fcoe_enable,
9255 .ndo_fcoe_disable = i40e_fcoe_disable,
9256 #endif
9257 .ndo_set_features =
i40e_set_features,
9258 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9259 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
9260 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
9261 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9262 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9263 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9264 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9265 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9266 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
9267 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9268 .ndo_fdb_add = i40e_ndo_fdb_add,
9269 .ndo_features_check = i40e_features_check,
9270 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9271 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
9272 };
9273
9274 /**
9275 * i40e_config_netdev - Setup the netdev flags
9276 * @vsi: the VSI being configured
9277 *
9278 * Returns 0 on success, negative value on failure
9279 **/
9280 static int i40e_config_netdev(struct i40e_vsi *vsi)
9281 {
9282 struct i40e_pf *pf = vsi->back;
9283 struct i40e_hw *hw = &pf->hw;
9284 struct i40e_netdev_priv *np;
9285 struct net_device *netdev;
9286 u8 broadcast[ETH_ALEN];
9287 u8 mac_addr[ETH_ALEN];
9288 int etherdev_size;
9289
9290 etherdev_size = sizeof(struct i40e_netdev_priv);
9291 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9292 if (!netdev)
9293 return -ENOMEM;
9294
9295 vsi->netdev = netdev;
9296 np = netdev_priv(netdev);
9297 np->vsi = vsi;
9298
9299 netdev->hw_enc_features |= NETIF_F_SG |
9300 NETIF_F_IP_CSUM |
9301 NETIF_F_IPV6_CSUM |
9302 NETIF_F_HIGHDMA |
9303 NETIF_F_SOFT_FEATURES |
9304 NETIF_F_TSO |
9305 NETIF_F_TSO_ECN |
9306 NETIF_F_TSO6 |
9307 NETIF_F_GSO_GRE |
9308 NETIF_F_GSO_GRE_CSUM |
9309 NETIF_F_GSO_IPXIP4 |
9310 NETIF_F_GSO_IPXIP6 |
9311 NETIF_F_GSO_UDP_TUNNEL |
9312 NETIF_F_GSO_UDP_TUNNEL_CSUM |
9313 NETIF_F_GSO_PARTIAL |
9314 NETIF_F_SCTP_CRC |
9315 NETIF_F_RXHASH |
9316 NETIF_F_RXCSUM |
9317 0;
9318
9319 if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
9320 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9321
9322 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9323
9324 /* record features VLANs can make use of */
9325 netdev->vlan_features |= netdev->hw_enc_features |
9326 NETIF_F_TSO_MANGLEID;
9327
9328 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9329 netdev->hw_features |= NETIF_F_NTUPLE;
9330
9331 netdev->hw_features |= netdev->hw_enc_features |
9332 NETIF_F_HW_VLAN_CTAG_TX |
9333 NETIF_F_HW_VLAN_CTAG_RX;
9334
9335 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9336 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9337
9338 if (vsi->type == I40E_VSI_MAIN) {
9339 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9340 ether_addr_copy(mac_addr, hw->mac.perm_addr);
9341 /* The following steps are necessary to prevent reception
9342 * of tagged packets - some older NVM configurations load a
9343 * default MAC-VLAN filter that accepts any tagged packet
9344 * which must be replaced by a normal filter.
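 * (i40e_rm_default_mac_filter() below removes that NVM-installed
 * filter before the normal filter is added under the hash lock)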
9345 */
9346 i40e_rm_default_mac_filter(vsi, mac_addr);
9347 spin_lock_bh(&vsi->mac_filter_hash_lock);
9348 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
9349 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9350 } else {
9351 /* relate the VSI_VMDQ name to the VSI_MAIN name */
9352 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9353 pf->vsi[pf->lan_vsi]->netdev->name);
9354 random_ether_addr(mac_addr);
9355
9356 spin_lock_bh(&vsi->mac_filter_hash_lock);
9357 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
9358 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9359 }
9360
9361 /* Add the broadcast filter so that we initially will receive
9362 * broadcast packets. Note that when a new VLAN is first added the
9363 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
9364 * specific filters as part of transitioning into "vlan" operation.
9365 * When more VLANs are added, the driver will copy each existing MAC
9366 * filter and add it for the new VLAN.
9367 *
9368 * Broadcast filters are handled specially by
9369 * i40e_sync_filters_subtask, as the driver must set the broadcast
9370 * promiscuous bit instead of adding this directly as a MAC/VLAN
9371 * filter. The subtask will update the correct broadcast promiscuous
9372 * bits as VLANs become active or inactive.
9373 */
9374 eth_broadcast_addr(broadcast);
9375 spin_lock_bh(&vsi->mac_filter_hash_lock);
9376 i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY);
9377 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9378
9379 ether_addr_copy(netdev->dev_addr, mac_addr);
9380 ether_addr_copy(netdev->perm_addr, mac_addr);
9381
9382 netdev->priv_flags |= IFF_UNICAST_FLT;
9383 netdev->priv_flags |= IFF_SUPP_NOFCS;
9384 /* Setup netdev TC information */
9385 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9386
9387 netdev->netdev_ops = &i40e_netdev_ops;
9388 netdev->watchdog_timeo = 5 * HZ;
9389 i40e_set_ethtool_ops(netdev);
9390 #ifdef I40E_FCOE
9391 i40e_fcoe_config_netdev(netdev, vsi);
9392 #endif
9393
9394 /* MTU range: 68 - 9706 */
9395 netdev->min_mtu = ETH_MIN_MTU;
9396 netdev->max_mtu = I40E_MAX_RXBUFFER -
9397 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9398
9399 return 0;
9400 }
9401
9402 /**
9403 * i40e_vsi_delete - Delete a VSI from the switch
9404 * @vsi: the VSI being removed
9405 *
9406 * The default/main VSI is not allowed to be removed here.
9407 **/
9408 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9409 {
9410 /* removing the default VSI is not allowed */
9411 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9412 return;
9413
9414 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9415 }
9416
9417 /**
9418 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9419 * @vsi: the VSI being queried
9420 *
9421 * Returns 1 if the HW bridge mode is VEB, 0 if it is VEPA, negative on error
9422 **/
9423 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9424 {
9425 struct i40e_veb *veb;
9426 struct i40e_pf *pf = vsi->back;
9427
9428 /* Uplink is not a bridge so default to VEB */
9429 if (vsi->veb_idx == I40E_NO_VEB)
9430 return 1;
9431
9432 veb = pf->veb[vsi->veb_idx];
9433 if (!veb) {
9434 dev_info(&pf->pdev->dev,
9435 "There is no veb associated with the bridge\n");
9436 return -ENOENT;
9437 }
9438
9439 /* Uplink is a bridge in VEPA mode */
9440 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9441 return 0;
9442
9443 /* Uplink is a bridge in VEB mode */
9444 return 1;
9445 }
9450
9451 /**
9452 * i40e_add_vsi - Add a VSI to the switch
9453 * @vsi: the
VSI being configured 9454 * 9455 * This initializes a VSI context depending on the VSI type to be added and 9456 * passes it down to the add_vsi aq command. 9457 **/ 9458 static int i40e_add_vsi(struct i40e_vsi *vsi) 9459 { 9460 int ret = -ENODEV; 9461 struct i40e_pf *pf = vsi->back; 9462 struct i40e_hw *hw = &pf->hw; 9463 struct i40e_vsi_context ctxt; 9464 struct i40e_mac_filter *f; 9465 struct hlist_node *h; 9466 int bkt; 9467 9468 u8 enabled_tc = 0x1; /* TC0 enabled */ 9469 int f_count = 0; 9470 9471 memset(&ctxt, 0, sizeof(ctxt)); 9472 switch (vsi->type) { 9473 case I40E_VSI_MAIN: 9474 /* The PF's main VSI is already setup as part of the 9475 * device initialization, so we'll not bother with 9476 * the add_vsi call, but we will retrieve the current 9477 * VSI context. 9478 */ 9479 ctxt.seid = pf->main_vsi_seid; 9480 ctxt.pf_num = pf->hw.pf_id; 9481 ctxt.vf_num = 0; 9482 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 9483 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 9484 if (ret) { 9485 dev_info(&pf->pdev->dev, 9486 "couldn't get PF vsi config, err %s aq_err %s\n", 9487 i40e_stat_str(&pf->hw, ret), 9488 i40e_aq_str(&pf->hw, 9489 pf->hw.aq.asq_last_status)); 9490 return -ENOENT; 9491 } 9492 vsi->info = ctxt.info; 9493 vsi->info.valid_sections = 0; 9494 9495 vsi->seid = ctxt.seid; 9496 vsi->id = ctxt.vsi_number; 9497 9498 enabled_tc = i40e_pf_get_tc_map(pf); 9499 9500 /* MFP mode setup queue map and update VSI */ 9501 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && 9502 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ 9503 memset(&ctxt, 0, sizeof(ctxt)); 9504 ctxt.seid = pf->main_vsi_seid; 9505 ctxt.pf_num = pf->hw.pf_id; 9506 ctxt.vf_num = 0; 9507 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 9508 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 9509 if (ret) { 9510 dev_info(&pf->pdev->dev, 9511 "update vsi failed, err %s aq_err %s\n", 9512 i40e_stat_str(&pf->hw, ret), 9513 i40e_aq_str(&pf->hw, 9514 pf->hw.aq.asq_last_status)); 9515 ret = -ENOENT; 9516 goto err; 9517 } 9518 /* update the local VSI info queue map */ 9519 i40e_vsi_update_queue_map(vsi, &ctxt); 9520 vsi->info.valid_sections = 0; 9521 } else { 9522 /* Default/Main VSI is only enabled for TC0 9523 * reconfigure it to enable all TCs that are 9524 * available on the port in SFP mode. 9525 * For MFP case the iSCSI PF would use this 9526 * flow to enable LAN+iSCSI TC. 
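             *
             * Illustrative values only: enabled_tc is a bitmap, so
             * 0x1 means just TC0 while 0x3 would request TC0 and
             * TC1; the real map comes from i40e_pf_get_tc_map()
             * above.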
9527 */ 9528 ret = i40e_vsi_config_tc(vsi, enabled_tc); 9529 if (ret) { 9530 dev_info(&pf->pdev->dev, 9531 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", 9532 enabled_tc, 9533 i40e_stat_str(&pf->hw, ret), 9534 i40e_aq_str(&pf->hw, 9535 pf->hw.aq.asq_last_status)); 9536 ret = -ENOENT; 9537 } 9538 } 9539 break; 9540 9541 case I40E_VSI_FDIR: 9542 ctxt.pf_num = hw->pf_id; 9543 ctxt.vf_num = 0; 9544 ctxt.uplink_seid = vsi->uplink_seid; 9545 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9546 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 9547 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && 9548 (i40e_is_vsi_uplink_mode_veb(vsi))) { 9549 ctxt.info.valid_sections |= 9550 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9551 ctxt.info.switch_id = 9552 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9553 } 9554 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9555 break; 9556 9557 case I40E_VSI_VMDQ2: 9558 ctxt.pf_num = hw->pf_id; 9559 ctxt.vf_num = 0; 9560 ctxt.uplink_seid = vsi->uplink_seid; 9561 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9562 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 9563 9564 /* This VSI is connected to VEB so the switch_id 9565 * should be set to zero by default. 9566 */ 9567 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9568 ctxt.info.valid_sections |= 9569 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9570 ctxt.info.switch_id = 9571 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9572 } 9573 9574 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9575 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9576 break; 9577 9578 case I40E_VSI_SRIOV: 9579 ctxt.pf_num = hw->pf_id; 9580 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 9581 ctxt.uplink_seid = vsi->uplink_seid; 9582 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 9583 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 9584 9585 /* This VSI is connected to VEB so the switch_id 9586 * should be set to zero by default. 
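         * When the uplink is in VEB mode we instead set the
         * ALLOW_LB flag below, so traffic between VSIs on the same
         * VEB can be looped back locally (an inference from the
         * flag name and its use here, not a datasheet statement).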
9587 */ 9588 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 9589 ctxt.info.valid_sections |= 9590 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 9591 ctxt.info.switch_id = 9592 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 9593 } 9594 9595 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { 9596 ctxt.info.valid_sections |= 9597 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); 9598 ctxt.info.queueing_opt_flags |= 9599 (I40E_AQ_VSI_QUE_OPT_TCP_ENA | 9600 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI); 9601 } 9602 9603 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 9604 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 9605 if (pf->vf[vsi->vf_id].spoofchk) { 9606 ctxt.info.valid_sections |= 9607 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 9608 ctxt.info.sec_flags |= 9609 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 9610 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 9611 } 9612 /* Setup the VSI tx/rx queue map for TC0 only for now */ 9613 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 9614 break; 9615 9616 #ifdef I40E_FCOE 9617 case I40E_VSI_FCOE: 9618 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 9619 if (ret) { 9620 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 9621 return ret; 9622 } 9623 break; 9624 9625 #endif /* I40E_FCOE */ 9626 case I40E_VSI_IWARP: 9627 /* send down message to iWARP */ 9628 break; 9629 9630 default: 9631 return -ENODEV; 9632 } 9633 9634 if (vsi->type != I40E_VSI_MAIN) { 9635 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 9636 if (ret) { 9637 dev_info(&vsi->back->pdev->dev, 9638 "add vsi failed, err %s aq_err %s\n", 9639 i40e_stat_str(&pf->hw, ret), 9640 i40e_aq_str(&pf->hw, 9641 pf->hw.aq.asq_last_status)); 9642 ret = -ENOENT; 9643 goto err; 9644 } 9645 vsi->info = ctxt.info; 9646 vsi->info.valid_sections = 0; 9647 vsi->seid = ctxt.seid; 9648 vsi->id = ctxt.vsi_number; 9649 } 9650 9651 vsi->active_filters = 0; 9652 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); 9653 spin_lock_bh(&vsi->mac_filter_hash_lock); 9654 /* If macvlan filters already exist, force them to get loaded */ 9655 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { 9656 f->state = I40E_FILTER_NEW; 9657 f_count++; 9658 } 9659 spin_unlock_bh(&vsi->mac_filter_hash_lock); 9660 9661 if (f_count) { 9662 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 9663 pf->flags |= I40E_FLAG_FILTER_SYNC; 9664 } 9665 9666 /* Update VSI BW information */ 9667 ret = i40e_vsi_get_bw_info(vsi); 9668 if (ret) { 9669 dev_info(&pf->pdev->dev, 9670 "couldn't get vsi bw info, err %s aq_err %s\n", 9671 i40e_stat_str(&pf->hw, ret), 9672 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9673 /* VSI is already added so not tearing that up */ 9674 ret = 0; 9675 } 9676 9677 err: 9678 return ret; 9679 } 9680 9681 /** 9682 * i40e_vsi_release - Delete a VSI and free its resources 9683 * @vsi: the VSI being removed 9684 * 9685 * Returns 0 on success or < 0 on error 9686 **/ 9687 int i40e_vsi_release(struct i40e_vsi *vsi) 9688 { 9689 struct i40e_mac_filter *f; 9690 struct hlist_node *h; 9691 struct i40e_veb *veb = NULL; 9692 struct i40e_pf *pf; 9693 u16 uplink_seid; 9694 int i, n, bkt; 9695 9696 pf = vsi->back; 9697 9698 /* release of a VEB-owner or last VSI is not allowed */ 9699 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 9700 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 9701 vsi->seid, vsi->uplink_seid); 9702 return -ENODEV; 9703 } 9704 if (vsi == pf->vsi[pf->lan_vsi] && 9705 !test_bit(__I40E_DOWN, &pf->state)) { 9706 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9707 return -ENODEV; 9708 } 9709 9710 
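    /* Teardown order below: netdev and IRQs (skipped for SRIOV,
     * where the VF owns the netdev), then MAC filters, the HW VSI
     * element, q_vectors, rings, and finally the SW struct.
     */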
uplink_seid = vsi->uplink_seid; 9711 if (vsi->type != I40E_VSI_SRIOV) { 9712 if (vsi->netdev_registered) { 9713 vsi->netdev_registered = false; 9714 if (vsi->netdev) { 9715 /* results in a call to i40e_close() */ 9716 unregister_netdev(vsi->netdev); 9717 } 9718 } else { 9719 i40e_vsi_close(vsi); 9720 } 9721 i40e_vsi_disable_irq(vsi); 9722 } 9723 9724 spin_lock_bh(&vsi->mac_filter_hash_lock); 9725 9726 /* clear the sync flag on all filters */ 9727 if (vsi->netdev) { 9728 __dev_uc_unsync(vsi->netdev, NULL); 9729 __dev_mc_unsync(vsi->netdev, NULL); 9730 } 9731 9732 /* make sure any remaining filters are marked for deletion */ 9733 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) 9734 __i40e_del_filter(vsi, f); 9735 9736 spin_unlock_bh(&vsi->mac_filter_hash_lock); 9737 9738 i40e_sync_vsi_filters(vsi); 9739 9740 i40e_vsi_delete(vsi); 9741 i40e_vsi_free_q_vectors(vsi); 9742 if (vsi->netdev) { 9743 free_netdev(vsi->netdev); 9744 vsi->netdev = NULL; 9745 } 9746 i40e_vsi_clear_rings(vsi); 9747 i40e_vsi_clear(vsi); 9748 9749 /* If this was the last thing on the VEB, except for the 9750 * controlling VSI, remove the VEB, which puts the controlling 9751 * VSI onto the next level down in the switch. 9752 * 9753 * Well, okay, there's one more exception here: don't remove 9754 * the orphan VEBs yet. We'll wait for an explicit remove request 9755 * from up the network stack. 9756 */ 9757 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 9758 if (pf->vsi[i] && 9759 pf->vsi[i]->uplink_seid == uplink_seid && 9760 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 9761 n++; /* count the VSIs */ 9762 } 9763 } 9764 for (i = 0; i < I40E_MAX_VEB; i++) { 9765 if (!pf->veb[i]) 9766 continue; 9767 if (pf->veb[i]->uplink_seid == uplink_seid) 9768 n++; /* count the VEBs */ 9769 if (pf->veb[i]->seid == uplink_seid) 9770 veb = pf->veb[i]; 9771 } 9772 if (n == 0 && veb && veb->uplink_seid != 0) 9773 i40e_veb_release(veb); 9774 9775 return 0; 9776 } 9777 9778 /** 9779 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 9780 * @vsi: ptr to the VSI 9781 * 9782 * This should only be called after i40e_vsi_mem_alloc() which allocates the 9783 * corresponding SW VSI structure and initializes num_queue_pairs for the 9784 * newly allocated VSI. 9785 * 9786 * Returns 0 on success or negative on failure 9787 **/ 9788 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 9789 { 9790 int ret = -ENOENT; 9791 struct i40e_pf *pf = vsi->back; 9792 9793 if (vsi->q_vectors[0]) { 9794 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 9795 vsi->seid); 9796 return -EEXIST; 9797 } 9798 9799 if (vsi->base_vector) { 9800 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 9801 vsi->seid, vsi->base_vector); 9802 return -EEXIST; 9803 } 9804 9805 ret = i40e_vsi_alloc_q_vectors(vsi); 9806 if (ret) { 9807 dev_info(&pf->pdev->dev, 9808 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 9809 vsi->num_q_vectors, vsi->seid, ret); 9810 vsi->num_q_vectors = 0; 9811 goto vector_setup_out; 9812 } 9813 9814 /* In Legacy mode, we do not have to get any other vector since we 9815 * piggyback on the misc/ICR0 for queue interrupts. 
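     *
     * Illustrative layout: with MSI-X this VSI's queue vectors
     * occupy base_vector .. base_vector + num_q_vectors - 1,
     * carved out of pf->irq_pile just below; with legacy/MSI the
     * single ICR0 interrupt services the admin queue and all
     * queues together.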
9816 */ 9817 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 9818 return ret; 9819 if (vsi->num_q_vectors) 9820 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 9821 vsi->num_q_vectors, vsi->idx); 9822 if (vsi->base_vector < 0) { 9823 dev_info(&pf->pdev->dev, 9824 "failed to get tracking for %d vectors for VSI %d, err=%d\n", 9825 vsi->num_q_vectors, vsi->seid, vsi->base_vector); 9826 i40e_vsi_free_q_vectors(vsi); 9827 ret = -ENOENT; 9828 goto vector_setup_out; 9829 } 9830 9831 vector_setup_out: 9832 return ret; 9833 } 9834 9835 /** 9836 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 9837 * @vsi: pointer to the vsi. 9838 * 9839 * This re-allocates a vsi's queue resources. 9840 * 9841 * Returns pointer to the successfully allocated and configured VSI sw struct 9842 * on success, otherwise returns NULL on failure. 9843 **/ 9844 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 9845 { 9846 struct i40e_pf *pf; 9847 u8 enabled_tc; 9848 int ret; 9849 9850 if (!vsi) 9851 return NULL; 9852 9853 pf = vsi->back; 9854 9855 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 9856 i40e_vsi_clear_rings(vsi); 9857 9858 i40e_vsi_free_arrays(vsi, false); 9859 i40e_set_num_rings_in_vsi(vsi); 9860 ret = i40e_vsi_alloc_arrays(vsi, false); 9861 if (ret) 9862 goto err_vsi; 9863 9864 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); 9865 if (ret < 0) { 9866 dev_info(&pf->pdev->dev, 9867 "failed to get tracking for %d queues for VSI %d err %d\n", 9868 vsi->alloc_queue_pairs, vsi->seid, ret); 9869 goto err_vsi; 9870 } 9871 vsi->base_queue = ret; 9872 9873 /* Update the FW view of the VSI. Force a reset of TC and queue 9874 * layout configurations. 9875 */ 9876 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 9877 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 9878 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 9879 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 9880 if (vsi->type == I40E_VSI_MAIN) 9881 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); 9882 9883 /* assign it some queues */ 9884 ret = i40e_alloc_rings(vsi); 9885 if (ret) 9886 goto err_rings; 9887 9888 /* map all of the rings to the q_vectors */ 9889 i40e_vsi_map_rings_to_vectors(vsi); 9890 return vsi; 9891 9892 err_rings: 9893 i40e_vsi_free_q_vectors(vsi); 9894 if (vsi->netdev_registered) { 9895 vsi->netdev_registered = false; 9896 unregister_netdev(vsi->netdev); 9897 free_netdev(vsi->netdev); 9898 vsi->netdev = NULL; 9899 } 9900 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 9901 err_vsi: 9902 i40e_vsi_clear(vsi); 9903 return NULL; 9904 } 9905 9906 /** 9907 * i40e_vsi_setup - Set up a VSI by a given type 9908 * @pf: board private structure 9909 * @type: VSI type 9910 * @uplink_seid: the switch element to link to 9911 * @param1: usage depends upon VSI type. For VF types, indicates VF id 9912 * 9913 * This allocates the sw VSI structure and its queue resources, then add a VSI 9914 * to the identified VEB. 9915 * 9916 * Returns pointer to the successfully allocated and configure VSI sw struct on 9917 * success, otherwise returns NULL on failure. 
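 *
 * Illustrative call only, patterned on the FDIR call site elsewhere
 * in this file (the error handling is a sketch, not driver behavior):
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vsi)
 *		return -ENOENT;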
9918 **/ 9919 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, 9920 u16 uplink_seid, u32 param1) 9921 { 9922 struct i40e_vsi *vsi = NULL; 9923 struct i40e_veb *veb = NULL; 9924 int ret, i; 9925 int v_idx; 9926 9927 /* The requested uplink_seid must be either 9928 * - the PF's port seid 9929 * no VEB is needed because this is the PF 9930 * or this is a Flow Director special case VSI 9931 * - seid of an existing VEB 9932 * - seid of a VSI that owns an existing VEB 9933 * - seid of a VSI that doesn't own a VEB 9934 * a new VEB is created and the VSI becomes the owner 9935 * - seid of the PF VSI, which is what creates the first VEB 9936 * this is a special case of the previous 9937 * 9938 * Find which uplink_seid we were given and create a new VEB if needed 9939 */ 9940 for (i = 0; i < I40E_MAX_VEB; i++) { 9941 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { 9942 veb = pf->veb[i]; 9943 break; 9944 } 9945 } 9946 9947 if (!veb && uplink_seid != pf->mac_seid) { 9948 9949 for (i = 0; i < pf->num_alloc_vsi; i++) { 9950 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 9951 vsi = pf->vsi[i]; 9952 break; 9953 } 9954 } 9955 if (!vsi) { 9956 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", 9957 uplink_seid); 9958 return NULL; 9959 } 9960 9961 if (vsi->uplink_seid == pf->mac_seid) 9962 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, 9963 vsi->tc_config.enabled_tc); 9964 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) 9965 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 9966 vsi->tc_config.enabled_tc); 9967 if (veb) { 9968 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { 9969 dev_info(&vsi->back->pdev->dev, 9970 "New VSI creation error, uplink seid of LAN VSI expected.\n"); 9971 return NULL; 9972 } 9973 /* We come up by default in VEPA mode if SRIOV is not 9974 * already enabled, in which case we can't force VEPA 9975 * mode. 9976 */ 9977 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 9978 veb->bridge_mode = BRIDGE_MODE_VEPA; 9979 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 9980 } 9981 i40e_config_bridge_mode(veb); 9982 } 9983 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 9984 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 9985 veb = pf->veb[i]; 9986 } 9987 if (!veb) { 9988 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); 9989 return NULL; 9990 } 9991 9992 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 9993 uplink_seid = veb->seid; 9994 } 9995 9996 /* get vsi sw struct */ 9997 v_idx = i40e_vsi_mem_alloc(pf, type); 9998 if (v_idx < 0) 9999 goto err_alloc; 10000 vsi = pf->vsi[v_idx]; 10001 if (!vsi) 10002 goto err_alloc; 10003 vsi->type = type; 10004 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); 10005 10006 if (type == I40E_VSI_MAIN) 10007 pf->lan_vsi = v_idx; 10008 else if (type == I40E_VSI_SRIOV) 10009 vsi->vf_id = param1; 10010 /* assign it some queues */ 10011 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, 10012 vsi->idx); 10013 if (ret < 0) { 10014 dev_info(&pf->pdev->dev, 10015 "failed to get tracking for %d queues for VSI %d err=%d\n", 10016 vsi->alloc_queue_pairs, vsi->seid, ret); 10017 goto err_vsi; 10018 } 10019 vsi->base_queue = ret; 10020 10021 /* get a VSI from the hardware */ 10022 vsi->uplink_seid = uplink_seid; 10023 ret = i40e_add_vsi(vsi); 10024 if (ret) 10025 goto err_vsi; 10026 10027 switch (vsi->type) { 10028 /* setup the netdev if needed */ 10029 case I40E_VSI_MAIN: 10030 /* Apply relevant filters if a platform-specific mac 10031 * address was selected. 
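         * (I40E_FLAG_PF_MAC is set during probe by
         * i40e_get_platform_mac_addr() when Open Firmware or
         * IDPROM supplies an address.)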
10032 */ 10033 if (!!(pf->flags & I40E_FLAG_PF_MAC)) { 10034 ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); 10035 if (ret) { 10036 dev_warn(&pf->pdev->dev, 10037 "could not set up macaddr; err %d\n", 10038 ret); 10039 } 10040 } 10041 case I40E_VSI_VMDQ2: 10042 case I40E_VSI_FCOE: 10043 ret = i40e_config_netdev(vsi); 10044 if (ret) 10045 goto err_netdev; 10046 ret = register_netdev(vsi->netdev); 10047 if (ret) 10048 goto err_netdev; 10049 vsi->netdev_registered = true; 10050 netif_carrier_off(vsi->netdev); 10051 #ifdef CONFIG_I40E_DCB 10052 /* Setup DCB netlink interface */ 10053 i40e_dcbnl_setup(vsi); 10054 #endif /* CONFIG_I40E_DCB */ 10055 /* fall through */ 10056 10057 case I40E_VSI_FDIR: 10058 /* set up vectors and rings if needed */ 10059 ret = i40e_vsi_setup_vectors(vsi); 10060 if (ret) 10061 goto err_msix; 10062 10063 ret = i40e_alloc_rings(vsi); 10064 if (ret) 10065 goto err_rings; 10066 10067 /* map all of the rings to the q_vectors */ 10068 i40e_vsi_map_rings_to_vectors(vsi); 10069 10070 i40e_vsi_reset_stats(vsi); 10071 break; 10072 10073 default: 10074 /* no netdev or rings for the other VSI types */ 10075 break; 10076 } 10077 10078 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && 10079 (vsi->type == I40E_VSI_VMDQ2)) { 10080 ret = i40e_vsi_config_rss(vsi); 10081 } 10082 return vsi; 10083 10084 err_rings: 10085 i40e_vsi_free_q_vectors(vsi); 10086 err_msix: 10087 if (vsi->netdev_registered) { 10088 vsi->netdev_registered = false; 10089 unregister_netdev(vsi->netdev); 10090 free_netdev(vsi->netdev); 10091 vsi->netdev = NULL; 10092 } 10093 err_netdev: 10094 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 10095 err_vsi: 10096 i40e_vsi_clear(vsi); 10097 err_alloc: 10098 return NULL; 10099 } 10100 10101 /** 10102 * i40e_veb_get_bw_info - Query VEB BW information 10103 * @veb: the veb to query 10104 * 10105 * Query the Tx scheduler BW configuration data for given VEB 10106 **/ 10107 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 10108 { 10109 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 10110 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; 10111 struct i40e_pf *pf = veb->pf; 10112 struct i40e_hw *hw = &pf->hw; 10113 u32 tc_bw_max; 10114 int ret = 0; 10115 int i; 10116 10117 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 10118 &bw_data, NULL); 10119 if (ret) { 10120 dev_info(&pf->pdev->dev, 10121 "query veb bw config failed, err %s aq_err %s\n", 10122 i40e_stat_str(&pf->hw, ret), 10123 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 10124 goto out; 10125 } 10126 10127 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 10128 &ets_data, NULL); 10129 if (ret) { 10130 dev_info(&pf->pdev->dev, 10131 "query veb bw ets config failed, err %s aq_err %s\n", 10132 i40e_stat_str(&pf->hw, ret), 10133 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 10134 goto out; 10135 } 10136 10137 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 10138 veb->bw_max_quanta = ets_data.tc_bw_max; 10139 veb->is_abs_credits = bw_data.absolute_credits_enable; 10140 veb->enabled_tc = ets_data.tc_valid_bits; 10141 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 10142 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 10143 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 10144 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; 10145 veb->bw_tc_limit_credits[i] = 10146 le16_to_cpu(bw_data.tc_bw_limits[i]); 10147 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); 10148 } 10149 10150 out: 10151 return ret; 10152 } 10153 10154 /** 10155 * i40e_veb_mem_alloc - 
Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
    int ret = -ENOENT;
    struct i40e_veb *veb;
    int i;

    /* Need to protect the allocation of switch elements at the PF level */
    mutex_lock(&pf->switch_mutex);

    /* The VEB list may be fragmented if VEB creation/destruction has
     * been happening, so do a quick linear scan from the start for
     * the first free slot (there is no wrap-around here).
     */
    i = 0;
    while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
        i++;
    if (i >= I40E_MAX_VEB) {
        ret = -ENOMEM;
        goto err_alloc_veb; /* out of VEB slots! */
    }

    veb = kzalloc(sizeof(*veb), GFP_KERNEL);
    if (!veb) {
        ret = -ENOMEM;
        goto err_alloc_veb;
    }
    veb->pf = pf;
    veb->idx = i;
    veb->enabled_tc = 1;

    pf->veb[i] = veb;
    ret = i;
err_alloc_veb:
    mutex_unlock(&pf->switch_mutex);
    return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
    struct i40e_pf *pf = branch->pf;
    u16 branch_seid = branch->seid;
    u16 veb_idx = branch->idx;
    int i;

    /* release any VEBs on this VEB - RECURSION */
    for (i = 0; i < I40E_MAX_VEB; i++) {
        if (!pf->veb[i])
            continue;
        if (pf->veb[i]->uplink_seid == branch->seid)
            i40e_switch_branch_release(pf->veb[i]);
    }

    /* Release the VSIs on this VEB, but not the owner VSI.
     *
     * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
     *       the VEB itself, so don't use (*branch) after this loop.
     */
    for (i = 0; i < pf->num_alloc_vsi; i++) {
        if (!pf->vsi[i])
            continue;
        if (pf->vsi[i]->uplink_seid == branch_seid &&
            (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
            i40e_vsi_release(pf->vsi[i]);
        }
    }

    /* There's one corner case where the VEB might not have been
     * removed, so double check it here and remove it if needed.
     * This case happens if the veb was created from the debugfs
     * commands and no VSIs were added to it.
10240 */ 10241 if (pf->veb[veb_idx]) 10242 i40e_veb_release(pf->veb[veb_idx]); 10243 } 10244 10245 /** 10246 * i40e_veb_clear - remove veb struct 10247 * @veb: the veb to remove 10248 **/ 10249 static void i40e_veb_clear(struct i40e_veb *veb) 10250 { 10251 if (!veb) 10252 return; 10253 10254 if (veb->pf) { 10255 struct i40e_pf *pf = veb->pf; 10256 10257 mutex_lock(&pf->switch_mutex); 10258 if (pf->veb[veb->idx] == veb) 10259 pf->veb[veb->idx] = NULL; 10260 mutex_unlock(&pf->switch_mutex); 10261 } 10262 10263 kfree(veb); 10264 } 10265 10266 /** 10267 * i40e_veb_release - Delete a VEB and free its resources 10268 * @veb: the VEB being removed 10269 **/ 10270 void i40e_veb_release(struct i40e_veb *veb) 10271 { 10272 struct i40e_vsi *vsi = NULL; 10273 struct i40e_pf *pf; 10274 int i, n = 0; 10275 10276 pf = veb->pf; 10277 10278 /* find the remaining VSI and check for extras */ 10279 for (i = 0; i < pf->num_alloc_vsi; i++) { 10280 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 10281 n++; 10282 vsi = pf->vsi[i]; 10283 } 10284 } 10285 if (n != 1) { 10286 dev_info(&pf->pdev->dev, 10287 "can't remove VEB %d with %d VSIs left\n", 10288 veb->seid, n); 10289 return; 10290 } 10291 10292 /* move the remaining VSI to uplink veb */ 10293 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 10294 if (veb->uplink_seid) { 10295 vsi->uplink_seid = veb->uplink_seid; 10296 if (veb->uplink_seid == pf->mac_seid) 10297 vsi->veb_idx = I40E_NO_VEB; 10298 else 10299 vsi->veb_idx = veb->veb_idx; 10300 } else { 10301 /* floating VEB */ 10302 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 10303 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 10304 } 10305 10306 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 10307 i40e_veb_clear(veb); 10308 } 10309 10310 /** 10311 * i40e_add_veb - create the VEB in the switch 10312 * @veb: the VEB to be instantiated 10313 * @vsi: the controlling VSI 10314 **/ 10315 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 10316 { 10317 struct i40e_pf *pf = veb->pf; 10318 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); 10319 int ret; 10320 10321 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 10322 veb->enabled_tc, false, 10323 &veb->seid, enable_stats, NULL); 10324 10325 /* get a VEB from the hardware */ 10326 if (ret) { 10327 dev_info(&pf->pdev->dev, 10328 "couldn't add VEB, err %s aq_err %s\n", 10329 i40e_stat_str(&pf->hw, ret), 10330 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10331 return -EPERM; 10332 } 10333 10334 /* get statistics counter */ 10335 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, 10336 &veb->stats_idx, NULL, NULL, NULL); 10337 if (ret) { 10338 dev_info(&pf->pdev->dev, 10339 "couldn't get VEB statistics idx, err %s aq_err %s\n", 10340 i40e_stat_str(&pf->hw, ret), 10341 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10342 return -EPERM; 10343 } 10344 ret = i40e_veb_get_bw_info(veb); 10345 if (ret) { 10346 dev_info(&pf->pdev->dev, 10347 "couldn't get VEB bw info, err %s aq_err %s\n", 10348 i40e_stat_str(&pf->hw, ret), 10349 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10350 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 10351 return -ENOENT; 10352 } 10353 10354 vsi->uplink_seid = veb->seid; 10355 vsi->veb_idx = veb->idx; 10356 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 10357 10358 return 0; 10359 } 10360 10361 /** 10362 * i40e_veb_setup - Set up a VEB 10363 * @pf: board private structure 10364 * @flags: VEB setup flags 10365 * @uplink_seid: the switch element to link to 10366 * @vsi_seid: the 
initial VSI seid 10367 * @enabled_tc: Enabled TC bit-map 10368 * 10369 * This allocates the sw VEB structure and links it into the switch 10370 * It is possible and legal for this to be a duplicate of an already 10371 * existing VEB. It is also possible for both uplink and vsi seids 10372 * to be zero, in order to create a floating VEB. 10373 * 10374 * Returns pointer to the successfully allocated VEB sw struct on 10375 * success, otherwise returns NULL on failure. 10376 **/ 10377 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 10378 u16 uplink_seid, u16 vsi_seid, 10379 u8 enabled_tc) 10380 { 10381 struct i40e_veb *veb, *uplink_veb = NULL; 10382 int vsi_idx, veb_idx; 10383 int ret; 10384 10385 /* if one seid is 0, the other must be 0 to create a floating relay */ 10386 if ((uplink_seid == 0 || vsi_seid == 0) && 10387 (uplink_seid + vsi_seid != 0)) { 10388 dev_info(&pf->pdev->dev, 10389 "one, not both seid's are 0: uplink=%d vsi=%d\n", 10390 uplink_seid, vsi_seid); 10391 return NULL; 10392 } 10393 10394 /* make sure there is such a vsi and uplink */ 10395 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) 10396 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 10397 break; 10398 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { 10399 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 10400 vsi_seid); 10401 return NULL; 10402 } 10403 10404 if (uplink_seid && uplink_seid != pf->mac_seid) { 10405 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 10406 if (pf->veb[veb_idx] && 10407 pf->veb[veb_idx]->seid == uplink_seid) { 10408 uplink_veb = pf->veb[veb_idx]; 10409 break; 10410 } 10411 } 10412 if (!uplink_veb) { 10413 dev_info(&pf->pdev->dev, 10414 "uplink seid %d not found\n", uplink_seid); 10415 return NULL; 10416 } 10417 } 10418 10419 /* get veb sw struct */ 10420 veb_idx = i40e_veb_mem_alloc(pf); 10421 if (veb_idx < 0) 10422 goto err_alloc; 10423 veb = pf->veb[veb_idx]; 10424 veb->flags = flags; 10425 veb->uplink_seid = uplink_seid; 10426 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 10427 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 10428 10429 /* create the VEB in the switch */ 10430 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 10431 if (ret) 10432 goto err_veb; 10433 if (vsi_idx == pf->lan_vsi) 10434 pf->lan_veb = veb->idx; 10435 10436 return veb; 10437 10438 err_veb: 10439 i40e_veb_clear(veb); 10440 err_alloc: 10441 return NULL; 10442 } 10443 10444 /** 10445 * i40e_setup_pf_switch_element - set PF vars based on switch type 10446 * @pf: board private structure 10447 * @ele: element we are building info from 10448 * @num_reported: total number of elements 10449 * @printconfig: should we print the contents 10450 * 10451 * helper function to assist in extracting a few useful SEID values. 10452 **/ 10453 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 10454 struct i40e_aqc_switch_config_element_resp *ele, 10455 u16 num_reported, bool printconfig) 10456 { 10457 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 10458 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 10459 u8 element_type = ele->element_type; 10460 u16 seid = le16_to_cpu(ele->seid); 10461 10462 if (printconfig) 10463 dev_info(&pf->pdev->dev, 10464 "type=%d seid=%d uplink=%d downlink=%d\n", 10465 element_type, seid, uplink_seid, downlink_seid); 10466 10467 switch (element_type) { 10468 case I40E_SWITCH_ELEMENT_TYPE_MAC: 10469 pf->mac_seid = seid; 10470 break; 10471 case I40E_SWITCH_ELEMENT_TYPE_VEB: 10472 /* Main VEB? 
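         * That is, a VEB whose uplink is the MAC itself; only such
         * a VEB can become the LAN VEB tracked in pf->lan_veb below.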
*/ 10473 if (uplink_seid != pf->mac_seid) 10474 break; 10475 if (pf->lan_veb == I40E_NO_VEB) { 10476 int v; 10477 10478 /* find existing or else empty VEB */ 10479 for (v = 0; v < I40E_MAX_VEB; v++) { 10480 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 10481 pf->lan_veb = v; 10482 break; 10483 } 10484 } 10485 if (pf->lan_veb == I40E_NO_VEB) { 10486 v = i40e_veb_mem_alloc(pf); 10487 if (v < 0) 10488 break; 10489 pf->lan_veb = v; 10490 } 10491 } 10492 10493 pf->veb[pf->lan_veb]->seid = seid; 10494 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 10495 pf->veb[pf->lan_veb]->pf = pf; 10496 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 10497 break; 10498 case I40E_SWITCH_ELEMENT_TYPE_VSI: 10499 if (num_reported != 1) 10500 break; 10501 /* This is immediately after a reset so we can assume this is 10502 * the PF's VSI 10503 */ 10504 pf->mac_seid = uplink_seid; 10505 pf->pf_seid = downlink_seid; 10506 pf->main_vsi_seid = seid; 10507 if (printconfig) 10508 dev_info(&pf->pdev->dev, 10509 "pf_seid=%d main_vsi_seid=%d\n", 10510 pf->pf_seid, pf->main_vsi_seid); 10511 break; 10512 case I40E_SWITCH_ELEMENT_TYPE_PF: 10513 case I40E_SWITCH_ELEMENT_TYPE_VF: 10514 case I40E_SWITCH_ELEMENT_TYPE_EMP: 10515 case I40E_SWITCH_ELEMENT_TYPE_BMC: 10516 case I40E_SWITCH_ELEMENT_TYPE_PE: 10517 case I40E_SWITCH_ELEMENT_TYPE_PA: 10518 /* ignore these for now */ 10519 break; 10520 default: 10521 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 10522 element_type, seid); 10523 break; 10524 } 10525 } 10526 10527 /** 10528 * i40e_fetch_switch_configuration - Get switch config from firmware 10529 * @pf: board private structure 10530 * @printconfig: should we print the contents 10531 * 10532 * Get the current switch configuration from the device and 10533 * extract a few useful SEID values. 10534 **/ 10535 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) 10536 { 10537 struct i40e_aqc_get_switch_config_resp *sw_config; 10538 u16 next_seid = 0; 10539 int ret = 0; 10540 u8 *aq_buf; 10541 int i; 10542 10543 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); 10544 if (!aq_buf) 10545 return -ENOMEM; 10546 10547 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 10548 do { 10549 u16 num_reported, num_total; 10550 10551 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, 10552 I40E_AQ_LARGE_BUF, 10553 &next_seid, NULL); 10554 if (ret) { 10555 dev_info(&pf->pdev->dev, 10556 "get switch config failed err %s aq_err %s\n", 10557 i40e_stat_str(&pf->hw, ret), 10558 i40e_aq_str(&pf->hw, 10559 pf->hw.aq.asq_last_status)); 10560 kfree(aq_buf); 10561 return -ENOENT; 10562 } 10563 10564 num_reported = le16_to_cpu(sw_config->header.num_reported); 10565 num_total = le16_to_cpu(sw_config->header.num_total); 10566 10567 if (printconfig) 10568 dev_info(&pf->pdev->dev, 10569 "header: %d reported %d total\n", 10570 num_reported, num_total); 10571 10572 for (i = 0; i < num_reported; i++) { 10573 struct i40e_aqc_switch_config_element_resp *ele = 10574 &sw_config->element[i]; 10575 10576 i40e_setup_pf_switch_element(pf, ele, num_reported, 10577 printconfig); 10578 } 10579 } while (next_seid != 0); 10580 10581 kfree(aq_buf); 10582 return ret; 10583 } 10584 10585 /** 10586 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset 10587 * @pf: board private structure 10588 * @reinit: if the Main VSI needs to re-initialized. 
10589 * 10590 * Returns 0 on success, negative value on failure 10591 **/ 10592 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) 10593 { 10594 u16 flags = 0; 10595 int ret; 10596 10597 /* find out what's out there already */ 10598 ret = i40e_fetch_switch_configuration(pf, false); 10599 if (ret) { 10600 dev_info(&pf->pdev->dev, 10601 "couldn't fetch switch config, err %s aq_err %s\n", 10602 i40e_stat_str(&pf->hw, ret), 10603 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10604 return ret; 10605 } 10606 i40e_pf_reset_stats(pf); 10607 10608 /* set the switch config bit for the whole device to 10609 * support limited promisc or true promisc 10610 * when user requests promisc. The default is limited 10611 * promisc. 10612 */ 10613 10614 if ((pf->hw.pf_id == 0) && 10615 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) 10616 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; 10617 10618 if (pf->hw.pf_id == 0) { 10619 u16 valid_flags; 10620 10621 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; 10622 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 10623 NULL); 10624 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { 10625 dev_info(&pf->pdev->dev, 10626 "couldn't set switch config bits, err %s aq_err %s\n", 10627 i40e_stat_str(&pf->hw, ret), 10628 i40e_aq_str(&pf->hw, 10629 pf->hw.aq.asq_last_status)); 10630 /* not a fatal problem, just keep going */ 10631 } 10632 } 10633 10634 /* first time setup */ 10635 if (pf->lan_vsi == I40E_NO_VSI || reinit) { 10636 struct i40e_vsi *vsi = NULL; 10637 u16 uplink_seid; 10638 10639 /* Set up the PF VSI associated with the PF's main VSI 10640 * that is already in the HW switch 10641 */ 10642 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 10643 uplink_seid = pf->veb[pf->lan_veb]->seid; 10644 else 10645 uplink_seid = pf->mac_seid; 10646 if (pf->lan_vsi == I40E_NO_VSI) 10647 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); 10648 else if (reinit) 10649 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); 10650 if (!vsi) { 10651 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); 10652 i40e_fdir_teardown(pf); 10653 return -EAGAIN; 10654 } 10655 } else { 10656 /* force a reset of TC and queue layout configurations */ 10657 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 10658 10659 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 10660 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 10661 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 10662 } 10663 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); 10664 10665 i40e_fdir_sb_setup(pf); 10666 10667 /* Setup static PF queue filter control settings */ 10668 ret = i40e_setup_pf_filter_control(pf); 10669 if (ret) { 10670 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", 10671 ret); 10672 /* Failure here should not stop continuing other steps */ 10673 } 10674 10675 /* enable RSS in the HW, even for only one queue, as the stack can use 10676 * the hash 10677 */ 10678 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) 10679 i40e_pf_config_rss(pf); 10680 10681 /* fill in link information and enable LSE reporting */ 10682 i40e_update_link_info(&pf->hw); 10683 i40e_link_event(pf); 10684 10685 /* Initialize user-specific link properties */ 10686 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & 10687 I40E_AQ_AN_COMPLETED) ? 
true : false); 10688 10689 i40e_ptp_init(pf); 10690 10691 return ret; 10692 } 10693 10694 /** 10695 * i40e_determine_queue_usage - Work out queue distribution 10696 * @pf: board private structure 10697 **/ 10698 static void i40e_determine_queue_usage(struct i40e_pf *pf) 10699 { 10700 int queues_left; 10701 10702 pf->num_lan_qps = 0; 10703 #ifdef I40E_FCOE 10704 pf->num_fcoe_qps = 0; 10705 #endif 10706 10707 /* Find the max queues to be put into basic use. We'll always be 10708 * using TC0, whether or not DCB is running, and TC0 will get the 10709 * big RSS set. 10710 */ 10711 queues_left = pf->hw.func_caps.num_tx_qp; 10712 10713 if ((queues_left == 1) || 10714 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 10715 /* one qp for PF, no queues for anything else */ 10716 queues_left = 0; 10717 pf->alloc_rss_size = pf->num_lan_qps = 1; 10718 10719 /* make sure all the fancies are disabled */ 10720 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10721 I40E_FLAG_IWARP_ENABLED | 10722 #ifdef I40E_FCOE 10723 I40E_FLAG_FCOE_ENABLED | 10724 #endif 10725 I40E_FLAG_FD_SB_ENABLED | 10726 I40E_FLAG_FD_ATR_ENABLED | 10727 I40E_FLAG_DCB_CAPABLE | 10728 I40E_FLAG_DCB_ENABLED | 10729 I40E_FLAG_SRIOV_ENABLED | 10730 I40E_FLAG_VMDQ_ENABLED); 10731 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 10732 I40E_FLAG_FD_SB_ENABLED | 10733 I40E_FLAG_FD_ATR_ENABLED | 10734 I40E_FLAG_DCB_CAPABLE))) { 10735 /* one qp for PF */ 10736 pf->alloc_rss_size = pf->num_lan_qps = 1; 10737 queues_left -= pf->num_lan_qps; 10738 10739 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 10740 I40E_FLAG_IWARP_ENABLED | 10741 #ifdef I40E_FCOE 10742 I40E_FLAG_FCOE_ENABLED | 10743 #endif 10744 I40E_FLAG_FD_SB_ENABLED | 10745 I40E_FLAG_FD_ATR_ENABLED | 10746 I40E_FLAG_DCB_ENABLED | 10747 I40E_FLAG_VMDQ_ENABLED); 10748 } else { 10749 /* Not enough queues for all TCs */ 10750 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 10751 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 10752 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | 10753 I40E_FLAG_DCB_ENABLED); 10754 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 10755 } 10756 pf->num_lan_qps = max_t(int, pf->rss_size_max, 10757 num_online_cpus()); 10758 pf->num_lan_qps = min_t(int, pf->num_lan_qps, 10759 pf->hw.func_caps.num_tx_qp); 10760 10761 queues_left -= pf->num_lan_qps; 10762 } 10763 10764 #ifdef I40E_FCOE 10765 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 10766 if (I40E_DEFAULT_FCOE <= queues_left) { 10767 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; 10768 } else if (I40E_MINIMUM_FCOE <= queues_left) { 10769 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; 10770 } else { 10771 pf->num_fcoe_qps = 0; 10772 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 10773 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); 10774 } 10775 10776 queues_left -= pf->num_fcoe_qps; 10777 } 10778 10779 #endif 10780 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10781 if (queues_left > 1) { 10782 queues_left -= 1; /* save 1 queue for FD */ 10783 } else { 10784 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 10785 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n"); 10786 } 10787 } 10788 10789 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 10790 pf->num_vf_qps && pf->num_req_vfs && queues_left) { 10791 pf->num_req_vfs = min_t(int, pf->num_req_vfs, 10792 (queues_left / pf->num_vf_qps)); 10793 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); 10794 } 10795 10796 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 10797 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { 10798 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, 10799 (queues_left / pf->num_vmdq_qps)); 10800 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); 10801 } 10802 10803 pf->queues_left = queues_left; 10804 dev_dbg(&pf->pdev->dev, 10805 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", 10806 pf->hw.func_caps.num_tx_qp, 10807 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), 10808 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, 10809 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, 10810 queues_left); 10811 #ifdef I40E_FCOE 10812 dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); 10813 #endif 10814 } 10815 10816 /** 10817 * i40e_setup_pf_filter_control - Setup PF static filter control 10818 * @pf: PF to be setup 10819 * 10820 * i40e_setup_pf_filter_control sets up a PF's initial filter control 10821 * settings. If PE/FCoE are enabled then it will also set the per PF 10822 * based filter sizes required for them. It also enables Flow director, 10823 * ethertype and macvlan type filter settings for the pf. 10824 * 10825 * Returns 0 on success, negative on failure 10826 **/ 10827 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) 10828 { 10829 struct i40e_filter_control_settings *settings = &pf->filter_settings; 10830 10831 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; 10832 10833 /* Flow Director is enabled */ 10834 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) 10835 settings->enable_fdir = true; 10836 10837 /* Ethtype and MACVLAN filters enabled for PF */ 10838 settings->enable_ethtype = true; 10839 settings->enable_macvlan = true; 10840 10841 if (i40e_set_filter_control(&pf->hw, settings)) 10842 return -ENOENT; 10843 10844 return 0; 10845 } 10846 10847 #define INFO_STRING_LEN 255 10848 #define REMAIN(__x) (INFO_STRING_LEN - (__x)) 10849 static void i40e_print_features(struct i40e_pf *pf) 10850 { 10851 struct i40e_hw *hw = &pf->hw; 10852 char *buf; 10853 int i; 10854 10855 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); 10856 if (!buf) 10857 return; 10858 10859 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); 10860 #ifdef CONFIG_PCI_IOV 10861 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); 10862 #endif 10863 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", 10864 pf->hw.func_caps.num_vsis, 10865 pf->vsi[pf->lan_vsi]->num_queue_pairs); 10866 if (pf->flags & I40E_FLAG_RSS_ENABLED) 10867 i += snprintf(&buf[i], REMAIN(i), " RSS"); 10868 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 10869 i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); 10870 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 10871 i += snprintf(&buf[i], REMAIN(i), " FD_SB"); 10872 i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); 10873 } 10874 if (pf->flags & I40E_FLAG_DCB_CAPABLE) 10875 i += snprintf(&buf[i], REMAIN(i), " DCB"); 10876 i += snprintf(&buf[i], REMAIN(i), " VxLAN"); 10877 i += snprintf(&buf[i], REMAIN(i), " Geneve"); 10878 if (pf->flags & I40E_FLAG_PTP) 10879 i += snprintf(&buf[i], REMAIN(i), " PTP"); 10880 #ifdef I40E_FCOE 10881 if (pf->flags & I40E_FLAG_FCOE_ENABLED) 
        i += snprintf(&buf[i], REMAIN(i), " FCOE");
#endif
    if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
        i += snprintf(&buf[i], REMAIN(i), " VEB");
    else
        i += snprintf(&buf[i], REMAIN(i), " VEPA");

    dev_info(&pf->pdev->dev, "%s\n", buf);
    kfree(buf);
    WARN_ON(i > INFO_STRING_LEN);
}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 *
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address in Open Firmware on systems that support it,
 * and use IDPROM on SPARC if no OF address is found. On return,
 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
 * has been selected.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
    pf->flags &= ~I40E_FLAG_PF_MAC;
    if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
        pf->flags |= I40E_FLAG_PF_MAC;
}

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct i40e_aq_get_phy_abilities_resp abilities;
    struct i40e_pf *pf;
    struct i40e_hw *hw;
    static u16 pfs_found;
    u16 wol_nvm_bits;
    u16 link_status;
    int err;
    u32 val;
    u32 i;
    u8 set_fc_aq_fail;

    err = pci_enable_device_mem(pdev);
    if (err)
        return err;

    /* set up for high or low dma */
    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (err) {
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev,
                "DMA configuration failed: 0x%x\n", err);
            goto err_dma;
        }
    }

    /* set up pci connections */
    err = pci_request_mem_regions(pdev, i40e_driver_name);
    if (err) {
        dev_info(&pdev->dev,
             "pci_request_mem_regions failed %d\n", err);
        goto err_pci_reg;
    }

    pci_enable_pcie_error_reporting(pdev);
    pci_set_master(pdev);

    /* Now that we have a PCI connection, we need to do the
     * low level device setup. This is primarily setting up
     * the Admin Queue structures and then querying for the
     * device's current profile information.
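     *
     * The Admin Queue pair sized a little further down is the FW
     * mailbox: the ASQ (send queue) carries driver commands, the
     * ARQ (receive queue) carries firmware events and responses.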
10966 */ 10967 pf = kzalloc(sizeof(*pf), GFP_KERNEL); 10968 if (!pf) { 10969 err = -ENOMEM; 10970 goto err_pf_alloc; 10971 } 10972 pf->next_vsi = 0; 10973 pf->pdev = pdev; 10974 set_bit(__I40E_DOWN, &pf->state); 10975 10976 hw = &pf->hw; 10977 hw->back = pf; 10978 10979 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), 10980 I40E_MAX_CSR_SPACE); 10981 10982 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); 10983 if (!hw->hw_addr) { 10984 err = -EIO; 10985 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", 10986 (unsigned int)pci_resource_start(pdev, 0), 10987 pf->ioremap_len, err); 10988 goto err_ioremap; 10989 } 10990 hw->vendor_id = pdev->vendor; 10991 hw->device_id = pdev->device; 10992 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 10993 hw->subsystem_vendor_id = pdev->subsystem_vendor; 10994 hw->subsystem_device_id = pdev->subsystem_device; 10995 hw->bus.device = PCI_SLOT(pdev->devfn); 10996 hw->bus.func = PCI_FUNC(pdev->devfn); 10997 pf->instance = pfs_found; 10998 10999 /* set up the locks for the AQ, do this only once in probe 11000 * and destroy them only once in remove 11001 */ 11002 mutex_init(&hw->aq.asq_mutex); 11003 mutex_init(&hw->aq.arq_mutex); 11004 11005 pf->msg_enable = netif_msg_init(debug, 11006 NETIF_MSG_DRV | 11007 NETIF_MSG_PROBE | 11008 NETIF_MSG_LINK); 11009 if (debug < -1) 11010 pf->hw.debug_mask = debug; 11011 11012 /* do a special CORER for clearing PXE mode once at init */ 11013 if (hw->revision_id == 0 && 11014 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { 11015 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 11016 i40e_flush(hw); 11017 msleep(200); 11018 pf->corer_count++; 11019 11020 i40e_clear_pxe_mode(hw); 11021 } 11022 11023 /* Reset here to make sure all is clean and to define PF 'n' */ 11024 i40e_clear_hw(hw); 11025 err = i40e_pf_reset(hw); 11026 if (err) { 11027 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 11028 goto err_pf_reset; 11029 } 11030 pf->pfr_count++; 11031 11032 hw->aq.num_arq_entries = I40E_AQ_LEN; 11033 hw->aq.num_asq_entries = I40E_AQ_LEN; 11034 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; 11035 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 11036 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 11037 11038 snprintf(pf->int_name, sizeof(pf->int_name) - 1, 11039 "%s-%s:misc", 11040 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); 11041 11042 err = i40e_init_shared_code(hw); 11043 if (err) { 11044 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", 11045 err); 11046 goto err_pf_reset; 11047 } 11048 11049 /* set up a default setting for link flow control */ 11050 pf->hw.fc.requested_mode = I40E_FC_NONE; 11051 11052 err = i40e_init_adminq(hw); 11053 if (err) { 11054 if (err == I40E_ERR_FIRMWARE_API_VERSION) 11055 dev_info(&pdev->dev, 11056 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); 11057 else 11058 dev_info(&pdev->dev, 11059 "The driver for the device stopped because the device firmware failed to init. 
Try updating your NVM image.\n"); 11060 11061 goto err_pf_reset; 11062 } 11063 11064 /* provide nvm, fw, api versions */ 11065 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", 11066 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, 11067 hw->aq.api_maj_ver, hw->aq.api_min_ver, 11068 i40e_nvm_version_str(hw)); 11069 11070 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 11071 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) 11072 dev_info(&pdev->dev, 11073 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 11074 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 11075 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) 11076 dev_info(&pdev->dev, 11077 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 11078 11079 i40e_verify_eeprom(pf); 11080 11081 /* Rev 0 hardware was never productized */ 11082 if (hw->revision_id < 1) 11083 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 11084 11085 i40e_clear_pxe_mode(hw); 11086 err = i40e_get_capabilities(pf); 11087 if (err) 11088 goto err_adminq_setup; 11089 11090 err = i40e_sw_init(pf); 11091 if (err) { 11092 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 11093 goto err_sw_init; 11094 } 11095 11096 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 11097 hw->func_caps.num_rx_qp, 11098 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 11099 if (err) { 11100 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 11101 goto err_init_lan_hmc; 11102 } 11103 11104 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 11105 if (err) { 11106 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 11107 err = -ENOENT; 11108 goto err_configure_lan_hmc; 11109 } 11110 11111 /* Disable LLDP for NICs that have firmware versions lower than v4.3. 
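     * (Stopping the FW agent is what allows software DCBX to take
     * over; this is inferred from the I40E_FLAG_STOP_FW_LLDP name
     * rather than stated anywhere here.)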
11112 * Ignore error return codes because if it was already disabled via 11113 * hardware settings this will fail 11114 */ 11115 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { 11116 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); 11117 i40e_aq_stop_lldp(hw, true, NULL); 11118 } 11119 11120 i40e_get_mac_addr(hw, hw->mac.addr); 11121 /* allow a platform config to override the HW addr */ 11122 i40e_get_platform_mac_addr(pdev, pf); 11123 if (!is_valid_ether_addr(hw->mac.addr)) { 11124 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 11125 err = -EIO; 11126 goto err_mac_addr; 11127 } 11128 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 11129 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); 11130 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 11131 if (is_valid_ether_addr(hw->mac.port_addr)) 11132 pf->flags |= I40E_FLAG_PORT_ID_VALID; 11133 #ifdef I40E_FCOE 11134 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); 11135 if (err) 11136 dev_info(&pdev->dev, 11137 "(non-fatal) SAN MAC retrieval failed: %d\n", err); 11138 if (!is_valid_ether_addr(hw->mac.san_addr)) { 11139 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", 11140 hw->mac.san_addr); 11141 ether_addr_copy(hw->mac.san_addr, hw->mac.addr); 11142 } 11143 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); 11144 #endif /* I40E_FCOE */ 11145 11146 pci_set_drvdata(pdev, pf); 11147 pci_save_state(pdev); 11148 #ifdef CONFIG_I40E_DCB 11149 err = i40e_init_pf_dcb(pf); 11150 if (err) { 11151 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); 11152 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); 11153 /* Continue without DCB enabled */ 11154 } 11155 #endif /* CONFIG_I40E_DCB */ 11156 11157 /* set up periodic task facility */ 11158 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 11159 pf->service_timer_period = HZ; 11160 11161 INIT_WORK(&pf->service_task, i40e_service_task); 11162 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 11163 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 11164 11165 /* NVM bit on means WoL disabled for the port */ 11166 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 11167 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) 11168 pf->wol_en = false; 11169 else 11170 pf->wol_en = true; 11171 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 11172 11173 /* set up the main switch operations */ 11174 i40e_determine_queue_usage(pf); 11175 err = i40e_init_interrupt_scheme(pf); 11176 if (err) 11177 goto err_switch_setup; 11178 11179 /* The number of VSIs reported by the FW is the minimum guaranteed 11180 * to us; HW supports far more and we share the remaining pool with 11181 * the other PFs. We allocate space for more than the guarantee with 11182 * the understanding that we might not get them all later. 11183 */ 11184 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) 11185 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; 11186 else 11187 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; 11188 11189 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
*/ 11190 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), 11191 GFP_KERNEL); 11192 if (!pf->vsi) { 11193 err = -ENOMEM; 11194 goto err_switch_setup; 11195 } 11196 11197 #ifdef CONFIG_PCI_IOV 11198 /* prep for VF support */ 11199 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 11200 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 11201 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 11202 if (pci_num_vf(pdev)) 11203 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 11204 } 11205 #endif 11206 err = i40e_setup_pf_switch(pf, false); 11207 if (err) { 11208 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 11209 goto err_vsis; 11210 } 11211 11212 /* Make sure flow control is set according to current settings */ 11213 err = i40e_set_fc(hw, &set_fc_aq_fail, true); 11214 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) 11215 dev_dbg(&pf->pdev->dev, 11216 "Set fc with err %s aq_err %s on get_phy_cap\n", 11217 i40e_stat_str(hw, err), 11218 i40e_aq_str(hw, hw->aq.asq_last_status)); 11219 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) 11220 dev_dbg(&pf->pdev->dev, 11221 "Set fc with err %s aq_err %s on set_phy_config\n", 11222 i40e_stat_str(hw, err), 11223 i40e_aq_str(hw, hw->aq.asq_last_status)); 11224 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) 11225 dev_dbg(&pf->pdev->dev, 11226 "Set fc with err %s aq_err %s on get_link_info\n", 11227 i40e_stat_str(hw, err), 11228 i40e_aq_str(hw, hw->aq.asq_last_status)); 11229 11230 /* if FDIR VSI was set up, start it now */ 11231 for (i = 0; i < pf->num_alloc_vsi; i++) { 11232 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 11233 i40e_vsi_open(pf->vsi[i]); 11234 break; 11235 } 11236 } 11237 11238 /* The driver only wants link up/down and module qualification 11239 * reports from firmware. Note the negative logic. 11240 */ 11241 err = i40e_aq_set_phy_int_mask(&pf->hw, 11242 ~(I40E_AQ_EVENT_LINK_UPDOWN | 11243 I40E_AQ_EVENT_MEDIA_NA | 11244 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); 11245 if (err) 11246 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", 11247 i40e_stat_str(&pf->hw, err), 11248 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 11249 11250 /* Reconfigure hardware for allowing smaller MSS in the case 11251 * of TSO, so that we avoid the MDD being fired and causing 11252 * a reset in the case of small MSS+TSO. 11253 */ 11254 val = rd32(hw, I40E_REG_MSS); 11255 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { 11256 val &= ~I40E_REG_MSS_MIN_MASK; 11257 val |= I40E_64BYTE_MSS; 11258 wr32(hw, I40E_REG_MSS, val); 11259 } 11260 11261 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { 11262 msleep(75); 11263 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 11264 if (err) 11265 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 11266 i40e_stat_str(&pf->hw, err), 11267 i40e_aq_str(&pf->hw, 11268 pf->hw.aq.asq_last_status)); 11269 } 11270 /* The main driver is (mostly) up and happy. We need to set this state 11271 * before setting up the misc vector or we get a race and the vector 11272 * ends up disabled forever. 11273 */ 11274 clear_bit(__I40E_DOWN, &pf->state); 11275 11276 /* In case of MSIX we are going to setup the misc vector right here 11277 * to handle admin queue events etc. In case of legacy and MSI 11278 * the misc functionality and queue processing is combined in 11279 * the same vector and that gets setup at open. 
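     *
     * With MSI-X, entry 0 of the MSI-X table is that misc vector;
     * i40e_setup_misc_vector() below requests it and points it at
     * the admin queue handler (the entry-0 detail is inferred from
     * the vector layout, not restated by this code).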

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, &pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	err = i40e_lan_add_device(pf);
	if (err)
		dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
			 err);

#ifdef I40E_FCOE
	/* create FCoE interface */
	i40e_fcoe_vsi_setup(pf);

#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}
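	/* Illustration only: i40e_set_pci_config_data() derives hw->bus.speed
	 * and hw->bus.width from the raw LNKSTA word read above. The standard
	 * PCIe fields (see linux/pci_regs.h) decode as:
	 *
	 *	speed = link_status & PCI_EXP_LNKSTA_CLS;
	 *		1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s
	 *	width = (link_status & PCI_EXP_LNKSTA_NLW) >>
	 *		PCI_EXP_LNKSTA_NLW_SHIFT;
	 */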

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
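	/* Note on the two calls above (our summary of the AQ API as used
	 * here): both take (hw, qualified_modules, report_init, ...). The
	 * first, with report_init == false, reports the currently configured
	 * abilities, from which we cache the requested link speeds; the
	 * second, with report_init == true, reports what the PHY supports,
	 * which the shared code uses to populate the supported phy types.
	 */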

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
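
/* The error labels above unwind in reverse order of acquisition, the usual
 * kernel probe idiom. A minimal sketch with hypothetical step names:
 *
 *	err = setup_a(); if (err) goto out;
 *	err = setup_b(); if (err) goto undo_a;
 *	err = setup_c(); if (err) goto undo_b;
 *	return 0;
 * undo_b:
 *	teardown_b();
 * undo_a:
 *	teardown_a();
 * out:
 *	return err;
 */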

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	if (pf->service_timer.data)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* remove attached clients */
	ret_code = i40e_lan_del_device(pf);
	if (ret_code) {
		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
			 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
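
/* The three pci_error_handlers callbacks below run in sequence during AER
 * recovery: error_detected() quiesces the device and asks the PCI core for
 * a slot reset, slot_reset() re-enables the freshly reset function and
 * checks that its registers are readable, and resume() rebuilds driver
 * state through the normal reset path.
 */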

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the channel state in which the error was reported
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
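
/* Register note (our reading of the Intel power-management naming;
 * illustration only): I40E_PFPM_APM_APME_MASK arms APM-style wake for the
 * function and I40E_PFPM_WUFC_MAG_MASK enables the magic-packet wake-up
 * filter, so the paired writes in i40e_shutdown() and i40e_suspend() arm
 * or disarm Wake-on-LAN according to pf->wol_en:
 *
 *	wr32(hw, I40E_PFPM_APM, pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0);
 *	wr32(hw, I40E_PFPM_WUFC, pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0);
 */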

#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: the PM state we are being asked to enter
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	int retval = 0;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_stop_misc_vector(pf);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return retval;
}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif /* CONFIG_PM */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* we will see if single thread per module is enough for now,
	 * it can't be any worse than using the system workqueue which
	 * was already single threaded
	 */
	i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);