/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 9
/* DRV_VERSION expands to "1.3.9-k" with the values above */
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
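
/* Note (added for clarity): MODULE_DEVICE_TABLE() exports the PCI IDs
 * above as module aliases, so hotplug/modprobe can autoload i40e when a
 * matching device appears; the {0, } sentinel terminates the table scan.
 */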

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}
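
		/* Illustrative walk (hypothetical pile contents): with
		 * needed = 2 and list = [used][free][used][free][free],
		 * the scan below fails at i = 1 (j stops at 1), resumes
		 * at i = 2, and succeeds at i = 3 with ret = 3 and
		 * search_hint = 5.
		 */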
		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the VSI with the given id
 * @pf: the PF structure to search
 * @id: id of the VSI we are searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
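
/* Escalation summary (added for clarity): repeated timeouts walk the
 * ladder above from VSI reinit (level 0) through PF, core, and global
 * resets; anything past level 3 takes the port down.  A timeout that
 * arrives more than 20 seconds after the last recovery restarts the
 * ladder at level 1 rather than level 0.
 */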

/**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-provided stats structure to fill in
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		/* the Rx ring sits right after its Tx ring in the same
		 * per-queue-pair allocation
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
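
/* Worked roll-over example for i40e_stat_update48() (illustrative
 * values, not from hardware): with *offset = 0xFFFFFFFFFF00 and a
 * counter that has wrapped to new_data = 0x40, new_data < *offset, so
 * the reported stat is (0x40 + BIT_ULL(48)) - 0xFFFFFFFFFF00 = 0x140,
 * i.e. the 0x140 units counted across the 48-bit wrap.
 */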

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;	/* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* Collect Link XOFF stats when PFC is disabled */
	if (!dcb_cfg->pfc.pfcenable) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];

		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);	/* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* A VLAN of -1 on every filter denotes "not in VLAN mode";
	 * any filter with vlan >= 0 puts the VSI in VLAN mode, so we
	 * have to walk the whole list to be sure.
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
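
/* Usage sketch (hypothetical state): if the list already holds filters
 * for some MAC on VLANs 0 and 5, i40e_put_mac_in_vlan() above adds the
 * new MAC on VLAN 0 and VLAN 5 as well, so the new address behaves the
 * same across every VLAN the VSI already knows about.
 */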

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
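
/* Reference-count example (hypothetical sequence): adding the same
 * MAC/VLAN once with is_netdev and once with is_vf leaves a single
 * filter with counter == 2; i40e_del_filter() below must then be
 * called once per flavor before counter reaches 0 and the entry is
 * scheduled for removal from the firmware list.
 */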

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
	}

	i40e_sync_vsi_filters(vsi);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in the non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In the MFP case we can have far fewer MSI-X vectors available,
	 * so we need to lower the used queue count.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	else
		qcount = vsi->alloc_queue_pairs;
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
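
			/* Worked example (illustrative): qcount = 6 makes
			 * the loop above stop at pow = 3, so the TC's
			 * queue-count field in qmap carries the exponent
			 * of the next power of two at or above qcount
			 * (2^3 = 8 queues).
			 */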
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(vsi, f->macaddr,
					I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
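
/* Note (added for clarity): ndo_set_rx_mode runs under the netdev
 * address-list lock, which is why the handler above only edits the
 * driver's filter list (GFP_ATOMIC adds) and raises the FILTER_CHANGED
 * and FILTER_SYNC flags; the actual AdminQ traffic happens later, from
 * the service task, via i40e_sync_vsi_filters() below.
 */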

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int aq_err = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				aq_err = pf->hw.aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0,
				       filter_list_len * sizeof(*del_list));

				if (ret && aq_err != I40E_AQ_RC_ENOENT)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
						 i40e_stat_str(&pf->hw, ret),
						 i40e_aq_str(&pf->hw, aq_err));
			}
		}
		if (num_del) {
			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
						     del_list, num_del, NULL);
			aq_err = pf->hw.aq.asq_last_status;
			num_del = 0;

			if (ret && aq_err != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw, aq_err));
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
0 : f->vlan)); 1876 add_list[num_add].queue_number = 0; 1877 1878 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 1879 add_list[num_add].flags = cpu_to_le16(cmd_flags); 1880 num_add++; 1881 1882 /* flush a full buffer */ 1883 if (num_add == filter_list_len) { 1884 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 1885 add_list, num_add, 1886 NULL); 1887 aq_err = pf->hw.aq.asq_last_status; 1888 num_add = 0; 1889 1890 if (ret) 1891 break; 1892 memset(add_list, 0, sizeof(*add_list)); 1893 } 1894 } 1895 if (num_add) { 1896 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 1897 add_list, num_add, NULL); 1898 aq_err = pf->hw.aq.asq_last_status; 1899 num_add = 0; 1900 } 1901 kfree(add_list); 1902 add_list = NULL; 1903 1904 if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { 1905 dev_info(&pf->pdev->dev, 1906 "add filter failed, err %s aq_err %s\n", 1907 i40e_stat_str(&pf->hw, ret), 1908 i40e_aq_str(&pf->hw, aq_err)); 1909 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 1910 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1911 &vsi->state)) { 1912 promisc_forced_on = true; 1913 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1914 &vsi->state); 1915 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); 1916 } 1917 } 1918 } 1919 1920 /* check for changes in promiscuous modes */ 1921 if (changed_flags & IFF_ALLMULTI) { 1922 bool cur_multipromisc; 1923 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 1924 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 1925 vsi->seid, 1926 cur_multipromisc, 1927 NULL); 1928 if (ret) 1929 dev_info(&pf->pdev->dev, 1930 "set multi promisc failed, err %s aq_err %s\n", 1931 i40e_stat_str(&pf->hw, ret), 1932 i40e_aq_str(&pf->hw, 1933 pf->hw.aq.asq_last_status)); 1934 } 1935 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 1936 bool cur_promisc; 1937 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 1938 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1939 &vsi->state)); 1940 if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { 1941 /* set defport ON for Main VSI instead of true promisc 1942 * this way we will get all unicast/multicast and VLAN 1943 * promisc behavior but will not get VF or VMDq traffic 1944 * replicated on the Main VSI. 
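			 *
			 * (The PF reset issued just below rebuilds the switch
			 * configuration; that rebuild is what is expected to
			 * apply the new defport setting.)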
1945 */ 1946 if (pf->cur_promisc != cur_promisc) { 1947 pf->cur_promisc = cur_promisc; 1948 i40e_do_reset_safe(pf, 1949 BIT(__I40E_PF_RESET_REQUESTED)); 1950 } 1951 } else { 1952 ret = i40e_aq_set_vsi_unicast_promiscuous( 1953 &vsi->back->hw, 1954 vsi->seid, 1955 cur_promisc, NULL); 1956 if (ret) 1957 dev_info(&pf->pdev->dev, 1958 "set unicast promisc failed, err %d, aq_err %d\n", 1959 ret, pf->hw.aq.asq_last_status); 1960 ret = i40e_aq_set_vsi_multicast_promiscuous( 1961 &vsi->back->hw, 1962 vsi->seid, 1963 cur_promisc, NULL); 1964 if (ret) 1965 dev_info(&pf->pdev->dev, 1966 "set multicast promisc failed, err %d, aq_err %d\n", 1967 ret, pf->hw.aq.asq_last_status); 1968 } 1969 ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 1970 vsi->seid, 1971 cur_promisc, NULL); 1972 if (ret) 1973 dev_info(&pf->pdev->dev, 1974 "set brdcast promisc failed, err %s, aq_err %s\n", 1975 i40e_stat_str(&pf->hw, ret), 1976 i40e_aq_str(&pf->hw, 1977 pf->hw.aq.asq_last_status)); 1978 } 1979 1980 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 1981 return 0; 1982 } 1983 1984 /** 1985 * i40e_sync_filters_subtask - Sync the VSI filter list with HW 1986 * @pf: board private structure 1987 **/ 1988 static void i40e_sync_filters_subtask(struct i40e_pf *pf) 1989 { 1990 int v; 1991 1992 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) 1993 return; 1994 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 1995 1996 for (v = 0; v < pf->num_alloc_vsi; v++) { 1997 if (pf->vsi[v] && 1998 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) 1999 i40e_sync_vsi_filters(pf->vsi[v]); 2000 } 2001 } 2002 2003 /** 2004 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit 2005 * @netdev: network interface device structure 2006 * @new_mtu: new value for maximum frame size 2007 * 2008 * Returns 0 on success, negative on failure 2009 **/ 2010 static int i40e_change_mtu(struct net_device *netdev, int new_mtu) 2011 { 2012 struct i40e_netdev_priv *np = netdev_priv(netdev); 2013 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 2014 struct i40e_vsi *vsi = np->vsi; 2015 2016 /* MTU < 68 is an error and causes problems on some kernels */ 2017 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) 2018 return -EINVAL; 2019 2020 netdev_info(netdev, "changing MTU from %d to %d\n", 2021 netdev->mtu, new_mtu); 2022 netdev->mtu = new_mtu; 2023 if (netif_running(netdev)) 2024 i40e_vsi_reinit_locked(vsi); 2025 2026 return 0; 2027 } 2028 2029 /** 2030 * i40e_ioctl - Access the hwtstamp interface 2031 * @netdev: network interface device structure 2032 * @ifr: interface request data 2033 * @cmd: ioctl command 2034 **/ 2035 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2036 { 2037 struct i40e_netdev_priv *np = netdev_priv(netdev); 2038 struct i40e_pf *pf = np->vsi->back; 2039 2040 switch (cmd) { 2041 case SIOCGHWTSTAMP: 2042 return i40e_ptp_get_ts_config(pf, ifr); 2043 case SIOCSHWTSTAMP: 2044 return i40e_ptp_set_ts_config(pf, ifr); 2045 default: 2046 return -EOPNOTSUPP; 2047 } 2048 } 2049 2050 /** 2051 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI 2052 * @vsi: the vsi being adjusted 2053 **/ 2054 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) 2055 { 2056 struct i40e_vsi_context ctxt; 2057 i40e_status ret; 2058 2059 if ((vsi->info.valid_sections & 2060 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && 2061 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) 2062 return; /* already enabled */ 2063 2064 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2065 
vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	/* When adding a vlan tag, check whether it is the first real tag,
	 * i.e. whether an I40E_VLAN_ANY (-1) "tag" filter still exists, and
	 * if so replace the -1 "tag" with 0, so that we accept untagged
	 * traffic plus the specified tagged traffic (and not any tagged and
	 * untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}

/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1.
This signifies that we should from now 2235 * on accept any traffic (with any tag present, or untagged) 2236 */ 2237 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2238 if (is_netdev) { 2239 if (f->vlan && 2240 ether_addr_equal(netdev->dev_addr, f->macaddr)) 2241 filter_count++; 2242 } 2243 2244 if (f->vlan) 2245 filter_count++; 2246 } 2247 2248 if (!filter_count && is_netdev) { 2249 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 2250 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 2251 is_vf, is_netdev); 2252 if (!f) { 2253 dev_info(&vsi->back->pdev->dev, 2254 "Could not add filter %d for %pM\n", 2255 I40E_VLAN_ANY, netdev->dev_addr); 2256 return -ENOMEM; 2257 } 2258 } 2259 2260 if (!filter_count) { 2261 list_for_each_entry(f, &vsi->mac_filter_list, list) { 2262 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 2263 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 2264 is_vf, is_netdev); 2265 if (!add_f) { 2266 dev_info(&vsi->back->pdev->dev, 2267 "Could not add filter %d for %pM\n", 2268 I40E_VLAN_ANY, f->macaddr); 2269 return -ENOMEM; 2270 } 2271 } 2272 } 2273 2274 if (test_bit(__I40E_DOWN, &vsi->back->state) || 2275 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 2276 return 0; 2277 2278 return i40e_sync_vsi_filters(vsi); 2279 } 2280 2281 /** 2282 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2283 * @netdev: network interface to be adjusted 2284 * @vid: vlan id to be added 2285 * 2286 * net_device_ops implementation for adding vlan ids 2287 **/ 2288 #ifdef I40E_FCOE 2289 int i40e_vlan_rx_add_vid(struct net_device *netdev, 2290 __always_unused __be16 proto, u16 vid) 2291 #else 2292 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 2293 __always_unused __be16 proto, u16 vid) 2294 #endif 2295 { 2296 struct i40e_netdev_priv *np = netdev_priv(netdev); 2297 struct i40e_vsi *vsi = np->vsi; 2298 int ret = 0; 2299 2300 if (vid > 4095) 2301 return -EINVAL; 2302 2303 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 2304 2305 /* If the network stack called us with vid = 0 then 2306 * it is asking to receive priority tagged packets with 2307 * vlan id 0. Our HW receives them by default when configured 2308 * to receive untagged packets so there is no need to add an 2309 * extra filter for vlan 0 tagged packets. 
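	 * (For example, a vlan 0 interface created with
	 * "ip link add link eth0 name eth0.0 type vlan id 0" keeps
	 * receiving its traffic without any extra HW filter.)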
	 */
	if (vid)
		ret = i40e_vsi_add_vlan(vsi, vid);

	if (!ret && (vid < VLAN_N_VID))
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
			  __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}

/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use i40e_vlan_stripping_disable() to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
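 *
 * A minimal sketch of the expected caller pattern (not the only valid
 * usage; i40e_vsi_free_tx_resources() below is safe on partly-built
 * rings):
 *
 *	if (i40e_vsi_setup_tx_resources(vsi))
 *		i40e_vsi_free_tx_resources(vsi);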
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			i40e_free_tx_resources(vsi->tx_rings[i]);
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_setup_ddp_resources(vsi);
#endif
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_free_ddp_resources(vsi);
#endif
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	if (!ring->q_vector || !ring->netdev)
		return;

	/* In single TC mode, enable XPS */
	if (vsi->tc_config.numtc <= 1) {
		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		/* Disable XPS to allow selection based on TC */
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context and related
 * settings
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
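 *
 * Note: the head writeback slot programmed below lives immediately past
 * the descriptors, i.e.
 *
 *	head_wb_addr = ring->dma + ring->count * sizeof(struct i40e_tx_desc)
 *
 * so the ring's DMA allocation is assumed to reserve that extra space
 * (i40e_setup_tx_descriptors() sizes the buffer accordingly).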
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
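 *
 * The dbuff/hbuff fields are programmed in hardware units (128-byte and
 * 64-byte granules respectively, hence the DBUFF/HBUFF shifts below),
 * which is why i40e_vsi_configure_rx() aligns the buffer lengths first.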
2625 **/ 2626 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2627 { 2628 struct i40e_vsi *vsi = ring->vsi; 2629 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2630 u16 pf_q = vsi->base_queue + ring->queue_index; 2631 struct i40e_hw *hw = &vsi->back->hw; 2632 struct i40e_hmc_obj_rxq rx_ctx; 2633 i40e_status err = 0; 2634 2635 ring->state = 0; 2636 2637 /* clear the context structure first */ 2638 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2639 2640 ring->rx_buf_len = vsi->rx_buf_len; 2641 ring->rx_hdr_len = vsi->rx_hdr_len; 2642 2643 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2644 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2645 2646 rx_ctx.base = (ring->dma / 128); 2647 rx_ctx.qlen = ring->count; 2648 2649 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2650 set_ring_16byte_desc_enabled(ring); 2651 rx_ctx.dsize = 0; 2652 } else { 2653 rx_ctx.dsize = 1; 2654 } 2655 2656 rx_ctx.dtype = vsi->dtype; 2657 if (vsi->dtype) { 2658 set_ring_ps_enabled(ring); 2659 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2660 I40E_RX_SPLIT_IP | 2661 I40E_RX_SPLIT_TCP_UDP | 2662 I40E_RX_SPLIT_SCTP; 2663 } else { 2664 rx_ctx.hsplit_0 = 0; 2665 } 2666 2667 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2668 (chain_len * ring->rx_buf_len)); 2669 if (hw->revision_id == 0) 2670 rx_ctx.lrxqthresh = 0; 2671 else 2672 rx_ctx.lrxqthresh = 2; 2673 rx_ctx.crcstrip = 1; 2674 rx_ctx.l2tsel = 1; 2675 rx_ctx.showiv = 1; 2676 #ifdef I40E_FCOE 2677 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2678 #endif 2679 /* set the prefena field to 1 because the manual says to */ 2680 rx_ctx.prefena = 1; 2681 2682 /* clear the context in the HMC */ 2683 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2684 if (err) { 2685 dev_info(&vsi->back->pdev->dev, 2686 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2687 ring->queue_index, pf_q, err); 2688 return -ENOMEM; 2689 } 2690 2691 /* set the context in the HMC */ 2692 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2693 if (err) { 2694 dev_info(&vsi->back->pdev->dev, 2695 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2696 ring->queue_index, pf_q, err); 2697 return -ENOMEM; 2698 } 2699 2700 /* cache tail for quicker writes, and clear the reg before use */ 2701 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2702 writel(0, ring->tail); 2703 2704 if (ring_is_ps_enabled(ring)) { 2705 i40e_alloc_rx_headers(ring); 2706 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); 2707 } else { 2708 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); 2709 } 2710 2711 return 0; 2712 } 2713 2714 /** 2715 * i40e_vsi_configure_tx - Configure the VSI for Tx 2716 * @vsi: VSI structure describing this set of rings and resources 2717 * 2718 * Configure the Tx VSI for operation. 2719 **/ 2720 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2721 { 2722 int err = 0; 2723 u16 i; 2724 2725 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2726 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2727 2728 return err; 2729 } 2730 2731 /** 2732 * i40e_vsi_configure_rx - Configure the VSI for Rx 2733 * @vsi: the VSI being configured 2734 * 2735 * Configure the Rx VSI for operation. 
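 *
 * Worked example of the max_frame math below: with an MTU of 9000,
 * max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
 * = 9022 bytes, while the default 1500-byte MTU simply uses the fixed
 * I40E_RXBUFFER_2048 value.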
2736 **/ 2737 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2738 { 2739 int err = 0; 2740 u16 i; 2741 2742 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2743 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2744 + ETH_FCS_LEN + VLAN_HLEN; 2745 else 2746 vsi->max_frame = I40E_RXBUFFER_2048; 2747 2748 /* figure out correct receive buffer length */ 2749 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2750 I40E_FLAG_RX_PS_ENABLED)) { 2751 case I40E_FLAG_RX_1BUF_ENABLED: 2752 vsi->rx_hdr_len = 0; 2753 vsi->rx_buf_len = vsi->max_frame; 2754 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2755 break; 2756 case I40E_FLAG_RX_PS_ENABLED: 2757 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2758 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2759 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2760 break; 2761 default: 2762 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2763 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2764 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2765 break; 2766 } 2767 2768 #ifdef I40E_FCOE 2769 /* setup rx buffer for FCoE */ 2770 if ((vsi->type == I40E_VSI_FCOE) && 2771 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { 2772 vsi->rx_hdr_len = 0; 2773 vsi->rx_buf_len = I40E_RXBUFFER_3072; 2774 vsi->max_frame = I40E_RXBUFFER_3072; 2775 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2776 } 2777 2778 #endif /* I40E_FCOE */ 2779 /* round up for the chip's needs */ 2780 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2781 BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); 2782 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2783 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); 2784 2785 /* set up individual rings */ 2786 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2787 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2788 2789 return err; 2790 } 2791 2792 /** 2793 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2794 * @vsi: ptr to the VSI 2795 **/ 2796 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2797 { 2798 struct i40e_ring *tx_ring, *rx_ring; 2799 u16 qoffset, qcount; 2800 int i, n; 2801 2802 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { 2803 /* Reset the TC information */ 2804 for (i = 0; i < vsi->num_queue_pairs; i++) { 2805 rx_ring = vsi->rx_rings[i]; 2806 tx_ring = vsi->tx_rings[i]; 2807 rx_ring->dcb_tc = 0; 2808 tx_ring->dcb_tc = 0; 2809 } 2810 } 2811 2812 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2813 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) 2814 continue; 2815 2816 qoffset = vsi->tc_config.tc_info[n].qoffset; 2817 qcount = vsi->tc_config.tc_info[n].qcount; 2818 for (i = qoffset; i < (qoffset + qcount); i++) { 2819 rx_ring = vsi->rx_rings[i]; 2820 tx_ring = vsi->tx_rings[i]; 2821 rx_ring->dcb_tc = n; 2822 tx_ring->dcb_tc = n; 2823 } 2824 } 2825 } 2826 2827 /** 2828 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 2829 * @vsi: ptr to the VSI 2830 **/ 2831 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 2832 { 2833 if (vsi->netdev) 2834 i40e_set_rx_mode(vsi->netdev); 2835 } 2836 2837 /** 2838 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 2839 * @vsi: Pointer to the targeted VSI 2840 * 2841 * This function replays the hlist on the hw where all the SB Flow Director 2842 * filters were saved. 
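 *
 * This runs on the rebuild path after a reset has cleared the hardware
 * tables; the driver-side hlist is treated as the source of truth.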
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_q_vector *q_vector;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 val;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX
			       << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX
			       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					<< I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2959 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2960 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2961 2962 if (pf->flags & I40E_FLAG_IWARP_ENABLED) 2963 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; 2964 2965 if (pf->flags & I40E_FLAG_PTP) 2966 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2967 2968 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2969 2970 /* SW_ITR_IDX = 0, but don't change INTENA */ 2971 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 2972 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 2973 2974 /* OTHER_ITR_IDX = 0 */ 2975 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2976 } 2977 2978 /** 2979 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 2980 * @vsi: the VSI being configured 2981 **/ 2982 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2983 { 2984 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 2985 struct i40e_pf *pf = vsi->back; 2986 struct i40e_hw *hw = &pf->hw; 2987 u32 val; 2988 2989 /* set the ITR configuration */ 2990 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2991 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2992 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 2993 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2994 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2995 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2996 2997 i40e_enable_misc_int_causes(pf); 2998 2999 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 3000 wr32(hw, I40E_PFINT_LNKLST0, 0); 3001 3002 /* Associate the queue pair to the vector and enable the queue int */ 3003 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3004 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 3005 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 3006 3007 wr32(hw, I40E_QINT_RQCTL(0), val); 3008 3009 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3010 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 3011 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 3012 3013 wr32(hw, I40E_QINT_TQCTL(0), val); 3014 i40e_flush(hw); 3015 } 3016 3017 /** 3018 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 3019 * @pf: board private structure 3020 **/ 3021 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 3022 { 3023 struct i40e_hw *hw = &pf->hw; 3024 3025 wr32(hw, I40E_PFINT_DYN_CTL0, 3026 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3027 i40e_flush(hw); 3028 } 3029 3030 /** 3031 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 3032 * @pf: board private structure 3033 **/ 3034 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 3035 { 3036 struct i40e_hw *hw = &pf->hw; 3037 u32 val; 3038 3039 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 3040 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 3041 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 3042 3043 wr32(hw, I40E_PFINT_DYN_CTL0, val); 3044 i40e_flush(hw); 3045 } 3046 3047 /** 3048 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 3049 * @vsi: pointer to a vsi 3050 * @vector: enable a particular Hw Interrupt vector 3051 **/ 3052 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) 3053 { 3054 struct i40e_pf *pf = vsi->back; 3055 struct i40e_hw *hw = &pf->hw; 3056 u32 val; 3057 3058 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 3059 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 3060 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 3061 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 3062 /* skip the flush */ 3063 } 3064 3065 /** 3066 * i40e_irq_dynamic_disable - Disable default interrupt generation settings 3067 * @vsi: 
pointer to a vsi
 * @vector: disable a particular Hw Interrupt vector
 **/
void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
	i40e_flush(hw);
}

/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(pf->msix_entries[base + vector].vector,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "%s: request_irq failed, error: %d\n",
				 __func__, err);
			goto free_queue_irqs;
		}
		/* assign the mask for this irq */
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      NULL);
		/* free with the same dev_id that was passed to request_irq
		 * above: the q_vector pointer itself, not its address
		 */
		free_irq(pf->msix_entries[base + vector].vector,
			 vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}

/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));

		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}

/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e.
the clean is finished) 3363 **/ 3364 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 3365 { 3366 struct i40e_vsi *vsi = tx_ring->vsi; 3367 u16 i = tx_ring->next_to_clean; 3368 struct i40e_tx_buffer *tx_buf; 3369 struct i40e_tx_desc *tx_desc; 3370 3371 tx_buf = &tx_ring->tx_bi[i]; 3372 tx_desc = I40E_TX_DESC(tx_ring, i); 3373 i -= tx_ring->count; 3374 3375 do { 3376 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 3377 3378 /* if next_to_watch is not set then there is no work pending */ 3379 if (!eop_desc) 3380 break; 3381 3382 /* prevent any other reads prior to eop_desc */ 3383 read_barrier_depends(); 3384 3385 /* if the descriptor isn't done, no work yet to do */ 3386 if (!(eop_desc->cmd_type_offset_bsz & 3387 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 3388 break; 3389 3390 /* clear next_to_watch to prevent false hangs */ 3391 tx_buf->next_to_watch = NULL; 3392 3393 tx_desc->buffer_addr = 0; 3394 tx_desc->cmd_type_offset_bsz = 0; 3395 /* move past filter desc */ 3396 tx_buf++; 3397 tx_desc++; 3398 i++; 3399 if (unlikely(!i)) { 3400 i -= tx_ring->count; 3401 tx_buf = tx_ring->tx_bi; 3402 tx_desc = I40E_TX_DESC(tx_ring, 0); 3403 } 3404 /* unmap skb header data */ 3405 dma_unmap_single(tx_ring->dev, 3406 dma_unmap_addr(tx_buf, dma), 3407 dma_unmap_len(tx_buf, len), 3408 DMA_TO_DEVICE); 3409 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) 3410 kfree(tx_buf->raw_buf); 3411 3412 tx_buf->raw_buf = NULL; 3413 tx_buf->tx_flags = 0; 3414 tx_buf->next_to_watch = NULL; 3415 dma_unmap_len_set(tx_buf, len, 0); 3416 tx_desc->buffer_addr = 0; 3417 tx_desc->cmd_type_offset_bsz = 0; 3418 3419 /* move us past the eop_desc for start of next FD desc */ 3420 tx_buf++; 3421 tx_desc++; 3422 i++; 3423 if (unlikely(!i)) { 3424 i -= tx_ring->count; 3425 tx_buf = tx_ring->tx_bi; 3426 tx_desc = I40E_TX_DESC(tx_ring, 0); 3427 } 3428 3429 /* update budget accounting */ 3430 budget--; 3431 } while (likely(budget)); 3432 3433 i += tx_ring->count; 3434 tx_ring->next_to_clean = i; 3435 3436 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 3437 i40e_irq_dynamic_enable(vsi, 3438 tx_ring->q_vector->v_idx + vsi->base_vector); 3439 } 3440 return budget > 0; 3441 } 3442 3443 /** 3444 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3445 * @irq: interrupt number 3446 * @data: pointer to a q_vector 3447 **/ 3448 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3449 { 3450 struct i40e_q_vector *q_vector = data; 3451 struct i40e_vsi *vsi; 3452 3453 if (!q_vector->tx.ring) 3454 return IRQ_HANDLED; 3455 3456 vsi = q_vector->tx.ring->vsi; 3457 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3458 3459 return IRQ_HANDLED; 3460 } 3461 3462 /** 3463 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 3464 * @vsi: the VSI being configured 3465 * @v_idx: vector index 3466 * @qp_idx: queue pair index 3467 **/ 3468 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3469 { 3470 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3471 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3472 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3473 3474 tx_ring->q_vector = q_vector; 3475 tx_ring->next = q_vector->tx.ring; 3476 q_vector->tx.ring = tx_ring; 3477 q_vector->tx.count++; 3478 3479 rx_ring->q_vector = q_vector; 3480 rx_ring->next = q_vector->rx.ring; 3481 q_vector->rx.ring = rx_ring; 3482 q_vector->rx.count++; 3483 } 3484 3485 /** 3486 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3487 * @vsi: the VSI being 
configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}

/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	pf->flags |= I40E_FLAG_IN_NETPOLL;
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* i40e_intr() expects the PF as its dev_id, matching what
		 * i40e_vsi_request_irq() registered above
		 */
		i40e_intr(pf->pdev->irq, pf);
	}
	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
}
#endif

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
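 * The wait is bounded: at most I40E_QUEUE_WAIT_RETRY_LIMIT polls of the
 * QENA_STAT bit with a 10-20 usec sleep between attempts.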
3596 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3597 * multiple retries; else will return 0 in case of success. 3598 **/ 3599 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) 3600 { 3601 int i; 3602 u32 tx_reg; 3603 3604 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { 3605 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); 3606 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3607 break; 3608 3609 usleep_range(10, 20); 3610 } 3611 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) 3612 return -ETIMEDOUT; 3613 3614 return 0; 3615 } 3616 3617 /** 3618 * i40e_vsi_control_tx - Start or stop a VSI's rings 3619 * @vsi: the VSI being configured 3620 * @enable: start or stop the rings 3621 **/ 3622 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3623 { 3624 struct i40e_pf *pf = vsi->back; 3625 struct i40e_hw *hw = &pf->hw; 3626 int i, j, pf_q, ret = 0; 3627 u32 tx_reg; 3628 3629 pf_q = vsi->base_queue; 3630 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3631 3632 /* warn the TX unit of coming changes */ 3633 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); 3634 if (!enable) 3635 usleep_range(10, 20); 3636 3637 for (j = 0; j < 50; j++) { 3638 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3639 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3640 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3641 break; 3642 usleep_range(1000, 2000); 3643 } 3644 /* Skip if the queue is already in the requested state */ 3645 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3646 continue; 3647 3648 /* turn on/off the queue */ 3649 if (enable) { 3650 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3651 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3652 } else { 3653 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3654 } 3655 3656 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3657 /* No waiting for the Tx queue to disable */ 3658 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) 3659 continue; 3660 3661 /* wait for the change to finish */ 3662 ret = i40e_pf_txq_wait(pf, pf_q, enable); 3663 if (ret) { 3664 dev_info(&pf->pdev->dev, 3665 "%s: VSI seid %d Tx ring %d %sable timeout\n", 3666 __func__, vsi->seid, pf_q, 3667 (enable ? "en" : "dis")); 3668 break; 3669 } 3670 } 3671 3672 if (hw->revision_id == 0) 3673 mdelay(50); 3674 return ret; 3675 } 3676 3677 /** 3678 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 3679 * @pf: the PF being configured 3680 * @pf_q: the PF queue 3681 * @enable: enable or disable state of the queue 3682 * 3683 * This routine will wait for the given Rx queue of the PF to reach the 3684 * enabled or disabled state. 3685 * Returns -ETIMEDOUT in case of failing to reach the requested state after 3686 * multiple retries; else will return 0 in case of success. 
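 * As with i40e_pf_txq_wait() above, the hardware acknowledges a
 * QENA_REQ change by updating QENA_STAT, which is the bit polled here.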
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "%s: VSI seid %d Rx ring %d %sable timeout\n",
				 __func__, vsi->seid, pf_q,
				 (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}

/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: start (true) or stop (false) the rings
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	if (request) {
		ret = i40e_vsi_control_rx(vsi, request);
		if (ret)
			return ret;
		ret = i40e_vsi_control_tx(vsi, request);
	} else {
		/* Ignore return value, we need to shutdown whatever we can */
		i40e_vsi_control_tx(vsi, request);
		i40e_vsi_control_rx(vsi, request);
	}

	return ret;
}

/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.
To clear the 3815 * link list, stick the EOL value into the 3816 * next_q field of the registers. 3817 */ 3818 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3819 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3820 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3821 val |= I40E_QUEUE_END_OF_LIST 3822 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3823 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3824 3825 while (qp != I40E_QUEUE_END_OF_LIST) { 3826 u32 next; 3827 3828 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3829 3830 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3831 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3832 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3833 I40E_QINT_RQCTL_INTEVENT_MASK); 3834 3835 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3836 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3837 3838 wr32(hw, I40E_QINT_RQCTL(qp), val); 3839 3840 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3841 3842 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 3843 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 3844 3845 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3846 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3847 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3848 I40E_QINT_TQCTL_INTEVENT_MASK); 3849 3850 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3851 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3852 3853 wr32(hw, I40E_QINT_TQCTL(qp), val); 3854 qp = next; 3855 } 3856 } 3857 } else { 3858 free_irq(pf->pdev->irq, pf); 3859 3860 val = rd32(hw, I40E_PFINT_LNKLST0); 3861 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3862 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3863 val |= I40E_QUEUE_END_OF_LIST 3864 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 3865 wr32(hw, I40E_PFINT_LNKLST0, val); 3866 3867 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3868 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3869 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3870 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3871 I40E_QINT_RQCTL_INTEVENT_MASK); 3872 3873 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3874 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3875 3876 wr32(hw, I40E_QINT_RQCTL(qp), val); 3877 3878 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3879 3880 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3881 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3882 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3883 I40E_QINT_TQCTL_INTEVENT_MASK); 3884 3885 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3886 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3887 3888 wr32(hw, I40E_QINT_TQCTL(qp), val); 3889 } 3890 } 3891 3892 /** 3893 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 3894 * @vsi: the VSI being configured 3895 * @v_idx: Index of vector to be freed 3896 * 3897 * This function frees the memory allocated to the q_vector. In addition if 3898 * NAPI is enabled it will delete any references to the NAPI struct prior 3899 * to freeing the q_vector. 3900 **/ 3901 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 3902 { 3903 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3904 struct i40e_ring *ring; 3905 3906 if (!q_vector) 3907 return; 3908 3909 /* disassociate q_vector from rings */ 3910 i40e_for_each_ring(ring, q_vector->tx) 3911 ring->q_vector = NULL; 3912 3913 i40e_for_each_ring(ring, q_vector->rx) 3914 ring->q_vector = NULL; 3915 3916 /* only VSI w/ an associated netdev is set up w/ NAPI */ 3917 if (vsi->netdev) 3918 netif_napi_del(&q_vector->napi); 3919 3920 vsi->q_vectors[v_idx] = NULL; 3921 3922 kfree_rcu(q_vector, rcu); 3923 } 3924 3925 /** 3926 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3927 * @vsi: the VSI being un-configured 3928 * 3929 * This frees the memory allocated to the q_vectors and 3930 * deletes references to the NAPI struct. 
3931 **/ 3932 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 3933 { 3934 int v_idx; 3935 3936 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 3937 i40e_free_q_vector(vsi, v_idx); 3938 } 3939 3940 /** 3941 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 3942 * @pf: board private structure 3943 **/ 3944 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 3945 { 3946 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 3947 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3948 pci_disable_msix(pf->pdev); 3949 kfree(pf->msix_entries); 3950 pf->msix_entries = NULL; 3951 kfree(pf->irq_pile); 3952 pf->irq_pile = NULL; 3953 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 3954 pci_disable_msi(pf->pdev); 3955 } 3956 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 3957 } 3958 3959 /** 3960 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 3961 * @pf: board private structure 3962 * 3963 * We go through and clear interrupt specific resources and reset the structure 3964 * to pre-load conditions 3965 **/ 3966 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 3967 { 3968 int i; 3969 3970 i40e_stop_misc_vector(pf); 3971 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3972 synchronize_irq(pf->msix_entries[0].vector); 3973 free_irq(pf->msix_entries[0].vector, pf); 3974 } 3975 3976 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3977 for (i = 0; i < pf->num_alloc_vsi; i++) 3978 if (pf->vsi[i]) 3979 i40e_vsi_free_q_vectors(pf->vsi[i]); 3980 i40e_reset_interrupt_capability(pf); 3981 } 3982 3983 /** 3984 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 3985 * @vsi: the VSI being configured 3986 **/ 3987 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 3988 { 3989 int q_idx; 3990 3991 if (!vsi->netdev) 3992 return; 3993 3994 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3995 napi_enable(&vsi->q_vectors[q_idx]->napi); 3996 } 3997 3998 /** 3999 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4000 * @vsi: the VSI being configured 4001 **/ 4002 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 4003 { 4004 int q_idx; 4005 4006 if (!vsi->netdev) 4007 return; 4008 4009 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4010 napi_disable(&vsi->q_vectors[q_idx]->napi); 4011 } 4012 4013 /** 4014 * i40e_vsi_close - Shut down a VSI 4015 * @vsi: the vsi to be quelled 4016 **/ 4017 static void i40e_vsi_close(struct i40e_vsi *vsi) 4018 { 4019 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 4020 i40e_down(vsi); 4021 i40e_vsi_free_irq(vsi); 4022 i40e_vsi_free_tx_resources(vsi); 4023 i40e_vsi_free_rx_resources(vsi); 4024 vsi->current_netdev_flags = 0; 4025 } 4026 4027 /** 4028 * i40e_quiesce_vsi - Pause a given VSI 4029 * @vsi: the VSI being paused 4030 **/ 4031 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 4032 { 4033 if (test_bit(__I40E_DOWN, &vsi->state)) 4034 return; 4035 4036 /* No need to disable FCoE VSI when Tx suspended */ 4037 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && 4038 vsi->type == I40E_VSI_FCOE) { 4039 dev_dbg(&vsi->back->pdev->dev, 4040 "%s: VSI seid %d skipping FCoE VSI disable\n", 4041 __func__, vsi->seid); 4042 return; 4043 } 4044 4045 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 4046 if (vsi->netdev && netif_running(vsi->netdev)) { 4047 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 4048 } else { 4049 i40e_vsi_close(vsi); 4050 } 4051 } 4052 4053 /** 4054 * i40e_unquiesce_vsi - Resume a given VSI 4055 * @vsi: the VSI 
being resumed 4056 **/ 4057 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 4058 { 4059 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 4060 return; 4061 4062 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 4063 if (vsi->netdev && netif_running(vsi->netdev)) 4064 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 4065 else 4066 i40e_vsi_open(vsi); /* this clears the DOWN bit */ 4067 } 4068 4069 /** 4070 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 4071 * @pf: the PF 4072 **/ 4073 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 4074 { 4075 int v; 4076 4077 for (v = 0; v < pf->num_alloc_vsi; v++) { 4078 if (pf->vsi[v]) 4079 i40e_quiesce_vsi(pf->vsi[v]); 4080 } 4081 } 4082 4083 /** 4084 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 4085 * @pf: the PF 4086 **/ 4087 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 4088 { 4089 int v; 4090 4091 for (v = 0; v < pf->num_alloc_vsi; v++) { 4092 if (pf->vsi[v]) 4093 i40e_unquiesce_vsi(pf->vsi[v]); 4094 } 4095 } 4096 4097 #ifdef CONFIG_I40E_DCB 4098 /** 4099 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled 4100 * @vsi: the VSI being configured 4101 * 4102 * This function waits for the given VSI's Tx queues to be disabled. 4103 **/ 4104 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) 4105 { 4106 struct i40e_pf *pf = vsi->back; 4107 int i, pf_q, ret; 4108 4109 pf_q = vsi->base_queue; 4110 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4111 /* Check and wait for the disable status of the queue */ 4112 ret = i40e_pf_txq_wait(pf, pf_q, false); 4113 if (ret) { 4114 dev_info(&pf->pdev->dev, 4115 "%s: VSI seid %d Tx ring %d disable timeout\n", 4116 __func__, vsi->seid, pf_q); 4117 return ret; 4118 } 4119 } 4120 4121 return 0; 4122 } 4123 4124 /** 4125 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled 4126 * @pf: the PF 4127 * 4128 * This function waits for the Tx queues to be in disabled state for all the 4129 * VSIs that are managed by this PF. 4130 **/ 4131 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) 4132 { 4133 int v, ret = 0; 4134 4135 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4136 /* No need to wait for FCoE VSI queues */ 4137 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { 4138 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); 4139 if (ret) 4140 break; 4141 } 4142 } 4143 4144 return ret; 4145 } 4146 4147 #endif 4148 /** 4149 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP 4150 * @pf: pointer to PF 4151 * 4152 * Get TC map for ISCSI PF type that will include iSCSI TC 4153 * and LAN TC. 
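 *
 * Worked example (values assumed for illustration): if the APP table
 * maps the iSCSI protocol ID to user priority 4 and the ETS priority
 * table assigns priority 4 to TC1, the function returns 0x3, i.e.
 * BIT(1) for the iSCSI TC OR'ed with the always-enabled TC0.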
4154 **/ 4155 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) 4156 { 4157 struct i40e_dcb_app_priority_table app; 4158 struct i40e_hw *hw = &pf->hw; 4159 u8 enabled_tc = 1; /* TC0 is always enabled */ 4160 u8 tc, i; 4161 /* Get the iSCSI APP TLV */ 4162 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4163 4164 for (i = 0; i < dcbcfg->numapps; i++) { 4165 app = dcbcfg->app[i]; 4166 if (app.selector == I40E_APP_SEL_TCPIP && 4167 app.protocolid == I40E_APP_PROTOID_ISCSI) { 4168 tc = dcbcfg->etscfg.prioritytable[app.priority]; 4169 enabled_tc |= BIT_ULL(tc); 4170 break; 4171 } 4172 } 4173 4174 return enabled_tc; 4175 } 4176 4177 /** 4178 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 4179 * @dcbcfg: the corresponding DCBx configuration structure 4180 * 4181 * Return the number of TCs from given DCBx configuration 4182 **/ 4183 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 4184 { 4185 u8 num_tc = 0; 4186 int i; 4187 4188 /* Scan the ETS Config Priority Table to find 4189 * traffic class enabled for a given priority 4190 * and use the traffic class index to get the 4191 * number of traffic classes enabled 4192 */ 4193 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 4194 if (dcbcfg->etscfg.prioritytable[i] > num_tc) 4195 num_tc = dcbcfg->etscfg.prioritytable[i]; 4196 } 4197 4198 /* Traffic class index starts from zero so 4199 * increment to return the actual count 4200 */ 4201 return num_tc + 1; 4202 } 4203 4204 /** 4205 * i40e_dcb_get_enabled_tc - Get enabled traffic classes 4206 * @dcbcfg: the corresponding DCBx configuration structure 4207 * 4208 * Query the current DCB configuration and return the number of 4209 * traffic classes enabled from the given DCBX config 4210 **/ 4211 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) 4212 { 4213 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); 4214 u8 enabled_tc = 1; 4215 u8 i; 4216 4217 for (i = 0; i < num_tc; i++) 4218 enabled_tc |= BIT(i); 4219 4220 return enabled_tc; 4221 } 4222 4223 /** 4224 * i40e_pf_get_num_tc - Get enabled traffic classes for PF 4225 * @pf: PF being queried 4226 * 4227 * Return number of traffic classes enabled for the given PF 4228 **/ 4229 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) 4230 { 4231 struct i40e_hw *hw = &pf->hw; 4232 u8 i, enabled_tc; 4233 u8 num_tc = 0; 4234 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4235 4236 /* If DCB is not enabled then always in single TC */ 4237 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4238 return 1; 4239 4240 /* SFP mode will be enabled for all TCs on port */ 4241 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 4242 return i40e_dcb_get_num_tc(dcbcfg); 4243 4244 /* MFP mode return count of enabled TCs for this PF */ 4245 if (pf->hw.func_caps.iscsi) 4246 enabled_tc = i40e_get_iscsi_tc_map(pf); 4247 else 4248 return 1; /* Only TC0 */ 4249 4250 /* At least have TC0 */ 4251 enabled_tc = (enabled_tc ? enabled_tc : 0x1); 4252 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4253 if (enabled_tc & BIT_ULL(i)) 4254 num_tc++; 4255 } 4256 return num_tc; 4257 } 4258 4259 /** 4260 * i40e_pf_get_default_tc - Get bitmap for first enabled TC 4261 * @pf: PF being queried 4262 * 4263 * Return a bitmap for first enabled traffic class for this PF. 
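 *
 * Example (illustrative): enabled_tcmap = 0x6 (TC1 and TC2) makes the
 * first enabled TC be TC1, so BIT(1) == 0x2 is returned; an empty
 * capability map falls back to 0x1 (TC0 only).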
4264 **/ 4265 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) 4266 { 4267 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; 4268 u8 i = 0; 4269 4270 if (!enabled_tc) 4271 return 0x1; /* TC0 */ 4272 4273 /* Find the first enabled TC */ 4274 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4275 if (enabled_tc & BIT_ULL(i)) 4276 break; 4277 } 4278 4279 return BIT(i); 4280 } 4281 4282 /** 4283 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes 4284 * @pf: PF being queried 4285 * 4286 * Return a bitmap for enabled traffic classes for this PF. 4287 **/ 4288 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) 4289 { 4290 /* If DCB is not enabled for this PF then just return default TC */ 4291 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4292 return i40e_pf_get_default_tc(pf); 4293 4294 /* In SFP mode we want the PF to be enabled for all TCs */ 4295 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 4296 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); 4297 4298 /* MFP enabled and iSCSI PF type */ 4299 if (pf->hw.func_caps.iscsi) 4300 return i40e_get_iscsi_tc_map(pf); 4301 else 4302 return i40e_pf_get_default_tc(pf); 4303 } 4304 4305 /** 4306 * i40e_vsi_get_bw_info - Query VSI BW Information 4307 * @vsi: the VSI being queried 4308 * 4309 * Returns 0 on success, negative value on failure 4310 **/ 4311 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) 4312 { 4313 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; 4314 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 4315 struct i40e_pf *pf = vsi->back; 4316 struct i40e_hw *hw = &pf->hw; 4317 i40e_status ret; 4318 u32 tc_bw_max; 4319 int i; 4320 4321 /* Get the VSI level BW configuration */ 4322 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 4323 if (ret) { 4324 dev_info(&pf->pdev->dev, 4325 "couldn't get PF vsi bw config, err %s aq_err %s\n", 4326 i40e_stat_str(&pf->hw, ret), 4327 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 4328 return -EINVAL; 4329 } 4330 4331 /* Get the VSI level BW configuration per TC */ 4332 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, 4333 NULL); 4334 if (ret) { 4335 dev_info(&pf->pdev->dev, 4336 "couldn't get PF vsi ets bw config, err %s aq_err %s\n", 4337 i40e_stat_str(&pf->hw, ret), 4338 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 4339 return -EINVAL; 4340 } 4341 4342 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 4343 dev_info(&pf->pdev->dev, 4344 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", 4345 bw_config.tc_valid_bits, 4346 bw_ets_config.tc_valid_bits); 4347 /* Still continuing */ 4348 } 4349 4350 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); 4351 vsi->bw_max_quanta = bw_config.max_bw; 4352 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | 4353 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); 4354 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4355 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; 4356 vsi->bw_ets_limit_credits[i] = 4357 le16_to_cpu(bw_ets_config.credits[i]); 4358 /* 3 bits out of 4 for each TC */ 4359 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7); 4360 } 4361 4362 return 0; 4363 } 4364 4365 /** 4366 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC 4367 * @vsi: the VSI being configured 4368 * @enabled_tc: TC bitmap 4369 * @bw_share: BW shared credits per TC 4370 * 4371 * Returns 0 on success, negative value on failure 4372 **/ 4373 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, 4374 u8
*bw_share) 4375 { 4376 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 4377 i40e_status ret; 4378 int i; 4379 4380 bw_data.tc_valid_bits = enabled_tc; 4381 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 4382 bw_data.tc_bw_credits[i] = bw_share[i]; 4383 4384 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, 4385 NULL); 4386 if (ret) { 4387 dev_info(&vsi->back->pdev->dev, 4388 "AQ command Config VSI BW allocation per TC failed = %d\n", 4389 vsi->back->hw.aq.asq_last_status); 4390 return -EINVAL; 4391 } 4392 4393 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 4394 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 4395 4396 return 0; 4397 } 4398 4399 /** 4400 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration 4401 * @vsi: the VSI being configured 4402 * @enabled_tc: TC map to be enabled 4403 * 4404 **/ 4405 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) 4406 { 4407 struct net_device *netdev = vsi->netdev; 4408 struct i40e_pf *pf = vsi->back; 4409 struct i40e_hw *hw = &pf->hw; 4410 u8 netdev_tc = 0; 4411 int i; 4412 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 4413 4414 if (!netdev) 4415 return; 4416 4417 if (!enabled_tc) { 4418 netdev_reset_tc(netdev); 4419 return; 4420 } 4421 4422 /* Set up actual enabled TCs on the VSI */ 4423 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) 4424 return; 4425 4426 /* set per TC queues for the VSI */ 4427 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4428 /* Only set TC queues for enabled tcs 4429 * 4430 * e.g. For a VSI that has TC0 and TC3 enabled the 4431 * enabled_tc bitmap would be 0x9 (binary 1001); the driver 4432 * will set numtc for the netdev to 2, and the two TCs will be 4433 * referenced by the netdev layer as TC 0 and 1. 4434 */ 4435 if (vsi->tc_config.enabled_tc & BIT_ULL(i)) 4436 netdev_set_tc_queue(netdev, 4437 vsi->tc_config.tc_info[i].netdev_tc, 4438 vsi->tc_config.tc_info[i].qcount, 4439 vsi->tc_config.tc_info[i].qoffset); 4440 } 4441 4442 /* Assign UP2TC map for the VSI */ 4443 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 4444 /* Get the actual TC# for the UP */ 4445 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; 4446 /* Get the mapped netdev TC# for the UP */ 4447 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; 4448 netdev_set_prio_tc_map(netdev, i, netdev_tc); 4449 } 4450 } 4451 4452 /** 4453 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map 4454 * @vsi: the VSI being configured 4455 * @ctxt: the ctxt buffer returned from AQ VSI update param command 4456 **/ 4457 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, 4458 struct i40e_vsi_context *ctxt) 4459 { 4460 /* copy just the sections touched not the entire info 4461 * since not all sections are valid as returned by 4462 * update vsi params 4463 */ 4464 vsi->info.mapping_flags = ctxt->info.mapping_flags; 4465 memcpy(&vsi->info.queue_mapping, 4466 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); 4467 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, 4468 sizeof(vsi->info.tc_mapping)); 4469 } 4470 4471 /** 4472 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map 4473 * @vsi: VSI to be configured 4474 * @enabled_tc: TC bitmap 4475 * 4476 * This configures a particular VSI for TCs that are mapped to the 4477 * given TC bitmap. It uses default bandwidth share for TCs across 4478 * VSIs to configure TC for a particular VSI. 4479 * 4480 * NOTE: 4481 * It is expected that the VSI queues have been quiesced before calling 4482 * this function.
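 *
 * Example (illustrative): enabled_tc = 0x3 (TC0 and TC1) results in
 * bw_share = {1, 1, 0, ...}, i.e. both TCs are given equal relative
 * bandwidth credits by i40e_vsi_configure_bw_alloc().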
4483 **/ 4484 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) 4485 { 4486 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; 4487 struct i40e_vsi_context ctxt; 4488 int ret = 0; 4489 int i; 4490 4491 /* Check if enabled_tc is same as existing or new TCs */ 4492 if (vsi->tc_config.enabled_tc == enabled_tc) 4493 return ret; 4494 4495 /* Enable ETS TCs with equal BW Share for now across all VSIs */ 4496 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4497 if (enabled_tc & BIT_ULL(i)) 4498 bw_share[i] = 1; 4499 } 4500 4501 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); 4502 if (ret) { 4503 dev_info(&vsi->back->pdev->dev, 4504 "Failed configuring TC map %d for VSI %d\n", 4505 enabled_tc, vsi->seid); 4506 goto out; 4507 } 4508 4509 /* Update Queue Pairs Mapping for currently enabled UPs */ 4510 ctxt.seid = vsi->seid; 4511 ctxt.pf_num = vsi->back->hw.pf_id; 4512 ctxt.vf_num = 0; 4513 ctxt.uplink_seid = vsi->uplink_seid; 4514 ctxt.info = vsi->info; 4515 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 4516 4517 /* Update the VSI after updating the VSI queue-mapping information */ 4518 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 4519 if (ret) { 4520 dev_info(&vsi->back->pdev->dev, 4521 "Update vsi tc config failed, err %s aq_err %s\n", 4522 i40e_stat_str(&vsi->back->hw, ret), 4523 i40e_aq_str(&vsi->back->hw, 4524 vsi->back->hw.aq.asq_last_status)); 4525 goto out; 4526 } 4527 /* update the local VSI info with updated queue map */ 4528 i40e_vsi_update_queue_map(vsi, &ctxt); 4529 vsi->info.valid_sections = 0; 4530 4531 /* Update current VSI BW information */ 4532 ret = i40e_vsi_get_bw_info(vsi); 4533 if (ret) { 4534 dev_info(&vsi->back->pdev->dev, 4535 "Failed updating vsi bw info, err %s aq_err %s\n", 4536 i40e_stat_str(&vsi->back->hw, ret), 4537 i40e_aq_str(&vsi->back->hw, 4538 vsi->back->hw.aq.asq_last_status)); 4539 goto out; 4540 } 4541 4542 /* Update the netdev TC setup */ 4543 i40e_vsi_config_netdev_tc(vsi, enabled_tc); 4544 out: 4545 return ret; 4546 } 4547 4548 /** 4549 * i40e_veb_config_tc - Configure TCs for given VEB 4550 * @veb: given VEB 4551 * @enabled_tc: TC bitmap 4552 * 4553 * Configures given TC bitmap for VEB (switching) element 4554 **/ 4555 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) 4556 { 4557 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; 4558 struct i40e_pf *pf = veb->pf; 4559 int ret = 0; 4560 int i; 4561 4562 /* No TCs or already enabled TCs just return */ 4563 if (!enabled_tc || veb->enabled_tc == enabled_tc) 4564 return ret; 4565 4566 bw_data.tc_valid_bits = enabled_tc; 4567 /* bw_data.absolute_credits is not set (relative) */ 4568 4569 /* Enable ETS TCs with equal BW Share for now */ 4570 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 4571 if (enabled_tc & BIT_ULL(i)) 4572 bw_data.tc_bw_share_credits[i] = 1; 4573 } 4574 4575 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, 4576 &bw_data, NULL); 4577 if (ret) { 4578 dev_info(&pf->pdev->dev, 4579 "VEB bw config failed, err %s aq_err %s\n", 4580 i40e_stat_str(&pf->hw, ret), 4581 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 4582 goto out; 4583 } 4584 4585 /* Update the BW information */ 4586 ret = i40e_veb_get_bw_info(veb); 4587 if (ret) { 4588 dev_info(&pf->pdev->dev, 4589 "Failed getting veb bw config, err %s aq_err %s\n", 4590 i40e_stat_str(&pf->hw, ret), 4591 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 4592 } 4593 4594 out: 4595 return ret; 4596 } 4597 4598 #ifdef CONFIG_I40E_DCB 4599 /** 4600 * 
i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs 4601 * @pf: PF struct 4602 * 4603 * Reconfigure VEB/VSIs on a given PF; it is assumed that 4604 * the caller has quiesced all the VSIs before calling 4605 * this function 4606 **/ 4607 static void i40e_dcb_reconfigure(struct i40e_pf *pf) 4608 { 4609 u8 tc_map = 0; 4610 int ret; 4611 u8 v; 4612 4613 /* Enable the TCs available on PF to all VEBs */ 4614 tc_map = i40e_pf_get_tc_map(pf); 4615 for (v = 0; v < I40E_MAX_VEB; v++) { 4616 if (!pf->veb[v]) 4617 continue; 4618 ret = i40e_veb_config_tc(pf->veb[v], tc_map); 4619 if (ret) { 4620 dev_info(&pf->pdev->dev, 4621 "Failed configuring TC for VEB seid=%d\n", 4622 pf->veb[v]->seid); 4623 /* Will try to configure as many components as possible */ 4624 } 4625 } 4626 4627 /* Update each VSI */ 4628 for (v = 0; v < pf->num_alloc_vsi; v++) { 4629 if (!pf->vsi[v]) 4630 continue; 4631 4632 /* - Enable all TCs for the LAN VSI 4633 #ifdef I40E_FCOE 4634 * - For FCoE VSI only enable the TC configured 4635 * as per the APP TLV 4636 #endif 4637 * - For all others keep them at TC0 for now 4638 */ 4639 if (v == pf->lan_vsi) 4640 tc_map = i40e_pf_get_tc_map(pf); 4641 else 4642 tc_map = i40e_pf_get_default_tc(pf); 4643 #ifdef I40E_FCOE 4644 if (pf->vsi[v]->type == I40E_VSI_FCOE) 4645 tc_map = i40e_get_fcoe_tc_map(pf); 4646 #endif /* #ifdef I40E_FCOE */ 4647 4648 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); 4649 if (ret) { 4650 dev_info(&pf->pdev->dev, 4651 "Failed configuring TC for VSI seid=%d\n", 4652 pf->vsi[v]->seid); 4653 /* Will try to configure as many components as possible */ 4654 } else { 4655 /* Re-configure VSI vectors based on updated TC map */ 4656 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); 4657 if (pf->vsi[v]->netdev) 4658 i40e_dcbnl_set_all(pf->vsi[v]); 4659 } 4660 } 4661 } 4662 4663 /** 4664 * i40e_resume_port_tx - Resume port Tx 4665 * @pf: PF struct 4666 * 4667 * Resume a port's Tx and issue a PF reset in case of failure to 4668 * resume.
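 *
 * Note that no retry is attempted on failure; the function requests a
 * PF reset (__I40E_PF_RESET_REQUESTED) and schedules the service task,
 * and the subsequent reset is what actually recovers the port.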
4669 **/ 4670 static int i40e_resume_port_tx(struct i40e_pf *pf) 4671 { 4672 struct i40e_hw *hw = &pf->hw; 4673 int ret; 4674 4675 ret = i40e_aq_resume_port_tx(hw, NULL); 4676 if (ret) { 4677 dev_info(&pf->pdev->dev, 4678 "Resume Port Tx failed, err %s aq_err %s\n", 4679 i40e_stat_str(&pf->hw, ret), 4680 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 4681 /* Schedule PF reset to recover */ 4682 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 4683 i40e_service_event_schedule(pf); 4684 } 4685 4686 return ret; 4687 } 4688 4689 /** 4690 * i40e_init_pf_dcb - Initialize DCB configuration 4691 * @pf: PF being configured 4692 * 4693 * Query the current DCB configuration and cache it 4694 * in the hardware structure 4695 **/ 4696 static int i40e_init_pf_dcb(struct i40e_pf *pf) 4697 { 4698 struct i40e_hw *hw = &pf->hw; 4699 int err = 0; 4700 4701 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ 4702 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 4703 (pf->hw.aq.fw_maj_ver < 4)) 4704 goto out; 4705 4706 /* Get the initial DCB configuration */ 4707 err = i40e_init_dcb(hw); 4708 if (!err) { 4709 /* Device/Function is not DCBX capable */ 4710 if ((!hw->func_caps.dcb) || 4711 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { 4712 dev_info(&pf->pdev->dev, 4713 "DCBX offload is not supported or is disabled for this PF.\n"); 4714 4715 if (pf->flags & I40E_FLAG_MFP_ENABLED) 4716 goto out; 4717 4718 } else { 4719 /* When status is not DISABLED then DCBX in FW */ 4720 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | 4721 DCB_CAP_DCBX_VER_IEEE; 4722 4723 pf->flags |= I40E_FLAG_DCB_CAPABLE; 4724 /* Enable DCB tagging only when more than one TC */ 4725 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 4726 pf->flags |= I40E_FLAG_DCB_ENABLED; 4727 dev_dbg(&pf->pdev->dev, 4728 "DCBX offload is supported for this PF.\n"); 4729 } 4730 } else { 4731 dev_info(&pf->pdev->dev, 4732 "Query for DCB configuration failed, err %s aq_err %s\n", 4733 i40e_stat_str(&pf->hw, err), 4734 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 4735 } 4736 4737 out: 4738 return err; 4739 } 4740 #endif /* CONFIG_I40E_DCB */ 4741 #define SPEED_SIZE 14 4742 #define FC_SIZE 8 4743 /** 4744 * i40e_print_link_message - print link up or down 4745 * @vsi: the VSI for which link needs a message * @isup: true if the link is up, false otherwise 4746 **/ 4747 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) 4748 { 4749 char speed[SPEED_SIZE] = "Unknown"; 4750 char fc[FC_SIZE] = "RX/TX"; 4751 4752 if (!isup) { 4753 netdev_info(vsi->netdev, "NIC Link is Down\n"); 4754 return; 4755 } 4756 4757 /* Warn user if link speed on NPAR enabled partition is not at 4758 * least 10Gbps 4759 */ 4760 if (vsi->back->hw.func_caps.npar_enable && 4761 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || 4762 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) 4763 netdev_warn(vsi->netdev, 4764 "The partition detected link speed that is less than 10Gbps\n"); 4765 4766 switch (vsi->back->hw.phy.link_info.link_speed) { 4767 case I40E_LINK_SPEED_40GB: 4768 strlcpy(speed, "40 Gbps", SPEED_SIZE); 4769 break; 4770 case I40E_LINK_SPEED_20GB: 4771 strlcpy(speed, "20 Gbps", SPEED_SIZE); 4772 break; 4773 case I40E_LINK_SPEED_10GB: 4774 strlcpy(speed, "10 Gbps", SPEED_SIZE); 4775 break; 4776 case I40E_LINK_SPEED_1GB: 4777 strlcpy(speed, "1000 Mbps", SPEED_SIZE); 4778 break; 4779 case I40E_LINK_SPEED_100MB: 4780 strlcpy(speed, "100 Mbps", SPEED_SIZE); 4781 break; 4782 default: 4783 break; 4784 } 4785 4786 switch (vsi->back->hw.fc.current_mode) {
4787 case I40E_FC_FULL: 4788 strlcpy(fc, "RX/TX", FC_SIZE); 4789 break; 4790 case I40E_FC_TX_PAUSE: 4791 strlcpy(fc, "TX", FC_SIZE); 4792 break; 4793 case I40E_FC_RX_PAUSE: 4794 strlcpy(fc, "RX", FC_SIZE); 4795 break; 4796 default: 4797 strlcpy(fc, "None", FC_SIZE); 4798 break; 4799 } 4800 4801 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n", 4802 speed, fc); 4803 } 4804 4805 /** 4806 * i40e_up_complete - Finish the last steps of bringing up a connection 4807 * @vsi: the VSI being configured 4808 **/ 4809 static int i40e_up_complete(struct i40e_vsi *vsi) 4810 { 4811 struct i40e_pf *pf = vsi->back; 4812 int err; 4813 4814 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 4815 i40e_vsi_configure_msix(vsi); 4816 else 4817 i40e_configure_msi_and_legacy(vsi); 4818 4819 /* start rings */ 4820 err = i40e_vsi_control_rings(vsi, true); 4821 if (err) 4822 return err; 4823 4824 clear_bit(__I40E_DOWN, &vsi->state); 4825 i40e_napi_enable_all(vsi); 4826 i40e_vsi_enable_irq(vsi); 4827 4828 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && 4829 (vsi->netdev)) { 4830 i40e_print_link_message(vsi, true); 4831 netif_tx_start_all_queues(vsi->netdev); 4832 netif_carrier_on(vsi->netdev); 4833 } else if (vsi->netdev) { 4834 i40e_print_link_message(vsi, false); 4835 /* need to check for qualified module here */ 4836 if ((pf->hw.phy.link_info.link_info & 4837 I40E_AQ_MEDIA_AVAILABLE) && 4838 (!(pf->hw.phy.link_info.an_info & 4839 I40E_AQ_QUALIFIED_MODULE))) 4840 netdev_err(vsi->netdev, 4841 "the driver failed to link because an unqualified module was detected.\n"); 4842 } 4843 4844 /* replay FDIR SB filters */ 4845 if (vsi->type == I40E_VSI_FDIR) { 4846 /* reset fd counters */ 4847 pf->fd_add_err = pf->fd_atr_cnt = 0; 4848 if (pf->fd_tcp_rule > 0) { 4849 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 4850 if (I40E_DEBUG_FD & pf->hw.debug_mask) 4851 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); 4852 pf->fd_tcp_rule = 0; 4853 } 4854 i40e_fdir_filter_restore(vsi); 4855 } 4856 i40e_service_event_schedule(pf); 4857 4858 return 0; 4859 } 4860 4861 /** 4862 * i40e_vsi_reinit_locked - Reset the VSI 4863 * @vsi: the VSI being configured 4864 * 4865 * Rebuild the ring structs after some configuration 4866 * has changed, e.g. MTU size. 4867 **/ 4868 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) 4869 { 4870 struct i40e_pf *pf = vsi->back; 4871 4872 WARN_ON(in_interrupt()); 4873 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 4874 usleep_range(1000, 2000); 4875 i40e_down(vsi); 4876 4877 /* Give a VF some time to respond to the reset. The 4878 * two second wait is based upon the watchdog cycle in 4879 * the VF driver. 4880 */ 4881 if (vsi->type == I40E_VSI_SRIOV) 4882 msleep(2000); 4883 i40e_up(vsi); 4884 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 4885 } 4886 4887 /** 4888 * i40e_up - Bring the connection back up after being down 4889 * @vsi: the VSI being configured 4890 **/ 4891 int i40e_up(struct i40e_vsi *vsi) 4892 { 4893 int err; 4894 4895 err = i40e_vsi_configure(vsi); 4896 if (!err) 4897 err = i40e_up_complete(vsi); 4898 4899 return err; 4900 } 4901 4902 /** 4903 * i40e_down - Shutdown the connection processing 4904 * @vsi: the VSI being stopped 4905 **/ 4906 void i40e_down(struct i40e_vsi *vsi) 4907 { 4908 int i; 4909 4910 /* It is assumed that the caller of this function 4911 * sets the vsi->state __I40E_DOWN bit.
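 * (For example, i40e_vsi_close() does so with test_and_set_bit(), and
 * the __I40E_DOWN_REQUESTED path of i40e_do_reset() uses set_bit()
 * before calling i40e_down().)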
4912 */ 4913 if (vsi->netdev) { 4914 netif_carrier_off(vsi->netdev); 4915 netif_tx_disable(vsi->netdev); 4916 } 4917 i40e_vsi_disable_irq(vsi); 4918 i40e_vsi_control_rings(vsi, false); 4919 i40e_napi_disable_all(vsi); 4920 4921 for (i = 0; i < vsi->num_queue_pairs; i++) { 4922 i40e_clean_tx_ring(vsi->tx_rings[i]); 4923 i40e_clean_rx_ring(vsi->rx_rings[i]); 4924 } 4925 } 4926 4927 /** 4928 * i40e_setup_tc - configure multiple traffic classes 4929 * @netdev: net device to configure 4930 * @tc: number of traffic classes to enable 4931 **/ 4932 #ifdef I40E_FCOE 4933 int i40e_setup_tc(struct net_device *netdev, u8 tc) 4934 #else 4935 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 4936 #endif 4937 { 4938 struct i40e_netdev_priv *np = netdev_priv(netdev); 4939 struct i40e_vsi *vsi = np->vsi; 4940 struct i40e_pf *pf = vsi->back; 4941 u8 enabled_tc = 0; 4942 int ret = -EINVAL; 4943 int i; 4944 4945 /* Check if DCB enabled to continue */ 4946 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 4947 netdev_info(netdev, "DCB is not enabled for adapter\n"); 4948 goto exit; 4949 } 4950 4951 /* Check if MFP enabled */ 4952 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4953 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 4954 goto exit; 4955 } 4956 4957 /* Check whether tc count is within enabled limit */ 4958 if (tc > i40e_pf_get_num_tc(pf)) { 4959 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 4960 goto exit; 4961 } 4962 4963 /* Generate TC map for number of tc requested */ 4964 for (i = 0; i < tc; i++) 4965 enabled_tc |= BIT_ULL(i); 4966 4967 /* Requesting same TC configuration as already enabled */ 4968 if (enabled_tc == vsi->tc_config.enabled_tc) 4969 return 0; 4970 4971 /* Quiesce VSI queues */ 4972 i40e_quiesce_vsi(vsi); 4973 4974 /* Configure VSI for enabled TCs */ 4975 ret = i40e_vsi_config_tc(vsi, enabled_tc); 4976 if (ret) { 4977 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 4978 vsi->seid); 4979 goto exit; 4980 } 4981 4982 /* Unquiesce VSI */ 4983 i40e_unquiesce_vsi(vsi); 4984 4985 exit: 4986 return ret; 4987 } 4988 4989 /** 4990 * i40e_open - Called when a network interface is made active 4991 * @netdev: network interface device structure 4992 * 4993 * The open entry point is called when a network interface is made 4994 * active by the system (IFF_UP). At this point all resources needed 4995 * for transmit and receive operations are allocated, the interrupt 4996 * handler is registered with the OS, the netdev watchdog subtask is 4997 * enabled, and the stack is notified that the interface is ready. 
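 *
 * Most of the work is delegated to i40e_vsi_open(); this entry point
 * additionally programs the global TSO mask registers and, when
 * CONFIG_I40E_VXLAN is set, asks the stack to replay its known VXLAN
 * ports via vxlan_get_rx_port().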
4998 * 4999 * Returns 0 on success, negative value on failure 5000 **/ 5001 int i40e_open(struct net_device *netdev) 5002 { 5003 struct i40e_netdev_priv *np = netdev_priv(netdev); 5004 struct i40e_vsi *vsi = np->vsi; 5005 struct i40e_pf *pf = vsi->back; 5006 int err; 5007 5008 /* disallow open during test or if eeprom is broken */ 5009 if (test_bit(__I40E_TESTING, &pf->state) || 5010 test_bit(__I40E_BAD_EEPROM, &pf->state)) 5011 return -EBUSY; 5012 5013 netif_carrier_off(netdev); 5014 5015 err = i40e_vsi_open(vsi); 5016 if (err) 5017 return err; 5018 5019 /* configure global TSO hardware offload settings */ 5020 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | 5021 TCP_FLAG_FIN) >> 16); 5022 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | 5023 TCP_FLAG_FIN | 5024 TCP_FLAG_CWR) >> 16); 5025 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 5026 5027 #ifdef CONFIG_I40E_VXLAN 5028 vxlan_get_rx_port(netdev); 5029 #endif 5030 5031 return 0; 5032 } 5033 5034 /** 5035 * i40e_vsi_open - Open a VSI and bring it up 5036 * @vsi: the VSI to open 5037 * 5038 * Finish initialization of the VSI. 5039 * 5040 * Returns 0 on success, negative value on failure 5041 **/ 5042 int i40e_vsi_open(struct i40e_vsi *vsi) 5043 { 5044 struct i40e_pf *pf = vsi->back; 5045 char int_name[I40E_INT_NAME_STR_LEN]; 5046 int err; 5047 5048 /* allocate descriptors */ 5049 err = i40e_vsi_setup_tx_resources(vsi); 5050 if (err) 5051 goto err_setup_tx; 5052 err = i40e_vsi_setup_rx_resources(vsi); 5053 if (err) 5054 goto err_setup_rx; 5055 5056 err = i40e_vsi_configure(vsi); 5057 if (err) 5058 goto err_setup_rx; 5059 5060 if (vsi->netdev) { 5061 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 5062 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 5063 err = i40e_vsi_request_irq(vsi, int_name); 5064 if (err) 5065 goto err_setup_rx; 5066 5067 /* Notify the stack of the actual queue counts. */ 5068 err = netif_set_real_num_tx_queues(vsi->netdev, 5069 vsi->num_queue_pairs); 5070 if (err) 5071 goto err_set_queues; 5072 5073 err = netif_set_real_num_rx_queues(vsi->netdev, 5074 vsi->num_queue_pairs); 5075 if (err) 5076 goto err_set_queues; 5077 5078 } else if (vsi->type == I40E_VSI_FDIR) { 5079 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", 5080 dev_driver_string(&pf->pdev->dev), 5081 dev_name(&pf->pdev->dev)); 5082 err = i40e_vsi_request_irq(vsi, int_name); 5083 5084 } else { 5085 err = -EINVAL; 5086 goto err_setup_rx; 5087 } 5088 5089 err = i40e_up_complete(vsi); 5090 if (err) 5091 goto err_up_complete; 5092 5093 return 0; 5094 5095 err_up_complete: 5096 i40e_down(vsi); 5097 err_set_queues: 5098 i40e_vsi_free_irq(vsi); 5099 err_setup_rx: 5100 i40e_vsi_free_rx_resources(vsi); 5101 err_setup_tx: 5102 i40e_vsi_free_tx_resources(vsi); 5103 if (vsi == pf->vsi[pf->lan_vsi]) 5104 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 5105 5106 return err; 5107 } 5108 5109 /** 5110 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting 5111 * @pf: Pointer to PF 5112 * 5113 * This function destroys the hlist where all the Flow Director 5114 * filters were saved.
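 *
 * Only the driver's software bookkeeping is torn down here; clearing
 * the filters from the hardware table is handled separately (see the
 * table flush in i40e_fdir_flush_and_replay()).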
5115 **/ 5116 static void i40e_fdir_filter_exit(struct i40e_pf *pf) 5117 { 5118 struct i40e_fdir_filter *filter; 5119 struct hlist_node *node2; 5120 5121 hlist_for_each_entry_safe(filter, node2, 5122 &pf->fdir_filter_list, fdir_node) { 5123 hlist_del(&filter->fdir_node); 5124 kfree(filter); 5125 } 5126 pf->fdir_pf_active_filters = 0; 5127 } 5128 5129 /** 5130 * i40e_close - Disables a network interface 5131 * @netdev: network interface device structure 5132 * 5133 * The close entry point is called when an interface is de-activated 5134 * by the OS. The hardware is still under the driver's control, but 5135 * this netdev interface is disabled. 5136 * 5137 * Returns 0, this is not allowed to fail 5138 **/ 5139 #ifdef I40E_FCOE 5140 int i40e_close(struct net_device *netdev) 5141 #else 5142 static int i40e_close(struct net_device *netdev) 5143 #endif 5144 { 5145 struct i40e_netdev_priv *np = netdev_priv(netdev); 5146 struct i40e_vsi *vsi = np->vsi; 5147 5148 i40e_vsi_close(vsi); 5149 5150 return 0; 5151 } 5152 5153 /** 5154 * i40e_do_reset - Start a PF or Core Reset sequence 5155 * @pf: board private structure 5156 * @reset_flags: which reset is requested 5157 * 5158 * The essential difference in resets is that the PF Reset 5159 * doesn't clear the packet buffers, doesn't reset the PE 5160 * firmware, and doesn't bother the other PFs on the chip. 5161 **/ 5162 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 5163 { 5164 u32 val; 5165 5166 WARN_ON(in_interrupt()); 5167 5168 if (i40e_check_asq_alive(&pf->hw)) 5169 i40e_vc_notify_reset(pf); 5170 5171 /* do the biggest reset indicated */ 5172 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { 5173 5174 /* Request a Global Reset 5175 * 5176 * This will start the chip's countdown to the actual full 5177 * chip reset event, and a warning interrupt to be sent 5178 * to all PFs, including the requestor. Our handler 5179 * for the warning interrupt will deal with the shutdown 5180 * and recovery of the switch setup. 5181 */ 5182 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 5183 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5184 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 5185 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5186 5187 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { 5188 5189 /* Request a Core Reset 5190 * 5191 * Same as Global Reset, except does *not* include the MAC/PHY 5192 */ 5193 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 5194 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 5195 val |= I40E_GLGEN_RTRIG_CORER_MASK; 5196 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 5197 i40e_flush(&pf->hw); 5198 5199 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { 5200 5201 /* Request a PF Reset 5202 * 5203 * Resets only the PF-specific registers 5204 * 5205 * This goes directly to the tear-down and rebuild of 5206 * the switch, since we need to do all the recovery as 5207 * for the Core Reset. 
5208 */ 5209 dev_dbg(&pf->pdev->dev, "PFR requested\n"); 5210 i40e_handle_reset_warning(pf); 5211 5212 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { 5213 int v; 5214 5215 /* Find the VSI(s) that requested a re-init */ 5216 dev_info(&pf->pdev->dev, 5217 "VSI reinit requested\n"); 5218 for (v = 0; v < pf->num_alloc_vsi; v++) { 5219 struct i40e_vsi *vsi = pf->vsi[v]; 5220 if (vsi != NULL && 5221 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 5222 i40e_vsi_reinit_locked(pf->vsi[v]); 5223 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); 5224 } 5225 } 5226 5227 /* no further action needed, so return now */ 5228 return; 5229 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { 5230 int v; 5231 5232 /* Find the VSI(s) that needs to be brought down */ 5233 dev_info(&pf->pdev->dev, "VSI down requested\n"); 5234 for (v = 0; v < pf->num_alloc_vsi; v++) { 5235 struct i40e_vsi *vsi = pf->vsi[v]; 5236 if (vsi != NULL && 5237 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { 5238 set_bit(__I40E_DOWN, &vsi->state); 5239 i40e_down(vsi); 5240 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); 5241 } 5242 } 5243 5244 /* no further action needed, so return now */ 5245 return; 5246 } else { 5247 dev_info(&pf->pdev->dev, 5248 "bad reset request 0x%08x\n", reset_flags); 5249 return; 5250 } 5251 } 5252 5253 #ifdef CONFIG_I40E_DCB 5254 /** 5255 * i40e_dcb_need_reconfig - Check if DCB needs reconfig 5256 * @pf: board private structure 5257 * @old_cfg: current DCB config 5258 * @new_cfg: new DCB config 5259 **/ 5260 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 5261 struct i40e_dcbx_config *old_cfg, 5262 struct i40e_dcbx_config *new_cfg) 5263 { 5264 bool need_reconfig = false; 5265 5266 /* Check if ETS configuration has changed */ 5267 if (memcmp(&new_cfg->etscfg, 5268 &old_cfg->etscfg, 5269 sizeof(new_cfg->etscfg))) { 5270 /* If Priority Table has changed reconfig is needed */ 5271 if (memcmp(&new_cfg->etscfg.prioritytable, 5272 &old_cfg->etscfg.prioritytable, 5273 sizeof(new_cfg->etscfg.prioritytable))) { 5274 need_reconfig = true; 5275 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); 5276 } 5277 5278 if (memcmp(&new_cfg->etscfg.tcbwtable, 5279 &old_cfg->etscfg.tcbwtable, 5280 sizeof(new_cfg->etscfg.tcbwtable))) 5281 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 5282 5283 if (memcmp(&new_cfg->etscfg.tsatable, 5284 &old_cfg->etscfg.tsatable, 5285 sizeof(new_cfg->etscfg.tsatable))) 5286 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); 5287 } 5288 5289 /* Check if PFC configuration has changed */ 5290 if (memcmp(&new_cfg->pfc, 5291 &old_cfg->pfc, 5292 sizeof(new_cfg->pfc))) { 5293 need_reconfig = true; 5294 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); 5295 } 5296 5297 /* Check if APP Table has changed */ 5298 if (memcmp(&new_cfg->app, 5299 &old_cfg->app, 5300 sizeof(new_cfg->app))) { 5301 need_reconfig = true; 5302 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); 5303 } 5304 5305 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, 5306 need_reconfig); 5307 return need_reconfig; 5308 } 5309 5310 /** 5311 * i40e_handle_lldp_event - Handle LLDP Change MIB event 5312 * @pf: board private structure 5313 * @e: event info posted on ARQ 5314 **/ 5315 static int i40e_handle_lldp_event(struct i40e_pf *pf, 5316 struct i40e_arq_event_info *e) 5317 { 5318 struct i40e_aqc_lldp_get_mib *mib = 5319 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; 5320 struct i40e_hw *hw = &pf->hw; 5321 struct i40e_dcbx_config tmp_dcbx_cfg; 5322 bool need_reconfig = false; 5323 int ret 
= 0; 5324 u8 type; 5325 5326 /* Not DCB capable or capability disabled */ 5327 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) 5328 return ret; 5329 5330 /* Ignore if event is not for Nearest Bridge */ 5331 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) 5332 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 5333 dev_dbg(&pf->pdev->dev, 5334 "%s: LLDP event mib bridge type 0x%x\n", __func__, type); 5335 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) 5336 return ret; 5337 5338 /* Check MIB Type and return if event for Remote MIB update */ 5339 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; 5340 dev_dbg(&pf->pdev->dev, 5341 "%s: LLDP event mib type %s\n", __func__, 5342 type ? "remote" : "local"); 5343 if (type == I40E_AQ_LLDP_MIB_REMOTE) { 5344 /* Update the remote cached instance and return */ 5345 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, 5346 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, 5347 &hw->remote_dcbx_config); 5348 goto exit; 5349 } 5350 5351 /* Store the old configuration */ 5352 tmp_dcbx_cfg = hw->local_dcbx_config; 5353 5354 /* Reset the old DCBx configuration data */ 5355 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); 5356 /* Get updated DCBX data from firmware */ 5357 ret = i40e_get_dcb_config(&pf->hw); 5358 if (ret) { 5359 dev_info(&pf->pdev->dev, 5360 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", 5361 i40e_stat_str(&pf->hw, ret), 5362 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 5363 goto exit; 5364 } 5365 5366 /* No change detected in DCBX configs */ 5367 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, 5368 sizeof(tmp_dcbx_cfg))) { 5369 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 5370 goto exit; 5371 } 5372 5373 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, 5374 &hw->local_dcbx_config); 5375 5376 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); 5377 5378 if (!need_reconfig) 5379 goto exit; 5380 5381 /* Enable DCB tagging only when more than one TC */ 5382 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 5383 pf->flags |= I40E_FLAG_DCB_ENABLED; 5384 else 5385 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 5386 5387 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); 5388 /* Reconfiguration needed quiesce all VSIs */ 5389 i40e_pf_quiesce_all_vsi(pf); 5390 5391 /* Changes in configuration update VEB/VSI */ 5392 i40e_dcb_reconfigure(pf); 5393 5394 ret = i40e_resume_port_tx(pf); 5395 5396 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); 5397 /* In case of error no point in resuming VSIs */ 5398 if (ret) 5399 goto exit; 5400 5401 /* Wait for the PF's Tx queues to be disabled */ 5402 ret = i40e_pf_wait_txq_disabled(pf); 5403 if (ret) { 5404 /* Schedule PF reset to recover */ 5405 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 5406 i40e_service_event_schedule(pf); 5407 } else { 5408 i40e_pf_unquiesce_all_vsi(pf); 5409 } 5410 5411 exit: 5412 return ret; 5413 } 5414 #endif /* CONFIG_I40E_DCB */ 5415 5416 /** 5417 * i40e_do_reset_safe - Protected reset path for userland calls. 
5418 * @pf: board private structure 5419 * @reset_flags: which reset is requested 5420 * 5421 **/ 5422 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) 5423 { 5424 rtnl_lock(); 5425 i40e_do_reset(pf, reset_flags); 5426 rtnl_unlock(); 5427 } 5428 5429 /** 5430 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event 5431 * @pf: board private structure 5432 * @e: event info posted on ARQ 5433 * 5434 * Handler for LAN Queue Overflow Event generated by the firmware for PF 5435 * and VF queues 5436 **/ 5437 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, 5438 struct i40e_arq_event_info *e) 5439 { 5440 struct i40e_aqc_lan_overflow *data = 5441 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; 5442 u32 queue = le32_to_cpu(data->prtdcb_rupto); 5443 u32 qtx_ctl = le32_to_cpu(data->otx_ctl); 5444 struct i40e_hw *hw = &pf->hw; 5445 struct i40e_vf *vf; 5446 u16 vf_id; 5447 5448 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", 5449 queue, qtx_ctl); 5450 5451 /* Queue belongs to VF, find the VF and issue VF reset */ 5452 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) 5453 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { 5454 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) 5455 >> I40E_QTX_CTL_VFVM_INDX_SHIFT); 5456 vf_id -= hw->func_caps.vf_base_id; 5457 vf = &pf->vf[vf_id]; 5458 i40e_vc_notify_vf_reset(vf); 5459 /* Allow VF to process pending reset notification */ 5460 msleep(20); 5461 i40e_reset_vf(vf, false); 5462 } 5463 } 5464 5465 /** 5466 * i40e_service_event_complete - Finish up the service event 5467 * @pf: board private structure 5468 **/ 5469 static void i40e_service_event_complete(struct i40e_pf *pf) 5470 { 5471 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); 5472 5473 /* flush memory to make sure state is correct before next watchdog */ 5474 smp_mb__before_atomic(); 5475 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 5476 } 5477 5478 /** 5479 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters 5480 * @pf: board private structure 5481 **/ 5482 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) 5483 { 5484 u32 val, fcnt_prog; 5485 5486 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); 5487 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); 5488 return fcnt_prog; 5489 } 5490 5491 /** 5492 * i40e_get_current_fd_count - Get total FD filters programmed for this PF 5493 * @pf: board private structure 5494 **/ 5495 u32 i40e_get_current_fd_count(struct i40e_pf *pf) 5496 { 5497 u32 val, fcnt_prog; 5498 5499 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); 5500 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + 5501 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> 5502 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); 5503 return fcnt_prog; 5504 } 5505 5506 /** 5507 * i40e_get_global_fd_count - Get total FD filters programmed on device 5508 * @pf: board private structure 5509 **/ 5510 u32 i40e_get_global_fd_count(struct i40e_pf *pf) 5511 { 5512 u32 val, fcnt_prog; 5513 5514 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); 5515 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + 5516 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> 5517 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); 5518 return fcnt_prog; 5519 } 5520 5521 /** 5522 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled 5523 * @pf: board private structure 5524 **/ 5525 void i40e_fdir_check_and_reenable(struct i40e_pf *pf) 5526 { 5527 u32 fcnt_prog, fcnt_avail; 5528 5529 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 5530 return; 5531 5532 /* Check if FD
SB or ATR was auto-disabled and if there is enough room 5533 * to re-enable 5534 */ 5535 fcnt_prog = i40e_get_global_fd_count(pf); 5536 fcnt_avail = pf->fdir_pf_filter_count; 5537 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || 5538 (pf->fd_add_err == 0) || 5539 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { 5540 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 5541 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { 5542 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 5543 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5544 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); 5545 } 5546 } 5547 /* Wait for some more space to be available to turn on ATR */ 5548 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { 5549 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 5550 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { 5551 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 5552 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5553 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); 5554 } 5555 } 5556 } 5557 5558 #define I40E_MIN_FD_FLUSH_INTERVAL 10 5559 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30 5560 /** 5561 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB 5562 * @pf: board private structure 5563 **/ 5564 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) 5565 { 5566 unsigned long min_flush_time; 5567 int flush_wait_retry = 50; 5568 bool disable_atr = false; 5569 int fd_room; 5570 int reg; 5571 5572 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) 5573 return; 5574 5575 if (time_after(jiffies, pf->fd_flush_timestamp + 5576 (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { 5577 /* If the flush is happening too quickly and we have mostly 5578 * SB rules, we should not re-enable ATR for some time.
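 * e.g. with I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE == 30, ATR stays
 * disabled when this flush lands within 30 seconds of the previous
 * one and fewer than I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR filter
 * slots remain free.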
5579 */ 5580 min_flush_time = pf->fd_flush_timestamp 5581 + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); 5582 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; 5583 5584 if (!(time_after(jiffies, min_flush_time)) && 5585 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { 5586 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5587 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); 5588 disable_atr = true; 5589 } 5590 5591 pf->fd_flush_timestamp = jiffies; 5592 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 5593 /* flush all filters */ 5594 wr32(&pf->hw, I40E_PFQF_CTL_1, 5595 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); 5596 i40e_flush(&pf->hw); 5597 pf->fd_flush_cnt++; 5598 pf->fd_add_err = 0; 5599 do { 5600 /* Check FD flush status every 5-6 msec */ 5601 usleep_range(5000, 6000); 5602 reg = rd32(&pf->hw, I40E_PFQF_CTL_1); 5603 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) 5604 break; 5605 } while (flush_wait_retry--); 5606 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { 5607 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); 5608 } else { 5609 /* replay sideband filters */ 5610 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); 5611 if (!disable_atr) 5612 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 5613 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); 5614 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5615 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); 5616 } 5617 } 5618 } 5619 5620 /** 5621 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed 5622 * @pf: board private structure 5623 **/ 5624 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) 5625 { 5626 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; 5627 } 5628 5629 /* We can see up to 256 filter programming descriptors in transit if the 5630 * filters are being applied really fast, before we see the first 5631 * filter miss error on Rx queue 0. Accumulating enough error messages before 5632 * reacting will make sure we don't cause a flush too often.
5633 */ 5634 #define I40E_MAX_FD_PROGRAM_ERROR 256 5635 5636 /** 5637 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 5638 * @pf: board private structure 5639 **/ 5640 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 5641 { 5642 5643 /* if interface is down do nothing */ 5644 if (test_bit(__I40E_DOWN, &pf->state)) 5645 return; 5646 5647 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) 5648 return; 5649 5650 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) 5651 i40e_fdir_flush_and_replay(pf); 5652 5653 i40e_fdir_check_and_reenable(pf); 5654 5655 } 5656 5657 /** 5658 * i40e_vsi_link_event - notify VSI of a link event 5659 * @vsi: vsi to be notified 5660 * @link_up: link up or down 5661 **/ 5662 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 5663 { 5664 if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) 5665 return; 5666 5667 switch (vsi->type) { 5668 case I40E_VSI_MAIN: 5669 #ifdef I40E_FCOE 5670 case I40E_VSI_FCOE: 5671 #endif 5672 if (!vsi->netdev || !vsi->netdev_registered) 5673 break; 5674 5675 if (link_up) { 5676 netif_carrier_on(vsi->netdev); 5677 netif_tx_wake_all_queues(vsi->netdev); 5678 } else { 5679 netif_carrier_off(vsi->netdev); 5680 netif_tx_stop_all_queues(vsi->netdev); 5681 } 5682 break; 5683 5684 case I40E_VSI_SRIOV: 5685 case I40E_VSI_VMDQ2: 5686 case I40E_VSI_CTRL: 5687 case I40E_VSI_MIRROR: 5688 default: 5689 /* there is no notification for other VSIs */ 5690 break; 5691 } 5692 } 5693 5694 /** 5695 * i40e_veb_link_event - notify elements on the veb of a link event 5696 * @veb: veb to be notified 5697 * @link_up: link up or down 5698 **/ 5699 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) 5700 { 5701 struct i40e_pf *pf; 5702 int i; 5703 5704 if (!veb || !veb->pf) 5705 return; 5706 pf = veb->pf; 5707 5708 /* depth first... */ 5709 for (i = 0; i < I40E_MAX_VEB; i++) 5710 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 5711 i40e_veb_link_event(pf->veb[i], link_up); 5712 5713 /* ... now the local VSIs */ 5714 for (i = 0; i < pf->num_alloc_vsi; i++) 5715 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 5716 i40e_vsi_link_event(pf->vsi[i], link_up); 5717 } 5718 5719 /** 5720 * i40e_link_event - Update netif_carrier status 5721 * @pf: board private structure 5722 **/ 5723 static void i40e_link_event(struct i40e_pf *pf) 5724 { 5725 bool new_link, old_link; 5726 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 5727 u8 new_link_speed, old_link_speed; 5728 5729 /* set this to force the get_link_status call to refresh state */ 5730 pf->hw.phy.get_link_info = true; 5731 5732 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 5733 new_link = i40e_get_link_status(&pf->hw); 5734 old_link_speed = pf->hw.phy.link_info_old.link_speed; 5735 new_link_speed = pf->hw.phy.link_info.link_speed; 5736 5737 if (new_link == old_link && 5738 new_link_speed == old_link_speed && 5739 (test_bit(__I40E_DOWN, &vsi->state) || 5740 new_link == netif_carrier_ok(vsi->netdev))) 5741 return; 5742 5743 if (!test_bit(__I40E_DOWN, &vsi->state)) 5744 i40e_print_link_message(vsi, new_link); 5745 5746 /* Notify the base of the switch tree connected to 5747 * the link. Floating VEBs are not notified. 
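 * (i40e_veb_link_event() recurses depth-first through any child VEBs
 * and then forwards the event to the VSIs attached to each one.)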
5748 */ 5749 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 5750 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 5751 else 5752 i40e_vsi_link_event(vsi, new_link); 5753 5754 if (pf->vf) 5755 i40e_vc_notify_link_state(pf); 5756 5757 if (pf->flags & I40E_FLAG_PTP) 5758 i40e_ptp_set_increment(pf); 5759 } 5760 5761 /** 5762 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts 5763 * @pf: board private structure 5764 * 5765 * Set the per-queue flags to request a check for stuck queues in the irq 5766 * clean functions, then force interrupts to be sure the irq clean is called. 5767 **/ 5768 static void i40e_check_hang_subtask(struct i40e_pf *pf) 5769 { 5770 int i, v; 5771 5772 /* If we're down or resetting, just bail */ 5773 if (test_bit(__I40E_DOWN, &pf->state) || 5774 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5775 return; 5776 5777 /* for each VSI/netdev 5778 * for each Tx queue 5779 * set the check flag 5780 * for each q_vector 5781 * force an interrupt 5782 */ 5783 for (v = 0; v < pf->num_alloc_vsi; v++) { 5784 struct i40e_vsi *vsi = pf->vsi[v]; 5785 int armed = 0; 5786 5787 if (!pf->vsi[v] || 5788 test_bit(__I40E_DOWN, &vsi->state) || 5789 (vsi->netdev && !netif_carrier_ok(vsi->netdev))) 5790 continue; 5791 5792 for (i = 0; i < vsi->num_queue_pairs; i++) { 5793 set_check_for_tx_hang(vsi->tx_rings[i]); 5794 if (test_bit(__I40E_HANG_CHECK_ARMED, 5795 &vsi->tx_rings[i]->state)) 5796 armed++; 5797 } 5798 5799 if (armed) { 5800 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 5801 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, 5802 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 5803 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | 5804 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | 5805 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | 5806 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); 5807 } else { 5808 u16 vec = vsi->base_vector - 1; 5809 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 5810 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 5811 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | 5812 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | 5813 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); 5814 for (i = 0; i < vsi->num_q_vectors; i++, vec++) 5815 wr32(&vsi->back->hw, 5816 I40E_PFINT_DYN_CTLN(vec), val); 5817 } 5818 i40e_flush(&vsi->back->hw); 5819 } 5820 } 5821 } 5822 5823 /** 5824 * i40e_watchdog_subtask - periodic checks not using event driven response 5825 * @pf: board private structure 5826 **/ 5827 static void i40e_watchdog_subtask(struct i40e_pf *pf) 5828 { 5829 int i; 5830 5831 /* if interface is down do nothing */ 5832 if (test_bit(__I40E_DOWN, &pf->state) || 5833 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5834 return; 5835 5836 /* make sure we don't do these things too often */ 5837 if (time_before(jiffies, (pf->service_timer_previous + 5838 pf->service_timer_period))) 5839 return; 5840 pf->service_timer_previous = jiffies; 5841 5842 i40e_check_hang_subtask(pf); 5843 i40e_link_event(pf); 5844 5845 /* Update the stats for active netdevs so the network stack 5846 * can look at updated numbers whenever it cares to 5847 */ 5848 for (i = 0; i < pf->num_alloc_vsi; i++) 5849 if (pf->vsi[i] && pf->vsi[i]->netdev) 5850 i40e_update_stats(pf->vsi[i]); 5851 5852 /* Update the stats for the active switching components */ 5853 for (i = 0; i < I40E_MAX_VEB; i++) 5854 if (pf->veb[i]) 5855 i40e_update_veb_stats(pf->veb[i]); 5856 5857 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 5858 } 5859 5860 /** 5861 * i40e_reset_subtask - Set up for resetting the device and driver 5862 * @pf: board private structure 5863 **/ 5864 static void i40e_reset_subtask(struct i40e_pf 
*pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence over starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		goto unlock;
	}

	/* Proceed with the requested reset only if we're neither already
	 * down nor busy with a reconfiguration.
	 */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);

unlock:
	rtnl_unlock();
}

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* save off old link status information */
	hw->phy.link_info_old = hw->phy.link_info;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct.
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
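 * (done just below via i40e_link_event(), which also refreshes
 * hw->phy.link_info)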
5928 */ 5929 i40e_link_event(pf); 5930 5931 /* check for unqualified module, if link is down */ 5932 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && 5933 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && 5934 (!(status->link_info & I40E_AQ_LINK_UP))) 5935 dev_err(&pf->pdev->dev, 5936 "The driver failed to link because an unqualified module was detected.\n"); 5937 } 5938 5939 /** 5940 * i40e_clean_adminq_subtask - Clean the AdminQ rings 5941 * @pf: board private structure 5942 **/ 5943 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 5944 { 5945 struct i40e_arq_event_info event; 5946 struct i40e_hw *hw = &pf->hw; 5947 u16 pending, i = 0; 5948 i40e_status ret; 5949 u16 opcode; 5950 u32 oldval; 5951 u32 val; 5952 5953 /* Do not run clean AQ when PF reset fails */ 5954 if (test_bit(__I40E_RESET_FAILED, &pf->state)) 5955 return; 5956 5957 /* check for error indications */ 5958 val = rd32(&pf->hw, pf->hw.aq.arq.len); 5959 oldval = val; 5960 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { 5961 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); 5962 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; 5963 } 5964 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { 5965 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); 5966 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; 5967 } 5968 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { 5969 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); 5970 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; 5971 } 5972 if (oldval != val) 5973 wr32(&pf->hw, pf->hw.aq.arq.len, val); 5974 5975 val = rd32(&pf->hw, pf->hw.aq.asq.len); 5976 oldval = val; 5977 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { 5978 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); 5979 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; 5980 } 5981 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { 5982 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); 5983 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; 5984 } 5985 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { 5986 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); 5987 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; 5988 } 5989 if (oldval != val) 5990 wr32(&pf->hw, pf->hw.aq.asq.len, val); 5991 5992 event.buf_len = I40E_MAX_AQ_BUF_SIZE; 5993 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 5994 if (!event.msg_buf) 5995 return; 5996 5997 do { 5998 ret = i40e_clean_arq_element(hw, &event, &pending); 5999 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) 6000 break; 6001 else if (ret) { 6002 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 6003 break; 6004 } 6005 6006 opcode = le16_to_cpu(event.desc.opcode); 6007 switch (opcode) { 6008 6009 case i40e_aqc_opc_get_link_status: 6010 i40e_handle_link_event(pf, &event); 6011 break; 6012 case i40e_aqc_opc_send_msg_to_pf: 6013 ret = i40e_vc_process_vf_msg(pf, 6014 le16_to_cpu(event.desc.retval), 6015 le32_to_cpu(event.desc.cookie_high), 6016 le32_to_cpu(event.desc.cookie_low), 6017 event.msg_buf, 6018 event.msg_len); 6019 break; 6020 case i40e_aqc_opc_lldp_update_mib: 6021 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 6022 #ifdef CONFIG_I40E_DCB 6023 rtnl_lock(); 6024 ret = i40e_handle_lldp_event(pf, &event); 6025 rtnl_unlock(); 6026 #endif /* CONFIG_I40E_DCB */ 6027 break; 6028 case i40e_aqc_opc_event_lan_overflow: 6029 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 6030 i40e_handle_lan_overflow_event(pf, &event); 6031 break; 6032 case i40e_aqc_opc_send_msg_to_peer: 6033 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 6034 break; 6035 case i40e_aqc_opc_nvm_erase: 6036 case i40e_aqc_opc_nvm_update: 6037 i40e_debug(&pf->hw, 
I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); 6038 break; 6039 default: 6040 dev_info(&pf->pdev->dev, 6041 "ARQ Error: Unknown event 0x%04x received\n", 6042 opcode); 6043 break; 6044 } 6045 } while (pending && (i++ < pf->adminq_work_limit)); 6046 6047 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 6048 /* re-enable Admin queue interrupt cause */ 6049 val = rd32(hw, I40E_PFINT_ICR0_ENA); 6050 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 6051 wr32(hw, I40E_PFINT_ICR0_ENA, val); 6052 i40e_flush(hw); 6053 6054 kfree(event.msg_buf); 6055 } 6056 6057 /** 6058 * i40e_verify_eeprom - make sure eeprom is good to use 6059 * @pf: board private structure 6060 **/ 6061 static void i40e_verify_eeprom(struct i40e_pf *pf) 6062 { 6063 int err; 6064 6065 err = i40e_diag_eeprom_test(&pf->hw); 6066 if (err) { 6067 /* retry in case of garbage read */ 6068 err = i40e_diag_eeprom_test(&pf->hw); 6069 if (err) { 6070 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 6071 err); 6072 set_bit(__I40E_BAD_EEPROM, &pf->state); 6073 } 6074 } 6075 6076 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 6077 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 6078 clear_bit(__I40E_BAD_EEPROM, &pf->state); 6079 } 6080 } 6081 6082 /** 6083 * i40e_enable_pf_switch_lb 6084 * @pf: pointer to the PF structure 6085 * 6086 * enable switch loop back or die - no point in a return value 6087 **/ 6088 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) 6089 { 6090 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6091 struct i40e_vsi_context ctxt; 6092 int ret; 6093 6094 ctxt.seid = pf->main_vsi_seid; 6095 ctxt.pf_num = pf->hw.pf_id; 6096 ctxt.vf_num = 0; 6097 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6098 if (ret) { 6099 dev_info(&pf->pdev->dev, 6100 "couldn't get PF vsi config, err %s aq_err %s\n", 6101 i40e_stat_str(&pf->hw, ret), 6102 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6103 return; 6104 } 6105 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6106 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6107 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6108 6109 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6110 if (ret) { 6111 dev_info(&pf->pdev->dev, 6112 "update vsi switch failed, err %s aq_err %s\n", 6113 i40e_stat_str(&pf->hw, ret), 6114 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6115 } 6116 } 6117 6118 /** 6119 * i40e_disable_pf_switch_lb 6120 * @pf: pointer to the PF structure 6121 * 6122 * disable switch loop back or die - no point in a return value 6123 **/ 6124 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) 6125 { 6126 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 6127 struct i40e_vsi_context ctxt; 6128 int ret; 6129 6130 ctxt.seid = pf->main_vsi_seid; 6131 ctxt.pf_num = pf->hw.pf_id; 6132 ctxt.vf_num = 0; 6133 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6134 if (ret) { 6135 dev_info(&pf->pdev->dev, 6136 "couldn't get PF vsi config, err %s aq_err %s\n", 6137 i40e_stat_str(&pf->hw, ret), 6138 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 6139 return; 6140 } 6141 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6142 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6143 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6144 6145 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 6146 if (ret) { 6147 dev_info(&pf->pdev->dev, 6148 "update vsi switch failed, err %s aq_err %s\n", 6149 i40e_stat_str(&pf->hw, ret), 6150 i40e_aq_str(&pf->hw, 
pf->hw.aq.asq_last_status)); 6151 } 6152 } 6153 6154 /** 6155 * i40e_config_bridge_mode - Configure the HW bridge mode 6156 * @veb: pointer to the bridge instance 6157 * 6158 * Configure the loop back mode for the LAN VSI that is downlink to the 6159 * specified HW bridge instance. It is expected this function is called 6160 * when a new HW bridge is instantiated. 6161 **/ 6162 static void i40e_config_bridge_mode(struct i40e_veb *veb) 6163 { 6164 struct i40e_pf *pf = veb->pf; 6165 6166 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", 6167 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); 6168 if (veb->bridge_mode & BRIDGE_MODE_VEPA) 6169 i40e_disable_pf_switch_lb(pf); 6170 else 6171 i40e_enable_pf_switch_lb(pf); 6172 } 6173 6174 /** 6175 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 6176 * @veb: pointer to the VEB instance 6177 * 6178 * This is a recursive function that first builds the attached VSIs then 6179 * recurses in to build the next layer of VEB. We track the connections 6180 * through our own index numbers because the seid's from the HW could 6181 * change across the reset. 6182 **/ 6183 static int i40e_reconstitute_veb(struct i40e_veb *veb) 6184 { 6185 struct i40e_vsi *ctl_vsi = NULL; 6186 struct i40e_pf *pf = veb->pf; 6187 int v, veb_idx; 6188 int ret; 6189 6190 /* build VSI that owns this VEB, temporarily attached to base VEB */ 6191 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { 6192 if (pf->vsi[v] && 6193 pf->vsi[v]->veb_idx == veb->idx && 6194 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 6195 ctl_vsi = pf->vsi[v]; 6196 break; 6197 } 6198 } 6199 if (!ctl_vsi) { 6200 dev_info(&pf->pdev->dev, 6201 "missing owner VSI for veb_idx %d\n", veb->idx); 6202 ret = -ENOENT; 6203 goto end_reconstitute; 6204 } 6205 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 6206 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 6207 ret = i40e_add_vsi(ctl_vsi); 6208 if (ret) { 6209 dev_info(&pf->pdev->dev, 6210 "rebuild of veb_idx %d owner VSI failed: %d\n", 6211 veb->idx, ret); 6212 goto end_reconstitute; 6213 } 6214 i40e_vsi_reset_stats(ctl_vsi); 6215 6216 /* create the VEB in the switch and move the VSI onto the VEB */ 6217 ret = i40e_add_veb(veb, ctl_vsi); 6218 if (ret) 6219 goto end_reconstitute; 6220 6221 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) 6222 veb->bridge_mode = BRIDGE_MODE_VEB; 6223 else 6224 veb->bridge_mode = BRIDGE_MODE_VEPA; 6225 i40e_config_bridge_mode(veb); 6226 6227 /* create the remaining VSIs attached to this VEB */ 6228 for (v = 0; v < pf->num_alloc_vsi; v++) { 6229 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 6230 continue; 6231 6232 if (pf->vsi[v]->veb_idx == veb->idx) { 6233 struct i40e_vsi *vsi = pf->vsi[v]; 6234 vsi->uplink_seid = veb->seid; 6235 ret = i40e_add_vsi(vsi); 6236 if (ret) { 6237 dev_info(&pf->pdev->dev, 6238 "rebuild of vsi_idx %d failed: %d\n", 6239 v, ret); 6240 goto end_reconstitute; 6241 } 6242 i40e_vsi_reset_stats(vsi); 6243 } 6244 } 6245 6246 /* create any VEBs attached to this VEB - RECURSION */ 6247 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 6248 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 6249 pf->veb[veb_idx]->uplink_seid = veb->seid; 6250 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 6251 if (ret) 6252 break; 6253 } 6254 } 6255 6256 end_reconstitute: 6257 return ret; 6258 } 6259 6260 /** 6261 * i40e_get_capabilities - get info about the HW 6262 * @pf: the PF struct 6263 **/ 6264 static int i40e_get_capabilities(struct i40e_pf *pf) 6265 { 6266 struct 
i40e_aqc_list_capabilities_element_resp *cap_buf; 6267 u16 data_size; 6268 int buf_len; 6269 int err; 6270 6271 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 6272 do { 6273 cap_buf = kzalloc(buf_len, GFP_KERNEL); 6274 if (!cap_buf) 6275 return -ENOMEM; 6276 6277 /* this loads the data into the hw struct for us */ 6278 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 6279 &data_size, 6280 i40e_aqc_opc_list_func_capabilities, 6281 NULL); 6282 /* data loaded, buffer no longer needed */ 6283 kfree(cap_buf); 6284 6285 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 6286 /* retry with a larger buffer */ 6287 buf_len = data_size; 6288 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 6289 dev_info(&pf->pdev->dev, 6290 "capability discovery failed, err %s aq_err %s\n", 6291 i40e_stat_str(&pf->hw, err), 6292 i40e_aq_str(&pf->hw, 6293 pf->hw.aq.asq_last_status)); 6294 return -ENODEV; 6295 } 6296 } while (err); 6297 6298 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || 6299 (pf->hw.aq.fw_maj_ver < 2)) { 6300 pf->hw.func_caps.num_msix_vectors++; 6301 pf->hw.func_caps.num_msix_vectors_vf++; 6302 } 6303 6304 if (pf->hw.debug_mask & I40E_DEBUG_USER) 6305 dev_info(&pf->pdev->dev, 6306 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 6307 pf->hw.pf_id, pf->hw.func_caps.num_vfs, 6308 pf->hw.func_caps.num_msix_vectors, 6309 pf->hw.func_caps.num_msix_vectors_vf, 6310 pf->hw.func_caps.fd_filters_guaranteed, 6311 pf->hw.func_caps.fd_filters_best_effort, 6312 pf->hw.func_caps.num_tx_qp, 6313 pf->hw.func_caps.num_vsis); 6314 6315 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 6316 + pf->hw.func_caps.num_vfs) 6317 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 6318 dev_info(&pf->pdev->dev, 6319 "got num_vsis %d, setting num_vsis to %d\n", 6320 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 6321 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 6322 } 6323 6324 return 0; 6325 } 6326 6327 static int i40e_vsi_clear(struct i40e_vsi *vsi); 6328 6329 /** 6330 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 6331 * @pf: board private structure 6332 **/ 6333 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 6334 { 6335 struct i40e_vsi *vsi; 6336 int i; 6337 6338 /* quick workaround for an NVM issue that leaves a critical register 6339 * uninitialized 6340 */ 6341 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { 6342 static const u32 hkey[] = { 6343 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 6344 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 6345 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 6346 0x95b3a76d}; 6347 6348 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) 6349 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); 6350 } 6351 6352 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6353 return; 6354 6355 /* find existing VSI and see if it needs configuring */ 6356 vsi = NULL; 6357 for (i = 0; i < pf->num_alloc_vsi; i++) { 6358 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 6359 vsi = pf->vsi[i]; 6360 break; 6361 } 6362 } 6363 6364 /* create a new VSI if none exists */ 6365 if (!vsi) { 6366 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 6367 pf->vsi[pf->lan_vsi]->seid, 0); 6368 if (!vsi) { 6369 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 6370 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6371 return; 6372 } 6373 } 6374 6375 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 6376 } 6377 6378 /** 6379 * i40e_fdir_teardown - release the Flow Director resources 6380 
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	int i;

	i40e_fdir_filter_exit(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_release(pf->vsi[i]);
			break;
		}
	}
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
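 * Only the driver's software copies survive, and they are replayed
 * against the HW below.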
 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	ret = i40e_init_pf_fcoe(pf);
	if (ret)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
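 * (i.e. the Main VSI reattached directly to the MAC uplink)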
6550 */ 6551 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 6552 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); 6553 /* find the one VEB connected to the MAC, and find orphans */ 6554 for (v = 0; v < I40E_MAX_VEB; v++) { 6555 if (!pf->veb[v]) 6556 continue; 6557 6558 if (pf->veb[v]->uplink_seid == pf->mac_seid || 6559 pf->veb[v]->uplink_seid == 0) { 6560 ret = i40e_reconstitute_veb(pf->veb[v]); 6561 6562 if (!ret) 6563 continue; 6564 6565 /* If Main VEB failed, we're in deep doodoo, 6566 * so give up rebuilding the switch and set up 6567 * for minimal rebuild of PF VSI. 6568 * If orphan failed, we'll report the error 6569 * but try to keep going. 6570 */ 6571 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 6572 dev_info(&pf->pdev->dev, 6573 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 6574 ret); 6575 pf->vsi[pf->lan_vsi]->uplink_seid 6576 = pf->mac_seid; 6577 break; 6578 } else if (pf->veb[v]->uplink_seid == 0) { 6579 dev_info(&pf->pdev->dev, 6580 "rebuild of orphan VEB failed: %d\n", 6581 ret); 6582 } 6583 } 6584 } 6585 } 6586 6587 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 6588 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 6589 /* no VEB, so rebuild only the Main VSI */ 6590 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 6591 if (ret) { 6592 dev_info(&pf->pdev->dev, 6593 "rebuild of Main VSI failed: %d\n", ret); 6594 goto end_core_reset; 6595 } 6596 } 6597 6598 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 6599 (pf->hw.aq.fw_maj_ver < 4)) { 6600 msleep(75); 6601 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 6602 if (ret) 6603 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 6604 i40e_stat_str(&pf->hw, ret), 6605 i40e_aq_str(&pf->hw, 6606 pf->hw.aq.asq_last_status)); 6607 } 6608 /* reinit the misc interrupt */ 6609 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6610 ret = i40e_setup_misc_vector(pf); 6611 6612 /* restart the VSIs that were rebuilt and running before the reset */ 6613 i40e_pf_unquiesce_all_vsi(pf); 6614 6615 if (pf->num_alloc_vfs) { 6616 for (v = 0; v < pf->num_alloc_vfs; v++) 6617 i40e_reset_vf(&pf->vf[v], true); 6618 } 6619 6620 /* tell the firmware that we're starting */ 6621 i40e_send_version(pf); 6622 6623 end_core_reset: 6624 clear_bit(__I40E_RESET_FAILED, &pf->state); 6625 clear_recovery: 6626 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 6627 } 6628 6629 /** 6630 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild 6631 * @pf: board private structure 6632 * 6633 * Close up the VFs and other things in prep for a Core Reset, 6634 * then get ready to rebuild the world. 
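 * This is simply i40e_prep_for_reset() followed by i40e_reset_and_rebuild().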
6635 **/ 6636 static void i40e_handle_reset_warning(struct i40e_pf *pf) 6637 { 6638 i40e_prep_for_reset(pf); 6639 i40e_reset_and_rebuild(pf, false); 6640 } 6641 6642 /** 6643 * i40e_handle_mdd_event 6644 * @pf: pointer to the PF structure 6645 * 6646 * Called from the MDD irq handler to identify possibly malicious vfs 6647 **/ 6648 static void i40e_handle_mdd_event(struct i40e_pf *pf) 6649 { 6650 struct i40e_hw *hw = &pf->hw; 6651 bool mdd_detected = false; 6652 bool pf_mdd_detected = false; 6653 struct i40e_vf *vf; 6654 u32 reg; 6655 int i; 6656 6657 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) 6658 return; 6659 6660 /* find what triggered the MDD event */ 6661 reg = rd32(hw, I40E_GL_MDET_TX); 6662 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 6663 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 6664 I40E_GL_MDET_TX_PF_NUM_SHIFT; 6665 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 6666 I40E_GL_MDET_TX_VF_NUM_SHIFT; 6667 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 6668 I40E_GL_MDET_TX_EVENT_SHIFT; 6669 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6670 I40E_GL_MDET_TX_QUEUE_SHIFT) - 6671 pf->hw.func_caps.base_queue; 6672 if (netif_msg_tx_err(pf)) 6673 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", 6674 event, queue, pf_num, vf_num); 6675 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 6676 mdd_detected = true; 6677 } 6678 reg = rd32(hw, I40E_GL_MDET_RX); 6679 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 6680 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 6681 I40E_GL_MDET_RX_FUNCTION_SHIFT; 6682 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 6683 I40E_GL_MDET_RX_EVENT_SHIFT; 6684 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6685 I40E_GL_MDET_RX_QUEUE_SHIFT) - 6686 pf->hw.func_caps.base_queue; 6687 if (netif_msg_rx_err(pf)) 6688 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 6689 event, queue, func); 6690 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 6691 mdd_detected = true; 6692 } 6693 6694 if (mdd_detected) { 6695 reg = rd32(hw, I40E_PF_MDET_TX); 6696 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 6697 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 6698 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); 6699 pf_mdd_detected = true; 6700 } 6701 reg = rd32(hw, I40E_PF_MDET_RX); 6702 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 6703 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 6704 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); 6705 pf_mdd_detected = true; 6706 } 6707 /* Queue belongs to the PF, initiate a reset */ 6708 if (pf_mdd_detected) { 6709 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 6710 i40e_service_event_schedule(pf); 6711 } 6712 } 6713 6714 /* see if one of the VFs needs its hand slapped */ 6715 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { 6716 vf = &(pf->vf[i]); 6717 reg = rd32(hw, I40E_VP_MDET_TX(i)); 6718 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 6719 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 6720 vf->num_mdd_events++; 6721 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", 6722 i); 6723 } 6724 6725 reg = rd32(hw, I40E_VP_MDET_RX(i)); 6726 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 6727 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 6728 vf->num_mdd_events++; 6729 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", 6730 i); 6731 } 6732 6733 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 6734 dev_info(&pf->pdev->dev, 6735 "Too many MDD events on VF %d, disabled\n", i); 6736 
dev_info(&pf->pdev->dev, 6737 "Use PF Control I/F to re-enable the VF\n"); 6738 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); 6739 } 6740 } 6741 6742 /* re-enable mdd interrupt cause */ 6743 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 6744 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 6745 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 6746 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 6747 i40e_flush(hw); 6748 } 6749 6750 #ifdef CONFIG_I40E_VXLAN 6751 /** 6752 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW 6753 * @pf: board private structure 6754 **/ 6755 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) 6756 { 6757 struct i40e_hw *hw = &pf->hw; 6758 i40e_status ret; 6759 __be16 port; 6760 int i; 6761 6762 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) 6763 return; 6764 6765 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; 6766 6767 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 6768 if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { 6769 pf->pending_vxlan_bitmap &= ~BIT_ULL(i); 6770 port = pf->vxlan_ports[i]; 6771 if (port) 6772 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), 6773 I40E_AQC_TUNNEL_TYPE_VXLAN, 6774 NULL, NULL); 6775 else 6776 ret = i40e_aq_del_udp_tunnel(hw, i, NULL); 6777 6778 if (ret) { 6779 dev_info(&pf->pdev->dev, 6780 "%s vxlan port %d, index %d failed, err %s aq_err %s\n", 6781 port ? "add" : "delete", 6782 ntohs(port), i, 6783 i40e_stat_str(&pf->hw, ret), 6784 i40e_aq_str(&pf->hw, 6785 pf->hw.aq.asq_last_status)); 6786 pf->vxlan_ports[i] = 0; 6787 } 6788 } 6789 } 6790 } 6791 6792 #endif 6793 /** 6794 * i40e_service_task - Run the driver's async subtasks 6795 * @work: pointer to work_struct containing our data 6796 **/ 6797 static void i40e_service_task(struct work_struct *work) 6798 { 6799 struct i40e_pf *pf = container_of(work, 6800 struct i40e_pf, 6801 service_task); 6802 unsigned long start_time = jiffies; 6803 6804 /* don't bother with service tasks if a reset is in progress */ 6805 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 6806 i40e_service_event_complete(pf); 6807 return; 6808 } 6809 6810 i40e_reset_subtask(pf); 6811 i40e_handle_mdd_event(pf); 6812 i40e_vc_process_vflr_event(pf); 6813 i40e_watchdog_subtask(pf); 6814 i40e_fdir_reinit_subtask(pf); 6815 i40e_sync_filters_subtask(pf); 6816 #ifdef CONFIG_I40E_VXLAN 6817 i40e_sync_vxlan_filters_subtask(pf); 6818 #endif 6819 i40e_clean_adminq_subtask(pf); 6820 6821 i40e_service_event_complete(pf); 6822 6823 /* If the tasks have taken longer than one timer cycle or there 6824 * is more work to be done, reschedule the service task now 6825 * rather than wait for the timer to tick again. 
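 * A pending bit that is still set at this point means a new event
 * arrived while the subtasks were running.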
 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: pointer to the VSI being cleaned up
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7027 * 7028 * On error: returns error code (negative) 7029 * On success: returns 0 7030 **/ 7031 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) 7032 { 7033 /* free the ring and vector containers */ 7034 if (free_qvectors) { 7035 kfree(vsi->q_vectors); 7036 vsi->q_vectors = NULL; 7037 } 7038 kfree(vsi->tx_rings); 7039 vsi->tx_rings = NULL; 7040 vsi->rx_rings = NULL; 7041 } 7042 7043 /** 7044 * i40e_vsi_clear - Deallocate the VSI provided 7045 * @vsi: the VSI being un-configured 7046 **/ 7047 static int i40e_vsi_clear(struct i40e_vsi *vsi) 7048 { 7049 struct i40e_pf *pf; 7050 7051 if (!vsi) 7052 return 0; 7053 7054 if (!vsi->back) 7055 goto free_vsi; 7056 pf = vsi->back; 7057 7058 mutex_lock(&pf->switch_mutex); 7059 if (!pf->vsi[vsi->idx]) { 7060 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", 7061 vsi->idx, vsi->idx, vsi, vsi->type); 7062 goto unlock_vsi; 7063 } 7064 7065 if (pf->vsi[vsi->idx] != vsi) { 7066 dev_err(&pf->pdev->dev, 7067 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", 7068 pf->vsi[vsi->idx]->idx, 7069 pf->vsi[vsi->idx], 7070 pf->vsi[vsi->idx]->type, 7071 vsi->idx, vsi, vsi->type); 7072 goto unlock_vsi; 7073 } 7074 7075 /* updates the PF for this cleared vsi */ 7076 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 7077 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 7078 7079 i40e_vsi_free_arrays(vsi, true); 7080 7081 pf->vsi[vsi->idx] = NULL; 7082 if (vsi->idx < pf->next_vsi) 7083 pf->next_vsi = vsi->idx; 7084 7085 unlock_vsi: 7086 mutex_unlock(&pf->switch_mutex); 7087 free_vsi: 7088 kfree(vsi); 7089 7090 return 0; 7091 } 7092 7093 /** 7094 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI 7095 * @vsi: the VSI being cleaned 7096 **/ 7097 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) 7098 { 7099 int i; 7100 7101 if (vsi->tx_rings && vsi->tx_rings[0]) { 7102 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7103 kfree_rcu(vsi->tx_rings[i], rcu); 7104 vsi->tx_rings[i] = NULL; 7105 vsi->rx_rings[i] = NULL; 7106 } 7107 } 7108 } 7109 7110 /** 7111 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI 7112 * @vsi: the VSI being configured 7113 **/ 7114 static int i40e_alloc_rings(struct i40e_vsi *vsi) 7115 { 7116 struct i40e_ring *tx_ring, *rx_ring; 7117 struct i40e_pf *pf = vsi->back; 7118 int i; 7119 7120 /* Set basic values in the rings to be used later during open() */ 7121 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 7122 /* allocate space for both Tx and Rx in one shot */ 7123 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); 7124 if (!tx_ring) 7125 goto err_out; 7126 7127 tx_ring->queue_index = i; 7128 tx_ring->reg_idx = vsi->base_queue + i; 7129 tx_ring->ring_active = false; 7130 tx_ring->vsi = vsi; 7131 tx_ring->netdev = vsi->netdev; 7132 tx_ring->dev = &pf->pdev->dev; 7133 tx_ring->count = vsi->num_desc; 7134 tx_ring->size = 0; 7135 tx_ring->dcb_tc = 0; 7136 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) 7137 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; 7138 if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) 7139 tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; 7140 vsi->tx_rings[i] = tx_ring; 7141 7142 rx_ring = &tx_ring[1]; 7143 rx_ring->queue_index = i; 7144 rx_ring->reg_idx = vsi->base_queue + i; 7145 rx_ring->ring_active = false; 7146 rx_ring->vsi = vsi; 7147 rx_ring->netdev = vsi->netdev; 7148 rx_ring->dev = &pf->pdev->dev; 7149 rx_ring->count = vsi->num_desc; 7150 rx_ring->size = 0; 7151 rx_ring->dcb_tc 
= 0; 7152 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) 7153 set_ring_16byte_desc_enabled(rx_ring); 7154 else 7155 clear_ring_16byte_desc_enabled(rx_ring); 7156 vsi->rx_rings[i] = rx_ring; 7157 } 7158 7159 return 0; 7160 7161 err_out: 7162 i40e_vsi_clear_rings(vsi); 7163 return -ENOMEM; 7164 } 7165 7166 /** 7167 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 7168 * @pf: board private structure 7169 * @vectors: the number of MSI-X vectors to request 7170 * 7171 * Returns the number of vectors reserved, or error 7172 **/ 7173 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 7174 { 7175 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, 7176 I40E_MIN_MSIX, vectors); 7177 if (vectors < 0) { 7178 dev_info(&pf->pdev->dev, 7179 "MSI-X vector reservation failed: %d\n", vectors); 7180 vectors = 0; 7181 } 7182 7183 return vectors; 7184 } 7185 7186 /** 7187 * i40e_init_msix - Setup the MSIX capability 7188 * @pf: board private structure 7189 * 7190 * Work with the OS to set up the MSIX vectors needed. 7191 * 7192 * Returns the number of vectors reserved or negative on failure 7193 **/ 7194 static int i40e_init_msix(struct i40e_pf *pf) 7195 { 7196 struct i40e_hw *hw = &pf->hw; 7197 int vectors_left; 7198 int v_budget, i; 7199 int v_actual; 7200 7201 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 7202 return -ENODEV; 7203 7204 /* The number of vectors we'll request will be comprised of: 7205 * - Add 1 for "other" cause for Admin Queue events, etc. 7206 * - The number of LAN queue pairs 7207 * - Queues being used for RSS. 7208 * We don't need as many as max_rss_size vectors. 7209 * use rss_size instead in the calculation since that 7210 * is governed by number of cpus in the system. 7211 * - assumes symmetric Tx/Rx pairing 7212 * - The number of VMDq pairs 7213 #ifdef I40E_FCOE 7214 * - The number of FCOE qps. 7215 #endif 7216 * Once we count this up, try the request. 7217 * 7218 * If we can't get what we want, we'll simplify to nearly nothing 7219 * and try again. If that still fails, we punt. 7220 */ 7221 vectors_left = hw->func_caps.num_msix_vectors; 7222 v_budget = 0; 7223 7224 /* reserve one vector for miscellaneous handler */ 7225 if (vectors_left) { 7226 v_budget++; 7227 vectors_left--; 7228 } 7229 7230 /* reserve vectors for the main PF traffic queues */ 7231 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); 7232 vectors_left -= pf->num_lan_msix; 7233 v_budget += pf->num_lan_msix; 7234 7235 /* reserve one vector for sideband flow director */ 7236 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7237 if (vectors_left) { 7238 v_budget++; 7239 vectors_left--; 7240 } else { 7241 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7242 } 7243 } 7244 7245 #ifdef I40E_FCOE 7246 /* can we reserve enough for FCoE? */ 7247 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7248 if (!vectors_left) 7249 pf->num_fcoe_msix = 0; 7250 else if (vectors_left >= pf->num_fcoe_qps) 7251 pf->num_fcoe_msix = pf->num_fcoe_qps; 7252 else 7253 pf->num_fcoe_msix = 1; 7254 v_budget += pf->num_fcoe_msix; 7255 vectors_left -= pf->num_fcoe_msix; 7256 } 7257 7258 #endif 7259 /* any vectors left over go for VMDq support */ 7260 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { 7261 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; 7262 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); 7263 7264 /* if we're short on vectors for what's desired, we limit 7265 * the queues per vmdq. 
If this is still more than are 7266 * available, the user will need to change the number of 7267 * queues/vectors used by the PF later with the ethtool 7268 * channels command 7269 */ 7270 if (vmdq_vecs < vmdq_vecs_wanted) 7271 pf->num_vmdq_qps = 1; 7272 pf->num_vmdq_msix = pf->num_vmdq_qps; 7273 7274 v_budget += vmdq_vecs; 7275 vectors_left -= vmdq_vecs; 7276 } 7277 7278 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 7279 GFP_KERNEL); 7280 if (!pf->msix_entries) 7281 return -ENOMEM; 7282 7283 for (i = 0; i < v_budget; i++) 7284 pf->msix_entries[i].entry = i; 7285 v_actual = i40e_reserve_msix_vectors(pf, v_budget); 7286 7287 if (v_actual != v_budget) { 7288 /* If we have limited resources, we will start with no vectors 7289 * for the special features and then allocate vectors to some 7290 * of these features based on the policy and at the end disable 7291 * the features that did not get any vectors. 7292 */ 7293 #ifdef I40E_FCOE 7294 pf->num_fcoe_qps = 0; 7295 pf->num_fcoe_msix = 0; 7296 #endif 7297 pf->num_vmdq_msix = 0; 7298 } 7299 7300 if (v_actual < I40E_MIN_MSIX) { 7301 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 7302 kfree(pf->msix_entries); 7303 pf->msix_entries = NULL; 7304 return -ENODEV; 7305 7306 } else if (v_actual == I40E_MIN_MSIX) { 7307 /* Adjust for minimal MSIX use */ 7308 pf->num_vmdq_vsis = 0; 7309 pf->num_vmdq_qps = 0; 7310 pf->num_lan_qps = 1; 7311 pf->num_lan_msix = 1; 7312 7313 } else if (v_actual != v_budget) { 7314 int vec; 7315 7316 /* reserve the misc vector */ 7317 vec = v_actual - 1; 7318 7319 /* Scale vector usage down */ 7320 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 7321 pf->num_vmdq_vsis = 1; 7322 pf->num_vmdq_qps = 1; 7323 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7324 7325 /* partition out the remaining vectors */ 7326 switch (vec) { 7327 case 2: 7328 pf->num_lan_msix = 1; 7329 break; 7330 case 3: 7331 #ifdef I40E_FCOE 7332 /* give one vector to FCoE */ 7333 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7334 pf->num_lan_msix = 1; 7335 pf->num_fcoe_msix = 1; 7336 } 7337 #else 7338 pf->num_lan_msix = 2; 7339 #endif 7340 break; 7341 default: 7342 #ifdef I40E_FCOE 7343 /* give one vector to FCoE */ 7344 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 7345 pf->num_fcoe_msix = 1; 7346 vec--; 7347 } 7348 #endif 7349 /* give the rest to the PF */ 7350 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); 7351 break; 7352 } 7353 } 7354 7355 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 7356 (pf->num_vmdq_msix == 0)) { 7357 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); 7358 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 7359 } 7360 #ifdef I40E_FCOE 7361 7362 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { 7363 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); 7364 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 7365 } 7366 #endif 7367 return v_actual; 7368 } 7369 7370 /** 7371 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 7372 * @vsi: the VSI being configured 7373 * @v_idx: index of the vector in the vsi struct 7374 * 7375 * We allocate one q_vector. If allocation fails we return -ENOMEM. 
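 * On failure the caller unwinds any vectors already allocated with
 * i40e_free_q_vector().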
7376 **/ 7377 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 7378 { 7379 struct i40e_q_vector *q_vector; 7380 7381 /* allocate q_vector */ 7382 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 7383 if (!q_vector) 7384 return -ENOMEM; 7385 7386 q_vector->vsi = vsi; 7387 q_vector->v_idx = v_idx; 7388 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 7389 if (vsi->netdev) 7390 netif_napi_add(vsi->netdev, &q_vector->napi, 7391 i40e_napi_poll, NAPI_POLL_WEIGHT); 7392 7393 q_vector->rx.latency_range = I40E_LOW_LATENCY; 7394 q_vector->tx.latency_range = I40E_LOW_LATENCY; 7395 7396 /* tie q_vector and vsi together */ 7397 vsi->q_vectors[v_idx] = q_vector; 7398 7399 return 0; 7400 } 7401 7402 /** 7403 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 7404 * @vsi: the VSI being configured 7405 * 7406 * We allocate one q_vector per queue interrupt. If allocation fails we 7407 * return -ENOMEM. 7408 **/ 7409 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 7410 { 7411 struct i40e_pf *pf = vsi->back; 7412 int v_idx, num_q_vectors; 7413 int err; 7414 7415 /* if not MSIX, give the one vector only to the LAN VSI */ 7416 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 7417 num_q_vectors = vsi->num_q_vectors; 7418 else if (vsi == pf->vsi[pf->lan_vsi]) 7419 num_q_vectors = 1; 7420 else 7421 return -EINVAL; 7422 7423 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 7424 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 7425 if (err) 7426 goto err_out; 7427 } 7428 7429 return 0; 7430 7431 err_out: 7432 while (v_idx--) 7433 i40e_free_q_vector(vsi, v_idx); 7434 7435 return err; 7436 } 7437 7438 /** 7439 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 7440 * @pf: board private structure to initialize 7441 **/ 7442 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) 7443 { 7444 int vectors = 0; 7445 ssize_t size; 7446 7447 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 7448 vectors = i40e_init_msix(pf); 7449 if (vectors < 0) { 7450 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 7451 #ifdef I40E_FCOE 7452 I40E_FLAG_FCOE_ENABLED | 7453 #endif 7454 I40E_FLAG_RSS_ENABLED | 7455 I40E_FLAG_DCB_CAPABLE | 7456 I40E_FLAG_SRIOV_ENABLED | 7457 I40E_FLAG_FD_SB_ENABLED | 7458 I40E_FLAG_FD_ATR_ENABLED | 7459 I40E_FLAG_VMDQ_ENABLED); 7460 7461 /* rework the queue expectations without MSIX */ 7462 i40e_determine_queue_usage(pf); 7463 } 7464 } 7465 7466 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 7467 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 7468 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 7469 vectors = pci_enable_msi(pf->pdev); 7470 if (vectors < 0) { 7471 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", 7472 vectors); 7473 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 7474 } 7475 vectors = 1; /* one MSI or Legacy vector */ 7476 } 7477 7478 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 7479 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 7480 7481 /* set up vector assignment tracking */ 7482 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); 7483 pf->irq_pile = kzalloc(size, GFP_KERNEL); 7484 if (!pf->irq_pile) { 7485 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); 7486 return -ENOMEM; 7487 } 7488 pf->irq_pile->num_entries = vectors; 7489 pf->irq_pile->search_hint = 0; 7490 7491 /* track first vector for misc interrupts, ignore return */ 7492 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); 7493 7494 return 0; 7495 } 7496 7497 /** 7498 * 
i40e_setup_misc_vector - Setup the misc vector to handle non queue events 7499 * @pf: board private structure 7500 * 7501 * This sets up the handler for MSIX 0, which is used to manage the 7502 * non-queue interrupts, e.g. AdminQ and errors. This is not used 7503 * when in MSI or Legacy interrupt mode. 7504 **/ 7505 static int i40e_setup_misc_vector(struct i40e_pf *pf) 7506 { 7507 struct i40e_hw *hw = &pf->hw; 7508 int err = 0; 7509 7510 /* Only request the irq if this is the first time through, and 7511 * not when we're rebuilding after a Reset 7512 */ 7513 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7514 err = request_irq(pf->msix_entries[0].vector, 7515 i40e_intr, 0, pf->int_name, pf); 7516 if (err) { 7517 dev_info(&pf->pdev->dev, 7518 "request_irq for %s failed: %d\n", 7519 pf->int_name, err); 7520 return -EFAULT; 7521 } 7522 } 7523 7524 i40e_enable_misc_int_causes(pf); 7525 7526 /* associate no queues to the misc vector */ 7527 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 7528 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); 7529 7530 i40e_flush(hw); 7531 7532 i40e_irq_dynamic_enable_icr0(pf); 7533 7534 return err; 7535 } 7536 7537 /** 7538 * i40e_config_rss_aq - Prepare for RSS using AQ commands 7539 * @vsi: vsi structure 7540 * @seed: RSS hash seed 7541 **/ 7542 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) 7543 { 7544 struct i40e_aqc_get_set_rss_key_data rss_key; 7545 struct i40e_pf *pf = vsi->back; 7546 struct i40e_hw *hw = &pf->hw; 7547 bool pf_lut = false; 7548 u8 *rss_lut; 7549 int ret, i; 7550 7551 memset(&rss_key, 0, sizeof(rss_key)); 7552 memcpy(&rss_key, seed, sizeof(rss_key)); 7553 7554 rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); 7555 if (!rss_lut) 7556 return -ENOMEM; 7557 7558 /* Populate the LUT with max no. 
of queues in round robin fashion */
	for (i = 0; i < vsi->rss_table_size; i++)
		rss_lut[i] = i % vsi->rss_size;

	ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS key, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		kfree(rss_lut);
		return ret;
	}

	if (vsi->type == I40E_VSI_MAIN)
		pf_lut = true;

	ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
				  vsi->rss_table_size);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	kfree(rss_lut);
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_pf *pf = vsi->back;

	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed);

	return 0;
}

/**
 * i40e_config_rss_reg - Prepare for RSS if used
 * @pf: board private structure
 * @seed: RSS hash seed
 **/
static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u32 *seed_dw = (u32 *)seed;
	u32 current_queue = 0;
	u32 lut = 0;
	int i, j;

	/* Fill out hash function seed */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);

	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
		lut = 0;
		for (j = 0; j < 4; j++) {
			if (current_queue == vsi->rss_size)
				current_queue = 0;
			lut |= ((current_queue) << (8 * j));
			current_queue++;
		}
		wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
	}
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;

	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = rd32(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
7663 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : 7664 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); 7665 wr32(hw, I40E_PFQF_CTL_0, reg_val); 7666 7667 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) 7668 return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed); 7669 else 7670 return i40e_config_rss_reg(pf, seed); 7671 } 7672 7673 /** 7674 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 7675 * @pf: board private structure 7676 * @queue_count: the requested queue count for rss. 7677 * 7678 * returns 0 if rss is not enabled, if enabled returns the final rss queue 7679 * count which may be different from the requested queue count. 7680 **/ 7681 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 7682 { 7683 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 7684 int new_rss_size; 7685 7686 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 7687 return 0; 7688 7689 new_rss_size = min_t(int, queue_count, pf->rss_size_max); 7690 7691 if (queue_count != vsi->num_queue_pairs) { 7692 vsi->req_queue_pairs = queue_count; 7693 i40e_prep_for_reset(pf); 7694 7695 pf->rss_size = new_rss_size; 7696 7697 i40e_reset_and_rebuild(pf, true); 7698 i40e_config_rss(pf); 7699 } 7700 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); 7701 return pf->rss_size; 7702 } 7703 7704 /** 7705 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition 7706 * @pf: board private structure 7707 **/ 7708 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) 7709 { 7710 i40e_status status; 7711 bool min_valid, max_valid; 7712 u32 max_bw, min_bw; 7713 7714 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, 7715 &min_valid, &max_valid); 7716 7717 if (!status) { 7718 if (min_valid) 7719 pf->npar_min_bw = min_bw; 7720 if (max_valid) 7721 pf->npar_max_bw = max_bw; 7722 } 7723 7724 return status; 7725 } 7726 7727 /** 7728 * i40e_set_npar_bw_setting - Set BW settings for this PF partition 7729 * @pf: board private structure 7730 **/ 7731 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) 7732 { 7733 struct i40e_aqc_configure_partition_bw_data bw_data; 7734 i40e_status status; 7735 7736 /* Set the valid bit for this PF */ 7737 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); 7738 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; 7739 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; 7740 7741 /* Set the new bandwidths */ 7742 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); 7743 7744 return status; 7745 } 7746 7747 /** 7748 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition 7749 * @pf: board private structure 7750 **/ 7751 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) 7752 { 7753 /* Commit temporary BW setting to permanent NVM image */ 7754 enum i40e_admin_queue_err last_aq_status; 7755 i40e_status ret; 7756 u16 nvm_word; 7757 7758 if (pf->hw.partition_id != 1) { 7759 dev_info(&pf->pdev->dev, 7760 "Commit BW only works on partition 1! 
This is partition %d", 7761 pf->hw.partition_id); 7762 ret = I40E_NOT_SUPPORTED; 7763 goto bw_commit_out; 7764 } 7765 7766 /* Acquire NVM for read access */ 7767 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); 7768 last_aq_status = pf->hw.aq.asq_last_status; 7769 if (ret) { 7770 dev_info(&pf->pdev->dev, 7771 "Cannot acquire NVM for read access, err %s aq_err %s\n", 7772 i40e_stat_str(&pf->hw, ret), 7773 i40e_aq_str(&pf->hw, last_aq_status)); 7774 goto bw_commit_out; 7775 } 7776 7777 /* Read word 0x10 of NVM - SW compatibility word 1 */ 7778 ret = i40e_aq_read_nvm(&pf->hw, 7779 I40E_SR_NVM_CONTROL_WORD, 7780 0x10, sizeof(nvm_word), &nvm_word, 7781 false, NULL); 7782 /* Save off last admin queue command status before releasing 7783 * the NVM 7784 */ 7785 last_aq_status = pf->hw.aq.asq_last_status; 7786 i40e_release_nvm(&pf->hw); 7787 if (ret) { 7788 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", 7789 i40e_stat_str(&pf->hw, ret), 7790 i40e_aq_str(&pf->hw, last_aq_status)); 7791 goto bw_commit_out; 7792 } 7793 7794 /* Wait a bit for NVM release to complete */ 7795 msleep(50); 7796 7797 /* Acquire NVM for write access */ 7798 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); 7799 last_aq_status = pf->hw.aq.asq_last_status; 7800 if (ret) { 7801 dev_info(&pf->pdev->dev, 7802 "Cannot acquire NVM for write access, err %s aq_err %s\n", 7803 i40e_stat_str(&pf->hw, ret), 7804 i40e_aq_str(&pf->hw, last_aq_status)); 7805 goto bw_commit_out; 7806 } 7807 /* Write it back out unchanged to initiate update NVM, 7808 * which will force a write of the shadow (alt) RAM to 7809 * the NVM - thus storing the bandwidth values permanently. 7810 */ 7811 ret = i40e_aq_update_nvm(&pf->hw, 7812 I40E_SR_NVM_CONTROL_WORD, 7813 0x10, sizeof(nvm_word), 7814 &nvm_word, true, NULL); 7815 /* Save off last admin queue command status before releasing 7816 * the NVM 7817 */ 7818 last_aq_status = pf->hw.aq.asq_last_status; 7819 i40e_release_nvm(&pf->hw); 7820 if (ret) 7821 dev_info(&pf->pdev->dev, 7822 "BW settings NOT SAVED, err %s aq_err %s\n", 7823 i40e_stat_str(&pf->hw, ret), 7824 i40e_aq_str(&pf->hw, last_aq_status)); 7825 bw_commit_out: 7826 7827 return ret; 7828 } 7829 7830 /** 7831 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 7832 * @pf: board private structure to initialize 7833 * 7834 * i40e_sw_init initializes the Adapter private data structure. 7835 * Fields are initialized based on PCI device information and 7836 * OS network device settings (MTU size). 
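 *
 * A note on the "debug" module parameter handled just below (a sketch
 * of the existing mapping, not a new interface): when the value has the
 * I40E_DEBUG_USER bit set, the whole value is copied into the shared
 * code's hw.debug_mask; the value with I40E_DEBUG_USER masked off then
 * seeds the netif message level via netif_msg_init().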
7837 **/ 7838 static int i40e_sw_init(struct i40e_pf *pf) 7839 { 7840 int err = 0; 7841 int size; 7842 7843 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 7844 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 7845 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; 7846 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 7847 if (I40E_DEBUG_USER & debug) 7848 pf->hw.debug_mask = debug; 7849 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 7850 I40E_DEFAULT_MSG_ENABLE); 7851 } 7852 7853 /* Set default capability flags */ 7854 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 7855 I40E_FLAG_MSI_ENABLED | 7856 I40E_FLAG_MSIX_ENABLED; 7857 7858 if (iommu_present(&pci_bus_type)) 7859 pf->flags |= I40E_FLAG_RX_PS_ENABLED; 7860 else 7861 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; 7862 7863 /* Set default ITR */ 7864 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; 7865 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; 7866 7867 /* Depending on PF configurations, it is possible that the RSS 7868 * maximum might end up larger than the available queues 7869 */ 7870 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); 7871 pf->rss_size = 1; 7872 pf->rss_table_size = pf->hw.func_caps.rss_table_size; 7873 pf->rss_size_max = min_t(int, pf->rss_size_max, 7874 pf->hw.func_caps.num_tx_qp); 7875 if (pf->hw.func_caps.rss) { 7876 pf->flags |= I40E_FLAG_RSS_ENABLED; 7877 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); 7878 } 7879 7880 /* MFP mode enabled */ 7881 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { 7882 pf->flags |= I40E_FLAG_MFP_ENABLED; 7883 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 7884 if (i40e_get_npar_bw_setting(pf)) 7885 dev_warn(&pf->pdev->dev, 7886 "Could not get NPAR bw settings\n"); 7887 else 7888 dev_info(&pf->pdev->dev, 7889 "Min BW = %8.8x, Max BW = %8.8x\n", 7890 pf->npar_min_bw, pf->npar_max_bw); 7891 } 7892 7893 /* FW/NVM is not yet fixed in this regard */ 7894 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 7895 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 7896 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 7897 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 7898 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { 7899 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 7900 } else { 7901 dev_info(&pf->pdev->dev, 7902 "Flow Director Sideband mode Disabled in MFP mode\n"); 7903 } 7904 pf->fdir_pf_filter_count = 7905 pf->hw.func_caps.fd_filters_guaranteed; 7906 pf->hw.fdir_shared_filter_count = 7907 pf->hw.func_caps.fd_filters_best_effort; 7908 } 7909 7910 if (pf->hw.func_caps.vmdq) { 7911 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 7912 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 7913 } 7914 7915 #ifdef I40E_FCOE 7916 err = i40e_init_pf_fcoe(pf); 7917 if (err) 7918 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err); 7919 7920 #endif /* I40E_FCOE */ 7921 #ifdef CONFIG_PCI_IOV 7922 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { 7923 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 7924 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 7925 pf->num_req_vfs = min_t(int, 7926 pf->hw.func_caps.num_vfs, 7927 I40E_MAX_VF_COUNT); 7928 } 7929 #endif /* CONFIG_PCI_IOV */ 7930 if (pf->hw.mac.type == I40E_MAC_X722) { 7931 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | 7932 I40E_FLAG_128_QP_RSS_CAPABLE | 7933 I40E_FLAG_HW_ATR_EVICT_CAPABLE | 7934 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 7935 I40E_FLAG_WB_ON_ITR_CAPABLE | 7936 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE; 7937 } 7938 pf->eeprom_version = 0xDEAD; 7939 pf->lan_veb = I40E_NO_VEB; 7940 
pf->lan_vsi = I40E_NO_VSI; 7941 7942 /* set up queue assignment tracking */ 7943 size = sizeof(struct i40e_lump_tracking) 7944 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 7945 pf->qp_pile = kzalloc(size, GFP_KERNEL); 7946 if (!pf->qp_pile) { 7947 err = -ENOMEM; 7948 goto sw_init_done; 7949 } 7950 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 7951 pf->qp_pile->search_hint = 0; 7952 7953 pf->tx_timeout_recovery_level = 1; 7954 7955 mutex_init(&pf->switch_mutex); 7956 7957 /* If NPAR is enabled nudge the Tx scheduler */ 7958 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) 7959 i40e_set_npar_bw_setting(pf); 7960 7961 sw_init_done: 7962 return err; 7963 } 7964 7965 /** 7966 * i40e_set_ntuple - set the ntuple feature flag and take action 7967 * @pf: board private structure to initialize 7968 * @features: the feature set that the stack is suggesting 7969 * 7970 * returns a bool to indicate if reset needs to happen 7971 **/ 7972 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 7973 { 7974 bool need_reset = false; 7975 7976 /* Check if Flow Director n-tuple support was enabled or disabled. If 7977 * the state changed, we need to reset. 7978 */ 7979 if (features & NETIF_F_NTUPLE) { 7980 /* Enable filters and mark for reset */ 7981 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 7982 need_reset = true; 7983 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 7984 } else { 7985 /* turn off filters, mark for reset and clear SW filter list */ 7986 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7987 need_reset = true; 7988 i40e_fdir_filter_exit(pf); 7989 } 7990 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7991 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 7992 /* reset fd counters */ 7993 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; 7994 pf->fdir_pf_active_filters = 0; 7995 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 7996 if (I40E_DEBUG_FD & pf->hw.debug_mask) 7997 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); 7998 /* if ATR was auto disabled it can be re-enabled. 
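 * Both checks below matter: ATR must (still) be enabled, and the
 * auto-disable latch must actually be set before it is cleared, so
 * that ATR can resume.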
*/ 7999 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 8000 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 8001 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 8002 } 8003 return need_reset; 8004 } 8005 8006 /** 8007 * i40e_set_features - set the netdev feature flags 8008 * @netdev: ptr to the netdev being adjusted 8009 * @features: the feature set that the stack is suggesting 8010 **/ 8011 static int i40e_set_features(struct net_device *netdev, 8012 netdev_features_t features) 8013 { 8014 struct i40e_netdev_priv *np = netdev_priv(netdev); 8015 struct i40e_vsi *vsi = np->vsi; 8016 struct i40e_pf *pf = vsi->back; 8017 bool need_reset; 8018 8019 if (features & NETIF_F_HW_VLAN_CTAG_RX) 8020 i40e_vlan_stripping_enable(vsi); 8021 else 8022 i40e_vlan_stripping_disable(vsi); 8023 8024 need_reset = i40e_set_ntuple(pf, features); 8025 8026 if (need_reset) 8027 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 8028 8029 return 0; 8030 } 8031 8032 #ifdef CONFIG_I40E_VXLAN 8033 /** 8034 * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port 8035 * @pf: board private structure 8036 * @port: The UDP port to look up 8037 * 8038 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found 8039 **/ 8040 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) 8041 { 8042 u8 i; 8043 8044 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 8045 if (pf->vxlan_ports[i] == port) 8046 return i; 8047 } 8048 8049 return i; 8050 } 8051 8052 /** 8053 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 8054 * @netdev: This physical port's netdev 8055 * @sa_family: Socket Family that VXLAN is notifying us about 8056 * @port: New UDP port number that VXLAN started listening to 8057 **/ 8058 static void i40e_add_vxlan_port(struct net_device *netdev, 8059 sa_family_t sa_family, __be16 port) 8060 { 8061 struct i40e_netdev_priv *np = netdev_priv(netdev); 8062 struct i40e_vsi *vsi = np->vsi; 8063 struct i40e_pf *pf = vsi->back; 8064 u8 next_idx; 8065 u8 idx; 8066 8067 if (sa_family == AF_INET6) 8068 return; 8069 8070 idx = i40e_get_vxlan_port_idx(pf, port); 8071 8072 /* Check if port already exists */ 8073 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8074 netdev_info(netdev, "vxlan port %d already offloaded\n", 8075 ntohs(port)); 8076 return; 8077 } 8078 8079 /* Now check if there is space to add the new port */ 8080 next_idx = i40e_get_vxlan_port_idx(pf, 0); 8081 8082 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8083 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", 8084 ntohs(port)); 8085 return; 8086 } 8087 8088 /* New port: add it and mark its index in the bitmap */ 8089 pf->vxlan_ports[next_idx] = port; 8090 pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); 8091 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 8092 } 8093 8094 /** 8095 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away 8096 * @netdev: This physical port's netdev 8097 * @sa_family: Socket Family that VXLAN is notifying us about 8098 * @port: UDP port number that VXLAN stopped listening to 8099 **/ 8100 static void i40e_del_vxlan_port(struct net_device *netdev, 8101 sa_family_t sa_family, __be16 port) 8102 { 8103 struct i40e_netdev_priv *np = netdev_priv(netdev); 8104 struct i40e_vsi *vsi = np->vsi; 8105 struct i40e_pf *pf = vsi->back; 8106 u8 idx; 8107 8108 if (sa_family == AF_INET6) 8109 return; 8110 8111 idx = i40e_get_vxlan_port_idx(pf, port); 8112 8113 /* Check if port already exists */ 8114 if (idx < 
I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 8115 /* if port exists, set it to 0 (mark for deletion) 8116 * and make it pending 8117 */ 8118 pf->vxlan_ports[idx] = 0; 8119 pf->pending_vxlan_bitmap |= BIT_ULL(idx); 8120 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 8121 8122 dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", 8123 ntohs(port)); 8124 } else { 8125 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", 8126 ntohs(port)); 8127 } 8128 } 8129 8130 #endif 8131 static int i40e_get_phys_port_id(struct net_device *netdev, 8132 struct netdev_phys_item_id *ppid) 8133 { 8134 struct i40e_netdev_priv *np = netdev_priv(netdev); 8135 struct i40e_pf *pf = np->vsi->back; 8136 struct i40e_hw *hw = &pf->hw; 8137 8138 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) 8139 return -EOPNOTSUPP; 8140 8141 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); 8142 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); 8143 8144 return 0; 8145 } 8146 8147 /** 8148 * i40e_ndo_fdb_add - add an entry to the hardware database 8149 * @ndm: the input from the stack 8150 * @tb: pointer to array of nladdr (unused) 8151 * @dev: the net device pointer 8152 * @addr: the MAC address entry being added 8153 * @flags: instructions from stack about fdb operation 8154 */ 8155 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 8156 struct net_device *dev, 8157 const unsigned char *addr, u16 vid, 8158 u16 flags) 8159 { 8160 struct i40e_netdev_priv *np = netdev_priv(dev); 8161 struct i40e_pf *pf = np->vsi->back; 8162 int err = 0; 8163 8164 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) 8165 return -EOPNOTSUPP; 8166 8167 if (vid) { 8168 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); 8169 return -EINVAL; 8170 } 8171 8172 /* Hardware does not support aging addresses so if a 8173 * ndm_state is given only allow permanent addresses 8174 */ 8175 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 8176 netdev_info(dev, "FDB only supports static addresses\n"); 8177 return -EINVAL; 8178 } 8179 8180 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 8181 err = dev_uc_add_excl(dev, addr); 8182 else if (is_multicast_ether_addr(addr)) 8183 err = dev_mc_add_excl(dev, addr); 8184 else 8185 err = -EINVAL; 8186 8187 /* Only return duplicate errors if NLM_F_EXCL is set */ 8188 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 8189 err = 0; 8190 8191 return err; 8192 } 8193 8194 /** 8195 * i40e_ndo_bridge_setlink - Set the hardware bridge mode 8196 * @dev: the netdev being configured 8197 * @nlh: RTNL message 8198 * 8199 * Inserts a new hardware bridge if not already created and 8200 * enables the bridging mode requested (VEB or VEPA). If the 8201 * hardware bridge has already been inserted and the request 8202 * is to change the mode then that requires a PF reset to 8203 * allow rebuild of the components with required hardware 8204 * bridge mode enabled. 
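 *
 * Example (assuming the iproute2 bridge tool; the device name is
 * hypothetical):
 *
 *	# bridge link set dev eth2 hwmode veb
 *
 * Requesting the mode the bridge is already in is a no-op; requesting
 * the other mode triggers the PF reset described above.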
8205 **/ 8206 static int i40e_ndo_bridge_setlink(struct net_device *dev, 8207 struct nlmsghdr *nlh, 8208 u16 flags) 8209 { 8210 struct i40e_netdev_priv *np = netdev_priv(dev); 8211 struct i40e_vsi *vsi = np->vsi; 8212 struct i40e_pf *pf = vsi->back; 8213 struct i40e_veb *veb = NULL; 8214 struct nlattr *attr, *br_spec; 8215 int i, rem; 8216 8217 /* Only for PF VSI for now */ 8218 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) 8219 return -EOPNOTSUPP; 8220 8221 /* Find the HW bridge for PF VSI */ 8222 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8223 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 8224 veb = pf->veb[i]; 8225 } 8226 8227 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 8228 8229 nla_for_each_nested(attr, br_spec, rem) { 8230 __u16 mode; 8231 8232 if (nla_type(attr) != IFLA_BRIDGE_MODE) 8233 continue; 8234 8235 mode = nla_get_u16(attr); 8236 if ((mode != BRIDGE_MODE_VEPA) && 8237 (mode != BRIDGE_MODE_VEB)) 8238 return -EINVAL; 8239 8240 /* Insert a new HW bridge */ 8241 if (!veb) { 8242 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 8243 vsi->tc_config.enabled_tc); 8244 if (veb) { 8245 veb->bridge_mode = mode; 8246 i40e_config_bridge_mode(veb); 8247 } else { 8248 /* No Bridge HW offload available */ 8249 return -ENOENT; 8250 } 8251 break; 8252 } else if (mode != veb->bridge_mode) { 8253 /* Existing HW bridge but different mode needs reset */ 8254 veb->bridge_mode = mode; 8255 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ 8256 if (mode == BRIDGE_MODE_VEB) 8257 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 8258 else 8259 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 8260 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); 8261 break; 8262 } 8263 } 8264 8265 return 0; 8266 } 8267 8268 /** 8269 * i40e_ndo_bridge_getlink - Get the hardware bridge mode 8270 * @skb: skb buff 8271 * @pid: process id 8272 * @seq: RTNL message seq # 8273 * @dev: the netdev being configured 8274 * @filter_mask: unused 8275 * 8276 * Return the mode in which the hardware bridge is operating in 8277 * i.e VEB or VEPA. 
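 *
 * Example (assuming the iproute2 bridge tool):
 *
 *	# bridge link show dev eth2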
8278 **/ 8279 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8280 struct net_device *dev, 8281 u32 filter_mask, int nlflags) 8282 { 8283 struct i40e_netdev_priv *np = netdev_priv(dev); 8284 struct i40e_vsi *vsi = np->vsi; 8285 struct i40e_pf *pf = vsi->back; 8286 struct i40e_veb *veb = NULL; 8287 int i; 8288 8289 /* Only for PF VSI for now */ 8290 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) 8291 return -EOPNOTSUPP; 8292 8293 /* Find the HW bridge for the PF VSI */ 8294 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8295 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 8296 veb = pf->veb[i]; 8297 } 8298 8299 if (!veb) 8300 return 0; 8301 8302 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, 8303 nlflags, 0, 0, filter_mask, NULL); 8304 } 8305 8306 #define I40E_MAX_TUNNEL_HDR_LEN 80 8307 /** 8308 * i40e_features_check - Validate encapsulated packet conforms to limits 8309 * @skb: skb buff 8310 * @netdev: This physical port's netdev 8311 * @features: Offload features that the stack believes apply 8312 **/ 8313 static netdev_features_t i40e_features_check(struct sk_buff *skb, 8314 struct net_device *dev, 8315 netdev_features_t features) 8316 { 8317 if (skb->encapsulation && 8318 (skb_inner_mac_header(skb) - skb_transport_header(skb) > 8319 I40E_MAX_TUNNEL_HDR_LEN)) 8320 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); 8321 8322 return features; 8323 } 8324 8325 static const struct net_device_ops i40e_netdev_ops = { 8326 .ndo_open = i40e_open, 8327 .ndo_stop = i40e_close, 8328 .ndo_start_xmit = i40e_lan_xmit_frame, 8329 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 8330 .ndo_set_rx_mode = i40e_set_rx_mode, 8331 .ndo_validate_addr = eth_validate_addr, 8332 .ndo_set_mac_address = i40e_set_mac, 8333 .ndo_change_mtu = i40e_change_mtu, 8334 .ndo_do_ioctl = i40e_ioctl, 8335 .ndo_tx_timeout = i40e_tx_timeout, 8336 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 8337 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 8338 #ifdef CONFIG_NET_POLL_CONTROLLER 8339 .ndo_poll_controller = i40e_netpoll, 8340 #endif 8341 .ndo_setup_tc = i40e_setup_tc, 8342 #ifdef I40E_FCOE 8343 .ndo_fcoe_enable = i40e_fcoe_enable, 8344 .ndo_fcoe_disable = i40e_fcoe_disable, 8345 #endif 8346 .ndo_set_features = i40e_set_features, 8347 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 8348 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 8349 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 8350 .ndo_get_vf_config = i40e_ndo_get_vf_config, 8351 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 8352 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, 8353 #ifdef CONFIG_I40E_VXLAN 8354 .ndo_add_vxlan_port = i40e_add_vxlan_port, 8355 .ndo_del_vxlan_port = i40e_del_vxlan_port, 8356 #endif 8357 .ndo_get_phys_port_id = i40e_get_phys_port_id, 8358 .ndo_fdb_add = i40e_ndo_fdb_add, 8359 .ndo_features_check = i40e_features_check, 8360 .ndo_bridge_getlink = i40e_ndo_bridge_getlink, 8361 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 8362 }; 8363 8364 /** 8365 * i40e_config_netdev - Setup the netdev flags 8366 * @vsi: the VSI being configured 8367 * 8368 * Returns 0 on success, negative value on failure 8369 **/ 8370 static int i40e_config_netdev(struct i40e_vsi *vsi) 8371 { 8372 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 8373 struct i40e_pf *pf = vsi->back; 8374 struct i40e_hw *hw = &pf->hw; 8375 struct i40e_netdev_priv *np; 8376 struct net_device *netdev; 8377 u8 mac_addr[ETH_ALEN]; 8378 int etherdev_size; 8379 8380 etherdev_size = sizeof(struct i40e_netdev_priv); 8381 netdev = 
alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 8382 if (!netdev) 8383 return -ENOMEM; 8384 8385 vsi->netdev = netdev; 8386 np = netdev_priv(netdev); 8387 np->vsi = vsi; 8388 8389 netdev->hw_enc_features |= NETIF_F_IP_CSUM | 8390 NETIF_F_GSO_UDP_TUNNEL | 8391 NETIF_F_TSO; 8392 8393 netdev->features = NETIF_F_SG | 8394 NETIF_F_IP_CSUM | 8395 NETIF_F_SCTP_CSUM | 8396 NETIF_F_HIGHDMA | 8397 NETIF_F_GSO_UDP_TUNNEL | 8398 NETIF_F_HW_VLAN_CTAG_TX | 8399 NETIF_F_HW_VLAN_CTAG_RX | 8400 NETIF_F_HW_VLAN_CTAG_FILTER | 8401 NETIF_F_IPV6_CSUM | 8402 NETIF_F_TSO | 8403 NETIF_F_TSO_ECN | 8404 NETIF_F_TSO6 | 8405 NETIF_F_RXCSUM | 8406 NETIF_F_RXHASH | 8407 0; 8408 8409 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) 8410 netdev->features |= NETIF_F_NTUPLE; 8411 8412 /* copy netdev features into list of user selectable features */ 8413 netdev->hw_features |= netdev->features; 8414 8415 if (vsi->type == I40E_VSI_MAIN) { 8416 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 8417 ether_addr_copy(mac_addr, hw->mac.perm_addr); 8418 /* The following steps are necessary to prevent reception 8419 * of tagged packets - some older NVM configurations load a 8420 * default a MAC-VLAN filter that accepts any tagged packet 8421 * which must be replaced by a normal filter. 8422 */ 8423 if (!i40e_rm_default_mac_filter(vsi, mac_addr)) 8424 i40e_add_filter(vsi, mac_addr, 8425 I40E_VLAN_ANY, false, true); 8426 } else { 8427 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 8428 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 8429 pf->vsi[pf->lan_vsi]->netdev->name); 8430 random_ether_addr(mac_addr); 8431 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); 8432 } 8433 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); 8434 8435 ether_addr_copy(netdev->dev_addr, mac_addr); 8436 ether_addr_copy(netdev->perm_addr, mac_addr); 8437 /* vlan gets same features (except vlan offload) 8438 * after any tweaks for specific VSI types 8439 */ 8440 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | 8441 NETIF_F_HW_VLAN_CTAG_RX | 8442 NETIF_F_HW_VLAN_CTAG_FILTER); 8443 netdev->priv_flags |= IFF_UNICAST_FLT; 8444 netdev->priv_flags |= IFF_SUPP_NOFCS; 8445 /* Setup netdev TC information */ 8446 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); 8447 8448 netdev->netdev_ops = &i40e_netdev_ops; 8449 netdev->watchdog_timeo = 5 * HZ; 8450 i40e_set_ethtool_ops(netdev); 8451 #ifdef I40E_FCOE 8452 i40e_fcoe_config_netdev(netdev, vsi); 8453 #endif 8454 8455 return 0; 8456 } 8457 8458 /** 8459 * i40e_vsi_delete - Delete a VSI from the switch 8460 * @vsi: the VSI being removed 8461 * 8462 * Returns 0 on success, negative value on failure 8463 **/ 8464 static void i40e_vsi_delete(struct i40e_vsi *vsi) 8465 { 8466 /* remove default VSI is not allowed */ 8467 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) 8468 return; 8469 8470 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); 8471 } 8472 8473 /** 8474 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB 8475 * @vsi: the VSI being queried 8476 * 8477 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode 8478 **/ 8479 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) 8480 { 8481 struct i40e_veb *veb; 8482 struct i40e_pf *pf = vsi->back; 8483 8484 /* Uplink is not a bridge so default to VEB */ 8485 if (vsi->veb_idx == I40E_NO_VEB) 8486 return 1; 8487 8488 veb = pf->veb[vsi->veb_idx]; 8489 /* Uplink is a bridge in VEPA mode */ 8490 if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) 8491 return 0; 8492 8493 /* Uplink is a 
bridge in VEB mode */ 8494 return 1; 8495 } 8496 8497 /** 8498 * i40e_add_vsi - Add a VSI to the switch 8499 * @vsi: the VSI being configured 8500 * 8501 * This initializes a VSI context depending on the VSI type to be added and 8502 * passes it down to the add_vsi aq command. 8503 **/ 8504 static int i40e_add_vsi(struct i40e_vsi *vsi) 8505 { 8506 int ret = -ENODEV; 8507 struct i40e_mac_filter *f, *ftmp; 8508 struct i40e_pf *pf = vsi->back; 8509 struct i40e_hw *hw = &pf->hw; 8510 struct i40e_vsi_context ctxt; 8511 u8 enabled_tc = 0x1; /* TC0 enabled */ 8512 int f_count = 0; 8513 8514 memset(&ctxt, 0, sizeof(ctxt)); 8515 switch (vsi->type) { 8516 case I40E_VSI_MAIN: 8517 /* The PF's main VSI is already setup as part of the 8518 * device initialization, so we'll not bother with 8519 * the add_vsi call, but we will retrieve the current 8520 * VSI context. 8521 */ 8522 ctxt.seid = pf->main_vsi_seid; 8523 ctxt.pf_num = pf->hw.pf_id; 8524 ctxt.vf_num = 0; 8525 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 8526 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8527 if (ret) { 8528 dev_info(&pf->pdev->dev, 8529 "couldn't get PF vsi config, err %s aq_err %s\n", 8530 i40e_stat_str(&pf->hw, ret), 8531 i40e_aq_str(&pf->hw, 8532 pf->hw.aq.asq_last_status)); 8533 return -ENOENT; 8534 } 8535 vsi->info = ctxt.info; 8536 vsi->info.valid_sections = 0; 8537 8538 vsi->seid = ctxt.seid; 8539 vsi->id = ctxt.vsi_number; 8540 8541 enabled_tc = i40e_pf_get_tc_map(pf); 8542 8543 /* MFP mode setup queue map and update VSI */ 8544 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && 8545 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ 8546 memset(&ctxt, 0, sizeof(ctxt)); 8547 ctxt.seid = pf->main_vsi_seid; 8548 ctxt.pf_num = pf->hw.pf_id; 8549 ctxt.vf_num = 0; 8550 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 8551 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 8552 if (ret) { 8553 dev_info(&pf->pdev->dev, 8554 "update vsi failed, err %s aq_err %s\n", 8555 i40e_stat_str(&pf->hw, ret), 8556 i40e_aq_str(&pf->hw, 8557 pf->hw.aq.asq_last_status)); 8558 ret = -ENOENT; 8559 goto err; 8560 } 8561 /* update the local VSI info queue map */ 8562 i40e_vsi_update_queue_map(vsi, &ctxt); 8563 vsi->info.valid_sections = 0; 8564 } else { 8565 /* Default/Main VSI is only enabled for TC0 8566 * reconfigure it to enable all TCs that are 8567 * available on the port in SFP mode. 8568 * For MFP case the iSCSI PF would use this 8569 * flow to enable LAN+iSCSI TC. 
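 * (enabled_tc is a bitmap with one bit per traffic class: 0x1 means
 * TC0 only, 0x3 means TC0 and TC1, and so on.)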
8570 */ 8571 ret = i40e_vsi_config_tc(vsi, enabled_tc); 8572 if (ret) { 8573 dev_info(&pf->pdev->dev, 8574 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", 8575 enabled_tc, 8576 i40e_stat_str(&pf->hw, ret), 8577 i40e_aq_str(&pf->hw, 8578 pf->hw.aq.asq_last_status)); 8579 ret = -ENOENT; 8580 } 8581 } 8582 break; 8583 8584 case I40E_VSI_FDIR: 8585 ctxt.pf_num = hw->pf_id; 8586 ctxt.vf_num = 0; 8587 ctxt.uplink_seid = vsi->uplink_seid; 8588 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8589 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8590 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && 8591 (i40e_is_vsi_uplink_mode_veb(vsi))) { 8592 ctxt.info.valid_sections |= 8593 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8594 ctxt.info.switch_id = 8595 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8596 } 8597 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8598 break; 8599 8600 case I40E_VSI_VMDQ2: 8601 ctxt.pf_num = hw->pf_id; 8602 ctxt.vf_num = 0; 8603 ctxt.uplink_seid = vsi->uplink_seid; 8604 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8605 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 8606 8607 /* This VSI is connected to VEB so the switch_id 8608 * should be set to zero by default. 8609 */ 8610 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 8611 ctxt.info.valid_sections |= 8612 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8613 ctxt.info.switch_id = 8614 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8615 } 8616 8617 /* Setup the VSI tx/rx queue map for TC0 only for now */ 8618 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8619 break; 8620 8621 case I40E_VSI_SRIOV: 8622 ctxt.pf_num = hw->pf_id; 8623 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 8624 ctxt.uplink_seid = vsi->uplink_seid; 8625 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8626 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 8627 8628 /* This VSI is connected to VEB so the switch_id 8629 * should be set to zero by default. 
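 * The exception just below: when the uplink really is a VEB, the
 * ALLOW_LB flag is set instead, so the VEB can loop traffic back
 * between its own VSIs (needed for VF-to-VF switching).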
8630 */ 8631 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 8632 ctxt.info.valid_sections |= 8633 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8634 ctxt.info.switch_id = 8635 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8636 } 8637 8638 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 8639 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 8640 if (pf->vf[vsi->vf_id].spoofchk) { 8641 ctxt.info.valid_sections |= 8642 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 8643 ctxt.info.sec_flags |= 8644 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | 8645 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 8646 } 8647 /* Setup the VSI tx/rx queue map for TC0 only for now */ 8648 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8649 break; 8650 8651 #ifdef I40E_FCOE 8652 case I40E_VSI_FCOE: 8653 ret = i40e_fcoe_vsi_init(vsi, &ctxt); 8654 if (ret) { 8655 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); 8656 return ret; 8657 } 8658 break; 8659 8660 #endif /* I40E_FCOE */ 8661 default: 8662 return -ENODEV; 8663 } 8664 8665 if (vsi->type != I40E_VSI_MAIN) { 8666 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 8667 if (ret) { 8668 dev_info(&vsi->back->pdev->dev, 8669 "add vsi failed, err %s aq_err %s\n", 8670 i40e_stat_str(&pf->hw, ret), 8671 i40e_aq_str(&pf->hw, 8672 pf->hw.aq.asq_last_status)); 8673 ret = -ENOENT; 8674 goto err; 8675 } 8676 vsi->info = ctxt.info; 8677 vsi->info.valid_sections = 0; 8678 vsi->seid = ctxt.seid; 8679 vsi->id = ctxt.vsi_number; 8680 } 8681 8682 /* If macvlan filters already exist, force them to get loaded */ 8683 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 8684 f->changed = true; 8685 f_count++; 8686 8687 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 8688 struct i40e_aqc_remove_macvlan_element_data element; 8689 8690 memset(&element, 0, sizeof(element)); 8691 ether_addr_copy(element.mac_addr, f->macaddr); 8692 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 8693 ret = i40e_aq_remove_macvlan(hw, vsi->seid, 8694 &element, 1, NULL); 8695 if (ret) { 8696 /* some older FW has a different default */ 8697 element.flags |= 8698 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 8699 i40e_aq_remove_macvlan(hw, vsi->seid, 8700 &element, 1, NULL); 8701 } 8702 8703 i40e_aq_mac_address_write(hw, 8704 I40E_AQC_WRITE_TYPE_LAA_WOL, 8705 f->macaddr, NULL); 8706 } 8707 } 8708 if (f_count) { 8709 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 8710 pf->flags |= I40E_FLAG_FILTER_SYNC; 8711 } 8712 8713 /* Update VSI BW information */ 8714 ret = i40e_vsi_get_bw_info(vsi); 8715 if (ret) { 8716 dev_info(&pf->pdev->dev, 8717 "couldn't get vsi bw info, err %s aq_err %s\n", 8718 i40e_stat_str(&pf->hw, ret), 8719 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 8720 /* VSI is already added so not tearing that up */ 8721 ret = 0; 8722 } 8723 8724 err: 8725 return ret; 8726 } 8727 8728 /** 8729 * i40e_vsi_release - Delete a VSI and free its resources 8730 * @vsi: the VSI being removed 8731 * 8732 * Returns 0 on success or < 0 on error 8733 **/ 8734 int i40e_vsi_release(struct i40e_vsi *vsi) 8735 { 8736 struct i40e_mac_filter *f, *ftmp; 8737 struct i40e_veb *veb = NULL; 8738 struct i40e_pf *pf; 8739 u16 uplink_seid; 8740 int i, n; 8741 8742 pf = vsi->back; 8743 8744 /* release of a VEB-owner or last VSI is not allowed */ 8745 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 8746 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 8747 vsi->seid, vsi->uplink_seid); 8748 return -ENODEV; 8749 } 8750 if (vsi == pf->vsi[pf->lan_vsi] && 8751 !test_bit(__I40E_DOWN, &pf->state)) { 8752 
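		/* the main LAN VSI is torn down only by a full
		 * reset/remove sequence, never through this path
		 */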
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 8753 return -ENODEV; 8754 } 8755 8756 uplink_seid = vsi->uplink_seid; 8757 if (vsi->type != I40E_VSI_SRIOV) { 8758 if (vsi->netdev_registered) { 8759 vsi->netdev_registered = false; 8760 if (vsi->netdev) { 8761 /* results in a call to i40e_close() */ 8762 unregister_netdev(vsi->netdev); 8763 } 8764 } else { 8765 i40e_vsi_close(vsi); 8766 } 8767 i40e_vsi_disable_irq(vsi); 8768 } 8769 8770 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 8771 i40e_del_filter(vsi, f->macaddr, f->vlan, 8772 f->is_vf, f->is_netdev); 8773 i40e_sync_vsi_filters(vsi); 8774 8775 i40e_vsi_delete(vsi); 8776 i40e_vsi_free_q_vectors(vsi); 8777 if (vsi->netdev) { 8778 free_netdev(vsi->netdev); 8779 vsi->netdev = NULL; 8780 } 8781 i40e_vsi_clear_rings(vsi); 8782 i40e_vsi_clear(vsi); 8783 8784 /* If this was the last thing on the VEB, except for the 8785 * controlling VSI, remove the VEB, which puts the controlling 8786 * VSI onto the next level down in the switch. 8787 * 8788 * Well, okay, there's one more exception here: don't remove 8789 * the orphan VEBs yet. We'll wait for an explicit remove request 8790 * from up the network stack. 8791 */ 8792 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { 8793 if (pf->vsi[i] && 8794 pf->vsi[i]->uplink_seid == uplink_seid && 8795 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 8796 n++; /* count the VSIs */ 8797 } 8798 } 8799 for (i = 0; i < I40E_MAX_VEB; i++) { 8800 if (!pf->veb[i]) 8801 continue; 8802 if (pf->veb[i]->uplink_seid == uplink_seid) 8803 n++; /* count the VEBs */ 8804 if (pf->veb[i]->seid == uplink_seid) 8805 veb = pf->veb[i]; 8806 } 8807 if (n == 0 && veb && veb->uplink_seid != 0) 8808 i40e_veb_release(veb); 8809 8810 return 0; 8811 } 8812 8813 /** 8814 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 8815 * @vsi: ptr to the VSI 8816 * 8817 * This should only be called after i40e_vsi_mem_alloc() which allocates the 8818 * corresponding SW VSI structure and initializes num_queue_pairs for the 8819 * newly allocated VSI. 8820 * 8821 * Returns 0 on success or negative on failure 8822 **/ 8823 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 8824 { 8825 int ret = -ENOENT; 8826 struct i40e_pf *pf = vsi->back; 8827 8828 if (vsi->q_vectors[0]) { 8829 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 8830 vsi->seid); 8831 return -EEXIST; 8832 } 8833 8834 if (vsi->base_vector) { 8835 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 8836 vsi->seid, vsi->base_vector); 8837 return -EEXIST; 8838 } 8839 8840 ret = i40e_vsi_alloc_q_vectors(vsi); 8841 if (ret) { 8842 dev_info(&pf->pdev->dev, 8843 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 8844 vsi->num_q_vectors, vsi->seid, ret); 8845 vsi->num_q_vectors = 0; 8846 goto vector_setup_out; 8847 } 8848 8849 /* In Legacy mode, we do not have to get any other vector since we 8850 * piggyback on the misc/ICR0 for queue interrupts. 
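 * With MSI-X, vector 0 stays reserved for the misc/AdminQ handler set
 * up in i40e_setup_misc_vector(), so the lump of queue vectors taken
 * from pf->irq_pile below starts above it.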
 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}

/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the VSI
 *
 * This re-allocates a VSI's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw
 * struct on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds
 * the VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw
 * struct on success, otherwise returns NULL on failure.
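 *
 * Example (a sketch that mirrors how i40e_fdir_sb_setup() hangs the
 * Flow Director sideband VSI off the main LAN VSI):
 *
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vsi)
 *		dev_info(&pf->pdev->dev, "FDIR VSI setup failed\n");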
8946 **/ 8947 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, 8948 u16 uplink_seid, u32 param1) 8949 { 8950 struct i40e_vsi *vsi = NULL; 8951 struct i40e_veb *veb = NULL; 8952 int ret, i; 8953 int v_idx; 8954 8955 /* The requested uplink_seid must be either 8956 * - the PF's port seid 8957 * no VEB is needed because this is the PF 8958 * or this is a Flow Director special case VSI 8959 * - seid of an existing VEB 8960 * - seid of a VSI that owns an existing VEB 8961 * - seid of a VSI that doesn't own a VEB 8962 * a new VEB is created and the VSI becomes the owner 8963 * - seid of the PF VSI, which is what creates the first VEB 8964 * this is a special case of the previous 8965 * 8966 * Find which uplink_seid we were given and create a new VEB if needed 8967 */ 8968 for (i = 0; i < I40E_MAX_VEB; i++) { 8969 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { 8970 veb = pf->veb[i]; 8971 break; 8972 } 8973 } 8974 8975 if (!veb && uplink_seid != pf->mac_seid) { 8976 8977 for (i = 0; i < pf->num_alloc_vsi; i++) { 8978 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 8979 vsi = pf->vsi[i]; 8980 break; 8981 } 8982 } 8983 if (!vsi) { 8984 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", 8985 uplink_seid); 8986 return NULL; 8987 } 8988 8989 if (vsi->uplink_seid == pf->mac_seid) 8990 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, 8991 vsi->tc_config.enabled_tc); 8992 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) 8993 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, 8994 vsi->tc_config.enabled_tc); 8995 if (veb) { 8996 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { 8997 dev_info(&vsi->back->pdev->dev, 8998 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", 8999 __func__); 9000 return NULL; 9001 } 9002 /* We come up by default in VEPA mode if SRIOV is not 9003 * already enabled, in which case we can't force VEPA 9004 * mode. 9005 */ 9006 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { 9007 veb->bridge_mode = BRIDGE_MODE_VEPA; 9008 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; 9009 } 9010 i40e_config_bridge_mode(veb); 9011 } 9012 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 9013 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) 9014 veb = pf->veb[i]; 9015 } 9016 if (!veb) { 9017 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); 9018 return NULL; 9019 } 9020 9021 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 9022 uplink_seid = veb->seid; 9023 } 9024 9025 /* get vsi sw struct */ 9026 v_idx = i40e_vsi_mem_alloc(pf, type); 9027 if (v_idx < 0) 9028 goto err_alloc; 9029 vsi = pf->vsi[v_idx]; 9030 if (!vsi) 9031 goto err_alloc; 9032 vsi->type = type; 9033 vsi->veb_idx = (veb ? 
veb->idx : I40E_NO_VEB); 9034 9035 if (type == I40E_VSI_MAIN) 9036 pf->lan_vsi = v_idx; 9037 else if (type == I40E_VSI_SRIOV) 9038 vsi->vf_id = param1; 9039 /* assign it some queues */ 9040 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, 9041 vsi->idx); 9042 if (ret < 0) { 9043 dev_info(&pf->pdev->dev, 9044 "failed to get tracking for %d queues for VSI %d err=%d\n", 9045 vsi->alloc_queue_pairs, vsi->seid, ret); 9046 goto err_vsi; 9047 } 9048 vsi->base_queue = ret; 9049 9050 /* get a VSI from the hardware */ 9051 vsi->uplink_seid = uplink_seid; 9052 ret = i40e_add_vsi(vsi); 9053 if (ret) 9054 goto err_vsi; 9055 9056 switch (vsi->type) { 9057 /* setup the netdev if needed */ 9058 case I40E_VSI_MAIN: 9059 case I40E_VSI_VMDQ2: 9060 case I40E_VSI_FCOE: 9061 ret = i40e_config_netdev(vsi); 9062 if (ret) 9063 goto err_netdev; 9064 ret = register_netdev(vsi->netdev); 9065 if (ret) 9066 goto err_netdev; 9067 vsi->netdev_registered = true; 9068 netif_carrier_off(vsi->netdev); 9069 #ifdef CONFIG_I40E_DCB 9070 /* Setup DCB netlink interface */ 9071 i40e_dcbnl_setup(vsi); 9072 #endif /* CONFIG_I40E_DCB */ 9073 /* fall through */ 9074 9075 case I40E_VSI_FDIR: 9076 /* set up vectors and rings if needed */ 9077 ret = i40e_vsi_setup_vectors(vsi); 9078 if (ret) 9079 goto err_msix; 9080 9081 ret = i40e_alloc_rings(vsi); 9082 if (ret) 9083 goto err_rings; 9084 9085 /* map all of the rings to the q_vectors */ 9086 i40e_vsi_map_rings_to_vectors(vsi); 9087 9088 i40e_vsi_reset_stats(vsi); 9089 break; 9090 9091 default: 9092 /* no netdev or rings for the other VSI types */ 9093 break; 9094 } 9095 9096 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && 9097 (vsi->type == I40E_VSI_VMDQ2)) { 9098 ret = i40e_vsi_config_rss(vsi); 9099 } 9100 return vsi; 9101 9102 err_rings: 9103 i40e_vsi_free_q_vectors(vsi); 9104 err_msix: 9105 if (vsi->netdev_registered) { 9106 vsi->netdev_registered = false; 9107 unregister_netdev(vsi->netdev); 9108 free_netdev(vsi->netdev); 9109 vsi->netdev = NULL; 9110 } 9111 err_netdev: 9112 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 9113 err_vsi: 9114 i40e_vsi_clear(vsi); 9115 err_alloc: 9116 return NULL; 9117 } 9118 9119 /** 9120 * i40e_veb_get_bw_info - Query VEB BW information 9121 * @veb: the veb to query 9122 * 9123 * Query the Tx scheduler BW configuration data for given VEB 9124 **/ 9125 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 9126 { 9127 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 9128 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; 9129 struct i40e_pf *pf = veb->pf; 9130 struct i40e_hw *hw = &pf->hw; 9131 u32 tc_bw_max; 9132 int ret = 0; 9133 int i; 9134 9135 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 9136 &bw_data, NULL); 9137 if (ret) { 9138 dev_info(&pf->pdev->dev, 9139 "query veb bw config failed, err %s aq_err %s\n", 9140 i40e_stat_str(&pf->hw, ret), 9141 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 9142 goto out; 9143 } 9144 9145 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 9146 &ets_data, NULL); 9147 if (ret) { 9148 dev_info(&pf->pdev->dev, 9149 "query veb bw ets config failed, err %s aq_err %s\n", 9150 i40e_stat_str(&pf->hw, ret), 9151 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); 9152 goto out; 9153 } 9154 9155 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 9156 veb->bw_max_quanta = ets_data.tc_bw_max; 9157 veb->is_abs_credits = bw_data.absolute_credits_enable; 9158 veb->enabled_tc = ets_data.tc_valid_bits; 9159 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 9160 
(le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 9161 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 9162 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; 9163 veb->bw_tc_limit_credits[i] = 9164 le16_to_cpu(bw_data.tc_bw_limits[i]); 9165 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); 9166 } 9167 9168 out: 9169 return ret; 9170 } 9171 9172 /** 9173 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF 9174 * @pf: board private structure 9175 * 9176 * On error: returns error code (negative) 9177 * On success: returns vsi index in PF (positive) 9178 **/ 9179 static int i40e_veb_mem_alloc(struct i40e_pf *pf) 9180 { 9181 int ret = -ENOENT; 9182 struct i40e_veb *veb; 9183 int i; 9184 9185 /* Need to protect the allocation of switch elements at the PF level */ 9186 mutex_lock(&pf->switch_mutex); 9187 9188 /* VEB list may be fragmented if VEB creation/destruction has 9189 * been happening. We can afford to do a quick scan to look 9190 * for any free slots in the list. 9191 * 9192 * find next empty veb slot, looping back around if necessary 9193 */ 9194 i = 0; 9195 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) 9196 i++; 9197 if (i >= I40E_MAX_VEB) { 9198 ret = -ENOMEM; 9199 goto err_alloc_veb; /* out of VEB slots! */ 9200 } 9201 9202 veb = kzalloc(sizeof(*veb), GFP_KERNEL); 9203 if (!veb) { 9204 ret = -ENOMEM; 9205 goto err_alloc_veb; 9206 } 9207 veb->pf = pf; 9208 veb->idx = i; 9209 veb->enabled_tc = 1; 9210 9211 pf->veb[i] = veb; 9212 ret = i; 9213 err_alloc_veb: 9214 mutex_unlock(&pf->switch_mutex); 9215 return ret; 9216 } 9217 9218 /** 9219 * i40e_switch_branch_release - Delete a branch of the switch tree 9220 * @branch: where to start deleting 9221 * 9222 * This uses recursion to find the tips of the branch to be 9223 * removed, deleting until we get back to and can delete this VEB. 9224 **/ 9225 static void i40e_switch_branch_release(struct i40e_veb *branch) 9226 { 9227 struct i40e_pf *pf = branch->pf; 9228 u16 branch_seid = branch->seid; 9229 u16 veb_idx = branch->idx; 9230 int i; 9231 9232 /* release any VEBs on this VEB - RECURSION */ 9233 for (i = 0; i < I40E_MAX_VEB; i++) { 9234 if (!pf->veb[i]) 9235 continue; 9236 if (pf->veb[i]->uplink_seid == branch->seid) 9237 i40e_switch_branch_release(pf->veb[i]); 9238 } 9239 9240 /* Release the VSIs on this VEB, but not the owner VSI. 9241 * 9242 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 9243 * the VEB itself, so don't use (*branch) after this loop. 9244 */ 9245 for (i = 0; i < pf->num_alloc_vsi; i++) { 9246 if (!pf->vsi[i]) 9247 continue; 9248 if (pf->vsi[i]->uplink_seid == branch_seid && 9249 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 9250 i40e_vsi_release(pf->vsi[i]); 9251 } 9252 } 9253 9254 /* There's one corner case where the VEB might not have been 9255 * removed, so double check it here and remove it if needed. 9256 * This case happens if the veb was created from the debugfs 9257 * commands and no VSIs were added to it. 
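 * (The recursion above is bounded by the fixed I40E_MAX_VEB array,
 * so even a deeply stacked branch terminates.)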
9258 */ 9259 if (pf->veb[veb_idx]) 9260 i40e_veb_release(pf->veb[veb_idx]); 9261 } 9262 9263 /** 9264 * i40e_veb_clear - remove veb struct 9265 * @veb: the veb to remove 9266 **/ 9267 static void i40e_veb_clear(struct i40e_veb *veb) 9268 { 9269 if (!veb) 9270 return; 9271 9272 if (veb->pf) { 9273 struct i40e_pf *pf = veb->pf; 9274 9275 mutex_lock(&pf->switch_mutex); 9276 if (pf->veb[veb->idx] == veb) 9277 pf->veb[veb->idx] = NULL; 9278 mutex_unlock(&pf->switch_mutex); 9279 } 9280 9281 kfree(veb); 9282 } 9283 9284 /** 9285 * i40e_veb_release - Delete a VEB and free its resources 9286 * @veb: the VEB being removed 9287 **/ 9288 void i40e_veb_release(struct i40e_veb *veb) 9289 { 9290 struct i40e_vsi *vsi = NULL; 9291 struct i40e_pf *pf; 9292 int i, n = 0; 9293 9294 pf = veb->pf; 9295 9296 /* find the remaining VSI and check for extras */ 9297 for (i = 0; i < pf->num_alloc_vsi; i++) { 9298 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 9299 n++; 9300 vsi = pf->vsi[i]; 9301 } 9302 } 9303 if (n != 1) { 9304 dev_info(&pf->pdev->dev, 9305 "can't remove VEB %d with %d VSIs left\n", 9306 veb->seid, n); 9307 return; 9308 } 9309 9310 /* move the remaining VSI to uplink veb */ 9311 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 9312 if (veb->uplink_seid) { 9313 vsi->uplink_seid = veb->uplink_seid; 9314 if (veb->uplink_seid == pf->mac_seid) 9315 vsi->veb_idx = I40E_NO_VEB; 9316 else 9317 vsi->veb_idx = veb->veb_idx; 9318 } else { 9319 /* floating VEB */ 9320 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 9321 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 9322 } 9323 9324 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 9325 i40e_veb_clear(veb); 9326 } 9327 9328 /** 9329 * i40e_add_veb - create the VEB in the switch 9330 * @veb: the VEB to be instantiated 9331 * @vsi: the controlling VSI 9332 **/ 9333 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 9334 { 9335 struct i40e_pf *pf = veb->pf; 9336 bool is_default = veb->pf->cur_promisc; 9337 bool is_cloud = false; 9338 int ret; 9339 9340 /* get a VEB from the hardware */ 9341 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 9342 veb->enabled_tc, is_default, 9343 is_cloud, &veb->seid, NULL); 9344 if (ret) { 9345 dev_info(&pf->pdev->dev, 9346 "couldn't add VEB, err %s aq_err %s\n", 9347 i40e_stat_str(&pf->hw, ret), 9348 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9349 return -EPERM; 9350 } 9351 9352 /* get statistics counter */ 9353 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, 9354 &veb->stats_idx, NULL, NULL, NULL); 9355 if (ret) { 9356 dev_info(&pf->pdev->dev, 9357 "couldn't get VEB statistics idx, err %s aq_err %s\n", 9358 i40e_stat_str(&pf->hw, ret), 9359 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9360 return -EPERM; 9361 } 9362 ret = i40e_veb_get_bw_info(veb); 9363 if (ret) { 9364 dev_info(&pf->pdev->dev, 9365 "couldn't get VEB bw info, err %s aq_err %s\n", 9366 i40e_stat_str(&pf->hw, ret), 9367 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9368 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 9369 return -ENOENT; 9370 } 9371 9372 vsi->uplink_seid = veb->seid; 9373 vsi->veb_idx = veb->idx; 9374 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 9375 9376 return 0; 9377 } 9378 9379 /** 9380 * i40e_veb_setup - Set up a VEB 9381 * @pf: board private structure 9382 * @flags: VEB setup flags 9383 * @uplink_seid: the switch element to link to 9384 * @vsi_seid: the initial VSI seid 9385 * @enabled_tc: Enabled TC bit-map 9386 * 9387 * This allocates the sw VEB structure and links it into the 
switch 9388 * It is possible and legal for this to be a duplicate of an already 9389 * existing VEB. It is also possible for both uplink and vsi seids 9390 * to be zero, in order to create a floating VEB. 9391 * 9392 * Returns pointer to the successfully allocated VEB sw struct on 9393 * success, otherwise returns NULL on failure. 9394 **/ 9395 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 9396 u16 uplink_seid, u16 vsi_seid, 9397 u8 enabled_tc) 9398 { 9399 struct i40e_veb *veb, *uplink_veb = NULL; 9400 int vsi_idx, veb_idx; 9401 int ret; 9402 9403 /* if one seid is 0, the other must be 0 to create a floating relay */ 9404 if ((uplink_seid == 0 || vsi_seid == 0) && 9405 (uplink_seid + vsi_seid != 0)) { 9406 dev_info(&pf->pdev->dev, 9407 "one, not both seid's are 0: uplink=%d vsi=%d\n", 9408 uplink_seid, vsi_seid); 9409 return NULL; 9410 } 9411 9412 /* make sure there is such a vsi and uplink */ 9413 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) 9414 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 9415 break; 9416 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { 9417 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 9418 vsi_seid); 9419 return NULL; 9420 } 9421 9422 if (uplink_seid && uplink_seid != pf->mac_seid) { 9423 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 9424 if (pf->veb[veb_idx] && 9425 pf->veb[veb_idx]->seid == uplink_seid) { 9426 uplink_veb = pf->veb[veb_idx]; 9427 break; 9428 } 9429 } 9430 if (!uplink_veb) { 9431 dev_info(&pf->pdev->dev, 9432 "uplink seid %d not found\n", uplink_seid); 9433 return NULL; 9434 } 9435 } 9436 9437 /* get veb sw struct */ 9438 veb_idx = i40e_veb_mem_alloc(pf); 9439 if (veb_idx < 0) 9440 goto err_alloc; 9441 veb = pf->veb[veb_idx]; 9442 veb->flags = flags; 9443 veb->uplink_seid = uplink_seid; 9444 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 9445 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 9446 9447 /* create the VEB in the switch */ 9448 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 9449 if (ret) 9450 goto err_veb; 9451 if (vsi_idx == pf->lan_vsi) 9452 pf->lan_veb = veb->idx; 9453 9454 return veb; 9455 9456 err_veb: 9457 i40e_veb_clear(veb); 9458 err_alloc: 9459 return NULL; 9460 } 9461 9462 /** 9463 * i40e_setup_pf_switch_element - set PF vars based on switch type 9464 * @pf: board private structure 9465 * @ele: element we are building info from 9466 * @num_reported: total number of elements 9467 * @printconfig: should we print the contents 9468 * 9469 * helper function to assist in extracting a few useful SEID values. 9470 **/ 9471 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 9472 struct i40e_aqc_switch_config_element_resp *ele, 9473 u16 num_reported, bool printconfig) 9474 { 9475 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 9476 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 9477 u8 element_type = ele->element_type; 9478 u16 seid = le16_to_cpu(ele->seid); 9479 9480 if (printconfig) 9481 dev_info(&pf->pdev->dev, 9482 "type=%d seid=%d uplink=%d downlink=%d\n", 9483 element_type, seid, uplink_seid, downlink_seid); 9484 9485 switch (element_type) { 9486 case I40E_SWITCH_ELEMENT_TYPE_MAC: 9487 pf->mac_seid = seid; 9488 break; 9489 case I40E_SWITCH_ELEMENT_TYPE_VEB: 9490 /* Main VEB? 
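 * A Main VEB is one whose uplink is the MAC itself; floating VEBs
 * and VEBs stacked under another element are skipped here.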
*/ 9491 if (uplink_seid != pf->mac_seid) 9492 break; 9493 if (pf->lan_veb == I40E_NO_VEB) { 9494 int v; 9495 9496 /* find existing or else empty VEB */ 9497 for (v = 0; v < I40E_MAX_VEB; v++) { 9498 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 9499 pf->lan_veb = v; 9500 break; 9501 } 9502 } 9503 if (pf->lan_veb == I40E_NO_VEB) { 9504 v = i40e_veb_mem_alloc(pf); 9505 if (v < 0) 9506 break; 9507 pf->lan_veb = v; 9508 } 9509 } 9510 9511 pf->veb[pf->lan_veb]->seid = seid; 9512 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 9513 pf->veb[pf->lan_veb]->pf = pf; 9514 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 9515 break; 9516 case I40E_SWITCH_ELEMENT_TYPE_VSI: 9517 if (num_reported != 1) 9518 break; 9519 /* This is immediately after a reset so we can assume this is 9520 * the PF's VSI 9521 */ 9522 pf->mac_seid = uplink_seid; 9523 pf->pf_seid = downlink_seid; 9524 pf->main_vsi_seid = seid; 9525 if (printconfig) 9526 dev_info(&pf->pdev->dev, 9527 "pf_seid=%d main_vsi_seid=%d\n", 9528 pf->pf_seid, pf->main_vsi_seid); 9529 break; 9530 case I40E_SWITCH_ELEMENT_TYPE_PF: 9531 case I40E_SWITCH_ELEMENT_TYPE_VF: 9532 case I40E_SWITCH_ELEMENT_TYPE_EMP: 9533 case I40E_SWITCH_ELEMENT_TYPE_BMC: 9534 case I40E_SWITCH_ELEMENT_TYPE_PE: 9535 case I40E_SWITCH_ELEMENT_TYPE_PA: 9536 /* ignore these for now */ 9537 break; 9538 default: 9539 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 9540 element_type, seid); 9541 break; 9542 } 9543 } 9544 9545 /** 9546 * i40e_fetch_switch_configuration - Get switch config from firmware 9547 * @pf: board private structure 9548 * @printconfig: should we print the contents 9549 * 9550 * Get the current switch configuration from the device and 9551 * extract a few useful SEID values. 9552 **/ 9553 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) 9554 { 9555 struct i40e_aqc_get_switch_config_resp *sw_config; 9556 u16 next_seid = 0; 9557 int ret = 0; 9558 u8 *aq_buf; 9559 int i; 9560 9561 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); 9562 if (!aq_buf) 9563 return -ENOMEM; 9564 9565 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 9566 do { 9567 u16 num_reported, num_total; 9568 9569 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, 9570 I40E_AQ_LARGE_BUF, 9571 &next_seid, NULL); 9572 if (ret) { 9573 dev_info(&pf->pdev->dev, 9574 "get switch config failed err %s aq_err %s\n", 9575 i40e_stat_str(&pf->hw, ret), 9576 i40e_aq_str(&pf->hw, 9577 pf->hw.aq.asq_last_status)); 9578 kfree(aq_buf); 9579 return -ENOENT; 9580 } 9581 9582 num_reported = le16_to_cpu(sw_config->header.num_reported); 9583 num_total = le16_to_cpu(sw_config->header.num_total); 9584 9585 if (printconfig) 9586 dev_info(&pf->pdev->dev, 9587 "header: %d reported %d total\n", 9588 num_reported, num_total); 9589 9590 for (i = 0; i < num_reported; i++) { 9591 struct i40e_aqc_switch_config_element_resp *ele = 9592 &sw_config->element[i]; 9593 9594 i40e_setup_pf_switch_element(pf, ele, num_reported, 9595 printconfig); 9596 } 9597 } while (next_seid != 0); 9598 9599 kfree(aq_buf); 9600 return ret; 9601 } 9602 9603 /** 9604 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset 9605 * @pf: board private structure 9606 * @reinit: if the Main VSI needs to re-initialized. 
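 *
 * Called from probe for first-time setup, and again from the reset
 * rebuild path with @reinit set, in which case the existing main VSI
 * is re-plumbed by i40e_vsi_reinit_setup() rather than allocated anew.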
9607 * 9608 * Returns 0 on success, negative value on failure 9609 **/ 9610 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) 9611 { 9612 int ret; 9613 9614 /* find out what's out there already */ 9615 ret = i40e_fetch_switch_configuration(pf, false); 9616 if (ret) { 9617 dev_info(&pf->pdev->dev, 9618 "couldn't fetch switch config, err %s aq_err %s\n", 9619 i40e_stat_str(&pf->hw, ret), 9620 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 9621 return ret; 9622 } 9623 i40e_pf_reset_stats(pf); 9624 9625 /* first time setup */ 9626 if (pf->lan_vsi == I40E_NO_VSI || reinit) { 9627 struct i40e_vsi *vsi = NULL; 9628 u16 uplink_seid; 9629 9630 /* Set up the PF VSI associated with the PF's main VSI 9631 * that is already in the HW switch 9632 */ 9633 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 9634 uplink_seid = pf->veb[pf->lan_veb]->seid; 9635 else 9636 uplink_seid = pf->mac_seid; 9637 if (pf->lan_vsi == I40E_NO_VSI) 9638 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); 9639 else if (reinit) 9640 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); 9641 if (!vsi) { 9642 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); 9643 i40e_fdir_teardown(pf); 9644 return -EAGAIN; 9645 } 9646 } else { 9647 /* force a reset of TC and queue layout configurations */ 9648 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 9649 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 9650 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 9651 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 9652 } 9653 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); 9654 9655 i40e_fdir_sb_setup(pf); 9656 9657 /* Setup static PF queue filter control settings */ 9658 ret = i40e_setup_pf_filter_control(pf); 9659 if (ret) { 9660 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", 9661 ret); 9662 /* Failure here should not stop continuing other steps */ 9663 } 9664 9665 /* enable RSS in the HW, even for only one queue, as the stack can use 9666 * the hash 9667 */ 9668 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) 9669 i40e_config_rss(pf); 9670 9671 /* fill in link information and enable LSE reporting */ 9672 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); 9673 i40e_link_event(pf); 9674 9675 /* Initialize user-specific link properties */ 9676 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & 9677 I40E_AQ_AN_COMPLETED) ? true : false); 9678 9679 i40e_ptp_init(pf); 9680 9681 return ret; 9682 } 9683 9684 /** 9685 * i40e_determine_queue_usage - Work out queue distribution 9686 * @pf: board private structure 9687 **/ 9688 static void i40e_determine_queue_usage(struct i40e_pf *pf) 9689 { 9690 int queues_left; 9691 9692 pf->num_lan_qps = 0; 9693 #ifdef I40E_FCOE 9694 pf->num_fcoe_qps = 0; 9695 #endif 9696 9697 /* Find the max queues to be put into basic use. We'll always be 9698 * using TC0, whether or not DCB is running, and TC0 will get the 9699 * big RSS set. 
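	 *
	 * After this basic LAN allocation, any remaining queues are doled
	 * out in order: FCoE (when built in), a single sideband queue for
	 * Flow Director, then VF queue pairs, then VMDq queue pairs, with
	 * the leftover count recorded in pf->queues_left.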
9700 */ 9701 queues_left = pf->hw.func_caps.num_tx_qp; 9702 9703 if ((queues_left == 1) || 9704 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 9705 /* one qp for PF, no queues for anything else */ 9706 queues_left = 0; 9707 pf->rss_size = pf->num_lan_qps = 1; 9708 9709 /* make sure all the fancies are disabled */ 9710 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 9711 #ifdef I40E_FCOE 9712 I40E_FLAG_FCOE_ENABLED | 9713 #endif 9714 I40E_FLAG_FD_SB_ENABLED | 9715 I40E_FLAG_FD_ATR_ENABLED | 9716 I40E_FLAG_DCB_CAPABLE | 9717 I40E_FLAG_SRIOV_ENABLED | 9718 I40E_FLAG_VMDQ_ENABLED); 9719 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 9720 I40E_FLAG_FD_SB_ENABLED | 9721 I40E_FLAG_FD_ATR_ENABLED | 9722 I40E_FLAG_DCB_CAPABLE))) { 9723 /* one qp for PF */ 9724 pf->rss_size = pf->num_lan_qps = 1; 9725 queues_left -= pf->num_lan_qps; 9726 9727 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 9728 #ifdef I40E_FCOE 9729 I40E_FLAG_FCOE_ENABLED | 9730 #endif 9731 I40E_FLAG_FD_SB_ENABLED | 9732 I40E_FLAG_FD_ATR_ENABLED | 9733 I40E_FLAG_DCB_ENABLED | 9734 I40E_FLAG_VMDQ_ENABLED); 9735 } else { 9736 /* Not enough queues for all TCs */ 9737 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 9738 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 9739 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 9740 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 9741 } 9742 pf->num_lan_qps = max_t(int, pf->rss_size_max, 9743 num_online_cpus()); 9744 pf->num_lan_qps = min_t(int, pf->num_lan_qps, 9745 pf->hw.func_caps.num_tx_qp); 9746 9747 queues_left -= pf->num_lan_qps; 9748 } 9749 9750 #ifdef I40E_FCOE 9751 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 9752 if (I40E_DEFAULT_FCOE <= queues_left) { 9753 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; 9754 } else if (I40E_MINIMUM_FCOE <= queues_left) { 9755 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; 9756 } else { 9757 pf->num_fcoe_qps = 0; 9758 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; 9759 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); 9760 } 9761 9762 queues_left -= pf->num_fcoe_qps; 9763 } 9764 9765 #endif 9766 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 9767 if (queues_left > 1) { 9768 queues_left -= 1; /* save 1 queue for FD */ 9769 } else { 9770 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 9771 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); 9772 } 9773 } 9774 9775 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 9776 pf->num_vf_qps && pf->num_req_vfs && queues_left) { 9777 pf->num_req_vfs = min_t(int, pf->num_req_vfs, 9778 (queues_left / pf->num_vf_qps)); 9779 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); 9780 } 9781 9782 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 9783 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { 9784 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, 9785 (queues_left / pf->num_vmdq_qps)); 9786 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); 9787 } 9788 9789 pf->queues_left = queues_left; 9790 #ifdef I40E_FCOE 9791 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); 9792 #endif 9793 } 9794 9795 /** 9796 * i40e_setup_pf_filter_control - Setup PF static filter control 9797 * @pf: PF to be setup 9798 * 9799 * i40e_setup_pf_filter_control sets up a PF's initial filter control 9800 * settings. If PE/FCoE are enabled then it will also set the per PF 9801 * based filter sizes required for them. It also enables Flow director, 9802 * ethertype and macvlan type filter settings for the pf. 
9803 * 9804 * Returns 0 on success, negative on failure 9805 **/ 9806 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) 9807 { 9808 struct i40e_filter_control_settings *settings = &pf->filter_settings; 9809 9810 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; 9811 9812 /* Flow Director is enabled */ 9813 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) 9814 settings->enable_fdir = true; 9815 9816 /* Ethtype and MACVLAN filters enabled for PF */ 9817 settings->enable_ethtype = true; 9818 settings->enable_macvlan = true; 9819 9820 if (i40e_set_filter_control(&pf->hw, settings)) 9821 return -ENOENT; 9822 9823 return 0; 9824 } 9825 9826 #define INFO_STRING_LEN 255 9827 static void i40e_print_features(struct i40e_pf *pf) 9828 { 9829 struct i40e_hw *hw = &pf->hw; 9830 char *buf, *string; 9831 9832 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); 9833 if (!string) { 9834 dev_err(&pf->pdev->dev, "Features string allocation failed\n"); 9835 return; 9836 } 9837 9838 buf = string; 9839 9840 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); 9841 #ifdef CONFIG_PCI_IOV 9842 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); 9843 #endif 9844 buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", 9845 pf->hw.func_caps.num_vsis, 9846 pf->vsi[pf->lan_vsi]->num_queue_pairs, 9847 pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); 9848 9849 if (pf->flags & I40E_FLAG_RSS_ENABLED) 9850 buf += sprintf(buf, "RSS "); 9851 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 9852 buf += sprintf(buf, "FD_ATR "); 9853 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 9854 buf += sprintf(buf, "FD_SB "); 9855 buf += sprintf(buf, "NTUPLE "); 9856 } 9857 if (pf->flags & I40E_FLAG_DCB_CAPABLE) 9858 buf += sprintf(buf, "DCB "); 9859 if (pf->flags & I40E_FLAG_PTP) 9860 buf += sprintf(buf, "PTP "); 9861 #ifdef I40E_FCOE 9862 if (pf->flags & I40E_FLAG_FCOE_ENABLED) 9863 buf += sprintf(buf, "FCOE "); 9864 #endif 9865 9866 BUG_ON(buf > (string + INFO_STRING_LEN)); 9867 dev_info(&pf->pdev->dev, "%s\n", string); 9868 kfree(string); 9869 } 9870 9871 /** 9872 * i40e_probe - Device initialization routine 9873 * @pdev: PCI device information struct 9874 * @ent: entry in i40e_pci_tbl 9875 * 9876 * i40e_probe initializes a PF identified by a pci_dev structure. 9877 * The OS initialization, configuring of the PF private structure, 9878 * and a hardware reset occur. 
9879 * 9880 * Returns 0 on success, negative on failure 9881 **/ 9882 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 9883 { 9884 struct i40e_aq_get_phy_abilities_resp abilities; 9885 unsigned long ioremap_len; 9886 struct i40e_pf *pf; 9887 struct i40e_hw *hw; 9888 static u16 pfs_found; 9889 u16 link_status; 9890 int err = 0; 9891 u32 len; 9892 u32 i; 9893 9894 err = pci_enable_device_mem(pdev); 9895 if (err) 9896 return err; 9897 9898 /* set up for high or low dma */ 9899 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9900 if (err) { 9901 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9902 if (err) { 9903 dev_err(&pdev->dev, 9904 "DMA configuration failed: 0x%x\n", err); 9905 goto err_dma; 9906 } 9907 } 9908 9909 /* set up pci connections */ 9910 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 9911 IORESOURCE_MEM), i40e_driver_name); 9912 if (err) { 9913 dev_info(&pdev->dev, 9914 "pci_request_selected_regions failed %d\n", err); 9915 goto err_pci_reg; 9916 } 9917 9918 pci_enable_pcie_error_reporting(pdev); 9919 pci_set_master(pdev); 9920 9921 /* Now that we have a PCI connection, we need to do the 9922 * low level device setup. This is primarily setting up 9923 * the Admin Queue structures and then querying for the 9924 * device's current profile information. 9925 */ 9926 pf = kzalloc(sizeof(*pf), GFP_KERNEL); 9927 if (!pf) { 9928 err = -ENOMEM; 9929 goto err_pf_alloc; 9930 } 9931 pf->next_vsi = 0; 9932 pf->pdev = pdev; 9933 set_bit(__I40E_DOWN, &pf->state); 9934 9935 hw = &pf->hw; 9936 hw->back = pf; 9937 9938 ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), 9939 I40E_MAX_CSR_SPACE); 9940 9941 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len); 9942 if (!hw->hw_addr) { 9943 err = -EIO; 9944 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", 9945 (unsigned int)pci_resource_start(pdev, 0), 9946 (unsigned int)pci_resource_len(pdev, 0), err); 9947 goto err_ioremap; 9948 } 9949 hw->vendor_id = pdev->vendor; 9950 hw->device_id = pdev->device; 9951 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 9952 hw->subsystem_vendor_id = pdev->subsystem_vendor; 9953 hw->subsystem_device_id = pdev->subsystem_device; 9954 hw->bus.device = PCI_SLOT(pdev->devfn); 9955 hw->bus.func = PCI_FUNC(pdev->devfn); 9956 pf->instance = pfs_found; 9957 9958 if (debug != -1) { 9959 pf->msg_enable = pf->hw.debug_mask; 9960 pf->msg_enable = debug; 9961 } 9962 9963 /* do a special CORER for clearing PXE mode once at init */ 9964 if (hw->revision_id == 0 && 9965 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { 9966 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 9967 i40e_flush(hw); 9968 msleep(200); 9969 pf->corer_count++; 9970 9971 i40e_clear_pxe_mode(hw); 9972 } 9973 9974 /* Reset here to make sure all is clean and to define PF 'n' */ 9975 i40e_clear_hw(hw); 9976 err = i40e_pf_reset(hw); 9977 if (err) { 9978 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 9979 goto err_pf_reset; 9980 } 9981 pf->pfr_count++; 9982 9983 hw->aq.num_arq_entries = I40E_AQ_LEN; 9984 hw->aq.num_asq_entries = I40E_AQ_LEN; 9985 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; 9986 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 9987 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 9988 9989 snprintf(pf->int_name, sizeof(pf->int_name) - 1, 9990 "%s-%s:misc", 9991 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); 9992 9993 err = i40e_init_shared_code(hw); 9994 if (err) { 9995 
dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", 9996 err); 9997 goto err_pf_reset; 9998 } 9999 10000 /* set up a default setting for link flow control */ 10001 pf->hw.fc.requested_mode = I40E_FC_NONE; 10002 10003 err = i40e_init_adminq(hw); 10004 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); 10005 if (err) { 10006 dev_info(&pdev->dev, 10007 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); 10008 goto err_pf_reset; 10009 } 10010 10011 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 10012 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) 10013 dev_info(&pdev->dev, 10014 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); 10015 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 10016 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) 10017 dev_info(&pdev->dev, 10018 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); 10019 10020 i40e_verify_eeprom(pf); 10021 10022 /* Rev 0 hardware was never productized */ 10023 if (hw->revision_id < 1) 10024 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); 10025 10026 i40e_clear_pxe_mode(hw); 10027 err = i40e_get_capabilities(pf); 10028 if (err) 10029 goto err_adminq_setup; 10030 10031 err = i40e_sw_init(pf); 10032 if (err) { 10033 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 10034 goto err_sw_init; 10035 } 10036 10037 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 10038 hw->func_caps.num_rx_qp, 10039 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 10040 if (err) { 10041 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 10042 goto err_init_lan_hmc; 10043 } 10044 10045 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 10046 if (err) { 10047 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 10048 err = -ENOENT; 10049 goto err_configure_lan_hmc; 10050 } 10051 10052 /* Disable LLDP for NICs that have firmware versions lower than v4.3. 
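	 * (i.e. firmware major version < 4, or major version 4 with minor
	 * version < 3, which is exactly what the condition below checks).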
10053 * Ignore error return codes because if it was already disabled via 10054 * hardware settings this will fail 10055 */ 10056 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || 10057 (pf->hw.aq.fw_maj_ver < 4)) { 10058 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); 10059 i40e_aq_stop_lldp(hw, true, NULL); 10060 } 10061 10062 i40e_get_mac_addr(hw, hw->mac.addr); 10063 if (!is_valid_ether_addr(hw->mac.addr)) { 10064 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 10065 err = -EIO; 10066 goto err_mac_addr; 10067 } 10068 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 10069 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); 10070 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 10071 if (is_valid_ether_addr(hw->mac.port_addr)) 10072 pf->flags |= I40E_FLAG_PORT_ID_VALID; 10073 #ifdef I40E_FCOE 10074 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); 10075 if (err) 10076 dev_info(&pdev->dev, 10077 "(non-fatal) SAN MAC retrieval failed: %d\n", err); 10078 if (!is_valid_ether_addr(hw->mac.san_addr)) { 10079 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", 10080 hw->mac.san_addr); 10081 ether_addr_copy(hw->mac.san_addr, hw->mac.addr); 10082 } 10083 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); 10084 #endif /* I40E_FCOE */ 10085 10086 pci_set_drvdata(pdev, pf); 10087 pci_save_state(pdev); 10088 #ifdef CONFIG_I40E_DCB 10089 err = i40e_init_pf_dcb(pf); 10090 if (err) { 10091 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); 10092 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10093 /* Continue without DCB enabled */ 10094 } 10095 #endif /* CONFIG_I40E_DCB */ 10096 10097 /* set up periodic task facility */ 10098 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 10099 pf->service_timer_period = HZ; 10100 10101 INIT_WORK(&pf->service_task, i40e_service_task); 10102 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 10103 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 10104 pf->link_check_timeout = jiffies; 10105 10106 /* WoL defaults to disabled */ 10107 pf->wol_en = false; 10108 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 10109 10110 /* set up the main switch operations */ 10111 i40e_determine_queue_usage(pf); 10112 err = i40e_init_interrupt_scheme(pf); 10113 if (err) 10114 goto err_switch_setup; 10115 10116 /* The number of VSIs reported by the FW is the minimum guaranteed 10117 * to us; HW supports far more and we share the remaining pool with 10118 * the other PFs. We allocate space for more than the guarantee with 10119 * the understanding that we might not get them all later. 10120 */ 10121 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) 10122 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; 10123 else 10124 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; 10125 10126 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
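	 * Only the pointer array is allocated here; individual VSI
	 * structures are carved out of it later, e.g. by i40e_vsi_setup().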
*/ 10127 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; 10128 pf->vsi = kzalloc(len, GFP_KERNEL); 10129 if (!pf->vsi) { 10130 err = -ENOMEM; 10131 goto err_switch_setup; 10132 } 10133 10134 #ifdef CONFIG_PCI_IOV 10135 /* prep for VF support */ 10136 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 10137 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 10138 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 10139 if (pci_num_vf(pdev)) 10140 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; 10141 } 10142 #endif 10143 err = i40e_setup_pf_switch(pf, false); 10144 if (err) { 10145 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 10146 goto err_vsis; 10147 } 10148 /* if FDIR VSI was set up, start it now */ 10149 for (i = 0; i < pf->num_alloc_vsi; i++) { 10150 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 10151 i40e_vsi_open(pf->vsi[i]); 10152 break; 10153 } 10154 } 10155 10156 /* driver is only interested in link up/down and module qualification 10157 * reports from firmware 10158 */ 10159 err = i40e_aq_set_phy_int_mask(&pf->hw, 10160 I40E_AQ_EVENT_LINK_UPDOWN | 10161 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); 10162 if (err) 10163 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", 10164 i40e_stat_str(&pf->hw, err), 10165 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10166 10167 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || 10168 (pf->hw.aq.fw_maj_ver < 4)) { 10169 msleep(75); 10170 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); 10171 if (err) 10172 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", 10173 i40e_stat_str(&pf->hw, err), 10174 i40e_aq_str(&pf->hw, 10175 pf->hw.aq.asq_last_status)); 10176 } 10177 /* The main driver is (mostly) up and happy. We need to set this state 10178 * before setting up the misc vector or we get a race and the vector 10179 * ends up disabled forever. 10180 */ 10181 clear_bit(__I40E_DOWN, &pf->state); 10182 10183 /* In case of MSIX we are going to setup the misc vector right here 10184 * to handle admin queue events etc. In case of legacy and MSI 10185 * the misc functionality and queue processing is combined in 10186 * the same vector and that gets setup at open. 
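	 * A failure to set up the misc vector is fatal below, since admin
	 * queue events could otherwise never be serviced.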
10187 */ 10188 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 10189 err = i40e_setup_misc_vector(pf); 10190 if (err) { 10191 dev_info(&pdev->dev, 10192 "setup of misc vector failed: %d\n", err); 10193 goto err_vsis; 10194 } 10195 } 10196 10197 #ifdef CONFIG_PCI_IOV 10198 /* prep for VF support */ 10199 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 10200 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 10201 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 10202 u32 val; 10203 10204 /* disable link interrupts for VFs */ 10205 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); 10206 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 10207 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 10208 i40e_flush(hw); 10209 10210 if (pci_num_vf(pdev)) { 10211 dev_info(&pdev->dev, 10212 "Active VFs found, allocating resources.\n"); 10213 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); 10214 if (err) 10215 dev_info(&pdev->dev, 10216 "Error %d allocating resources for existing VFs\n", 10217 err); 10218 } 10219 } 10220 #endif /* CONFIG_PCI_IOV */ 10221 10222 pfs_found++; 10223 10224 i40e_dbg_pf_init(pf); 10225 10226 /* tell the firmware that we're starting */ 10227 i40e_send_version(pf); 10228 10229 /* since everything's happy, start the service_task timer */ 10230 mod_timer(&pf->service_timer, 10231 round_jiffies(jiffies + pf->service_timer_period)); 10232 10233 #ifdef I40E_FCOE 10234 /* create FCoE interface */ 10235 i40e_fcoe_vsi_setup(pf); 10236 10237 #endif 10238 /* Get the negotiated link width and speed from PCI config space */ 10239 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); 10240 10241 i40e_set_pci_config_data(hw, link_status); 10242 10243 dev_info(&pdev->dev, "PCI-Express: %s %s\n", 10244 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : 10245 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 10246 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : 10247 "Unknown"), 10248 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : 10249 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : 10250 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : 10251 hw->bus.width == i40e_bus_width_pcie_x1 ? 
"Width x1" : 10252 "Unknown")); 10253 10254 if (hw->bus.width < i40e_bus_width_pcie_x8 || 10255 hw->bus.speed < i40e_bus_speed_8000) { 10256 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 10257 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 10258 } 10259 10260 /* get the requested speeds from the fw */ 10261 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); 10262 if (err) 10263 dev_info(&pf->pdev->dev, 10264 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n", 10265 i40e_stat_str(&pf->hw, err), 10266 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 10267 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; 10268 10269 /* print a string summarizing features */ 10270 i40e_print_features(pf); 10271 10272 return 0; 10273 10274 /* Unwind what we've done if something failed in the setup */ 10275 err_vsis: 10276 set_bit(__I40E_DOWN, &pf->state); 10277 i40e_clear_interrupt_scheme(pf); 10278 kfree(pf->vsi); 10279 err_switch_setup: 10280 i40e_reset_interrupt_capability(pf); 10281 del_timer_sync(&pf->service_timer); 10282 err_mac_addr: 10283 err_configure_lan_hmc: 10284 (void)i40e_shutdown_lan_hmc(hw); 10285 err_init_lan_hmc: 10286 kfree(pf->qp_pile); 10287 err_sw_init: 10288 err_adminq_setup: 10289 (void)i40e_shutdown_adminq(hw); 10290 err_pf_reset: 10291 iounmap(hw->hw_addr); 10292 err_ioremap: 10293 kfree(pf); 10294 err_pf_alloc: 10295 pci_disable_pcie_error_reporting(pdev); 10296 pci_release_selected_regions(pdev, 10297 pci_select_bars(pdev, IORESOURCE_MEM)); 10298 err_pci_reg: 10299 err_dma: 10300 pci_disable_device(pdev); 10301 return err; 10302 } 10303 10304 /** 10305 * i40e_remove - Device removal routine 10306 * @pdev: PCI device information struct 10307 * 10308 * i40e_remove is called by the PCI subsystem to alert the driver 10309 * that is should release a PCI device. This could be caused by a 10310 * Hot-Plug event, or because the driver is going to be removed from 10311 * memory. 10312 **/ 10313 static void i40e_remove(struct pci_dev *pdev) 10314 { 10315 struct i40e_pf *pf = pci_get_drvdata(pdev); 10316 i40e_status ret_code; 10317 int i; 10318 10319 i40e_dbg_pf_exit(pf); 10320 10321 i40e_ptp_stop(pf); 10322 10323 /* no more scheduling of any task */ 10324 set_bit(__I40E_DOWN, &pf->state); 10325 del_timer_sync(&pf->service_timer); 10326 cancel_work_sync(&pf->service_task); 10327 i40e_fdir_teardown(pf); 10328 10329 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 10330 i40e_free_vfs(pf); 10331 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; 10332 } 10333 10334 i40e_fdir_teardown(pf); 10335 10336 /* If there is a switch structure or any orphans, remove them. 10337 * This will leave only the PF's VSI remaining. 10338 */ 10339 for (i = 0; i < I40E_MAX_VEB; i++) { 10340 if (!pf->veb[i]) 10341 continue; 10342 10343 if (pf->veb[i]->uplink_seid == pf->mac_seid || 10344 pf->veb[i]->uplink_seid == 0) 10345 i40e_switch_branch_release(pf->veb[i]); 10346 } 10347 10348 /* Now we can shutdown the PF's VSI, just before we kill 10349 * adminq and hmc. 
10350 */ 10351 if (pf->vsi[pf->lan_vsi]) 10352 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 10353 10354 /* shutdown and destroy the HMC */ 10355 if (pf->hw.hmc.hmc_obj) { 10356 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 10357 if (ret_code) 10358 dev_warn(&pdev->dev, 10359 "Failed to destroy the HMC resources: %d\n", 10360 ret_code); 10361 } 10362 10363 /* shutdown the adminq */ 10364 ret_code = i40e_shutdown_adminq(&pf->hw); 10365 if (ret_code) 10366 dev_warn(&pdev->dev, 10367 "Failed to destroy the Admin Queue resources: %d\n", 10368 ret_code); 10369 10370 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 10371 i40e_clear_interrupt_scheme(pf); 10372 for (i = 0; i < pf->num_alloc_vsi; i++) { 10373 if (pf->vsi[i]) { 10374 i40e_vsi_clear_rings(pf->vsi[i]); 10375 i40e_vsi_clear(pf->vsi[i]); 10376 pf->vsi[i] = NULL; 10377 } 10378 } 10379 10380 for (i = 0; i < I40E_MAX_VEB; i++) { 10381 kfree(pf->veb[i]); 10382 pf->veb[i] = NULL; 10383 } 10384 10385 kfree(pf->qp_pile); 10386 kfree(pf->vsi); 10387 10388 iounmap(pf->hw.hw_addr); 10389 kfree(pf); 10390 pci_release_selected_regions(pdev, 10391 pci_select_bars(pdev, IORESOURCE_MEM)); 10392 10393 pci_disable_pcie_error_reporting(pdev); 10394 pci_disable_device(pdev); 10395 } 10396 10397 /** 10398 * i40e_pci_error_detected - warning that something funky happened in PCI land 10399 * @pdev: PCI device information struct 10400 * 10401 * Called to warn that something happened and the error handling steps 10402 * are in progress. Allows the driver to quiesce things, be ready for 10403 * remediation. 10404 **/ 10405 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, 10406 enum pci_channel_state error) 10407 { 10408 struct i40e_pf *pf = pci_get_drvdata(pdev); 10409 10410 dev_info(&pdev->dev, "%s: error %d\n", __func__, error); 10411 10412 /* shutdown all operations */ 10413 if (!test_bit(__I40E_SUSPENDED, &pf->state)) { 10414 rtnl_lock(); 10415 i40e_prep_for_reset(pf); 10416 rtnl_unlock(); 10417 } 10418 10419 /* Request a slot reset */ 10420 return PCI_ERS_RESULT_NEED_RESET; 10421 } 10422 10423 /** 10424 * i40e_pci_error_slot_reset - a PCI slot reset just happened 10425 * @pdev: PCI device information struct 10426 * 10427 * Called to find if the driver can work with the device now that 10428 * the pci slot has been reset. If a basic connection seems good 10429 * (registers are readable and have sane content) then return a 10430 * happy little PCI_ERS_RESULT_xxx. 
10431 **/ 10432 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) 10433 { 10434 struct i40e_pf *pf = pci_get_drvdata(pdev); 10435 pci_ers_result_t result; 10436 int err; 10437 u32 reg; 10438 10439 dev_info(&pdev->dev, "%s\n", __func__); 10440 if (pci_enable_device_mem(pdev)) { 10441 dev_info(&pdev->dev, 10442 "Cannot re-enable PCI device after reset.\n"); 10443 result = PCI_ERS_RESULT_DISCONNECT; 10444 } else { 10445 pci_set_master(pdev); 10446 pci_restore_state(pdev); 10447 pci_save_state(pdev); 10448 pci_wake_from_d3(pdev, false); 10449 10450 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); 10451 if (reg == 0) 10452 result = PCI_ERS_RESULT_RECOVERED; 10453 else 10454 result = PCI_ERS_RESULT_DISCONNECT; 10455 } 10456 10457 err = pci_cleanup_aer_uncorrect_error_status(pdev); 10458 if (err) { 10459 dev_info(&pdev->dev, 10460 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", 10461 err); 10462 /* non-fatal, continue */ 10463 } 10464 10465 return result; 10466 } 10467 10468 /** 10469 * i40e_pci_error_resume - restart operations after PCI error recovery 10470 * @pdev: PCI device information struct 10471 * 10472 * Called to allow the driver to bring things back up after PCI error 10473 * and/or reset recovery has finished. 10474 **/ 10475 static void i40e_pci_error_resume(struct pci_dev *pdev) 10476 { 10477 struct i40e_pf *pf = pci_get_drvdata(pdev); 10478 10479 dev_info(&pdev->dev, "%s\n", __func__); 10480 if (test_bit(__I40E_SUSPENDED, &pf->state)) 10481 return; 10482 10483 rtnl_lock(); 10484 i40e_handle_reset_warning(pf); 10485 rtnl_lock(); 10486 } 10487 10488 /** 10489 * i40e_shutdown - PCI callback for shutting down 10490 * @pdev: PCI device information struct 10491 **/ 10492 static void i40e_shutdown(struct pci_dev *pdev) 10493 { 10494 struct i40e_pf *pf = pci_get_drvdata(pdev); 10495 struct i40e_hw *hw = &pf->hw; 10496 10497 set_bit(__I40E_SUSPENDED, &pf->state); 10498 set_bit(__I40E_DOWN, &pf->state); 10499 rtnl_lock(); 10500 i40e_prep_for_reset(pf); 10501 rtnl_unlock(); 10502 10503 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 10504 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 10505 10506 del_timer_sync(&pf->service_timer); 10507 cancel_work_sync(&pf->service_task); 10508 i40e_fdir_teardown(pf); 10509 10510 rtnl_lock(); 10511 i40e_prep_for_reset(pf); 10512 rtnl_unlock(); 10513 10514 wr32(hw, I40E_PFPM_APM, 10515 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 10516 wr32(hw, I40E_PFPM_WUFC, 10517 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 10518 10519 i40e_clear_interrupt_scheme(pf); 10520 10521 if (system_state == SYSTEM_POWER_OFF) { 10522 pci_wake_from_d3(pdev, pf->wol_en); 10523 pci_set_power_state(pdev, PCI_D3hot); 10524 } 10525 } 10526 10527 #ifdef CONFIG_PM 10528 /** 10529 * i40e_suspend - PCI callback for moving to D3 10530 * @pdev: PCI device information struct 10531 **/ 10532 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) 10533 { 10534 struct i40e_pf *pf = pci_get_drvdata(pdev); 10535 struct i40e_hw *hw = &pf->hw; 10536 10537 set_bit(__I40E_SUSPENDED, &pf->state); 10538 set_bit(__I40E_DOWN, &pf->state); 10539 10540 rtnl_lock(); 10541 i40e_prep_for_reset(pf); 10542 rtnl_unlock(); 10543 10544 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 10545 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); 10546 10547 pci_wake_from_d3(pdev, pf->wol_en); 10548 pci_set_power_state(pdev, PCI_D3hot); 10549 10550 return 0; 10551 } 10552 10553 /** 10554 * i40e_resume - PCI callback for waking up from D3 10555 * @pdev: PCI device information struct 10556 **/ 10557 static int i40e_resume(struct pci_dev *pdev) 10558 { 10559 struct i40e_pf *pf = pci_get_drvdata(pdev); 10560 u32 err; 10561 10562 pci_set_power_state(pdev, PCI_D0); 10563 pci_restore_state(pdev); 10564 /* pci_restore_state() clears dev->state_saves, so 10565 * call pci_save_state() again to restore it. 10566 */ 10567 pci_save_state(pdev); 10568 10569 err = pci_enable_device_mem(pdev); 10570 if (err) { 10571 dev_err(&pdev->dev, 10572 "%s: Cannot enable PCI device from suspend\n", 10573 __func__); 10574 return err; 10575 } 10576 pci_set_master(pdev); 10577 10578 /* no wakeup events while running */ 10579 pci_wake_from_d3(pdev, false); 10580 10581 /* handling the reset will rebuild the device state */ 10582 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { 10583 clear_bit(__I40E_DOWN, &pf->state); 10584 rtnl_lock(); 10585 i40e_reset_and_rebuild(pf, false); 10586 rtnl_unlock(); 10587 } 10588 10589 return 0; 10590 } 10591 10592 #endif 10593 static const struct pci_error_handlers i40e_err_handler = { 10594 .error_detected = i40e_pci_error_detected, 10595 .slot_reset = i40e_pci_error_slot_reset, 10596 .resume = i40e_pci_error_resume, 10597 }; 10598 10599 static struct pci_driver i40e_driver = { 10600 .name = i40e_driver_name, 10601 .id_table = i40e_pci_tbl, 10602 .probe = i40e_probe, 10603 .remove = i40e_remove, 10604 #ifdef CONFIG_PM 10605 .suspend = i40e_suspend, 10606 .resume = i40e_resume, 10607 #endif 10608 .shutdown = i40e_shutdown, 10609 .err_handler = &i40e_err_handler, 10610 .sriov_configure = i40e_pci_sriov_configure, 10611 }; 10612 10613 /** 10614 * i40e_init_module - Driver registration routine 10615 * 10616 * i40e_init_module is the first routine called when the driver is 10617 * loaded. All it does is register with the PCI subsystem. 10618 **/ 10619 static int __init i40e_init_module(void) 10620 { 10621 pr_info("%s: %s - version %s\n", i40e_driver_name, 10622 i40e_driver_string, i40e_driver_version_str); 10623 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); 10624 10625 i40e_dbg_init(); 10626 return pci_register_driver(&i40e_driver); 10627 } 10628 module_init(i40e_init_module); 10629 10630 /** 10631 * i40e_exit_module - Driver exit cleanup routine 10632 * 10633 * i40e_exit_module is called just before the driver is removed 10634 * from memory. 10635 **/ 10636 static void __exit i40e_exit_module(void) 10637 { 10638 pci_unregister_driver(&i40e_driver); 10639 i40e_dbg_exit(); 10640 } 10641 module_exit(i40e_exit_module); 10642