/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 30
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}
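
/* Worked example (illustrative only): with needed == 4, search_hint == 0
 * and entries 0..5 already marked valid (in use), the scan above advances
 * past the used entries to index 6; if entries 6..9 are free they are
 * stamped with the caller's id plus I40E_PILE_VALID_BIT, 6 is returned as
 * the base index, and search_hint moves to 10 so the next request starts
 * past this lump.
 */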

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		i40e_down(vsi);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: storage for the statistics to be filled in
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
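		/* The Rx ring is allocated in the same block of memory as
		 * its Tx partner and laid out directly after it, so indexing
		 * one ring past the Tx ring yields the paired Rx ring (the
		 * same pattern appears in i40e_update_stats below).
		 */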
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
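
/* Worked example of the roll-over handling above (illustrative only):
 * if a 48-bit counter read 0x1000 when the driver started (saved as
 * *offset) and a later read returns 0x0800, the hardware counter has
 * wrapped; the reported stat becomes (0x0800 + 2^48) - 0x1000, masked
 * back down to 48 bits, so the value keeps counting up from zero
 * instead of jumping backwards.  The 32-bit variant works the same way
 * with 2^32.
 */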

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
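
/* Example of the logic above (illustrative only): if the DCB priority
 * table maps user priority 3 to TC1 and new XOFF frames arrived on
 * priority 3, xoff[1] is set and hang detection is disarmed for every
 * Tx ring whose dcb_tc is 1, since those queues were legitimately
 * paused rather than hung.
 */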

/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	int i;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);

		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors
				+ nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);

		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);
	}

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only when every filter has vlan == -1 is the VSI not in vlan
	 * mode, so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		memcpy(f->macaddr, macaddr, ETH_ALEN);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
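
/* Note on the reference counting above (illustrative only): a filter
 * used by both a VF and a netdev carries counter == 2 with is_vf and
 * is_netdev set; each i40e_del_filter() call below drops one reference,
 * and only when counter reaches zero is the filter marked changed so
 * the sync task removes it from the firmware.
 */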

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the power-of-2 of the number of queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
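
/* Worked example for the queue map above (illustrative only, assuming
 * rss_size allows it): a VSI with 16 allocated queue pairs and two
 * enabled TCs gets num_tc_qps = rounddown_pow_of_two(16 / 2) = 8 per TC;
 * TC0 maps offset 0 with pow = 3 (2^3 = 8 queues) and TC1 maps offset 8,
 * so each qmap word packs a TC's first queue and its power-of-2 count.
 */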

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array-typed pointers, kcalloc'd later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			memcpy(del_list[num_del].mac_addr,
			       f->macaddr, ETH_ALEN);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del, NULL);
			num_del = 0;

			if (aq_ret)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			memcpy(add_list[num_add].mac_addr,
			       f->macaddr, ETH_ALEN);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
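			/* If the firmware's filter table has run out of
			 * space (ENOSPC), fall back to promiscuous mode so
			 * frames that no longer fit an exact filter are
			 * still received rather than silently dropped.
			 */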
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct i40e_vsi *vsi = np->vsi;
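
	/* For reference (illustrative): ETH_HLEN is 14 and ETH_FCS_LEN is 4,
	 * so the default MTU of 1500 yields a 1518-byte max frame and a
	 * 9000-byte jumbo MTU yields 9018 bytes, before any VLAN tag.
	 */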
	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}

/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}
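
/* Example of the -1/0 transition above (illustrative only): with a
 * single catch-all filter (vid == -1, "any tag or untagged"), adding
 * vid 100 replaces the -1 filter with a vid 0 filter, leaving filters
 * for vid 0 (untagged) and vid 100 only; removing vid 100 later
 * reverses this in i40e_vsi_kill_vlan() so the VSI goes back to
 * accepting any traffic.
 */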
This signifies that we should from now 1902 * on accept any traffic (with any tag present, or untagged) 1903 */ 1904 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1905 if (is_netdev) { 1906 if (f->vlan && 1907 ether_addr_equal(netdev->dev_addr, f->macaddr)) 1908 filter_count++; 1909 } 1910 1911 if (f->vlan) 1912 filter_count++; 1913 } 1914 1915 if (!filter_count && is_netdev) { 1916 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 1917 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 1918 is_vf, is_netdev); 1919 if (!f) { 1920 dev_info(&vsi->back->pdev->dev, 1921 "Could not add filter %d for %pM\n", 1922 I40E_VLAN_ANY, netdev->dev_addr); 1923 return -ENOMEM; 1924 } 1925 } 1926 1927 if (!filter_count) { 1928 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1929 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 1930 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 1931 is_vf, is_netdev); 1932 if (!add_f) { 1933 dev_info(&vsi->back->pdev->dev, 1934 "Could not add filter %d for %pM\n", 1935 I40E_VLAN_ANY, f->macaddr); 1936 return -ENOMEM; 1937 } 1938 } 1939 } 1940 1941 if (test_bit(__I40E_DOWN, &vsi->back->state) || 1942 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 1943 return 0; 1944 1945 return i40e_sync_vsi_filters(vsi); 1946 } 1947 1948 /** 1949 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1950 * @netdev: network interface to be adjusted 1951 * @vid: vlan id to be added 1952 * 1953 * net_device_ops implementation for adding vlan ids 1954 **/ 1955 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1956 __always_unused __be16 proto, u16 vid) 1957 { 1958 struct i40e_netdev_priv *np = netdev_priv(netdev); 1959 struct i40e_vsi *vsi = np->vsi; 1960 int ret = 0; 1961 1962 if (vid > 4095) 1963 return -EINVAL; 1964 1965 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 1966 1967 /* If the network stack called us with vid = 0, we should 1968 * indicate to i40e_vsi_add_vlan() that we want to receive 1969 * any traffic (i.e. with any vlan tag, or untagged) 1970 */ 1971 ret = i40e_vsi_add_vlan(vsi, vid ? 
vid : I40E_VLAN_ANY); 1972 1973 if (!ret && (vid < VLAN_N_VID)) 1974 set_bit(vid, vsi->active_vlans); 1975 1976 return ret; 1977 } 1978 1979 /** 1980 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1981 * @netdev: network interface to be adjusted 1982 * @vid: vlan id to be removed 1983 * 1984 * net_device_ops implementation for removing vlan ids 1985 **/ 1986 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1987 __always_unused __be16 proto, u16 vid) 1988 { 1989 struct i40e_netdev_priv *np = netdev_priv(netdev); 1990 struct i40e_vsi *vsi = np->vsi; 1991 1992 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); 1993 1994 /* return code is ignored as there is nothing a user 1995 * can do about failure to remove and a log message was 1996 * already printed from the other function 1997 */ 1998 i40e_vsi_kill_vlan(vsi, vid); 1999 2000 clear_bit(vid, vsi->active_vlans); 2001 2002 return 0; 2003 } 2004 2005 /** 2006 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up 2007 * @vsi: the vsi being brought back up 2008 **/ 2009 static void i40e_restore_vlan(struct i40e_vsi *vsi) 2010 { 2011 u16 vid; 2012 2013 if (!vsi->netdev) 2014 return; 2015 2016 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); 2017 2018 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) 2019 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), 2020 vid); 2021 } 2022 2023 /** 2024 * i40e_vsi_add_pvid - Add pvid for the VSI 2025 * @vsi: the vsi being adjusted 2026 * @vid: the vlan id to set as a PVID 2027 **/ 2028 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 2029 { 2030 struct i40e_vsi_context ctxt; 2031 i40e_status aq_ret; 2032 2033 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 2034 vsi->info.pvid = cpu_to_le16(vid); 2035 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | 2036 I40E_AQ_VSI_PVLAN_INSERT_PVID | 2037 I40E_AQ_VSI_PVLAN_EMOD_STR; 2038 2039 ctxt.seid = vsi->seid; 2040 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 2041 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2042 if (aq_ret) { 2043 dev_info(&vsi->back->pdev->dev, 2044 "%s: update vsi failed, aq_err=%d\n", 2045 __func__, vsi->back->hw.aq.asq_last_status); 2046 return -ENOENT; 2047 } 2048 2049 return 0; 2050 } 2051 2052 /** 2053 * i40e_vsi_remove_pvid - Remove the pvid from the VSI 2054 * @vsi: the vsi being adjusted 2055 * 2056 * Just use i40e_vlan_stripping_disable() to put it back to normal 2057 **/ 2058 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) 2059 { 2060 i40e_vlan_stripping_disable(vsi); 2061 2062 vsi->info.pvid = 0; 2063 } 2064 2065 /** 2066 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources 2067 * @vsi: ptr to the VSI 2068 * 2069 * If this function returns with an error, then it's possible one or 2070 * more of the rings is populated (while the rest are not). It is the 2071 * callers duty to clean those orphaned rings.
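 * (typically via i40e_vsi_free_tx_resources() below)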
2072 * 2073 * Return 0 on success, negative on failure 2074 **/ 2075 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) 2076 { 2077 int i, err = 0; 2078 2079 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2080 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); 2081 2082 return err; 2083 } 2084 2085 /** 2086 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues 2087 * @vsi: ptr to the VSI 2088 * 2089 * Free VSI's transmit software resources 2090 **/ 2091 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) 2092 { 2093 int i; 2094 2095 if (!vsi->tx_rings) 2096 return; 2097 2098 for (i = 0; i < vsi->num_queue_pairs; i++) 2099 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 2100 i40e_free_tx_resources(vsi->tx_rings[i]); 2101 } 2102 2103 /** 2104 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources 2105 * @vsi: ptr to the VSI 2106 * 2107 * If this function returns with an error, then it's possible one or 2108 * more of the rings is populated (while the rest are not). It is the 2109 * callers duty to clean those orphaned rings. 2110 * 2111 * Return 0 on success, negative on failure 2112 **/ 2113 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) 2114 { 2115 int i, err = 0; 2116 2117 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2118 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); 2119 return err; 2120 } 2121 2122 /** 2123 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues 2124 * @vsi: ptr to the VSI 2125 * 2126 * Free all receive software resources 2127 **/ 2128 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) 2129 { 2130 int i; 2131 2132 if (!vsi->rx_rings) 2133 return; 2134 2135 for (i = 0; i < vsi->num_queue_pairs; i++) 2136 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) 2137 i40e_free_rx_resources(vsi->rx_rings[i]); 2138 } 2139 2140 /** 2141 * i40e_configure_tx_ring - Configure a transmit ring context and rest 2142 * @ring: The Tx ring to configure 2143 * 2144 * Configure the Tx descriptor ring in the HMC context. 2145 **/ 2146 static int i40e_configure_tx_ring(struct i40e_ring *ring) 2147 { 2148 struct i40e_vsi *vsi = ring->vsi; 2149 u16 pf_q = vsi->base_queue + ring->queue_index; 2150 struct i40e_hw *hw = &vsi->back->hw; 2151 struct i40e_hmc_obj_txq tx_ctx; 2152 i40e_status err = 0; 2153 u32 qtx_ctl = 0; 2154 2155 /* some ATR related tx ring init */ 2156 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { 2157 ring->atr_sample_rate = vsi->back->atr_sample_rate; 2158 ring->atr_count = 0; 2159 } else { 2160 ring->atr_sample_rate = 0; 2161 } 2162 2163 /* initialize XPS */ 2164 if (ring->q_vector && ring->netdev && 2165 vsi->tc_config.numtc <= 1 && 2166 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) 2167 netif_set_xps_queue(ring->netdev, 2168 &ring->q_vector->affinity_mask, 2169 ring->queue_index); 2170 2171 /* clear the context structure first */ 2172 memset(&tx_ctx, 0, sizeof(tx_ctx)); 2173 2174 tx_ctx.new_context = 1; 2175 tx_ctx.base = (ring->dma / 128); 2176 tx_ctx.qlen = ring->count; 2177 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 2178 I40E_FLAG_FD_ATR_ENABLED)); 2179 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); 2180 2181 /* As part of VSI creation/update, FW allocates certain 2182 * Tx arbitration queue sets for each TC enabled for 2183 * the VSI. The FW returns the handles to these queue 2184 * sets as part of the response buffer to Add VSI, 2185 * Update VSI, etc. AQ commands. 
It is expected that 2186 * these queue set handles be associated with the Tx 2187 * queues by the driver as part of the TX queue context 2188 * initialization. This has to be done regardless of 2189 * DCB as by default everything is mapped to TC0. 2190 */ 2191 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); 2192 tx_ctx.rdylist_act = 0; 2193 2194 /* clear the context in the HMC */ 2195 err = i40e_clear_lan_tx_queue_context(hw, pf_q); 2196 if (err) { 2197 dev_info(&vsi->back->pdev->dev, 2198 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", 2199 ring->queue_index, pf_q, err); 2200 return -ENOMEM; 2201 } 2202 2203 /* set the context in the HMC */ 2204 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); 2205 if (err) { 2206 dev_info(&vsi->back->pdev->dev, 2207 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n", 2208 ring->queue_index, pf_q, err); 2209 return -ENOMEM; 2210 } 2211 2212 /* Now associate this queue with this PCI function */ 2213 if (vsi->type == I40E_VSI_VMDQ2) 2214 qtx_ctl = I40E_QTX_CTL_VM_QUEUE; 2215 else 2216 qtx_ctl = I40E_QTX_CTL_PF_QUEUE; 2217 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & 2218 I40E_QTX_CTL_PF_INDX_MASK); 2219 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); 2220 i40e_flush(hw); 2221 2222 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); 2223 2224 /* cache tail off for easier writes later */ 2225 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); 2226 2227 return 0; 2228 } 2229 2230 /** 2231 * i40e_configure_rx_ring - Configure a receive ring context 2232 * @ring: The Rx ring to configure 2233 * 2234 * Configure the Rx descriptor ring in the HMC context. 2235 **/ 2236 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2237 { 2238 struct i40e_vsi *vsi = ring->vsi; 2239 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2240 u16 pf_q = vsi->base_queue + ring->queue_index; 2241 struct i40e_hw *hw = &vsi->back->hw; 2242 struct i40e_hmc_obj_rxq rx_ctx; 2243 i40e_status err = 0; 2244 2245 ring->state = 0; 2246 2247 /* clear the context structure first */ 2248 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2249 2250 ring->rx_buf_len = vsi->rx_buf_len; 2251 ring->rx_hdr_len = vsi->rx_hdr_len; 2252 2253 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2254 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2255 2256 rx_ctx.base = (ring->dma / 128); 2257 rx_ctx.qlen = ring->count; 2258 2259 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2260 set_ring_16byte_desc_enabled(ring); 2261 rx_ctx.dsize = 0; 2262 } else { 2263 rx_ctx.dsize = 1; 2264 } 2265 2266 rx_ctx.dtype = vsi->dtype; 2267 if (vsi->dtype) { 2268 set_ring_ps_enabled(ring); 2269 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2270 I40E_RX_SPLIT_IP | 2271 I40E_RX_SPLIT_TCP_UDP | 2272 I40E_RX_SPLIT_SCTP; 2273 } else { 2274 rx_ctx.hsplit_0 = 0; 2275 } 2276 2277 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2278 (chain_len * ring->rx_buf_len)); 2279 rx_ctx.tphrdesc_ena = 1; 2280 rx_ctx.tphwdesc_ena = 1; 2281 rx_ctx.tphdata_ena = 1; 2282 rx_ctx.tphhead_ena = 1; 2283 if (hw->revision_id == 0) 2284 rx_ctx.lrxqthresh = 0; 2285 else 2286 rx_ctx.lrxqthresh = 2; 2287 rx_ctx.crcstrip = 1; 2288 rx_ctx.l2tsel = 1; 2289 rx_ctx.showiv = 1; 2290 2291 /* clear the context in the HMC */ 2292 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2293 if (err) { 2294 dev_info(&vsi->back->pdev->dev, 2295 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2296 ring->queue_index, pf_q, err); 2297 return -ENOMEM; 2298 } 2299 
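	/* Note: rx_ctx.rxmax above caps the hardware receive frame size at
	 * the smaller of the VSI max_frame and what one buffer chain can
	 * hold (chain_len * ring->rx_buf_len).
	 */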
2300 /* set the context in the HMC */ 2301 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2302 if (err) { 2303 dev_info(&vsi->back->pdev->dev, 2304 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2305 ring->queue_index, pf_q, err); 2306 return -ENOMEM; 2307 } 2308 2309 /* cache tail for quicker writes, and clear the reg before use */ 2310 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2311 writel(0, ring->tail); 2312 2313 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 2314 2315 return 0; 2316 } 2317 2318 /** 2319 * i40e_vsi_configure_tx - Configure the VSI for Tx 2320 * @vsi: VSI structure describing this set of rings and resources 2321 * 2322 * Configure the Tx VSI for operation. 2323 **/ 2324 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2325 { 2326 int err = 0; 2327 u16 i; 2328 2329 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2330 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2331 2332 return err; 2333 } 2334 2335 /** 2336 * i40e_vsi_configure_rx - Configure the VSI for Rx 2337 * @vsi: the VSI being configured 2338 * 2339 * Configure the Rx VSI for operation. 2340 **/ 2341 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2342 { 2343 int err = 0; 2344 u16 i; 2345 2346 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2347 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2348 + ETH_FCS_LEN + VLAN_HLEN; 2349 else 2350 vsi->max_frame = I40E_RXBUFFER_2048; 2351 2352 /* figure out correct receive buffer length */ 2353 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2354 I40E_FLAG_RX_PS_ENABLED)) { 2355 case I40E_FLAG_RX_1BUF_ENABLED: 2356 vsi->rx_hdr_len = 0; 2357 vsi->rx_buf_len = vsi->max_frame; 2358 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2359 break; 2360 case I40E_FLAG_RX_PS_ENABLED: 2361 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2362 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2363 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2364 break; 2365 default: 2366 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2367 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2368 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2369 break; 2370 } 2371 2372 /* round up for the chip's needs */ 2373 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2374 (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); 2375 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2376 (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); 2377 2378 /* set up individual rings */ 2379 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2380 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2381 2382 return err; 2383 } 2384 2385 /** 2386 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2387 * @vsi: ptr to the VSI 2388 **/ 2389 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2390 { 2391 u16 qoffset, qcount; 2392 int i, n; 2393 2394 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2395 return; 2396 2397 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2398 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2399 continue; 2400 2401 qoffset = vsi->tc_config.tc_info[n].qoffset; 2402 qcount = vsi->tc_config.tc_info[n].qcount; 2403 for (i = qoffset; i < (qoffset + qcount); i++) { 2404 struct i40e_ring *rx_ring = vsi->rx_rings[i]; 2405 struct i40e_ring *tx_ring = vsi->tx_rings[i]; 2406 rx_ring->dcb_tc = n; 2407 tx_ring->dcb_tc = n; 2408 } 2409 } 2410 } 2411 2412 /** 2413 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 2414 * @vsi: ptr to the VSI 2415 **/ 2416 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 2417 { 2418 if (vsi->netdev) 2419 i40e_set_rx_mode(vsi->netdev); 2420 } 2421 2422 /** 2423 * i40e_vsi_configure - Set up the VSI for action 2424 
* @vsi: the VSI being configured 2425 **/ 2426 static int i40e_vsi_configure(struct i40e_vsi *vsi) 2427 { 2428 int err; 2429 2430 i40e_set_vsi_rx_mode(vsi); 2431 i40e_restore_vlan(vsi); 2432 i40e_vsi_config_dcb_rings(vsi); 2433 err = i40e_vsi_configure_tx(vsi); 2434 if (!err) 2435 err = i40e_vsi_configure_rx(vsi); 2436 2437 return err; 2438 } 2439 2440 /** 2441 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW 2442 * @vsi: the VSI being configured 2443 **/ 2444 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) 2445 { 2446 struct i40e_pf *pf = vsi->back; 2447 struct i40e_q_vector *q_vector; 2448 struct i40e_hw *hw = &pf->hw; 2449 u16 vector; 2450 int i, q; 2451 u32 val; 2452 u32 qp; 2453 2454 /* The interrupt indexing is offset by 1 in the PFINT_ITRn 2455 * and PFINT_LNKLSTn registers, e.g.: 2456 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) 2457 */ 2458 qp = vsi->base_queue; 2459 vector = vsi->base_vector; 2460 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2461 q_vector = vsi->q_vectors[i]; 2462 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2463 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2464 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 2465 q_vector->rx.itr); 2466 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2467 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2468 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), 2469 q_vector->tx.itr); 2470 2471 /* Linked list for the queuepairs assigned to this vector */ 2472 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); 2473 for (q = 0; q < q_vector->num_ringpairs; q++) { 2474 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2475 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2476 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 2477 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| 2478 (I40E_QUEUE_TYPE_TX 2479 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); 2480 2481 wr32(hw, I40E_QINT_RQCTL(qp), val); 2482 2483 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2484 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2485 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 2486 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| 2487 (I40E_QUEUE_TYPE_RX 2488 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2489 2490 /* Terminate the linked list */ 2491 if (q == (q_vector->num_ringpairs - 1)) 2492 val |= (I40E_QUEUE_END_OF_LIST 2493 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2494 2495 wr32(hw, I40E_QINT_TQCTL(qp), val); 2496 qp++; 2497 } 2498 } 2499 2500 i40e_flush(hw); 2501 } 2502 2503 /** 2504 * i40e_enable_misc_int_causes - enable the non-queue interrupts 2505 * @hw: ptr to the hardware info 2506 **/ 2507 static void i40e_enable_misc_int_causes(struct i40e_hw *hw) 2508 { 2509 u32 val; 2510 2511 /* clear things first */ 2512 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 2513 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 2514 2515 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 2516 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 2517 I40E_PFINT_ICR0_ENA_GRST_MASK | 2518 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2519 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2520 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | 2521 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | 2522 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2523 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2524 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2525 2526 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2527 2528 /* SW_ITR_IDX = 0, but don't change INTENA */ 2529 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 2530 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 2531 2532 /* OTHER_ITR_IDX = 0 */ 2533 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2534 } 2535 2536 /** 2537 * 
i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 2538 * @vsi: the VSI being configured 2539 **/ 2540 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2541 { 2542 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 2543 struct i40e_pf *pf = vsi->back; 2544 struct i40e_hw *hw = &pf->hw; 2545 u32 val; 2546 2547 /* set the ITR configuration */ 2548 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2549 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2550 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 2551 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2552 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2553 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2554 2555 i40e_enable_misc_int_causes(hw); 2556 2557 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2558 wr32(hw, I40E_PFINT_LNKLST0, 0); 2559 2560 /* Associate the queue pair to the vector and enable the q int */ 2561 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2562 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2563 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2564 2565 wr32(hw, I40E_QINT_RQCTL(0), val); 2566 2567 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2568 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2569 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2570 2571 wr32(hw, I40E_QINT_TQCTL(0), val); 2572 i40e_flush(hw); 2573 } 2574 2575 /** 2576 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 2577 * @pf: board private structure 2578 **/ 2579 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 2580 { 2581 struct i40e_hw *hw = &pf->hw; 2582 2583 wr32(hw, I40E_PFINT_DYN_CTL0, 2584 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2585 i40e_flush(hw); 2586 } 2587 2588 /** 2589 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 2590 * @pf: board private structure 2591 **/ 2592 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2593 { 2594 struct i40e_hw *hw = &pf->hw; 2595 u32 val; 2596 2597 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 2598 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2599 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 2600 2601 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2602 i40e_flush(hw); 2603 } 2604 2605 /** 2606 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 2607 * @vsi: pointer to a vsi 2608 * @vector: enable a particular Hw Interrupt vector 2609 **/ 2610 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) 2611 { 2612 struct i40e_pf *pf = vsi->back; 2613 struct i40e_hw *hw = &pf->hw; 2614 u32 val; 2615 2616 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2617 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2618 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2619 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2620 /* skip the flush */ 2621 } 2622 2623 /** 2624 * i40e_msix_clean_rings - MSIX mode Interrupt Handler 2625 * @irq: interrupt number 2626 * @data: pointer to a q_vector 2627 **/ 2628 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 2629 { 2630 struct i40e_q_vector *q_vector = data; 2631 2632 if (!q_vector->tx.ring && !q_vector->rx.ring) 2633 return IRQ_HANDLED; 2634 2635 napi_schedule(&q_vector->napi); 2636 2637 return IRQ_HANDLED; 2638 } 2639 2640 /** 2641 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts 2642 * @vsi: the VSI being configured 2643 * @basename: name for the vector 2644 * 2645 * Allocates MSI-X vectors and requests interrupts from the kernel. 
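 *
 * As a hypothetical example, with a basename of "eth0" a combined
 * Tx/Rx vector is registered as "eth0-TxRx-0", the next one as
 * "eth0-TxRx-1", and so on; these names are what show up in
 * /proc/interrupts.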
2646 **/ 2647 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) 2648 { 2649 int q_vectors = vsi->num_q_vectors; 2650 struct i40e_pf *pf = vsi->back; 2651 int base = vsi->base_vector; 2652 int rx_int_idx = 0; 2653 int tx_int_idx = 0; 2654 int vector, err; 2655 2656 for (vector = 0; vector < q_vectors; vector++) { 2657 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; 2658 2659 if (q_vector->tx.ring && q_vector->rx.ring) { 2660 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2661 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2662 tx_int_idx++; 2663 } else if (q_vector->rx.ring) { 2664 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2665 "%s-%s-%d", basename, "rx", rx_int_idx++); 2666 } else if (q_vector->tx.ring) { 2667 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2668 "%s-%s-%d", basename, "tx", tx_int_idx++); 2669 } else { 2670 /* skip this unused q_vector */ 2671 continue; 2672 } 2673 err = request_irq(pf->msix_entries[base + vector].vector, 2674 vsi->irq_handler, 2675 0, 2676 q_vector->name, 2677 q_vector); 2678 if (err) { 2679 dev_info(&pf->pdev->dev, 2680 "%s: request_irq failed, error: %d\n", 2681 __func__, err); 2682 goto free_queue_irqs; 2683 } 2684 /* assign the mask for this irq */ 2685 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 2686 &q_vector->affinity_mask); 2687 } 2688 2689 return 0; 2690 2691 free_queue_irqs: 2692 while (vector) { 2693 vector--; 2694 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, 2695 NULL); 2696 free_irq(pf->msix_entries[base + vector].vector, 2697 vsi->q_vectors[vector]); 2698 } 2699 return err; 2700 } 2701 2702 /** 2703 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI 2704 * @vsi: the VSI being un-configured 2705 **/ 2706 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) 2707 { 2708 struct i40e_pf *pf = vsi->back; 2709 struct i40e_hw *hw = &pf->hw; 2710 int base = vsi->base_vector; 2711 int i; 2712 2713 for (i = 0; i < vsi->num_queue_pairs; i++) { 2714 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); 2715 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); 2716 } 2717 2718 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 2719 for (i = vsi->base_vector; 2720 i < (vsi->num_q_vectors + vsi->base_vector); i++) 2721 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); 2722 2723 i40e_flush(hw); 2724 for (i = 0; i < vsi->num_q_vectors; i++) 2725 synchronize_irq(pf->msix_entries[i + base].vector); 2726 } else { 2727 /* Legacy and MSI mode - this stops all interrupt handling */ 2728 wr32(hw, I40E_PFINT_ICR0_ENA, 0); 2729 wr32(hw, I40E_PFINT_DYN_CTL0, 0); 2730 i40e_flush(hw); 2731 synchronize_irq(pf->pdev->irq); 2732 } 2733 } 2734 2735 /** 2736 * i40e_vsi_enable_irq - Enable IRQ for the given VSI 2737 * @vsi: the VSI being configured 2738 **/ 2739 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) 2740 { 2741 struct i40e_pf *pf = vsi->back; 2742 int i; 2743 2744 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 2745 for (i = vsi->base_vector; 2746 i < (vsi->num_q_vectors + vsi->base_vector); i++) 2747 i40e_irq_dynamic_enable(vsi, i); 2748 } else { 2749 i40e_irq_dynamic_enable_icr0(pf); 2750 } 2751 2752 i40e_flush(&pf->hw); 2753 return 0; 2754 } 2755 2756 /** 2757 * i40e_stop_misc_vector - Stop the vector that handles non-queue events 2758 * @pf: board private structure 2759 **/ 2760 static void i40e_stop_misc_vector(struct i40e_pf *pf) 2761 { 2762 /* Disable ICR 0 */ 2763 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); 2764 i40e_flush(&pf->hw); 2765 } 2766 2767 /** 2768 *
i40e_intr - MSI/Legacy and non-queue interrupt handler 2769 * @irq: interrupt number 2770 * @data: pointer to the PF structure 2771 * 2772 * This is the handler used for all MSI/Legacy interrupts, and deals 2773 * with both queue and non-queue interrupts. This is also used in 2774 * MSIX mode to handle the non-queue interrupts. 2775 **/ 2776 static irqreturn_t i40e_intr(int irq, void *data) 2777 { 2778 struct i40e_pf *pf = (struct i40e_pf *)data; 2779 struct i40e_hw *hw = &pf->hw; 2780 irqreturn_t ret = IRQ_NONE; 2781 u32 icr0, icr0_remaining; 2782 u32 val, ena_mask; 2783 2784 icr0 = rd32(hw, I40E_PFINT_ICR0); 2785 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 2786 2787 /* if sharing a legacy IRQ, we might get called w/o an intr pending */ 2788 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) 2789 goto enable_intr; 2790 2791 /* if interrupt but no bits showing, must be SWINT */ 2792 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || 2793 (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) 2794 pf->sw_int_count++; 2795 2796 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 2797 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { 2798 2799 /* temporarily disable queue cause for NAPI processing */ 2800 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); 2801 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; 2802 wr32(hw, I40E_QINT_RQCTL(0), qval); 2803 2804 qval = rd32(hw, I40E_QINT_TQCTL(0)); 2805 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 2806 wr32(hw, I40E_QINT_TQCTL(0), qval); 2807 2808 if (!test_bit(__I40E_DOWN, &pf->state)) 2809 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); 2810 } 2811 2812 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { 2813 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2814 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 2815 } 2816 2817 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 2818 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 2819 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); 2820 } 2821 2822 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { 2823 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 2824 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); 2825 } 2826 2827 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { 2828 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 2829 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); 2830 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; 2831 val = rd32(hw, I40E_GLGEN_RSTAT); 2832 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 2833 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 2834 if (val == I40E_RESET_CORER) 2835 pf->corer_count++; 2836 else if (val == I40E_RESET_GLOBR) 2837 pf->globr_count++; 2838 else if (val == I40E_RESET_EMPR) 2839 pf->empr_count++; 2840 } 2841 2842 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 2843 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 2844 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 2845 } 2846 2847 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 2848 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 2849 2850 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 2851 ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2852 i40e_ptp_tx_hwtstamp(pf); 2853 prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK; 2854 } 2855 2856 wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat); 2857 } 2858 2859 /* If a critical error is pending we have no choice but to reset the 2860 * device. 2861 * Report and mask out any remaining unexpected interrupts.
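 * For example, a PE_CRITERR, PCI_EXCEPTION, ECC_ERR or MAL_DETECT
 * cause still set in icr0_remaining below triggers a PF reset request
 * via the service task instead of being handled in this hot path.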
2862 */ 2863 icr0_remaining = icr0 & ena_mask; 2864 if (icr0_remaining) { 2865 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 2866 icr0_remaining); 2867 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 2868 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 2869 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || 2870 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) { 2871 dev_info(&pf->pdev->dev, "device will be reset\n"); 2872 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2873 i40e_service_event_schedule(pf); 2874 } 2875 ena_mask &= ~icr0_remaining; 2876 } 2877 ret = IRQ_HANDLED; 2878 2879 enable_intr: 2880 /* re-enable interrupt causes */ 2881 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 2882 if (!test_bit(__I40E_DOWN, &pf->state)) { 2883 i40e_service_event_schedule(pf); 2884 i40e_irq_dynamic_enable_icr0(pf); 2885 } 2886 2887 return ret; 2888 } 2889 2890 /** 2891 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 2892 * @tx_ring: tx ring to clean 2893 * @budget: how many cleans we're allowed 2894 * 2895 * Returns true if there's any budget left (e.g. the clean is finished) 2896 **/ 2897 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 2898 { 2899 struct i40e_vsi *vsi = tx_ring->vsi; 2900 u16 i = tx_ring->next_to_clean; 2901 struct i40e_tx_buffer *tx_buf; 2902 struct i40e_tx_desc *tx_desc; 2903 2904 tx_buf = &tx_ring->tx_bi[i]; 2905 tx_desc = I40E_TX_DESC(tx_ring, i); 2906 i -= tx_ring->count; 2907 2908 do { 2909 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 2910 2911 /* if next_to_watch is not set then there is no work pending */ 2912 if (!eop_desc) 2913 break; 2914 2915 /* prevent any other reads prior to eop_desc */ 2916 read_barrier_depends(); 2917 2918 /* if the descriptor isn't done, no work yet to do */ 2919 if (!(eop_desc->cmd_type_offset_bsz & 2920 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 2921 break; 2922 2923 /* clear next_to_watch to prevent false hangs */ 2924 tx_buf->next_to_watch = NULL; 2925 2926 /* unmap skb header data */ 2927 dma_unmap_single(tx_ring->dev, 2928 dma_unmap_addr(tx_buf, dma), 2929 dma_unmap_len(tx_buf, len), 2930 DMA_TO_DEVICE); 2931 2932 dma_unmap_len_set(tx_buf, len, 0); 2933 2934 2935 /* move to the next desc and buffer to clean */ 2936 tx_buf++; 2937 tx_desc++; 2938 i++; 2939 if (unlikely(!i)) { 2940 i -= tx_ring->count; 2941 tx_buf = tx_ring->tx_bi; 2942 tx_desc = I40E_TX_DESC(tx_ring, 0); 2943 } 2944 2945 /* update budget accounting */ 2946 budget--; 2947 } while (likely(budget)); 2948 2949 i += tx_ring->count; 2950 tx_ring->next_to_clean = i; 2951 2952 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 2953 i40e_irq_dynamic_enable(vsi, 2954 tx_ring->q_vector->v_idx + vsi->base_vector); 2955 } 2956 return budget > 0; 2957 } 2958 2959 /** 2960 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 2961 * @irq: interrupt number 2962 * @data: pointer to a q_vector 2963 **/ 2964 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 2965 { 2966 struct i40e_q_vector *q_vector = data; 2967 struct i40e_vsi *vsi; 2968 2969 if (!q_vector->tx.ring) 2970 return IRQ_HANDLED; 2971 2972 vsi = q_vector->tx.ring->vsi; 2973 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 2974 2975 return IRQ_HANDLED; 2976 } 2977 2978 /** 2979 * i40e_map_vector_to_qp - Assigns the queue pair to the vector 2980 * @vsi: the VSI being configured 2981 * @v_idx: vector index 2982 * @qp_idx: queue pair index 2983 **/ 2984 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, 
int qp_idx) 2985 { 2986 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 2987 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 2988 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 2989 2990 tx_ring->q_vector = q_vector; 2991 tx_ring->next = q_vector->tx.ring; 2992 q_vector->tx.ring = tx_ring; 2993 q_vector->tx.count++; 2994 2995 rx_ring->q_vector = q_vector; 2996 rx_ring->next = q_vector->rx.ring; 2997 q_vector->rx.ring = rx_ring; 2998 q_vector->rx.count++; 2999 } 3000 3001 /** 3002 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3003 * @vsi: the VSI being configured 3004 * 3005 * This function maps descriptor rings to the queue-specific vectors 3006 * we were allotted through the MSI-X enabling code. Ideally, we'd have 3007 * one vector per queue pair, but on a constrained vector budget, we 3008 * group the queue pairs as "efficiently" as possible. 3009 **/ 3010 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) 3011 { 3012 int qp_remaining = vsi->num_queue_pairs; 3013 int q_vectors = vsi->num_q_vectors; 3014 int num_ringpairs; 3015 int v_start = 0; 3016 int qp_idx = 0; 3017 3018 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to 3019 * group them so there are multiple queues per vector. 3020 */ 3021 for (; v_start < q_vectors && qp_remaining; v_start++) { 3022 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; 3023 3024 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 3025 3026 q_vector->num_ringpairs = num_ringpairs; 3027 3028 q_vector->rx.count = 0; 3029 q_vector->tx.count = 0; 3030 q_vector->rx.ring = NULL; 3031 q_vector->tx.ring = NULL; 3032 3033 while (num_ringpairs--) { 3034 map_vector_to_qp(vsi, v_start, qp_idx); 3035 qp_idx++; 3036 qp_remaining--; 3037 } 3038 } 3039 } 3040 3041 /** 3042 * i40e_vsi_request_irq - Request IRQ from the OS 3043 * @vsi: the VSI being configured 3044 * @basename: name for the vector 3045 **/ 3046 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) 3047 { 3048 struct i40e_pf *pf = vsi->back; 3049 int err; 3050 3051 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 3052 err = i40e_vsi_request_irq_msix(vsi, basename); 3053 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 3054 err = request_irq(pf->pdev->irq, i40e_intr, 0, 3055 pf->misc_int_name, pf); 3056 else 3057 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 3058 pf->misc_int_name, pf); 3059 3060 if (err) 3061 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 3062 3063 return err; 3064 } 3065 3066 #ifdef CONFIG_NET_POLL_CONTROLLER 3067 /** 3068 * i40e_netpoll - A Polling 'interrupt'handler 3069 * @netdev: network interface device structure 3070 * 3071 * This is used by netconsole to send skbs without having to re-enable 3072 * interrupts. It's not called while the normal interrupt routine is executing. 
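 * In MSI-X mode this polls each queue vector's rings directly; in
 * MSI/legacy mode it falls through to the shared i40e_intr() handler.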
3073 **/ 3074 static void i40e_netpoll(struct net_device *netdev) 3075 { 3076 struct i40e_netdev_priv *np = netdev_priv(netdev); 3077 struct i40e_vsi *vsi = np->vsi; 3078 struct i40e_pf *pf = vsi->back; 3079 int i; 3080 3081 /* if interface is down do nothing */ 3082 if (test_bit(__I40E_DOWN, &vsi->state)) 3083 return; 3084 3085 pf->flags |= I40E_FLAG_IN_NETPOLL; 3086 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3087 for (i = 0; i < vsi->num_q_vectors; i++) 3088 i40e_msix_clean_rings(0, vsi->q_vectors[i]); 3089 } else { 3090 i40e_intr(pf->pdev->irq, netdev); 3091 } 3092 pf->flags &= ~I40E_FLAG_IN_NETPOLL; 3093 } 3094 #endif 3095 3096 /** 3097 * i40e_vsi_control_tx - Start or stop a VSI's rings 3098 * @vsi: the VSI being configured 3099 * @enable: start or stop the rings 3100 **/ 3101 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3102 { 3103 struct i40e_pf *pf = vsi->back; 3104 struct i40e_hw *hw = &pf->hw; 3105 int i, j, pf_q; 3106 u32 tx_reg; 3107 3108 pf_q = vsi->base_queue; 3109 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3110 j = 1000; 3111 do { 3112 usleep_range(1000, 2000); 3113 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3114 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) 3115 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); 3116 3117 /* Skip if the queue is already in the requested state */ 3118 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3119 continue; 3120 if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3121 continue; 3122 3123 /* turn on/off the queue */ 3124 if (enable) { 3125 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3126 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | 3127 I40E_QTX_ENA_QENA_STAT_MASK; 3128 } else { 3129 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3130 } 3131 3132 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3133 3134 /* wait for the change to finish */ 3135 for (j = 0; j < 10; j++) { 3136 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3137 if (enable) { 3138 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3139 break; 3140 } else { 3141 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3142 break; 3143 } 3144 3145 udelay(10); 3146 } 3147 if (j >= 10) { 3148 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n", 3149 pf_q, (enable ? "en" : "dis")); 3150 return -ETIMEDOUT; 3151 } 3152 } 3153 3154 if (hw->revision_id == 0) 3155 mdelay(50); 3156 3157 return 0; 3158 } 3159 3160 /** 3161 * i40e_vsi_control_rx - Start or stop a VSI's rings 3162 * @vsi: the VSI being configured 3163 * @enable: start or stop the rings 3164 **/ 3165 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) 3166 { 3167 struct i40e_pf *pf = vsi->back; 3168 struct i40e_hw *hw = &pf->hw; 3169 int i, j, pf_q; 3170 u32 rx_reg; 3171 3172 pf_q = vsi->base_queue; 3173 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3174 j = 1000; 3175 do { 3176 usleep_range(1000, 2000); 3177 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3178 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) 3179 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); 3180 3181 if (enable) { 3182 /* is STAT set ? */ 3183 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3184 continue; 3185 } else { 3186 /* is !STAT set ? 
*/ 3187 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3188 continue; 3189 } 3190 3191 /* turn on/off the queue */ 3192 if (enable) 3193 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | 3194 I40E_QRX_ENA_QENA_STAT_MASK; 3195 else 3196 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | 3197 I40E_QRX_ENA_QENA_STAT_MASK); 3198 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); 3199 3200 /* wait for the change to finish */ 3201 for (j = 0; j < 10; j++) { 3202 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3203 3204 if (enable) { 3205 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3206 break; 3207 } else { 3208 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3209 break; 3210 } 3211 3212 udelay(10); 3213 } 3214 if (j >= 10) { 3215 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n", 3216 pf_q, (enable ? "en" : "dis")); 3217 return -ETIMEDOUT; 3218 } 3219 } 3220 3221 return 0; 3222 } 3223 3224 /** 3225 * i40e_vsi_control_rings - Start or stop a VSI's rings 3226 * @vsi: the VSI being configured 3227 * @enable: start or stop the rings 3228 **/ 3229 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) 3230 { 3231 int ret = 0; 3232 3233 /* do rx first for enable and last for disable */ 3234 if (request) { 3235 ret = i40e_vsi_control_rx(vsi, request); 3236 if (ret) 3237 return ret; 3238 ret = i40e_vsi_control_tx(vsi, request); 3239 } else { 3240 /* Ignore return value, we need to shutdown whatever we can */ 3241 i40e_vsi_control_tx(vsi, request); 3242 i40e_vsi_control_rx(vsi, request); 3243 } 3244 3245 return ret; 3246 } 3247 3248 /** 3249 * i40e_vsi_free_irq - Free the irq association with the OS 3250 * @vsi: the VSI being configured 3251 **/ 3252 static void i40e_vsi_free_irq(struct i40e_vsi *vsi) 3253 { 3254 struct i40e_pf *pf = vsi->back; 3255 struct i40e_hw *hw = &pf->hw; 3256 int base = vsi->base_vector; 3257 u32 val, qp; 3258 int i; 3259 3260 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3261 if (!vsi->q_vectors) 3262 return; 3263 3264 for (i = 0; i < vsi->num_q_vectors; i++) { 3265 u16 vector = i + base; 3266 3267 /* free only the irqs that were actually requested */ 3268 if (!vsi->q_vectors[i] || 3269 !vsi->q_vectors[i]->num_ringpairs) 3270 continue; 3271 3272 /* clear the affinity_mask in the IRQ descriptor */ 3273 irq_set_affinity_hint(pf->msix_entries[vector].vector, 3274 NULL); 3275 free_irq(pf->msix_entries[vector].vector, 3276 vsi->q_vectors[i]); 3277 3278 /* Tear down the interrupt queue link list 3279 * 3280 * We know that they come in pairs and always 3281 * the Rx first, then the Tx. To clear the 3282 * link list, stick the EOL value into the 3283 * next_q field of the registers. 
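 *
 * e.g. a vector owning queue pairs 4 and 5 is torn down by walking
 * FIRSTQ -> RQCTL(4) -> TQCTL(4) -> RQCTL(5) -> TQCTL(5) -> EOL,
 * clearing the cause-enable and MSI-X index fields of each queue
 * register along the way.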
3284 */ 3285 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3286 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3287 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3288 val |= I40E_QUEUE_END_OF_LIST 3289 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3290 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3291 3292 while (qp != I40E_QUEUE_END_OF_LIST) { 3293 u32 next; 3294 3295 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3296 3297 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3298 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3299 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3300 I40E_QINT_RQCTL_INTEVENT_MASK); 3301 3302 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3303 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3304 3305 wr32(hw, I40E_QINT_RQCTL(qp), val); 3306 3307 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3308 3309 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 3310 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 3311 3312 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3313 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3314 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3315 I40E_QINT_TQCTL_INTEVENT_MASK); 3316 3317 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3318 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3319 3320 wr32(hw, I40E_QINT_TQCTL(qp), val); 3321 qp = next; 3322 } 3323 } 3324 } else { 3325 free_irq(pf->pdev->irq, pf); 3326 3327 val = rd32(hw, I40E_PFINT_LNKLST0); 3328 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3329 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3330 val |= I40E_QUEUE_END_OF_LIST 3331 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 3332 wr32(hw, I40E_PFINT_LNKLST0, val); 3333 3334 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3335 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3336 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3337 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3338 I40E_QINT_RQCTL_INTEVENT_MASK); 3339 3340 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3341 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3342 3343 wr32(hw, I40E_QINT_RQCTL(qp), val); 3344 3345 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3346 3347 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3348 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3349 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3350 I40E_QINT_TQCTL_INTEVENT_MASK); 3351 3352 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3353 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3354 3355 wr32(hw, I40E_QINT_TQCTL(qp), val); 3356 } 3357 } 3358 3359 /** 3360 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 3361 * @vsi: the VSI being configured 3362 * @v_idx: Index of vector to be freed 3363 * 3364 * This function frees the memory allocated to the q_vector. In addition if 3365 * NAPI is enabled it will delete any references to the NAPI struct prior 3366 * to freeing the q_vector. 3367 **/ 3368 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 3369 { 3370 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3371 struct i40e_ring *ring; 3372 3373 if (!q_vector) 3374 return; 3375 3376 /* disassociate q_vector from rings */ 3377 i40e_for_each_ring(ring, q_vector->tx) 3378 ring->q_vector = NULL; 3379 3380 i40e_for_each_ring(ring, q_vector->rx) 3381 ring->q_vector = NULL; 3382 3383 /* only VSI w/ an associated netdev is set up w/ NAPI */ 3384 if (vsi->netdev) 3385 netif_napi_del(&q_vector->napi); 3386 3387 vsi->q_vectors[v_idx] = NULL; 3388 3389 kfree_rcu(q_vector, rcu); 3390 } 3391 3392 /** 3393 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3394 * @vsi: the VSI being un-configured 3395 * 3396 * This frees the memory allocated to the q_vectors and 3397 * deletes references to the NAPI struct. 
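 * (e.g. it is called from i40e_clear_interrupt_scheme() during
 * interrupt teardown).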
3398 **/ 3399 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 3400 { 3401 int v_idx; 3402 3403 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 3404 i40e_free_q_vector(vsi, v_idx); 3405 } 3406 3407 /** 3408 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 3409 * @pf: board private structure 3410 **/ 3411 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 3412 { 3413 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 3414 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3415 pci_disable_msix(pf->pdev); 3416 kfree(pf->msix_entries); 3417 pf->msix_entries = NULL; 3418 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 3419 pci_disable_msi(pf->pdev); 3420 } 3421 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 3422 } 3423 3424 /** 3425 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 3426 * @pf: board private structure 3427 * 3428 * We go through and clear interrupt specific resources and reset the structure 3429 * to pre-load conditions 3430 **/ 3431 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 3432 { 3433 int i; 3434 3435 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3436 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 3437 if (pf->vsi[i]) 3438 i40e_vsi_free_q_vectors(pf->vsi[i]); 3439 i40e_reset_interrupt_capability(pf); 3440 } 3441 3442 /** 3443 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 3444 * @vsi: the VSI being configured 3445 **/ 3446 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 3447 { 3448 int q_idx; 3449 3450 if (!vsi->netdev) 3451 return; 3452 3453 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3454 napi_enable(&vsi->q_vectors[q_idx]->napi); 3455 } 3456 3457 /** 3458 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 3459 * @vsi: the VSI being configured 3460 **/ 3461 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 3462 { 3463 int q_idx; 3464 3465 if (!vsi->netdev) 3466 return; 3467 3468 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3469 napi_disable(&vsi->q_vectors[q_idx]->napi); 3470 } 3471 3472 /** 3473 * i40e_quiesce_vsi - Pause a given VSI 3474 * @vsi: the VSI being paused 3475 **/ 3476 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 3477 { 3478 if (test_bit(__I40E_DOWN, &vsi->state)) 3479 return; 3480 3481 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 3482 if (vsi->netdev && netif_running(vsi->netdev)) { 3483 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3484 } else { 3485 set_bit(__I40E_DOWN, &vsi->state); 3486 i40e_down(vsi); 3487 } 3488 } 3489 3490 /** 3491 * i40e_unquiesce_vsi - Resume a given VSI 3492 * @vsi: the VSI being resumed 3493 **/ 3494 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 3495 { 3496 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 3497 return; 3498 3499 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 3500 if (vsi->netdev && netif_running(vsi->netdev)) 3501 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3502 else 3503 i40e_up(vsi); /* this clears the DOWN bit */ 3504 } 3505 3506 /** 3507 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 3508 * @pf: the PF 3509 **/ 3510 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 3511 { 3512 int v; 3513 3514 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3515 if (pf->vsi[v]) 3516 i40e_quiesce_vsi(pf->vsi[v]); 3517 } 3518 } 3519 3520 /** 3521 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 3522 * @pf: the PF 3523 **/ 3524 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 3525 { 3526 int v; 3527 
3528 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3529 if (pf->vsi[v]) 3530 i40e_unquiesce_vsi(pf->vsi[v]); 3531 } 3532 } 3533 3534 /** 3535 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 3536 * @dcbcfg: the corresponding DCBx configuration structure 3537 * 3538 * Return the number of TCs from given DCBx configuration 3539 **/ 3540 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 3541 { 3542 u8 num_tc = 0; 3543 int i; 3544 3545 /* Scan the ETS Config Priority Table to find 3546 * traffic class enabled for a given priority 3547 * and use the traffic class index to get the 3548 * number of traffic classes enabled 3549 */ 3550 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 3551 if (dcbcfg->etscfg.prioritytable[i] > num_tc) 3552 num_tc = dcbcfg->etscfg.prioritytable[i]; 3553 } 3554 3555 /* Traffic class index starts from zero so 3556 * increment to return the actual count 3557 */ 3558 return num_tc + 1; 3559 } 3560 3561 /** 3562 * i40e_dcb_get_enabled_tc - Get enabled traffic classes 3563 * @dcbcfg: the corresponding DCBx configuration structure 3564 * 3565 * Query the current DCB configuration and return the number of 3566 * traffic classes enabled from the given DCBX config 3567 **/ 3568 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) 3569 { 3570 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); 3571 u8 enabled_tc = 1; 3572 u8 i; 3573 3574 for (i = 0; i < num_tc; i++) 3575 enabled_tc |= 1 << i; 3576 3577 return enabled_tc; 3578 } 3579 3580 /** 3581 * i40e_pf_get_num_tc - Get enabled traffic classes for PF 3582 * @pf: PF being queried 3583 * 3584 * Return number of traffic classes enabled for the given PF 3585 **/ 3586 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) 3587 { 3588 struct i40e_hw *hw = &pf->hw; 3589 u8 i, enabled_tc; 3590 u8 num_tc = 0; 3591 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 3592 3593 /* If DCB is not enabled then always in single TC */ 3594 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 3595 return 1; 3596 3597 /* MFP mode return count of enabled TCs for this PF */ 3598 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 3599 enabled_tc = pf->hw.func_caps.enabled_tcmap; 3600 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3601 if (enabled_tc & (1 << i)) 3602 num_tc++; 3603 } 3604 return num_tc; 3605 } 3606 3607 /* SFP mode will be enabled for all TCs on port */ 3608 return i40e_dcb_get_num_tc(dcbcfg); 3609 } 3610 3611 /** 3612 * i40e_pf_get_default_tc - Get bitmap for first enabled TC 3613 * @pf: PF being queried 3614 * 3615 * Return a bitmap for first enabled traffic class for this PF. 3616 **/ 3617 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) 3618 { 3619 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; 3620 u8 i = 0; 3621 3622 if (!enabled_tc) 3623 return 0x1; /* TC0 */ 3624 3625 /* Find the first enabled TC */ 3626 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3627 if (enabled_tc & (1 << i)) 3628 break; 3629 } 3630 3631 return 1 << i; 3632 } 3633 3634 /** 3635 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes 3636 * @pf: PF being queried 3637 * 3638 * Return a bitmap for enabled traffic classes for this PF. 
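 * e.g. with DCB enabled and TC0 plus TC1 active the returned map is
 * 0x3; with DCB disabled this falls back to i40e_pf_get_default_tc().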
3639 **/ 3640 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) 3641 { 3642 /* If DCB is not enabled for this PF then just return default TC */ 3643 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 3644 return i40e_pf_get_default_tc(pf); 3645 3646 /* MFP mode will have enabled TCs set by FW */ 3647 if (pf->flags & I40E_FLAG_MFP_ENABLED) 3648 return pf->hw.func_caps.enabled_tcmap; 3649 3650 /* SFP mode we want PF to be enabled for all TCs */ 3651 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); 3652 } 3653 3654 /** 3655 * i40e_vsi_get_bw_info - Query VSI BW Information 3656 * @vsi: the VSI being queried 3657 * 3658 * Returns 0 on success, negative value on failure 3659 **/ 3660 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) 3661 { 3662 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; 3663 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 3664 struct i40e_pf *pf = vsi->back; 3665 struct i40e_hw *hw = &pf->hw; 3666 i40e_status aq_ret; 3667 u32 tc_bw_max; 3668 int i; 3669 3670 /* Get the VSI level BW configuration */ 3671 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 3672 if (aq_ret) { 3673 dev_info(&pf->pdev->dev, 3674 "couldn't get pf vsi bw config, err %d, aq_err %d\n", 3675 aq_ret, pf->hw.aq.asq_last_status); 3676 return -EINVAL; 3677 } 3678 3679 /* Get the VSI level BW configuration per TC */ 3680 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, 3681 NULL); 3682 if (aq_ret) { 3683 dev_info(&pf->pdev->dev, 3684 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", 3685 aq_ret, pf->hw.aq.asq_last_status); 3686 return -EINVAL; 3687 } 3688 3689 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 3690 dev_info(&pf->pdev->dev, 3691 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", 3692 bw_config.tc_valid_bits, 3693 bw_ets_config.tc_valid_bits); 3694 /* Still continuing */ 3695 } 3696 3697 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); 3698 vsi->bw_max_quanta = bw_config.max_bw; 3699 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | 3700 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); 3701 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3702 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; 3703 vsi->bw_ets_limit_credits[i] = 3704 le16_to_cpu(bw_ets_config.credits[i]); 3705 /* 3 bits out of 4 for each TC */ 3706 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 3707 } 3708 3709 return 0; 3710 } 3711 3712 /** 3713 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC 3714 * @vsi: the VSI being configured 3715 * @enabled_tc: TC bitmap 3716 * @bw_credits: BW shared credits per TC 3717 * 3718 * Returns 0 on success, negative value on failure 3719 **/ 3720 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, 3721 u8 *bw_share) 3722 { 3723 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 3724 i40e_status aq_ret; 3725 int i; 3726 3727 bw_data.tc_valid_bits = enabled_tc; 3728 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3729 bw_data.tc_bw_credits[i] = bw_share[i]; 3730 3731 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, 3732 NULL); 3733 if (aq_ret) { 3734 dev_info(&vsi->back->pdev->dev, 3735 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3736 __func__, vsi->back->hw.aq.asq_last_status); 3737 return -EINVAL; 3738 } 3739 3740 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3741 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 3742 3743 return 0; 3744 } 3745 3746 /** 3747 * 
i40e_vsi_config_netdev_tc - Setup the netdev TC configuration 3748 * @vsi: the VSI being configured 3749 * @enabled_tc: TC map to be enabled 3750 * 3751 **/ 3752 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) 3753 { 3754 struct net_device *netdev = vsi->netdev; 3755 struct i40e_pf *pf = vsi->back; 3756 struct i40e_hw *hw = &pf->hw; 3757 u8 netdev_tc = 0; 3758 int i; 3759 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 3760 3761 if (!netdev) 3762 return; 3763 3764 if (!enabled_tc) { 3765 netdev_reset_tc(netdev); 3766 return; 3767 } 3768 3769 /* Set up actual enabled TCs on the VSI */ 3770 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) 3771 return; 3772 3773 /* set per TC queues for the VSI */ 3774 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3775 /* Only set TC queues for enabled tcs 3776 * 3777 * e.g. for a VSI that has TC0 and TC3 enabled the 3778 * enabled_tc bitmap would be 0x9 (binary 1001); the driver 3779 * will set numtc for the netdev to 2, which the netdev 3780 * layer references as TC 0 and 1. 3781 */ 3782 if (vsi->tc_config.enabled_tc & (1 << i)) 3783 netdev_set_tc_queue(netdev, 3784 vsi->tc_config.tc_info[i].netdev_tc, 3785 vsi->tc_config.tc_info[i].qcount, 3786 vsi->tc_config.tc_info[i].qoffset); 3787 } 3788 3789 /* Assign UP2TC map for the VSI */ 3790 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 3791 /* Get the actual TC# for the UP */ 3792 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; 3793 /* Get the mapped netdev TC# for the UP */ 3794 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; 3795 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3796 } 3797 } 3798 3799 /** 3800 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map 3801 * @vsi: the VSI being configured 3802 * @ctxt: the ctxt buffer returned from AQ VSI update param command 3803 **/ 3804 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, 3805 struct i40e_vsi_context *ctxt) 3806 { 3807 /* copy just the sections touched not the entire info 3808 * since not all sections are valid as returned by 3809 * update vsi params 3810 */ 3811 vsi->info.mapping_flags = ctxt->info.mapping_flags; 3812 memcpy(&vsi->info.queue_mapping, 3813 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); 3814 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, 3815 sizeof(vsi->info.tc_mapping)); 3816 } 3817 3818 /** 3819 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map 3820 * @vsi: VSI to be configured 3821 * @enabled_tc: TC bitmap 3822 * 3823 * This configures a particular VSI for TCs that are mapped to the 3824 * given TC bitmap. It uses default bandwidth share for TCs across 3825 * VSIs to configure TC for a particular VSI. 3826 * 3827 * NOTE: 3828 * It is expected that the VSI queues have been quiesced before calling 3829 * this function.
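 *
 * e.g. i40e_vsi_config_tc(vsi, 0x9) enables TC0 and TC3, giving each
 * an equal (1 credit) bandwidth share before the queue mapping and
 * netdev TC setup are updated.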
3830 **/
3831 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3832 {
3833 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3834 struct i40e_vsi_context ctxt;
3835 int ret = 0;
3836 int i;
3837 
3838 /* Return early if the requested TC map matches the current one */
3839 if (vsi->tc_config.enabled_tc == enabled_tc)
3840 return ret;
3841 
3842 /* Enable ETS TCs with equal BW Share for now across all VSIs */
3843 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3844 if (enabled_tc & (1 << i))
3845 bw_share[i] = 1;
3846 }
3847 
3848 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3849 if (ret) {
3850 dev_info(&vsi->back->pdev->dev,
3851 "Failed configuring TC map %d for VSI %d\n",
3852 enabled_tc, vsi->seid);
3853 goto out;
3854 }
3855 
3856 /* Update Queue Pairs Mapping for currently enabled UPs */
3857 ctxt.seid = vsi->seid;
3858 ctxt.pf_num = vsi->back->hw.pf_id;
3859 ctxt.vf_num = 0;
3860 ctxt.uplink_seid = vsi->uplink_seid;
3861 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3862 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3863 
3864 /* Update the VSI after updating the VSI queue-mapping information */
3865 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3866 if (ret) {
3867 dev_info(&vsi->back->pdev->dev,
3868 "update vsi failed, aq_err=%d\n",
3869 vsi->back->hw.aq.asq_last_status);
3870 goto out;
3871 }
3872 /* update the local VSI info with updated queue map */
3873 i40e_vsi_update_queue_map(vsi, &ctxt);
3874 vsi->info.valid_sections = 0;
3875 
3876 /* Update current VSI BW information */
3877 ret = i40e_vsi_get_bw_info(vsi);
3878 if (ret) {
3879 dev_info(&vsi->back->pdev->dev,
3880 "Failed updating vsi bw info, aq_err=%d\n",
3881 vsi->back->hw.aq.asq_last_status);
3882 goto out;
3883 }
3884 
3885 /* Update the netdev TC setup */
3886 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3887 out:
3888 return ret;
3889 }
3890 
3891 /**
3892 * i40e_veb_config_tc - Configure TCs for given VEB
3893 * @veb: given VEB
3894 * @enabled_tc: TC bitmap
3895 *
3896 * Configures given TC bitmap for VEB (switching) element
3897 **/
3898 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
3899 {
3900 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
3901 struct i40e_pf *pf = veb->pf;
3902 int ret = 0;
3903 int i;
3904 
3905 /* No TCs requested, or TCs already enabled; just return */
3906 if (!enabled_tc || veb->enabled_tc == enabled_tc)
3907 return ret;
3908 
3909 bw_data.tc_valid_bits = enabled_tc;
3910 /* bw_data.absolute_credits is not set (relative) */
3911 
3912 /* Enable ETS TCs with equal BW Share for now */
3913 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3914 if (enabled_tc & (1 << i))
3915 bw_data.tc_bw_share_credits[i] = 1;
3916 }
3917 
3918 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
3919 &bw_data, NULL);
3920 if (ret) {
3921 dev_info(&pf->pdev->dev,
3922 "veb bw config failed, aq_err=%d\n",
3923 pf->hw.aq.asq_last_status);
3924 goto out;
3925 }
3926 
3927 /* Update the BW information */
3928 ret = i40e_veb_get_bw_info(veb);
3929 if (ret) {
3930 dev_info(&pf->pdev->dev,
3931 "Failed getting veb bw config, aq_err=%d\n",
3932 pf->hw.aq.asq_last_status);
3933 }
3934 
3935 out:
3936 return ret;
3937 }
3938 
3939 #ifdef CONFIG_I40E_DCB
3940 /**
3941 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
3942 * @pf: PF struct
3943 *
3944 * Reconfigure VEB/VSIs on a given PF; it is assumed that
3945 * the caller has quiesced all the VSIs before calling
3946 * this function
3947 **/
3948 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
3949 {
3950 u8 tc_map = 0;
3951 int ret;
3952 u8 v;
3953 
3954 /* Enable the TCs available on PF to all VEBs */
3955 tc_map = i40e_pf_get_tc_map(pf);
3956 for (v = 0; v < I40E_MAX_VEB; v++) {
3957 if (!pf->veb[v])
3958 continue;
3959 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
3960 if (ret) {
3961 dev_info(&pf->pdev->dev,
3962 "Failed configuring TC for VEB seid=%d\n",
3963 pf->veb[v]->seid);
3964 /* Will try to configure as many components as possible */
3965 }
3966 }
3967 
3968 /* Update each VSI */
3969 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3970 if (!pf->vsi[v])
3971 continue;
3972 
3973 /* - Enable all TCs for the LAN VSI
3974 * - For all others keep them at TC0 for now
3975 */
3976 if (v == pf->lan_vsi)
3977 tc_map = i40e_pf_get_tc_map(pf);
3978 else
3979 tc_map = i40e_pf_get_default_tc(pf);
3980 
3981 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
3982 if (ret) {
3983 dev_info(&pf->pdev->dev,
3984 "Failed configuring TC for VSI seid=%d\n",
3985 pf->vsi[v]->seid);
3986 /* Will try to configure as many components as possible */
3987 } else {
3988 if (pf->vsi[v]->netdev)
3989 i40e_dcbnl_set_all(pf->vsi[v]);
3990 }
3991 }
3992 }
3993 
3994 /**
3995 * i40e_init_pf_dcb - Initialize DCB configuration
3996 * @pf: PF being configured
3997 *
3998 * Query the current DCB configuration and cache it
3999 * in the hardware structure
4000 **/
4001 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4002 {
4003 struct i40e_hw *hw = &pf->hw;
4004 int err = 0;
4005 
4006 if (pf->hw.func_caps.npar_enable)
4007 goto out;
4008 
4009 /* Get the initial DCB configuration */
4010 err = i40e_init_dcb(hw);
4011 if (!err) {
4012 /* Device/Function is not DCBX capable */
4013 if ((!hw->func_caps.dcb) ||
4014 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4015 dev_info(&pf->pdev->dev,
4016 "DCBX offload is not supported or is disabled for this PF.\n");
4017 
4018 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4019 goto out;
4020 
4021 } else {
4022 /* When status is not DISABLED then DCBX is managed by FW */
4023 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4024 DCB_CAP_DCBX_VER_IEEE;
4025 pf->flags |= I40E_FLAG_DCB_ENABLED;
4026 }
4027 }
4028 
4029 out:
4030 return err;
4031 }
4032 #endif /* CONFIG_I40E_DCB */
4033 
4034 /**
4035 * i40e_up_complete - Finish the last steps of bringing up a connection
4036 * @vsi: the VSI being configured
4037 **/
4038 static int i40e_up_complete(struct i40e_vsi *vsi)
4039 {
4040 struct i40e_pf *pf = vsi->back;
4041 int err;
4042 
4043 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4044 i40e_vsi_configure_msix(vsi);
4045 else
4046 i40e_configure_msi_and_legacy(vsi);
4047 
4048 /* start rings */
4049 err = i40e_vsi_control_rings(vsi, true);
4050 if (err)
4051 return err;
4052 
4053 clear_bit(__I40E_DOWN, &vsi->state);
4054 i40e_napi_enable_all(vsi);
4055 i40e_vsi_enable_irq(vsi);
4056 
4057 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4058 (vsi->netdev)) {
4059 netdev_info(vsi->netdev, "NIC Link is Up\n");
4060 netif_tx_start_all_queues(vsi->netdev);
4061 netif_carrier_on(vsi->netdev);
4062 } else if (vsi->netdev) {
4063 netdev_info(vsi->netdev, "NIC Link is Down\n");
4064 }
4065 i40e_service_event_schedule(pf);
4066 
4067 return 0;
4068 }
4069 
4070 /**
4071 * i40e_vsi_reinit_locked - Reset the VSI
4072 * @vsi: the VSI being configured
4073 *
4074 * Rebuild the ring structs after some configuration
4075 * has changed, e.g. MTU size.
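 *
 * A minimal caller sketch (assuming a running interface), as in an
 * MTU-change path:
 *
 *	netdev->mtu = new_mtu;
 *	if (netif_running(netdev))
 *		i40e_vsi_reinit_locked(vsi);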
4076 **/ 4077 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) 4078 { 4079 struct i40e_pf *pf = vsi->back; 4080 4081 WARN_ON(in_interrupt()); 4082 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 4083 usleep_range(1000, 2000); 4084 i40e_down(vsi); 4085 4086 /* Give a VF some time to respond to the reset. The 4087 * two second wait is based upon the watchdog cycle in 4088 * the VF driver. 4089 */ 4090 if (vsi->type == I40E_VSI_SRIOV) 4091 msleep(2000); 4092 i40e_up(vsi); 4093 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 4094 } 4095 4096 /** 4097 * i40e_up - Bring the connection back up after being down 4098 * @vsi: the VSI being configured 4099 **/ 4100 int i40e_up(struct i40e_vsi *vsi) 4101 { 4102 int err; 4103 4104 err = i40e_vsi_configure(vsi); 4105 if (!err) 4106 err = i40e_up_complete(vsi); 4107 4108 return err; 4109 } 4110 4111 /** 4112 * i40e_down - Shutdown the connection processing 4113 * @vsi: the VSI being stopped 4114 **/ 4115 void i40e_down(struct i40e_vsi *vsi) 4116 { 4117 int i; 4118 4119 /* It is assumed that the caller of this function 4120 * sets the vsi->state __I40E_DOWN bit. 4121 */ 4122 if (vsi->netdev) { 4123 netif_carrier_off(vsi->netdev); 4124 netif_tx_disable(vsi->netdev); 4125 } 4126 i40e_vsi_disable_irq(vsi); 4127 i40e_vsi_control_rings(vsi, false); 4128 i40e_napi_disable_all(vsi); 4129 4130 for (i = 0; i < vsi->num_queue_pairs; i++) { 4131 i40e_clean_tx_ring(vsi->tx_rings[i]); 4132 i40e_clean_rx_ring(vsi->rx_rings[i]); 4133 } 4134 } 4135 4136 /** 4137 * i40e_setup_tc - configure multiple traffic classes 4138 * @netdev: net device to configure 4139 * @tc: number of traffic classes to enable 4140 **/ 4141 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 4142 { 4143 struct i40e_netdev_priv *np = netdev_priv(netdev); 4144 struct i40e_vsi *vsi = np->vsi; 4145 struct i40e_pf *pf = vsi->back; 4146 u8 enabled_tc = 0; 4147 int ret = -EINVAL; 4148 int i; 4149 4150 /* Check if DCB enabled to continue */ 4151 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 4152 netdev_info(netdev, "DCB is not enabled for adapter\n"); 4153 goto exit; 4154 } 4155 4156 /* Check if MFP enabled */ 4157 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4158 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 4159 goto exit; 4160 } 4161 4162 /* Check whether tc count is within enabled limit */ 4163 if (tc > i40e_pf_get_num_tc(pf)) { 4164 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 4165 goto exit; 4166 } 4167 4168 /* Generate TC map for number of tc requested */ 4169 for (i = 0; i < tc; i++) 4170 enabled_tc |= (1 << i); 4171 4172 /* Requesting same TC configuration as already enabled */ 4173 if (enabled_tc == vsi->tc_config.enabled_tc) 4174 return 0; 4175 4176 /* Quiesce VSI queues */ 4177 i40e_quiesce_vsi(vsi); 4178 4179 /* Configure VSI for enabled TCs */ 4180 ret = i40e_vsi_config_tc(vsi, enabled_tc); 4181 if (ret) { 4182 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 4183 vsi->seid); 4184 goto exit; 4185 } 4186 4187 /* Unquiesce VSI */ 4188 i40e_unquiesce_vsi(vsi); 4189 4190 exit: 4191 return ret; 4192 } 4193 4194 /** 4195 * i40e_open - Called when a network interface is made active 4196 * @netdev: network interface device structure 4197 * 4198 * The open entry point is called when a network interface is made 4199 * active by the system (IFF_UP). 
At this point all resources needed 4200 * for transmit and receive operations are allocated, the interrupt 4201 * handler is registered with the OS, the netdev watchdog subtask is 4202 * enabled, and the stack is notified that the interface is ready. 4203 * 4204 * Returns 0 on success, negative value on failure 4205 **/ 4206 static int i40e_open(struct net_device *netdev) 4207 { 4208 struct i40e_netdev_priv *np = netdev_priv(netdev); 4209 struct i40e_vsi *vsi = np->vsi; 4210 struct i40e_pf *pf = vsi->back; 4211 char int_name[IFNAMSIZ]; 4212 int err; 4213 4214 /* disallow open during test */ 4215 if (test_bit(__I40E_TESTING, &pf->state)) 4216 return -EBUSY; 4217 4218 netif_carrier_off(netdev); 4219 4220 /* allocate descriptors */ 4221 err = i40e_vsi_setup_tx_resources(vsi); 4222 if (err) 4223 goto err_setup_tx; 4224 err = i40e_vsi_setup_rx_resources(vsi); 4225 if (err) 4226 goto err_setup_rx; 4227 4228 err = i40e_vsi_configure(vsi); 4229 if (err) 4230 goto err_setup_rx; 4231 4232 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 4233 dev_driver_string(&pf->pdev->dev), netdev->name); 4234 err = i40e_vsi_request_irq(vsi, int_name); 4235 if (err) 4236 goto err_setup_rx; 4237 4238 /* Notify the stack of the actual queue counts. */ 4239 err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs); 4240 if (err) 4241 goto err_set_queues; 4242 4243 err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs); 4244 if (err) 4245 goto err_set_queues; 4246 4247 err = i40e_up_complete(vsi); 4248 if (err) 4249 goto err_up_complete; 4250 4251 #ifdef CONFIG_I40E_VXLAN 4252 vxlan_get_rx_port(netdev); 4253 #endif 4254 4255 return 0; 4256 4257 err_up_complete: 4258 i40e_down(vsi); 4259 err_set_queues: 4260 i40e_vsi_free_irq(vsi); 4261 err_setup_rx: 4262 i40e_vsi_free_rx_resources(vsi); 4263 err_setup_tx: 4264 i40e_vsi_free_tx_resources(vsi); 4265 if (vsi == pf->vsi[pf->lan_vsi]) 4266 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); 4267 4268 return err; 4269 } 4270 4271 /** 4272 * i40e_close - Disables a network interface 4273 * @netdev: network interface device structure 4274 * 4275 * The close entry point is called when an interface is de-activated 4276 * by the OS. The hardware is still under the driver's control, but 4277 * this netdev interface is disabled. 4278 * 4279 * Returns 0, this is not allowed to fail 4280 **/ 4281 static int i40e_close(struct net_device *netdev) 4282 { 4283 struct i40e_netdev_priv *np = netdev_priv(netdev); 4284 struct i40e_vsi *vsi = np->vsi; 4285 4286 if (test_and_set_bit(__I40E_DOWN, &vsi->state)) 4287 return 0; 4288 4289 i40e_down(vsi); 4290 i40e_vsi_free_irq(vsi); 4291 4292 i40e_vsi_free_tx_resources(vsi); 4293 i40e_vsi_free_rx_resources(vsi); 4294 4295 return 0; 4296 } 4297 4298 /** 4299 * i40e_do_reset - Start a PF or Core Reset sequence 4300 * @pf: board private structure 4301 * @reset_flags: which reset is requested 4302 * 4303 * The essential difference in resets is that the PF Reset 4304 * doesn't clear the packet buffers, doesn't reset the PE 4305 * firmware, and doesn't bother the other PFs on the chip. 4306 **/ 4307 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 4308 { 4309 u32 val; 4310 4311 WARN_ON(in_interrupt()); 4312 4313 /* do the biggest reset indicated */ 4314 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { 4315 4316 /* Request a Global Reset 4317 * 4318 * This will start the chip's countdown to the actual full 4319 * chip reset event, and a warning interrupt to be sent 4320 * to all PFs, including the requestor. 
Our handler 4321 * for the warning interrupt will deal with the shutdown 4322 * and recovery of the switch setup. 4323 */ 4324 dev_info(&pf->pdev->dev, "GlobalR requested\n"); 4325 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4326 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 4327 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4328 4329 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { 4330 4331 /* Request a Core Reset 4332 * 4333 * Same as Global Reset, except does *not* include the MAC/PHY 4334 */ 4335 dev_info(&pf->pdev->dev, "CoreR requested\n"); 4336 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4337 val |= I40E_GLGEN_RTRIG_CORER_MASK; 4338 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4339 i40e_flush(&pf->hw); 4340 4341 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { 4342 4343 /* Request a Firmware Reset 4344 * 4345 * Same as Global reset, plus restarting the 4346 * embedded firmware engine. 4347 */ 4348 /* enable EMP Reset */ 4349 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); 4350 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; 4351 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); 4352 4353 /* force the reset */ 4354 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4355 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; 4356 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4357 i40e_flush(&pf->hw); 4358 4359 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { 4360 4361 /* Request a PF Reset 4362 * 4363 * Resets only the PF-specific registers 4364 * 4365 * This goes directly to the tear-down and rebuild of 4366 * the switch, since we need to do all the recovery as 4367 * for the Core Reset. 4368 */ 4369 dev_info(&pf->pdev->dev, "PFR requested\n"); 4370 i40e_handle_reset_warning(pf); 4371 4372 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { 4373 int v; 4374 4375 /* Find the VSI(s) that requested a re-init */ 4376 dev_info(&pf->pdev->dev, 4377 "VSI reinit requested\n"); 4378 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4379 struct i40e_vsi *vsi = pf->vsi[v]; 4380 if (vsi != NULL && 4381 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 4382 i40e_vsi_reinit_locked(pf->vsi[v]); 4383 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); 4384 } 4385 } 4386 4387 /* no further action needed, so return now */ 4388 return; 4389 } else { 4390 dev_info(&pf->pdev->dev, 4391 "bad reset request 0x%08x\n", reset_flags); 4392 return; 4393 } 4394 } 4395 4396 #ifdef CONFIG_I40E_DCB 4397 /** 4398 * i40e_dcb_need_reconfig - Check if DCB needs reconfig 4399 * @pf: board private structure 4400 * @old_cfg: current DCB config 4401 * @new_cfg: new DCB config 4402 **/ 4403 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 4404 struct i40e_dcbx_config *old_cfg, 4405 struct i40e_dcbx_config *new_cfg) 4406 { 4407 bool need_reconfig = false; 4408 4409 /* Check if ETS configuration has changed */ 4410 if (memcmp(&new_cfg->etscfg, 4411 &old_cfg->etscfg, 4412 sizeof(new_cfg->etscfg))) { 4413 /* If Priority Table has changed reconfig is needed */ 4414 if (memcmp(&new_cfg->etscfg.prioritytable, 4415 &old_cfg->etscfg.prioritytable, 4416 sizeof(new_cfg->etscfg.prioritytable))) { 4417 need_reconfig = true; 4418 dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n"); 4419 } 4420 4421 if (memcmp(&new_cfg->etscfg.tcbwtable, 4422 &old_cfg->etscfg.tcbwtable, 4423 sizeof(new_cfg->etscfg.tcbwtable))) 4424 dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 4425 4426 if (memcmp(&new_cfg->etscfg.tsatable, 4427 &old_cfg->etscfg.tsatable, 4428 sizeof(new_cfg->etscfg.tsatable))) 4429 dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n"); 4430 } 4431 4432 /* Check if PFC 
configuration has changed */
4433 if (memcmp(&new_cfg->pfc,
4434 &old_cfg->pfc,
4435 sizeof(new_cfg->pfc))) {
4436 need_reconfig = true;
4437 dev_info(&pf->pdev->dev, "PFC config change detected.\n");
4438 }
4439 
4440 /* Check if APP Table has changed */
4441 if (memcmp(&new_cfg->app,
4442 &old_cfg->app,
4443 sizeof(new_cfg->app))) {
4444 need_reconfig = true;
4445 dev_info(&pf->pdev->dev, "APP Table change detected.\n");
4446 }
4447 
4448 return need_reconfig;
4449 }
4450 
4451 /**
4452 * i40e_handle_lldp_event - Handle LLDP Change MIB event
4453 * @pf: board private structure
4454 * @e: event info posted on ARQ
4455 **/
4456 static int i40e_handle_lldp_event(struct i40e_pf *pf,
4457 struct i40e_arq_event_info *e)
4458 {
4459 struct i40e_aqc_lldp_get_mib *mib =
4460 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
4461 struct i40e_hw *hw = &pf->hw;
4462 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
4463 struct i40e_dcbx_config tmp_dcbx_cfg;
4464 bool need_reconfig = false;
4465 int ret = 0;
4466 u8 type;
4467 
4468 /* Ignore if event is not for Nearest Bridge */
4469 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
4470 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
4471 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
4472 return ret;
4473 
4474 /* Check MIB Type and return if event for Remote MIB update */
4475 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
4476 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
4477 /* Update the remote cached instance and return */
4478 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
4479 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
4480 &hw->remote_dcbx_config);
4481 goto exit;
4482 }
4483 
4484 /* Convert/store the DCBX data from LLDPDU temporarily */
4485 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
4486 ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
4487 if (ret) {
4488 /* Error parsing the LLDPDU; bail out */
4489 dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
4490 goto exit;
4491 }
4492 
4493 /* No change detected in DCBX configs */
4494 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
4495 dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
4496 goto exit;
4497 }
4498 
4499 need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
4500 
4501 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
4502 
4503 /* Store the new configuration */
4504 *dcbx_cfg = tmp_dcbx_cfg;
4505 
4506 if (!need_reconfig)
4507 goto exit;
4508 
4509 /* Reconfiguration needed; quiesce all VSIs */
4510 i40e_pf_quiesce_all_vsi(pf);
4511 
4512 /* Apply the configuration changes to the VEBs/VSIs */
4513 i40e_dcb_reconfigure(pf);
4514 
4515 i40e_pf_unquiesce_all_vsi(pf);
4516 exit:
4517 return ret;
4518 }
4519 #endif /* CONFIG_I40E_DCB */
4520 
4521 /**
4522 * i40e_do_reset_safe - Protected reset path for userland calls.
4523 * @pf: board private structure
4524 * @reset_flags: which reset is requested
4525 *
4526 **/
4527 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
4528 {
4529 rtnl_lock();
4530 i40e_do_reset(pf, reset_flags);
4531 rtnl_unlock();
4532 }
4533 
4534 /**
4535 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4536 * @pf: board private structure
4537 * @e: event info posted on ARQ
4538 *
4539 * Handler for LAN Queue Overflow Event generated by the firmware for PF
4540 * and VF queues
4541 **/
4542 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4543 struct i40e_arq_event_info *e)
4544 {
4545 struct i40e_aqc_lan_overflow *data =
4546 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4547 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4548 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4549 struct i40e_hw *hw = &pf->hw;
4550 struct i40e_vf *vf;
4551 u16 vf_id;
4552 
4553 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
4554 __func__, queue, qtx_ctl);
4555 
4556 /* Queue belongs to VF, find the VF and issue VF reset */
4557 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4558 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4559 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4560 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4561 vf_id -= hw->func_caps.vf_base_id;
4562 vf = &pf->vf[vf_id];
4563 i40e_vc_notify_vf_reset(vf);
4564 /* Allow VF to process pending reset notification */
4565 msleep(20);
4566 i40e_reset_vf(vf, false);
4567 }
4568 }
4569 
4570 /**
4571 * i40e_service_event_complete - Finish up the service event
4572 * @pf: board private structure
4573 **/
4574 static void i40e_service_event_complete(struct i40e_pf *pf)
4575 {
4576 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4577 
4578 /* flush memory to make sure state is correct before next watchdog */
4579 smp_mb__before_clear_bit();
4580 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4581 }
4582 
4583 /**
4584 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4585 * @pf: board private structure
4586 **/
4587 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4588 {
4589 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4590 return;
4591 
4592 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4593 
4594 /* if interface is down do nothing */
4595 if (test_bit(__I40E_DOWN, &pf->state))
4596 return;
4597 }
4598 
4599 /**
4600 * i40e_vsi_link_event - notify VSI of a link event
4601 * @vsi: vsi to be notified
4602 * @link_up: link up or down
4603 **/
4604 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4605 {
4606 if (!vsi)
4607 return;
4608 
4609 switch (vsi->type) {
4610 case I40E_VSI_MAIN:
4611 if (!vsi->netdev || !vsi->netdev_registered)
4612 break;
4613 
4614 if (link_up) {
4615 netif_carrier_on(vsi->netdev);
4616 netif_tx_wake_all_queues(vsi->netdev);
4617 } else {
4618 netif_carrier_off(vsi->netdev);
4619 netif_tx_stop_all_queues(vsi->netdev);
4620 }
4621 break;
4622 
4623 case I40E_VSI_SRIOV:
4624 break;
4625 
4626 case I40E_VSI_VMDQ2:
4627 case I40E_VSI_CTRL:
4628 case I40E_VSI_MIRROR:
4629 default:
4630 /* there is no notification for other VSIs */
4631 break;
4632 }
4633 }
4634 
4635 /**
4636 * i40e_veb_link_event - notify elements on the veb of a link event
4637 * @veb: veb to be notified
4638 * @link_up: link up or down
4639 **/
4640 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4641 {
4642 struct i40e_pf *pf;
4643 int i;
4644 
4645 if (!veb || !veb->pf)
4646 return;
4647 pf = veb->pf;
4648 
4649 /* depth
first... */ 4650 for (i = 0; i < I40E_MAX_VEB; i++) 4651 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) 4652 i40e_veb_link_event(pf->veb[i], link_up); 4653 4654 /* ... now the local VSIs */ 4655 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4656 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 4657 i40e_vsi_link_event(pf->vsi[i], link_up); 4658 } 4659 4660 /** 4661 * i40e_link_event - Update netif_carrier status 4662 * @pf: board private structure 4663 **/ 4664 static void i40e_link_event(struct i40e_pf *pf) 4665 { 4666 bool new_link, old_link; 4667 4668 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); 4669 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 4670 4671 if (new_link == old_link) 4672 return; 4673 4674 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) 4675 netdev_info(pf->vsi[pf->lan_vsi]->netdev, 4676 "NIC Link is %s\n", (new_link ? "Up" : "Down")); 4677 4678 /* Notify the base of the switch tree connected to 4679 * the link. Floating VEBs are not notified. 4680 */ 4681 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 4682 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 4683 else 4684 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); 4685 4686 if (pf->vf) 4687 i40e_vc_notify_link_state(pf); 4688 4689 if (pf->flags & I40E_FLAG_PTP) 4690 i40e_ptp_set_increment(pf); 4691 } 4692 4693 /** 4694 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts 4695 * @pf: board private structure 4696 * 4697 * Set the per-queue flags to request a check for stuck queues in the irq 4698 * clean functions, then force interrupts to be sure the irq clean is called. 4699 **/ 4700 static void i40e_check_hang_subtask(struct i40e_pf *pf) 4701 { 4702 int i, v; 4703 4704 /* If we're down or resetting, just bail */ 4705 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4706 return; 4707 4708 /* for each VSI/netdev 4709 * for each Tx queue 4710 * set the check flag 4711 * for each q_vector 4712 * force an interrupt 4713 */ 4714 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4715 struct i40e_vsi *vsi = pf->vsi[v]; 4716 int armed = 0; 4717 4718 if (!pf->vsi[v] || 4719 test_bit(__I40E_DOWN, &vsi->state) || 4720 (vsi->netdev && !netif_carrier_ok(vsi->netdev))) 4721 continue; 4722 4723 for (i = 0; i < vsi->num_queue_pairs; i++) { 4724 set_check_for_tx_hang(vsi->tx_rings[i]); 4725 if (test_bit(__I40E_HANG_CHECK_ARMED, 4726 &vsi->tx_rings[i]->state)) 4727 armed++; 4728 } 4729 4730 if (armed) { 4731 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 4732 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, 4733 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 4734 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); 4735 } else { 4736 u16 vec = vsi->base_vector - 1; 4737 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 4738 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); 4739 for (i = 0; i < vsi->num_q_vectors; i++, vec++) 4740 wr32(&vsi->back->hw, 4741 I40E_PFINT_DYN_CTLN(vec), val); 4742 } 4743 i40e_flush(&vsi->back->hw); 4744 } 4745 } 4746 } 4747 4748 /** 4749 * i40e_watchdog_subtask - Check and bring link up 4750 * @pf: board private structure 4751 **/ 4752 static void i40e_watchdog_subtask(struct i40e_pf *pf) 4753 { 4754 int i; 4755 4756 /* if interface is down do nothing */ 4757 if (test_bit(__I40E_DOWN, &pf->state) || 4758 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4759 return; 4760 4761 /* Update the stats for active netdevs so the network stack 4762 * can look at updated numbers whenever it cares to 4763 */ 4764 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4765 if 
(pf->vsi[i] && pf->vsi[i]->netdev) 4766 i40e_update_stats(pf->vsi[i]); 4767 4768 /* Update the stats for the active switching components */ 4769 for (i = 0; i < I40E_MAX_VEB; i++) 4770 if (pf->veb[i]) 4771 i40e_update_veb_stats(pf->veb[i]); 4772 4773 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 4774 } 4775 4776 /** 4777 * i40e_reset_subtask - Set up for resetting the device and driver 4778 * @pf: board private structure 4779 **/ 4780 static void i40e_reset_subtask(struct i40e_pf *pf) 4781 { 4782 u32 reset_flags = 0; 4783 4784 rtnl_lock(); 4785 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 4786 reset_flags |= (1 << __I40E_REINIT_REQUESTED); 4787 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 4788 } 4789 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 4790 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); 4791 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 4792 } 4793 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 4794 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); 4795 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 4796 } 4797 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 4798 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); 4799 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 4800 } 4801 4802 /* If there's a recovery already waiting, it takes 4803 * precedence before starting a new reset sequence. 4804 */ 4805 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 4806 i40e_handle_reset_warning(pf); 4807 goto unlock; 4808 } 4809 4810 /* If we're already down or resetting, just bail */ 4811 if (reset_flags && 4812 !test_bit(__I40E_DOWN, &pf->state) && 4813 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4814 i40e_do_reset(pf, reset_flags); 4815 4816 unlock: 4817 rtnl_unlock(); 4818 } 4819 4820 /** 4821 * i40e_handle_link_event - Handle link event 4822 * @pf: board private structure 4823 * @e: event info posted on ARQ 4824 **/ 4825 static void i40e_handle_link_event(struct i40e_pf *pf, 4826 struct i40e_arq_event_info *e) 4827 { 4828 struct i40e_hw *hw = &pf->hw; 4829 struct i40e_aqc_get_link_status *status = 4830 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 4831 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 4832 4833 /* save off old link status information */ 4834 memcpy(&pf->hw.phy.link_info_old, hw_link_info, 4835 sizeof(pf->hw.phy.link_info_old)); 4836 4837 /* update link status */ 4838 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; 4839 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; 4840 hw_link_info->link_info = status->link_info; 4841 hw_link_info->an_info = status->an_info; 4842 hw_link_info->ext_info = status->ext_info; 4843 hw_link_info->lse_enable = 4844 le16_to_cpu(status->command_flags) & 4845 I40E_AQ_LSE_ENABLE; 4846 4847 /* process the event */ 4848 i40e_link_event(pf); 4849 4850 /* Do a new status request to re-enable LSE reporting 4851 * and load new status information into the hw struct, 4852 * then see if the status changed while processing the 4853 * initial event. 
4854 */ 4855 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); 4856 i40e_link_event(pf); 4857 } 4858 4859 /** 4860 * i40e_clean_adminq_subtask - Clean the AdminQ rings 4861 * @pf: board private structure 4862 **/ 4863 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 4864 { 4865 struct i40e_arq_event_info event; 4866 struct i40e_hw *hw = &pf->hw; 4867 u16 pending, i = 0; 4868 i40e_status ret; 4869 u16 opcode; 4870 u32 val; 4871 4872 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)) 4873 return; 4874 4875 event.msg_size = I40E_MAX_AQ_BUF_SIZE; 4876 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 4877 if (!event.msg_buf) 4878 return; 4879 4880 do { 4881 event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */ 4882 ret = i40e_clean_arq_element(hw, &event, &pending); 4883 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) { 4884 dev_info(&pf->pdev->dev, "No ARQ event found\n"); 4885 break; 4886 } else if (ret) { 4887 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 4888 break; 4889 } 4890 4891 opcode = le16_to_cpu(event.desc.opcode); 4892 switch (opcode) { 4893 4894 case i40e_aqc_opc_get_link_status: 4895 i40e_handle_link_event(pf, &event); 4896 break; 4897 case i40e_aqc_opc_send_msg_to_pf: 4898 ret = i40e_vc_process_vf_msg(pf, 4899 le16_to_cpu(event.desc.retval), 4900 le32_to_cpu(event.desc.cookie_high), 4901 le32_to_cpu(event.desc.cookie_low), 4902 event.msg_buf, 4903 event.msg_size); 4904 break; 4905 case i40e_aqc_opc_lldp_update_mib: 4906 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 4907 #ifdef CONFIG_I40E_DCB 4908 rtnl_lock(); 4909 ret = i40e_handle_lldp_event(pf, &event); 4910 rtnl_unlock(); 4911 #endif /* CONFIG_I40E_DCB */ 4912 break; 4913 case i40e_aqc_opc_event_lan_overflow: 4914 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 4915 i40e_handle_lan_overflow_event(pf, &event); 4916 break; 4917 case i40e_aqc_opc_send_msg_to_peer: 4918 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 4919 break; 4920 default: 4921 dev_info(&pf->pdev->dev, 4922 "ARQ Error: Unknown event 0x%04x received\n", 4923 opcode); 4924 break; 4925 } 4926 } while (pending && (i++ < pf->adminq_work_limit)); 4927 4928 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 4929 /* re-enable Admin queue interrupt cause */ 4930 val = rd32(hw, I40E_PFINT_ICR0_ENA); 4931 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 4932 wr32(hw, I40E_PFINT_ICR0_ENA, val); 4933 i40e_flush(hw); 4934 4935 kfree(event.msg_buf); 4936 } 4937 4938 /** 4939 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 4940 * @veb: pointer to the VEB instance 4941 * 4942 * This is a recursive function that first builds the attached VSIs then 4943 * recurses in to build the next layer of VEB. We track the connections 4944 * through our own index numbers because the seid's from the HW could 4945 * change across the reset. 
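 *
 * For example, with a hypothetical topology of one VEB uplinked to the
 * MAC carrying an owner VSI, two data VSIs, and one child VEB, the
 * rebuild order is: the owner VSI first, then the VEB itself, then the
 * remaining VSIs, and finally a recursive call for the child VEB.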
4946 **/ 4947 static int i40e_reconstitute_veb(struct i40e_veb *veb) 4948 { 4949 struct i40e_vsi *ctl_vsi = NULL; 4950 struct i40e_pf *pf = veb->pf; 4951 int v, veb_idx; 4952 int ret; 4953 4954 /* build VSI that owns this VEB, temporarily attached to base VEB */ 4955 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) { 4956 if (pf->vsi[v] && 4957 pf->vsi[v]->veb_idx == veb->idx && 4958 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 4959 ctl_vsi = pf->vsi[v]; 4960 break; 4961 } 4962 } 4963 if (!ctl_vsi) { 4964 dev_info(&pf->pdev->dev, 4965 "missing owner VSI for veb_idx %d\n", veb->idx); 4966 ret = -ENOENT; 4967 goto end_reconstitute; 4968 } 4969 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 4970 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 4971 ret = i40e_add_vsi(ctl_vsi); 4972 if (ret) { 4973 dev_info(&pf->pdev->dev, 4974 "rebuild of owner VSI failed: %d\n", ret); 4975 goto end_reconstitute; 4976 } 4977 i40e_vsi_reset_stats(ctl_vsi); 4978 4979 /* create the VEB in the switch and move the VSI onto the VEB */ 4980 ret = i40e_add_veb(veb, ctl_vsi); 4981 if (ret) 4982 goto end_reconstitute; 4983 4984 /* create the remaining VSIs attached to this VEB */ 4985 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4986 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 4987 continue; 4988 4989 if (pf->vsi[v]->veb_idx == veb->idx) { 4990 struct i40e_vsi *vsi = pf->vsi[v]; 4991 vsi->uplink_seid = veb->seid; 4992 ret = i40e_add_vsi(vsi); 4993 if (ret) { 4994 dev_info(&pf->pdev->dev, 4995 "rebuild of vsi_idx %d failed: %d\n", 4996 v, ret); 4997 goto end_reconstitute; 4998 } 4999 i40e_vsi_reset_stats(vsi); 5000 } 5001 } 5002 5003 /* create any VEBs attached to this VEB - RECURSION */ 5004 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 5005 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 5006 pf->veb[veb_idx]->uplink_seid = veb->seid; 5007 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 5008 if (ret) 5009 break; 5010 } 5011 } 5012 5013 end_reconstitute: 5014 return ret; 5015 } 5016 5017 /** 5018 * i40e_get_capabilities - get info about the HW 5019 * @pf: the PF struct 5020 **/ 5021 static int i40e_get_capabilities(struct i40e_pf *pf) 5022 { 5023 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 5024 u16 data_size; 5025 int buf_len; 5026 int err; 5027 5028 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 5029 do { 5030 cap_buf = kzalloc(buf_len, GFP_KERNEL); 5031 if (!cap_buf) 5032 return -ENOMEM; 5033 5034 /* this loads the data into the hw struct for us */ 5035 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 5036 &data_size, 5037 i40e_aqc_opc_list_func_capabilities, 5038 NULL); 5039 /* data loaded, buffer no longer needed */ 5040 kfree(cap_buf); 5041 5042 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 5043 /* retry with a larger buffer */ 5044 buf_len = data_size; 5045 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 5046 dev_info(&pf->pdev->dev, 5047 "capability discovery failed: aq=%d\n", 5048 pf->hw.aq.asq_last_status); 5049 return -ENODEV; 5050 } 5051 } while (err); 5052 5053 /* increment MSI-X count because current FW skips one */ 5054 pf->hw.func_caps.num_msix_vectors++; 5055 5056 if (pf->hw.debug_mask & I40E_DEBUG_USER) 5057 dev_info(&pf->pdev->dev, 5058 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 5059 pf->hw.pf_id, pf->hw.func_caps.num_vfs, 5060 pf->hw.func_caps.num_msix_vectors, 5061 pf->hw.func_caps.num_msix_vectors_vf, 5062 pf->hw.func_caps.fd_filters_guaranteed, 
5063 pf->hw.func_caps.fd_filters_best_effort, 5064 pf->hw.func_caps.num_tx_qp, 5065 pf->hw.func_caps.num_vsis); 5066 5067 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 5068 + pf->hw.func_caps.num_vfs) 5069 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 5070 dev_info(&pf->pdev->dev, 5071 "got num_vsis %d, setting num_vsis to %d\n", 5072 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 5073 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 5074 } 5075 5076 return 0; 5077 } 5078 5079 static int i40e_vsi_clear(struct i40e_vsi *vsi); 5080 5081 /** 5082 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 5083 * @pf: board private structure 5084 **/ 5085 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 5086 { 5087 struct i40e_vsi *vsi; 5088 bool new_vsi = false; 5089 int err, i; 5090 5091 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 5092 return; 5093 5094 /* find existing VSI and see if it needs configuring */ 5095 vsi = NULL; 5096 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5097 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5098 vsi = pf->vsi[i]; 5099 break; 5100 } 5101 } 5102 5103 /* create a new VSI if none exists */ 5104 if (!vsi) { 5105 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 5106 pf->vsi[pf->lan_vsi]->seid, 0); 5107 if (!vsi) { 5108 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 5109 goto err_vsi; 5110 } 5111 new_vsi = true; 5112 } 5113 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 5114 5115 err = i40e_vsi_setup_tx_resources(vsi); 5116 if (err) 5117 goto err_setup_tx; 5118 err = i40e_vsi_setup_rx_resources(vsi); 5119 if (err) 5120 goto err_setup_rx; 5121 5122 if (new_vsi) { 5123 char int_name[IFNAMSIZ + 9]; 5124 err = i40e_vsi_configure(vsi); 5125 if (err) 5126 goto err_setup_rx; 5127 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", 5128 dev_driver_string(&pf->pdev->dev)); 5129 err = i40e_vsi_request_irq(vsi, int_name); 5130 if (err) 5131 goto err_setup_rx; 5132 err = i40e_up_complete(vsi); 5133 if (err) 5134 goto err_up_complete; 5135 } 5136 5137 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 5138 return; 5139 5140 err_up_complete: 5141 i40e_down(vsi); 5142 i40e_vsi_free_irq(vsi); 5143 err_setup_rx: 5144 i40e_vsi_free_rx_resources(vsi); 5145 err_setup_tx: 5146 i40e_vsi_free_tx_resources(vsi); 5147 err_vsi: 5148 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 5149 i40e_vsi_clear(vsi); 5150 } 5151 5152 /** 5153 * i40e_fdir_teardown - release the Flow Director resources 5154 * @pf: board private structure 5155 **/ 5156 static void i40e_fdir_teardown(struct i40e_pf *pf) 5157 { 5158 int i; 5159 5160 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5161 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5162 i40e_vsi_release(pf->vsi[i]); 5163 break; 5164 } 5165 } 5166 } 5167 5168 /** 5169 * i40e_prep_for_reset - prep for the core to reset 5170 * @pf: board private structure 5171 * 5172 * Close up the VFs and other things in prep for pf Reset. 
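 *
 * This is paired with i40e_reset_and_rebuild(); the usual sequence is
 * the one i40e_handle_reset_warning() uses:
 *
 *	if (!i40e_prep_for_reset(pf))
 *		i40e_reset_and_rebuild(pf, false);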
5173 **/
5174 static int i40e_prep_for_reset(struct i40e_pf *pf)
5175 {
5176 struct i40e_hw *hw = &pf->hw;
5177 i40e_status ret;
5178 u32 v;
5179 
5180 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
5181 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
5182 return 0;
5183 
5184 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
5185 
5186 if (i40e_check_asq_alive(hw))
5187 i40e_vc_notify_reset(pf);
5188 
5189 /* quiesce the VSIs and their queues that are not already DOWN */
5190 i40e_pf_quiesce_all_vsi(pf);
5191 
5192 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5193 if (pf->vsi[v])
5194 pf->vsi[v]->seid = 0;
5195 }
5196 
5197 i40e_shutdown_adminq(&pf->hw);
5198 
5199 /* shut down the HMC */
5200 ret = i40e_shutdown_lan_hmc(hw);
5201 if (ret) {
5202 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
5203 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5204 }
5205 return ret;
5206 }
5207 
5208 /**
5209 * i40e_reset_and_rebuild - reset and rebuild using a saved config
5210 * @pf: board private structure
5211 * @reinit: true if the Main VSI needs to be re-initialized
5212 **/
5213 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5214 {
5215 struct i40e_driver_version dv;
5216 struct i40e_hw *hw = &pf->hw;
5217 i40e_status ret;
5218 u32 v;
5219 
5220 /* Now we wait for GRST to settle out.
5221 * We don't have to delete the VEBs or VSIs from the hw switch
5222 * because the reset will make them disappear.
5223 */
5224 ret = i40e_pf_reset(hw);
5225 if (ret)
5226 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
5227 pf->pfr_count++;
5228 
5229 if (test_bit(__I40E_DOWN, &pf->state))
5230 goto end_core_reset;
5231 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
5232 
5233 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5234 ret = i40e_init_adminq(&pf->hw);
5235 if (ret) {
5236 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
5237 goto end_core_reset;
5238 }
5239 
5240 ret = i40e_get_capabilities(pf);
5241 if (ret) {
5242 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
5243 ret);
5244 goto end_core_reset;
5245 }
5246 
5247 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
5248 hw->func_caps.num_rx_qp,
5249 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
5250 if (ret) {
5251 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
5252 goto end_core_reset;
5253 }
5254 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
5255 if (ret) {
5256 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
5257 goto end_core_reset;
5258 }
5259 
5260 #ifdef CONFIG_I40E_DCB
5261 ret = i40e_init_pf_dcb(pf);
5262 if (ret) {
5263 dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
5264 goto end_core_reset;
5265 }
5266 #endif /* CONFIG_I40E_DCB */
5267 
5268 /* do basic switch setup */
5269 ret = i40e_setup_pf_switch(pf, reinit);
5270 if (ret)
5271 goto end_core_reset;
5272 
5273 /* Rebuild the VSIs and VEBs that existed before reset.
5274 * They are still in our local switch element arrays, so only
5275 * need to rebuild the switch model in the HW.
5276 *
5277 * If there were VEBs but the reconstitution failed, we'll
5278 * try to recover minimal use by getting the basic PF VSI working.
5279 */ 5280 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 5281 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); 5282 /* find the one VEB connected to the MAC, and find orphans */ 5283 for (v = 0; v < I40E_MAX_VEB; v++) { 5284 if (!pf->veb[v]) 5285 continue; 5286 5287 if (pf->veb[v]->uplink_seid == pf->mac_seid || 5288 pf->veb[v]->uplink_seid == 0) { 5289 ret = i40e_reconstitute_veb(pf->veb[v]); 5290 5291 if (!ret) 5292 continue; 5293 5294 /* If Main VEB failed, we're in deep doodoo, 5295 * so give up rebuilding the switch and set up 5296 * for minimal rebuild of PF VSI. 5297 * If orphan failed, we'll report the error 5298 * but try to keep going. 5299 */ 5300 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 5301 dev_info(&pf->pdev->dev, 5302 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 5303 ret); 5304 pf->vsi[pf->lan_vsi]->uplink_seid 5305 = pf->mac_seid; 5306 break; 5307 } else if (pf->veb[v]->uplink_seid == 0) { 5308 dev_info(&pf->pdev->dev, 5309 "rebuild of orphan VEB failed: %d\n", 5310 ret); 5311 } 5312 } 5313 } 5314 } 5315 5316 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 5317 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 5318 /* no VEB, so rebuild only the Main VSI */ 5319 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 5320 if (ret) { 5321 dev_info(&pf->pdev->dev, 5322 "rebuild of Main VSI failed: %d\n", ret); 5323 goto end_core_reset; 5324 } 5325 } 5326 5327 /* reinit the misc interrupt */ 5328 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 5329 ret = i40e_setup_misc_vector(pf); 5330 5331 /* restart the VSIs that were rebuilt and running before the reset */ 5332 i40e_pf_unquiesce_all_vsi(pf); 5333 5334 /* tell the firmware that we're starting */ 5335 dv.major_version = DRV_VERSION_MAJOR; 5336 dv.minor_version = DRV_VERSION_MINOR; 5337 dv.build_version = DRV_VERSION_BUILD; 5338 dv.subbuild_version = 0; 5339 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 5340 5341 dev_info(&pf->pdev->dev, "PF reset done\n"); 5342 5343 end_core_reset: 5344 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 5345 } 5346 5347 /** 5348 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild 5349 * @pf: board private structure 5350 * 5351 * Close up the VFs and other things in prep for a Core Reset, 5352 * then get ready to rebuild the world. 
5353 **/
5354 static void i40e_handle_reset_warning(struct i40e_pf *pf)
5355 {
5356 i40e_status ret;
5357 
5358 ret = i40e_prep_for_reset(pf);
5359 if (!ret)
5360 i40e_reset_and_rebuild(pf, false);
5361 }
5362 
5363 /**
5364 * i40e_handle_mdd_event
5365 * @pf: pointer to the pf structure
5366 *
5367 * Called from the MDD irq handler to identify possibly malicious VFs
5368 **/
5369 static void i40e_handle_mdd_event(struct i40e_pf *pf)
5370 {
5371 struct i40e_hw *hw = &pf->hw;
5372 bool mdd_detected = false;
5373 struct i40e_vf *vf;
5374 u32 reg;
5375 int i;
5376 
5377 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
5378 return;
5379 
5380 /* find what triggered the MDD event */
5381 reg = rd32(hw, I40E_GL_MDET_TX);
5382 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
5383 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
5384 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
5385 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
5386 >> I40E_GL_MDET_TX_EVENT_SHIFT;
5387 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
5388 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
5389 dev_info(&pf->pdev->dev,
5390 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
5391 event, queue, func);
5392 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
5393 mdd_detected = true;
5394 }
5395 reg = rd32(hw, I40E_GL_MDET_RX);
5396 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
5397 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
5398 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
5399 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
5400 >> I40E_GL_MDET_RX_EVENT_SHIFT;
5401 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
5402 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
5403 dev_info(&pf->pdev->dev,
5404 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
5405 event, queue, func);
5406 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
5407 mdd_detected = true;
5408 }
5409 
5410 /* see if one of the VFs needs its hand slapped */
5411 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
5412 vf = &(pf->vf[i]);
5413 reg = rd32(hw, I40E_VP_MDET_TX(i));
5414 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
5415 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
5416 vf->num_mdd_events++;
5417 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
5418 }
5419 
5420 reg = rd32(hw, I40E_VP_MDET_RX(i));
5421 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
5422 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
5423 vf->num_mdd_events++;
5424 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
5425 }
5426 
5427 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
5428 dev_info(&pf->pdev->dev,
5429 "Too many MDD events on VF %d, disabled\n", i);
5430 dev_info(&pf->pdev->dev,
5431 "Use PF Control I/F to re-enable the VF\n");
5432 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
5433 }
5434 }
5435 
5436 /* re-enable mdd interrupt cause */
5437 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
5438 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
5439 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
5440 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
5441 i40e_flush(hw);
5442 }
5443 
5444 #ifdef CONFIG_I40E_VXLAN
5445 /**
5446 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
5447 * @pf: board private structure
5448 **/
5449 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5450 {
5451 const int vxlan_hdr_qwords = 4;
5452 struct i40e_hw *hw = &pf->hw;
5453 i40e_status ret;
5454 u8 filter_index;
5455 __be16 port;
5456 int i;
5457 
5458 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
5459 return;
5460 
5461 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
5462 
5463 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5464 if (pf->pending_vxlan_bitmap & (1 << i)) {
5465 pf->pending_vxlan_bitmap &= ~(1 << i);
5466 port = pf->vxlan_ports[i];
5467 ret = port ?
5468 i40e_aq_add_udp_tunnel(hw, ntohs(port),
5469 vxlan_hdr_qwords,
5470 I40E_AQC_TUNNEL_TYPE_VXLAN,
5471 &filter_index, NULL)
5472 : i40e_aq_del_udp_tunnel(hw, i, NULL);
5473 
5474 if (ret) {
5475 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
5476 port ? "adding" : "deleting",
5477 ntohs(port), i);
5478 
5479 pf->vxlan_ports[i] = 0;
5480 } else {
5481 dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
5482 port ? "Added" : "Deleted",
5483 ntohs(port), port ? filter_index : i);
5484 }
5485 }
5486 }
5487 }
5488 
5489 #endif
5490 /**
5491 * i40e_service_task - Run the driver's async subtasks
5492 * @work: pointer to work_struct containing our data
5493 **/
5494 static void i40e_service_task(struct work_struct *work)
5495 {
5496 struct i40e_pf *pf = container_of(work,
5497 struct i40e_pf,
5498 service_task);
5499 unsigned long start_time = jiffies;
5500 
5501 i40e_reset_subtask(pf);
5502 i40e_handle_mdd_event(pf);
5503 i40e_vc_process_vflr_event(pf);
5504 i40e_watchdog_subtask(pf);
5505 i40e_fdir_reinit_subtask(pf);
5506 i40e_check_hang_subtask(pf);
5507 i40e_sync_filters_subtask(pf);
5508 #ifdef CONFIG_I40E_VXLAN
5509 i40e_sync_vxlan_filters_subtask(pf);
5510 #endif
5511 i40e_clean_adminq_subtask(pf);
5512 
5513 i40e_service_event_complete(pf);
5514 
5515 /* If the tasks have taken longer than one timer cycle or there
5516 * is more work to be done, reschedule the service task now
5517 * rather than wait for the timer to tick again.
5518 */
5519 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
5520 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
5521 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
5522 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
5523 i40e_service_event_schedule(pf);
5524 }
5525 
5526 /**
5527 * i40e_service_timer - timer callback
5528 * @data: pointer to PF struct
5529 **/
5530 static void i40e_service_timer(unsigned long data)
5531 {
5532 struct i40e_pf *pf = (struct i40e_pf *)data;
5533 
5534 mod_timer(&pf->service_timer,
5535 round_jiffies(jiffies + pf->service_timer_period));
5536 i40e_service_event_schedule(pf);
5537 }
5538 
5539 /**
5540 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
5541 * @vsi: the VSI being configured
5542 **/
5543 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
5544 {
5545 struct i40e_pf *pf = vsi->back;
5546 
5547 switch (vsi->type) {
5548 case I40E_VSI_MAIN:
5549 vsi->alloc_queue_pairs = pf->num_lan_qps;
5550 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5551 I40E_REQ_DESCRIPTOR_MULTIPLE);
5552 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5553 vsi->num_q_vectors = pf->num_lan_msix;
5554 else
5555 vsi->num_q_vectors = 1;
5556 
5557 break;
5558 
5559 case I40E_VSI_FDIR:
5560 vsi->alloc_queue_pairs = 1;
5561 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
5562 I40E_REQ_DESCRIPTOR_MULTIPLE);
5563 vsi->num_q_vectors = 1;
5564 break;
5565 
5566 case I40E_VSI_VMDQ2:
5567 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
5568 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5569 I40E_REQ_DESCRIPTOR_MULTIPLE);
5570 vsi->num_q_vectors = pf->num_vmdq_msix;
5571 break;
5572 
5573 case I40E_VSI_SRIOV:
5574 vsi->alloc_queue_pairs = pf->num_vf_qps;
5575 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5576 I40E_REQ_DESCRIPTOR_MULTIPLE);
5577 break;
5578 
5579 default:
5580 WARN_ON(1);
5581 return -ENODATA;
5582 }
5583 
5584 return 0;
5585 }
5586 
5587 /**
5588 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
5589 * @vsi: pointer to the VSI being configured
5590 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
5591 *
5592 * On error: returns error code (negative)
5593 * On success: returns 0
5594 **/
5595 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
5596 {
5597 int size;
5598 int ret = 0;
5599 
5600 /* allocate memory for both Tx and Rx ring pointers */
5601 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5602 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
5603 if (!vsi->tx_rings)
5604 return -ENOMEM;
5605 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5606 
5607 if (alloc_qvectors) {
5608 /* allocate memory for q_vector pointers */
5609 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
5610 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
5611 if (!vsi->q_vectors) {
5612 ret = -ENOMEM;
5613 goto err_vectors;
5614 }
5615 }
5616 return ret;
5617 
5618 err_vectors:
5619 kfree(vsi->tx_rings);
5620 return ret;
5621 }
5622 
5623 /**
5624 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
5625 * @pf: board private structure
5626 * @type: type of VSI
5627 *
5628 * On error: returns error code (negative)
5629 * On success: returns vsi index in PF (positive)
5630 **/
5631 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5632 {
5633 int ret = -ENODEV;
5634 struct i40e_vsi *vsi;
5635 int vsi_idx;
5636 int i;
5637 
5638 /* Need to protect the allocation of the VSIs at the PF level */
5639 mutex_lock(&pf->switch_mutex);
5640 
5641 /* VSI list may be fragmented if VSI creation/destruction has
5642 * been happening. We can afford to do a quick scan to look
5643 * for any free VSIs in the list.
5644 *
5645 * find next empty vsi slot, looping back around if necessary
5646 */
5647 i = pf->next_vsi;
5648 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5649 i++;
5650 if (i >= pf->hw.func_caps.num_vsis) {
5651 i = 0;
5652 while (i < pf->next_vsi && pf->vsi[i])
5653 i++;
5654 }
5655 
5656 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5657 vsi_idx = i; /* Found one! */
5658 } else {
5659 ret = -ENODEV;
5660 goto unlock_pf; /* out of VSI slots! */
5661 }
5662 pf->next_vsi = ++i;
5663 
5664 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5665 if (!vsi) {
5666 ret = -ENOMEM;
5667 goto unlock_pf;
5668 }
5669 vsi->type = type;
5670 vsi->back = pf;
5671 set_bit(__I40E_DOWN, &vsi->state);
5672 vsi->flags = 0;
5673 vsi->idx = vsi_idx;
5674 vsi->rx_itr_setting = pf->rx_itr_default;
5675 vsi->tx_itr_setting = pf->tx_itr_default;
5676 vsi->netdev_registered = false;
5677 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5678 INIT_LIST_HEAD(&vsi->mac_filter_list);
5679 
5680 ret = i40e_set_num_rings_in_vsi(vsi);
5681 if (ret)
5682 goto err_rings;
5683 
5684 ret = i40e_vsi_alloc_arrays(vsi, true);
5685 if (ret)
5686 goto err_rings;
5687 
5688 /* Setup default MSIX irq handler for VSI */
5689 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5690 
5691 pf->vsi[vsi_idx] = vsi;
5692 ret = vsi_idx;
5693 goto unlock_pf;
5694 
5695 err_rings:
5696 pf->next_vsi = i - 1;
5697 kfree(vsi);
5698 unlock_pf:
5699 mutex_unlock(&pf->switch_mutex);
5700 return ret;
5701 }
5702 
5703 /**
5704 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
5705 * @vsi: pointer to the VSI being cleaned
5706 * @free_qvectors: a bool to specify if q_vectors need to be freed.
5707 *
5708 * Frees the Tx/Rx ring pointer array and (optionally) the q_vector
5709 * pointer array; returns nothing.
5710 **/
5711 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
5712 {
5713 /* free the ring and vector containers */
5714 if (free_qvectors) {
5715 kfree(vsi->q_vectors);
5716 vsi->q_vectors = NULL;
5717 }
5718 kfree(vsi->tx_rings);
5719 vsi->tx_rings = NULL;
5720 vsi->rx_rings = NULL;
5721 }
5722 
5723 /**
5724 * i40e_vsi_clear - Deallocate the VSI provided
5725 * @vsi: the VSI being un-configured
5726 **/
5727 static int i40e_vsi_clear(struct i40e_vsi *vsi)
5728 {
5729 struct i40e_pf *pf;
5730 
5731 if (!vsi)
5732 return 0;
5733 
5734 if (!vsi->back)
5735 goto free_vsi;
5736 pf = vsi->back;
5737 
5738 mutex_lock(&pf->switch_mutex);
5739 if (!pf->vsi[vsi->idx]) {
5740 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5741 vsi->idx, vsi->idx, vsi, vsi->type);
5742 goto unlock_vsi;
5743 }
5744 
5745 if (pf->vsi[vsi->idx] != vsi) {
5746 dev_err(&pf->pdev->dev,
5747 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5748 pf->vsi[vsi->idx]->idx,
5749 pf->vsi[vsi->idx],
5750 pf->vsi[vsi->idx]->type,
5751 vsi->idx, vsi, vsi->type);
5752 goto unlock_vsi;
5753 }
5754 
5755 /* updates the pf for this cleared vsi */
5756 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5757 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5758 
5759 i40e_vsi_free_arrays(vsi, true);
5760 
5761 pf->vsi[vsi->idx] = NULL;
5762 if (vsi->idx < pf->next_vsi)
5763 pf->next_vsi = vsi->idx;
5764 
5765 unlock_vsi:
5766 mutex_unlock(&pf->switch_mutex);
5767 free_vsi:
5768 kfree(vsi);
5769 
5770 return 0;
5771 }
5772 
5773 /**
5774 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5775 * @vsi: the VSI being cleaned
5776 **/
5777 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5778 {
5779 int i;
5780 
5781 if (vsi->tx_rings && vsi->tx_rings[0]) {
5782 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5783 kfree_rcu(vsi->tx_rings[i], rcu);
5784 vsi->tx_rings[i] = NULL;
5785 vsi->rx_rings[i] = NULL;
5786 }
5787 }
5788 }
5789 
5790 /**
5791 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5792 * @vsi: the VSI being configured
5793 **/
5794 static int i40e_alloc_rings(struct i40e_vsi *vsi)
5795 {
5796 struct i40e_pf *pf = vsi->back;
5797 int i;
5798 
5799 /* Set basic values in the rings to be used later during open() */
5800 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5801 struct i40e_ring *tx_ring;
5802 struct i40e_ring *rx_ring;
5803 
5804 /* allocate space for both Tx and Rx in one shot */
5805 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5806 if (!tx_ring)
5807 goto err_out;
5808 
5809 tx_ring->queue_index = i;
5810 tx_ring->reg_idx = vsi->base_queue + i;
5811 tx_ring->ring_active = false;
5812 tx_ring->vsi = vsi;
5813 tx_ring->netdev = vsi->netdev;
5814 tx_ring->dev = &pf->pdev->dev;
5815 tx_ring->count = vsi->num_desc;
5816 tx_ring->size = 0;
5817 tx_ring->dcb_tc = 0;
5818 vsi->tx_rings[i] = tx_ring;
5819 
5820 rx_ring = &tx_ring[1];
5821 rx_ring->queue_index = i;
5822 rx_ring->reg_idx = vsi->base_queue + i;
5823 rx_ring->ring_active = false;
5824 rx_ring->vsi = vsi;
5825 rx_ring->netdev = vsi->netdev;
5826 rx_ring->dev = &pf->pdev->dev;
5827 rx_ring->count = vsi->num_desc;
5828 rx_ring->size = 0;
5829 rx_ring->dcb_tc = 0;
5830 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5831 set_ring_16byte_desc_enabled(rx_ring);
5832 else
5833 clear_ring_16byte_desc_enabled(rx_ring);
5834 vsi->rx_rings[i] = rx_ring;
5835 } 5836 5837 return 0; 5838 5839 err_out: 5840 i40e_vsi_clear_rings(vsi); 5841 return -ENOMEM; 5842 } 5843 5844 /** 5845 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel 5846 * @pf: board private structure 5847 * @vectors: the number of MSI-X vectors to request 5848 * 5849 * Returns the number of vectors reserved, or error 5850 **/ 5851 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 5852 { 5853 int err = 0; 5854 5855 pf->num_msix_entries = 0; 5856 while (vectors >= I40E_MIN_MSIX) { 5857 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors); 5858 if (err == 0) { 5859 /* good to go */ 5860 pf->num_msix_entries = vectors; 5861 break; 5862 } else if (err < 0) { 5863 /* total failure */ 5864 dev_info(&pf->pdev->dev, 5865 "MSI-X vector reservation failed: %d\n", err); 5866 vectors = 0; 5867 break; 5868 } else { 5869 /* err > 0 is the hint for retry */ 5870 dev_info(&pf->pdev->dev, 5871 "MSI-X vectors wanted %d, retrying with %d\n", 5872 vectors, err); 5873 vectors = err; 5874 } 5875 } 5876 5877 if (vectors > 0 && vectors < I40E_MIN_MSIX) { 5878 dev_info(&pf->pdev->dev, 5879 "Couldn't get enough vectors, only %d available\n", 5880 vectors); 5881 vectors = 0; 5882 } 5883 5884 return vectors; 5885 } 5886 5887 /** 5888 * i40e_init_msix - Setup the MSIX capability 5889 * @pf: board private structure 5890 * 5891 * Work with the OS to set up the MSIX vectors needed. 5892 * 5893 * Returns 0 on success, negative on failure 5894 **/ 5895 static int i40e_init_msix(struct i40e_pf *pf) 5896 { 5897 i40e_status err = 0; 5898 struct i40e_hw *hw = &pf->hw; 5899 int v_budget, i; 5900 int vec; 5901 5902 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) 5903 return -ENODEV; 5904 5905 /* The number of vectors we'll request will be comprised of: 5906 * - Add 1 for "other" cause for Admin Queue events, etc. 5907 * - The number of LAN queue pairs 5908 * - Queues being used for RSS. 5909 * We don't need as many as max_rss_size vectors. 5910 * use rss_size instead in the calculation since that 5911 * is governed by number of cpus in the system. 5912 * - assumes symmetric Tx/Rx pairing 5913 * - The number of VMDq pairs 5914 * Once we count this up, try the request. 5915 * 5916 * If we can't get what we want, we'll simplify to nearly nothing 5917 * and try again. If that still fails, we punt. 
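	 *
	 * Worked example with hypothetical counts: 8 LAN queue pairs with
	 * rss_size == rss_size_max, 8 VMDq VSIs at 2 vectors each, and
	 * flow director sideband enabled would ask for
	 * 1 (misc) + 8 (LAN) + 16 (VMDq) + 1 (FD SB) = 26 vectors, before
	 * the budget is capped at hw->func_caps.num_msix_vectors.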
5918 */ 5919 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); 5920 pf->num_vmdq_msix = pf->num_vmdq_qps; 5921 v_budget = 1 + pf->num_lan_msix; 5922 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); 5923 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 5924 v_budget++; 5925 5926 /* Scale down if necessary, and the rings will share vectors */ 5927 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); 5928 5929 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 5930 GFP_KERNEL); 5931 if (!pf->msix_entries) 5932 return -ENOMEM; 5933 5934 for (i = 0; i < v_budget; i++) 5935 pf->msix_entries[i].entry = i; 5936 vec = i40e_reserve_msix_vectors(pf, v_budget); 5937 if (vec < I40E_MIN_MSIX) { 5938 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 5939 kfree(pf->msix_entries); 5940 pf->msix_entries = NULL; 5941 return -ENODEV; 5942 5943 } else if (vec == I40E_MIN_MSIX) { 5944 /* Adjust for minimal MSIX use */ 5945 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); 5946 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 5947 pf->num_vmdq_vsis = 0; 5948 pf->num_vmdq_qps = 0; 5949 pf->num_vmdq_msix = 0; 5950 pf->num_lan_qps = 1; 5951 pf->num_lan_msix = 1; 5952 5953 } else if (vec != v_budget) { 5954 /* Scale vector usage down */ 5955 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 5956 vec--; /* reserve the misc vector */ 5957 5958 /* partition out the remaining vectors */ 5959 switch (vec) { 5960 case 2: 5961 pf->num_vmdq_vsis = 1; 5962 pf->num_lan_msix = 1; 5963 break; 5964 case 3: 5965 pf->num_vmdq_vsis = 1; 5966 pf->num_lan_msix = 2; 5967 break; 5968 default: 5969 pf->num_lan_msix = min_t(int, (vec / 2), 5970 pf->num_lan_qps); 5971 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), 5972 I40E_DEFAULT_NUM_VMDQ_VSI); 5973 break; 5974 } 5975 } 5976 5977 return err; 5978 } 5979 5980 /** 5981 * i40e_alloc_q_vector - Allocate memory for a single interrupt vector 5982 * @vsi: the VSI being configured 5983 * @v_idx: index of the vector in the vsi struct 5984 * 5985 * We allocate one q_vector. If allocation fails we return -ENOMEM. 5986 **/ 5987 static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 5988 { 5989 struct i40e_q_vector *q_vector; 5990 5991 /* allocate q_vector */ 5992 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 5993 if (!q_vector) 5994 return -ENOMEM; 5995 5996 q_vector->vsi = vsi; 5997 q_vector->v_idx = v_idx; 5998 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 5999 if (vsi->netdev) 6000 netif_napi_add(vsi->netdev, &q_vector->napi, 6001 i40e_napi_poll, vsi->work_limit); 6002 6003 q_vector->rx.latency_range = I40E_LOW_LATENCY; 6004 q_vector->tx.latency_range = I40E_LOW_LATENCY; 6005 6006 /* tie q_vector and vsi together */ 6007 vsi->q_vectors[v_idx] = q_vector; 6008 6009 return 0; 6010 } 6011 6012 /** 6013 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors 6014 * @vsi: the VSI being configured 6015 * 6016 * We allocate one q_vector per queue interrupt. If allocation fails we 6017 * return -ENOMEM. 
6018 **/ 6019 static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) 6020 { 6021 struct i40e_pf *pf = vsi->back; 6022 int v_idx, num_q_vectors; 6023 int err; 6024 6025 /* if not MSIX, give the one vector only to the LAN VSI */ 6026 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6027 num_q_vectors = vsi->num_q_vectors; 6028 else if (vsi == pf->vsi[pf->lan_vsi]) 6029 num_q_vectors = 1; 6030 else 6031 return -EINVAL; 6032 6033 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 6034 err = i40e_alloc_q_vector(vsi, v_idx); 6035 if (err) 6036 goto err_out; 6037 } 6038 6039 return 0; 6040 6041 err_out: 6042 while (v_idx--) 6043 i40e_free_q_vector(vsi, v_idx); 6044 6045 return err; 6046 } 6047 6048 /** 6049 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 6050 * @pf: board private structure to initialize 6051 **/ 6052 static void i40e_init_interrupt_scheme(struct i40e_pf *pf) 6053 { 6054 int err = 0; 6055 6056 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 6057 err = i40e_init_msix(pf); 6058 if (err) { 6059 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 6060 I40E_FLAG_RSS_ENABLED | 6061 I40E_FLAG_DCB_ENABLED | 6062 I40E_FLAG_SRIOV_ENABLED | 6063 I40E_FLAG_FD_SB_ENABLED | 6064 I40E_FLAG_FD_ATR_ENABLED | 6065 I40E_FLAG_VMDQ_ENABLED); 6066 6067 /* rework the queue expectations without MSIX */ 6068 i40e_determine_queue_usage(pf); 6069 } 6070 } 6071 6072 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 6073 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 6074 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); 6075 err = pci_enable_msi(pf->pdev); 6076 if (err) { 6077 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); 6078 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 6079 } 6080 } 6081 6082 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 6083 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); 6084 6085 /* track first vector for misc interrupts */ 6086 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 6087 } 6088 6089 /** 6090 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events 6091 * @pf: board private structure 6092 * 6093 * This sets up the handler for MSIX 0, which is used to manage the 6094 * non-queue interrupts, e.g. AdminQ and errors. This is not used 6095 * when in MSI or Legacy interrupt mode. 
6096 **/ 6097 static int i40e_setup_misc_vector(struct i40e_pf *pf) 6098 { 6099 struct i40e_hw *hw = &pf->hw; 6100 int err = 0; 6101 6102 /* Only request the irq if this is the first time through, and 6103 * not when we're rebuilding after a Reset 6104 */ 6105 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 6106 err = request_irq(pf->msix_entries[0].vector, 6107 i40e_intr, 0, pf->misc_int_name, pf); 6108 if (err) { 6109 dev_info(&pf->pdev->dev, 6110 "request_irq for msix_misc failed: %d\n", err); 6111 return -EFAULT; 6112 } 6113 } 6114 6115 i40e_enable_misc_int_causes(hw); 6116 6117 /* associate no queues to the misc vector */ 6118 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 6119 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); 6120 6121 i40e_flush(hw); 6122 6123 i40e_irq_dynamic_enable_icr0(pf); 6124 6125 return err; 6126 } 6127 6128 /** 6129 * i40e_config_rss - Prepare for RSS if used 6130 * @pf: board private structure 6131 **/ 6132 static int i40e_config_rss(struct i40e_pf *pf) 6133 { 6134 /* Set of random keys generated using kernel random number generator */ 6135 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, 6136 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 6137 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 6138 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; 6139 struct i40e_hw *hw = &pf->hw; 6140 u32 lut = 0; 6141 int i, j; 6142 u64 hena; 6143 6144 /* Fill out hash function seed */ 6145 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 6146 wr32(hw, I40E_PFQF_HKEY(i), seed[i]); 6147 6148 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ 6149 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | 6150 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); 6151 hena |= I40E_DEFAULT_RSS_HENA; 6152 wr32(hw, I40E_PFQF_HENA(0), (u32)hena); 6153 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 6154 6155 /* Populate the LUT with max no. of queues in round robin fashion */ 6156 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) { 6157 6158 /* The assumption is that lan qp count will be the highest 6159 * qp count for any PF VSI that needs RSS. 6160 * If multiple VSIs need RSS support, all the qp counts 6161 * for those VSIs should be a power of 2 for RSS to work. 6162 * If LAN VSI is the only consumer for RSS then this requirement 6163 * is not necessary. 6164 */ 6165 if (j == pf->rss_size) 6166 j = 0; 6167 /* lut = 4-byte sliding window of 4 lut entries */ 6168 lut = (lut << 8) | (j & 6169 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); 6170 /* On i = 3, we have 4 entries in lut; write to the register */ 6171 if ((i & 3) == 3) 6172 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); 6173 } 6174 i40e_flush(hw); 6175 6176 return 0; 6177 } 6178 6179 /** 6180 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild 6181 * @pf: board private structure 6182 * @queue_count: the requested queue count for rss. 6183 * 6184 * returns 0 if rss is not enabled, if enabled returns the final rss queue 6185 * count which may be different from the requested queue count. 
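 *
 * For example, a request for 10 queues on a part with rss_size_max of
 * 16 passes the cap unchanged and is then rounded down to 8, the
 * nearest power of two, before the reset and rebuild are triggered.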
6186 **/ 6187 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) 6188 { 6189 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) 6190 return 0; 6191 6192 queue_count = min_t(int, queue_count, pf->rss_size_max); 6193 queue_count = rounddown_pow_of_two(queue_count); 6194 6195 if (queue_count != pf->rss_size) { 6196 i40e_prep_for_reset(pf); 6197 6198 pf->rss_size = queue_count; 6199 6200 i40e_reset_and_rebuild(pf, true); 6201 i40e_config_rss(pf); 6202 } 6203 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); 6204 return pf->rss_size; 6205 } 6206 6207 /** 6208 * i40e_sw_init - Initialize general software structures (struct i40e_pf) 6209 * @pf: board private structure to initialize 6210 * 6211 * i40e_sw_init initializes the Adapter private data structure. 6212 * Fields are initialized based on PCI device information and 6213 * OS network device settings (MTU size). 6214 **/ 6215 static int i40e_sw_init(struct i40e_pf *pf) 6216 { 6217 int err = 0; 6218 int size; 6219 6220 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, 6221 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); 6222 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; 6223 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { 6224 if (I40E_DEBUG_USER & debug) 6225 pf->hw.debug_mask = debug; 6226 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), 6227 I40E_DEFAULT_MSG_ENABLE); 6228 } 6229 6230 /* Set default capability flags */ 6231 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | 6232 I40E_FLAG_MSI_ENABLED | 6233 I40E_FLAG_MSIX_ENABLED | 6234 I40E_FLAG_RX_1BUF_ENABLED; 6235 6236 /* Depending on PF configurations, it is possible that the RSS 6237 * maximum might end up larger than the available queues 6238 */ 6239 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; 6240 pf->rss_size_max = min_t(int, pf->rss_size_max, 6241 pf->hw.func_caps.num_tx_qp); 6242 if (pf->hw.func_caps.rss) { 6243 pf->flags |= I40E_FLAG_RSS_ENABLED; 6244 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); 6245 pf->rss_size = rounddown_pow_of_two(pf->rss_size); 6246 } else { 6247 pf->rss_size = 1; 6248 } 6249 6250 /* MFP mode enabled */ 6251 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { 6252 pf->flags |= I40E_FLAG_MFP_ENABLED; 6253 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); 6254 } 6255 6256 /* FW/NVM is not yet fixed in this regard */ 6257 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || 6258 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 6259 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 6260 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 6261 dev_info(&pf->pdev->dev, 6262 "Flow Director ATR mode Enabled\n"); 6263 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { 6264 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 6265 dev_info(&pf->pdev->dev, 6266 "Flow Director Side Band mode Enabled\n"); 6267 } else { 6268 dev_info(&pf->pdev->dev, 6269 "Flow Director Side Band mode Disabled in MFP mode\n"); 6270 } 6271 pf->fdir_pf_filter_count = 6272 pf->hw.func_caps.fd_filters_guaranteed; 6273 pf->hw.fdir_shared_filter_count = 6274 pf->hw.func_caps.fd_filters_best_effort; 6275 } 6276 6277 if (pf->hw.func_caps.vmdq) { 6278 pf->flags |= I40E_FLAG_VMDQ_ENABLED; 6279 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; 6280 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; 6281 } 6282 6283 #ifdef CONFIG_PCI_IOV 6284 if (pf->hw.func_caps.num_vfs) { 6285 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 6286 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 6287 pf->num_req_vfs = min_t(int, 6288 pf->hw.func_caps.num_vfs, 6289 
I40E_MAX_VF_COUNT); 6290 dev_info(&pf->pdev->dev, 6291 "Number of VFs being requested for PF[%d] = %d\n", 6292 pf->hw.pf_id, pf->num_req_vfs); 6293 } 6294 #endif /* CONFIG_PCI_IOV */ 6295 pf->eeprom_version = 0xDEAD; 6296 pf->lan_veb = I40E_NO_VEB; 6297 pf->lan_vsi = I40E_NO_VSI; 6298 6299 /* set up queue assignment tracking */ 6300 size = sizeof(struct i40e_lump_tracking) 6301 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 6302 pf->qp_pile = kzalloc(size, GFP_KERNEL); 6303 if (!pf->qp_pile) { 6304 err = -ENOMEM; 6305 goto sw_init_done; 6306 } 6307 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 6308 pf->qp_pile->search_hint = 0; 6309 6310 /* set up vector assignment tracking */ 6311 size = sizeof(struct i40e_lump_tracking) 6312 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); 6313 pf->irq_pile = kzalloc(size, GFP_KERNEL); 6314 if (!pf->irq_pile) { 6315 kfree(pf->qp_pile); 6316 err = -ENOMEM; 6317 goto sw_init_done; 6318 } 6319 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; 6320 pf->irq_pile->search_hint = 0; 6321 6322 mutex_init(&pf->switch_mutex); 6323 6324 sw_init_done: 6325 return err; 6326 } 6327 6328 /** 6329 * i40e_set_features - set the netdev feature flags 6330 * @netdev: ptr to the netdev being adjusted 6331 * @features: the feature set that the stack is suggesting 6332 **/ 6333 static int i40e_set_features(struct net_device *netdev, 6334 netdev_features_t features) 6335 { 6336 struct i40e_netdev_priv *np = netdev_priv(netdev); 6337 struct i40e_vsi *vsi = np->vsi; 6338 6339 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6340 i40e_vlan_stripping_enable(vsi); 6341 else 6342 i40e_vlan_stripping_disable(vsi); 6343 6344 return 0; 6345 } 6346 6347 #ifdef CONFIG_I40E_VXLAN 6348 /** 6349 * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port 6350 * @pf: board private structure 6351 * @port: The UDP port to look up 6352 * 6353 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found 6354 **/ 6355 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) 6356 { 6357 u8 i; 6358 6359 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 6360 if (pf->vxlan_ports[i] == port) 6361 return i; 6362 } 6363 6364 return i; 6365 } 6366 6367 /** 6368 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 6369 * @netdev: This physical port's netdev 6370 * @sa_family: Socket Family that VXLAN is notifying us about 6371 * @port: New UDP port number that VXLAN started listening to 6372 **/ 6373 static void i40e_add_vxlan_port(struct net_device *netdev, 6374 sa_family_t sa_family, __be16 port) 6375 { 6376 struct i40e_netdev_priv *np = netdev_priv(netdev); 6377 struct i40e_vsi *vsi = np->vsi; 6378 struct i40e_pf *pf = vsi->back; 6379 u8 next_idx; 6380 u8 idx; 6381 6382 if (sa_family == AF_INET6) 6383 return; 6384 6385 idx = i40e_get_vxlan_port_idx(pf, port); 6386 6387 /* Check if port already exists */ 6388 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 6389 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); 6390 return; 6391 } 6392 6393 /* Now check if there is space to add the new port */ 6394 next_idx = i40e_get_vxlan_port_idx(pf, 0); 6395 6396 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 6397 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", 6398 ntohs(port)); 6399 return; 6400 } 6401 6402 /* New port: add it and mark its index in the bitmap */ 6403 pf->vxlan_ports[next_idx] = port; 6404 pf->pending_vxlan_bitmap |= (1 << next_idx); 6405 6406 pf->flags |= 
I40E_FLAG_VXLAN_FILTER_SYNC; 6407 } 6408 6409 /** 6410 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away 6411 * @netdev: This physical port's netdev 6412 * @sa_family: Socket Family that VXLAN is notifying us about 6413 * @port: UDP port number that VXLAN stopped listening to 6414 **/ 6415 static void i40e_del_vxlan_port(struct net_device *netdev, 6416 sa_family_t sa_family, __be16 port) 6417 { 6418 struct i40e_netdev_priv *np = netdev_priv(netdev); 6419 struct i40e_vsi *vsi = np->vsi; 6420 struct i40e_pf *pf = vsi->back; 6421 u8 idx; 6422 6423 if (sa_family == AF_INET6) 6424 return; 6425 6426 idx = i40e_get_vxlan_port_idx(pf, port); 6427 6428 /* Check if port already exists */ 6429 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 6430 /* if port exists, set it to 0 (mark for deletion) 6431 * and make it pending 6432 */ 6433 pf->vxlan_ports[idx] = 0; 6434 6435 pf->pending_vxlan_bitmap |= (1 << idx); 6436 6437 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 6438 } else { 6439 netdev_warn(netdev, "Port %d was not found, not deleting\n", 6440 ntohs(port)); 6441 } 6442 } 6443 6444 #endif 6445 static const struct net_device_ops i40e_netdev_ops = { 6446 .ndo_open = i40e_open, 6447 .ndo_stop = i40e_close, 6448 .ndo_start_xmit = i40e_lan_xmit_frame, 6449 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 6450 .ndo_set_rx_mode = i40e_set_rx_mode, 6451 .ndo_validate_addr = eth_validate_addr, 6452 .ndo_set_mac_address = i40e_set_mac, 6453 .ndo_change_mtu = i40e_change_mtu, 6454 .ndo_do_ioctl = i40e_ioctl, 6455 .ndo_tx_timeout = i40e_tx_timeout, 6456 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 6457 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 6458 #ifdef CONFIG_NET_POLL_CONTROLLER 6459 .ndo_poll_controller = i40e_netpoll, 6460 #endif 6461 .ndo_setup_tc = i40e_setup_tc, 6462 .ndo_set_features = i40e_set_features, 6463 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 6464 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 6465 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, 6466 .ndo_get_vf_config = i40e_ndo_get_vf_config, 6467 #ifdef CONFIG_I40E_VXLAN 6468 .ndo_add_vxlan_port = i40e_add_vxlan_port, 6469 .ndo_del_vxlan_port = i40e_del_vxlan_port, 6470 #endif 6471 }; 6472 6473 /** 6474 * i40e_config_netdev - Setup the netdev flags 6475 * @vsi: the VSI being configured 6476 * 6477 * Returns 0 on success, negative value on failure 6478 **/ 6479 static int i40e_config_netdev(struct i40e_vsi *vsi) 6480 { 6481 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 6482 struct i40e_pf *pf = vsi->back; 6483 struct i40e_hw *hw = &pf->hw; 6484 struct i40e_netdev_priv *np; 6485 struct net_device *netdev; 6486 u8 mac_addr[ETH_ALEN]; 6487 int etherdev_size; 6488 6489 etherdev_size = sizeof(struct i40e_netdev_priv); 6490 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 6491 if (!netdev) 6492 return -ENOMEM; 6493 6494 vsi->netdev = netdev; 6495 np = netdev_priv(netdev); 6496 np->vsi = vsi; 6497 6498 netdev->hw_enc_features = NETIF_F_IP_CSUM | 6499 NETIF_F_GSO_UDP_TUNNEL | 6500 NETIF_F_TSO | 6501 NETIF_F_SG; 6502 6503 netdev->features = NETIF_F_SG | 6504 NETIF_F_IP_CSUM | 6505 NETIF_F_SCTP_CSUM | 6506 NETIF_F_HIGHDMA | 6507 NETIF_F_GSO_UDP_TUNNEL | 6508 NETIF_F_HW_VLAN_CTAG_TX | 6509 NETIF_F_HW_VLAN_CTAG_RX | 6510 NETIF_F_HW_VLAN_CTAG_FILTER | 6511 NETIF_F_IPV6_CSUM | 6512 NETIF_F_TSO | 6513 NETIF_F_TSO6 | 6514 NETIF_F_RXCSUM | 6515 NETIF_F_RXHASH | 6516 0; 6517 6518 /* copy netdev features into list of user selectable features */ 6519 netdev->hw_features |= netdev->features; 6520 6521 if (vsi->type == 
I40E_VSI_MAIN) {
6522 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
6523 		memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
6524 	} else {
6525 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
6526 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
6527 			 pf->vsi[pf->lan_vsi]->netdev->name);
6528 		random_ether_addr(mac_addr);
6529 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
6530 	}
6531 	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
6532 
6533 	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
6534 	memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
6535 	/* vlan gets same features (except vlan offload)
6536 	 * after any tweaks for specific VSI types
6537 	 */
6538 	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
6539 						     NETIF_F_HW_VLAN_CTAG_RX |
6540 						   NETIF_F_HW_VLAN_CTAG_FILTER);
6541 	netdev->priv_flags |= IFF_UNICAST_FLT;
6542 	netdev->priv_flags |= IFF_SUPP_NOFCS;
6543 	/* Setup netdev TC information */
6544 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
6545 
6546 	netdev->netdev_ops = &i40e_netdev_ops;
6547 	netdev->watchdog_timeo = 5 * HZ;
6548 	i40e_set_ethtool_ops(netdev);
6549 
6550 	return 0;
6551 }
6552 
6553 /**
6554  * i40e_vsi_delete - Delete a VSI from the switch
6555  * @vsi: the VSI being removed
6558  **/
6559 static void i40e_vsi_delete(struct i40e_vsi *vsi)
6560 {
6561 	/* removing the default VSI is not allowed */
6562 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
6563 		return;
6564 
6565 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
6566 	return;
6567 }
6568 
6569 /**
6570  * i40e_add_vsi - Add a VSI to the switch
6571  * @vsi: the VSI being configured
6572  *
6573  * This initializes a VSI context depending on the VSI type to be added and
6574  * passes it down to the add_vsi aq command.
6575  **/
6576 static int i40e_add_vsi(struct i40e_vsi *vsi)
6577 {
6578 	int ret = -ENODEV;
6579 	struct i40e_mac_filter *f, *ftmp;
6580 	struct i40e_pf *pf = vsi->back;
6581 	struct i40e_hw *hw = &pf->hw;
6582 	struct i40e_vsi_context ctxt;
6583 	u8 enabled_tc = 0x1; /* TC0 enabled */
6584 	int f_count = 0;
6585 
6586 	memset(&ctxt, 0, sizeof(ctxt));
6587 	switch (vsi->type) {
6588 	case I40E_VSI_MAIN:
6589 		/* The PF's main VSI is already setup as part of the
6590 		 * device initialization, so we'll not bother with
6591 		 * the add_vsi call, but we will retrieve the current
6592 		 * VSI context.
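		 * That retrieved context is then the baseline for the MFP
		 * queue map update or the TC reconfiguration further below.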
6593 */ 6594 ctxt.seid = pf->main_vsi_seid; 6595 ctxt.pf_num = pf->hw.pf_id; 6596 ctxt.vf_num = 0; 6597 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); 6598 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6599 if (ret) { 6600 dev_info(&pf->pdev->dev, 6601 "couldn't get pf vsi config, err %d, aq_err %d\n", 6602 ret, pf->hw.aq.asq_last_status); 6603 return -ENOENT; 6604 } 6605 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 6606 vsi->info.valid_sections = 0; 6607 6608 vsi->seid = ctxt.seid; 6609 vsi->id = ctxt.vsi_number; 6610 6611 enabled_tc = i40e_pf_get_tc_map(pf); 6612 6613 /* MFP mode setup queue map and update VSI */ 6614 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 6615 memset(&ctxt, 0, sizeof(ctxt)); 6616 ctxt.seid = pf->main_vsi_seid; 6617 ctxt.pf_num = pf->hw.pf_id; 6618 ctxt.vf_num = 0; 6619 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 6620 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 6621 if (ret) { 6622 dev_info(&pf->pdev->dev, 6623 "update vsi failed, aq_err=%d\n", 6624 pf->hw.aq.asq_last_status); 6625 ret = -ENOENT; 6626 goto err; 6627 } 6628 /* update the local VSI info queue map */ 6629 i40e_vsi_update_queue_map(vsi, &ctxt); 6630 vsi->info.valid_sections = 0; 6631 } else { 6632 /* Default/Main VSI is only enabled for TC0 6633 * reconfigure it to enable all TCs that are 6634 * available on the port in SFP mode. 6635 */ 6636 ret = i40e_vsi_config_tc(vsi, enabled_tc); 6637 if (ret) { 6638 dev_info(&pf->pdev->dev, 6639 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", 6640 enabled_tc, ret, 6641 pf->hw.aq.asq_last_status); 6642 ret = -ENOENT; 6643 } 6644 } 6645 break; 6646 6647 case I40E_VSI_FDIR: 6648 ctxt.pf_num = hw->pf_id; 6649 ctxt.vf_num = 0; 6650 ctxt.uplink_seid = vsi->uplink_seid; 6651 ctxt.connection_type = 0x1; /* regular data port */ 6652 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6653 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 6654 break; 6655 6656 case I40E_VSI_VMDQ2: 6657 ctxt.pf_num = hw->pf_id; 6658 ctxt.vf_num = 0; 6659 ctxt.uplink_seid = vsi->uplink_seid; 6660 ctxt.connection_type = 0x1; /* regular data port */ 6661 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 6662 6663 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6664 6665 /* This VSI is connected to VEB so the switch_id 6666 * should be set to zero by default. 6667 */ 6668 ctxt.info.switch_id = 0; 6669 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); 6670 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6671 6672 /* Setup the VSI tx/rx queue map for TC0 only for now */ 6673 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 6674 break; 6675 6676 case I40E_VSI_SRIOV: 6677 ctxt.pf_num = hw->pf_id; 6678 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 6679 ctxt.uplink_seid = vsi->uplink_seid; 6680 ctxt.connection_type = 0x1; /* regular data port */ 6681 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 6682 6683 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6684 6685 /* This VSI is connected to VEB so the switch_id 6686 * should be set to zero by default. 
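		 * (the I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB flag applied just
		 * below then opts this VF VSI in to VEB-local loopback)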
6687 */ 6688 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6689 6690 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 6691 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 6692 /* Setup the VSI tx/rx queue map for TC0 only for now */ 6693 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 6694 break; 6695 6696 default: 6697 return -ENODEV; 6698 } 6699 6700 if (vsi->type != I40E_VSI_MAIN) { 6701 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 6702 if (ret) { 6703 dev_info(&vsi->back->pdev->dev, 6704 "add vsi failed, aq_err=%d\n", 6705 vsi->back->hw.aq.asq_last_status); 6706 ret = -ENOENT; 6707 goto err; 6708 } 6709 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 6710 vsi->info.valid_sections = 0; 6711 vsi->seid = ctxt.seid; 6712 vsi->id = ctxt.vsi_number; 6713 } 6714 6715 /* If macvlan filters already exist, force them to get loaded */ 6716 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 6717 f->changed = true; 6718 f_count++; 6719 } 6720 if (f_count) { 6721 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 6722 pf->flags |= I40E_FLAG_FILTER_SYNC; 6723 } 6724 6725 /* Update VSI BW information */ 6726 ret = i40e_vsi_get_bw_info(vsi); 6727 if (ret) { 6728 dev_info(&pf->pdev->dev, 6729 "couldn't get vsi bw info, err %d, aq_err %d\n", 6730 ret, pf->hw.aq.asq_last_status); 6731 /* VSI is already added so not tearing that up */ 6732 ret = 0; 6733 } 6734 6735 err: 6736 return ret; 6737 } 6738 6739 /** 6740 * i40e_vsi_release - Delete a VSI and free its resources 6741 * @vsi: the VSI being removed 6742 * 6743 * Returns 0 on success or < 0 on error 6744 **/ 6745 int i40e_vsi_release(struct i40e_vsi *vsi) 6746 { 6747 struct i40e_mac_filter *f, *ftmp; 6748 struct i40e_veb *veb = NULL; 6749 struct i40e_pf *pf; 6750 u16 uplink_seid; 6751 int i, n; 6752 6753 pf = vsi->back; 6754 6755 /* release of a VEB-owner or last VSI is not allowed */ 6756 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 6757 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 6758 vsi->seid, vsi->uplink_seid); 6759 return -ENODEV; 6760 } 6761 if (vsi == pf->vsi[pf->lan_vsi] && 6762 !test_bit(__I40E_DOWN, &pf->state)) { 6763 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 6764 return -ENODEV; 6765 } 6766 6767 uplink_seid = vsi->uplink_seid; 6768 if (vsi->type != I40E_VSI_SRIOV) { 6769 if (vsi->netdev_registered) { 6770 vsi->netdev_registered = false; 6771 if (vsi->netdev) { 6772 /* results in a call to i40e_close() */ 6773 unregister_netdev(vsi->netdev); 6774 free_netdev(vsi->netdev); 6775 vsi->netdev = NULL; 6776 } 6777 } else { 6778 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 6779 i40e_down(vsi); 6780 i40e_vsi_free_irq(vsi); 6781 i40e_vsi_free_tx_resources(vsi); 6782 i40e_vsi_free_rx_resources(vsi); 6783 } 6784 i40e_vsi_disable_irq(vsi); 6785 } 6786 6787 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 6788 i40e_del_filter(vsi, f->macaddr, f->vlan, 6789 f->is_vf, f->is_netdev); 6790 i40e_sync_vsi_filters(vsi); 6791 6792 i40e_vsi_delete(vsi); 6793 i40e_vsi_free_q_vectors(vsi); 6794 i40e_vsi_clear_rings(vsi); 6795 i40e_vsi_clear(vsi); 6796 6797 /* If this was the last thing on the VEB, except for the 6798 * controlling VSI, remove the VEB, which puts the controlling 6799 * VSI onto the next level down in the switch. 6800 * 6801 * Well, okay, there's one more exception here: don't remove 6802 * the orphan VEBs yet. We'll wait for an explicit remove request 6803 * from up the network stack. 
6804 */ 6805 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) { 6806 if (pf->vsi[i] && 6807 pf->vsi[i]->uplink_seid == uplink_seid && 6808 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 6809 n++; /* count the VSIs */ 6810 } 6811 } 6812 for (i = 0; i < I40E_MAX_VEB; i++) { 6813 if (!pf->veb[i]) 6814 continue; 6815 if (pf->veb[i]->uplink_seid == uplink_seid) 6816 n++; /* count the VEBs */ 6817 if (pf->veb[i]->seid == uplink_seid) 6818 veb = pf->veb[i]; 6819 } 6820 if (n == 0 && veb && veb->uplink_seid != 0) 6821 i40e_veb_release(veb); 6822 6823 return 0; 6824 } 6825 6826 /** 6827 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 6828 * @vsi: ptr to the VSI 6829 * 6830 * This should only be called after i40e_vsi_mem_alloc() which allocates the 6831 * corresponding SW VSI structure and initializes num_queue_pairs for the 6832 * newly allocated VSI. 6833 * 6834 * Returns 0 on success or negative on failure 6835 **/ 6836 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 6837 { 6838 int ret = -ENOENT; 6839 struct i40e_pf *pf = vsi->back; 6840 6841 if (vsi->q_vectors[0]) { 6842 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 6843 vsi->seid); 6844 return -EEXIST; 6845 } 6846 6847 if (vsi->base_vector) { 6848 dev_info(&pf->pdev->dev, 6849 "VSI %d has non-zero base vector %d\n", 6850 vsi->seid, vsi->base_vector); 6851 return -EEXIST; 6852 } 6853 6854 ret = i40e_alloc_q_vectors(vsi); 6855 if (ret) { 6856 dev_info(&pf->pdev->dev, 6857 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 6858 vsi->num_q_vectors, vsi->seid, ret); 6859 vsi->num_q_vectors = 0; 6860 goto vector_setup_out; 6861 } 6862 6863 if (vsi->num_q_vectors) 6864 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 6865 vsi->num_q_vectors, vsi->idx); 6866 if (vsi->base_vector < 0) { 6867 dev_info(&pf->pdev->dev, 6868 "failed to get q tracking for VSI %d, err=%d\n", 6869 vsi->seid, vsi->base_vector); 6870 i40e_vsi_free_q_vectors(vsi); 6871 ret = -ENOENT; 6872 goto vector_setup_out; 6873 } 6874 6875 vector_setup_out: 6876 return ret; 6877 } 6878 6879 /** 6880 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 6881 * @vsi: pointer to the vsi. 6882 * 6883 * This re-allocates a vsi's queue resources. 6884 * 6885 * Returns pointer to the successfully allocated and configured VSI sw struct 6886 * on success, otherwise returns NULL on failure. 6887 **/ 6888 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 6889 { 6890 struct i40e_pf *pf = vsi->back; 6891 u8 enabled_tc; 6892 int ret; 6893 6894 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 6895 i40e_vsi_clear_rings(vsi); 6896 6897 i40e_vsi_free_arrays(vsi, false); 6898 i40e_set_num_rings_in_vsi(vsi); 6899 ret = i40e_vsi_alloc_arrays(vsi, false); 6900 if (ret) 6901 goto err_vsi; 6902 6903 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); 6904 if (ret < 0) { 6905 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", 6906 vsi->seid, ret); 6907 goto err_vsi; 6908 } 6909 vsi->base_queue = ret; 6910 6911 /* Update the FW view of the VSI. Force a reset of TC and queue 6912 * layout configurations. 
6913 	 */
6914 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6915 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6916 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6917 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6918 
6919 	/* assign it some queues */
6920 	ret = i40e_alloc_rings(vsi);
6921 	if (ret)
6922 		goto err_rings;
6923 
6924 	/* map all of the rings to the q_vectors */
6925 	i40e_vsi_map_rings_to_vectors(vsi);
6926 	return vsi;
6927 
6928 err_rings:
6929 	i40e_vsi_free_q_vectors(vsi);
6930 	if (vsi->netdev_registered) {
6931 		vsi->netdev_registered = false;
6932 		unregister_netdev(vsi->netdev);
6933 		free_netdev(vsi->netdev);
6934 		vsi->netdev = NULL;
6935 	}
6936 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6937 err_vsi:
6938 	i40e_vsi_clear(vsi);
6939 	return NULL;
6940 }
6941 
6942 /**
6943  * i40e_vsi_setup - Set up a VSI by a given type
6944  * @pf: board private structure
6945  * @type: VSI type
6946  * @uplink_seid: the switch element to link to
6947  * @param1: usage depends upon VSI type. For VF types, indicates VF id
6948  *
6949  * This allocates the sw VSI structure and its queue resources, then adds the
6950  * VSI to the identified VEB.
6951  *
6952  * Returns pointer to the successfully allocated and configured VSI sw struct
6953  * on success, otherwise returns NULL on failure.
6954  **/
6955 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6956 				u16 uplink_seid, u32 param1)
6957 {
6958 	struct i40e_vsi *vsi = NULL;
6959 	struct i40e_veb *veb = NULL;
6960 	int ret, i;
6961 	int v_idx;
6962 
6963 	/* The requested uplink_seid must be either
6964 	 *     - the PF's port seid
6965 	 *              no VEB is needed because this is the PF
6966 	 *              or this is a Flow Director special case VSI
6967 	 *     - seid of an existing VEB
6968 	 *     - seid of a VSI that owns an existing VEB
6969 	 *     - seid of a VSI that doesn't own a VEB
6970 	 *              a new VEB is created and the VSI becomes the owner
6971 	 *     - seid of the PF VSI, which is what creates the first VEB
6972 	 *              this is a special case of the previous
6973 	 *
6974 	 * Find which uplink_seid we were given and create a new VEB if needed
6975 	 */
6976 	for (i = 0; i < I40E_MAX_VEB; i++) {
6977 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6978 			veb = pf->veb[i];
6979 			break;
6980 		}
6981 	}
6982 
6983 	if (!veb && uplink_seid != pf->mac_seid) {
6984 
6985 		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6986 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6987 				vsi = pf->vsi[i];
6988 				break;
6989 			}
6990 		}
6991 		if (!vsi) {
6992 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6993 				 uplink_seid);
6994 			return NULL;
6995 		}
6996 
6997 		if (vsi->uplink_seid == pf->mac_seid)
6998 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6999 					     vsi->tc_config.enabled_tc);
7000 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
7001 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
7002 					     vsi->tc_config.enabled_tc);
7003 
7004 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
7005 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
7006 				veb = pf->veb[i];
7007 		}
7008 		if (!veb) {
7009 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
7010 			return NULL;
7011 		}
7012 
7013 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
7014 		uplink_seid = veb->seid;
7015 	}
7016 
7017 	/* get vsi sw struct */
7018 	v_idx = i40e_vsi_mem_alloc(pf, type);
7019 	if (v_idx < 0)
7020 		goto err_alloc;
7021 	vsi = pf->vsi[v_idx];
7022 	if (!vsi)
7023 		goto err_alloc;
7024 	vsi->type = type;
7025 	vsi->veb_idx = (veb ?
veb->idx : I40E_NO_VEB); 7026 7027 if (type == I40E_VSI_MAIN) 7028 pf->lan_vsi = v_idx; 7029 else if (type == I40E_VSI_SRIOV) 7030 vsi->vf_id = param1; 7031 /* assign it some queues */ 7032 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, 7033 vsi->idx); 7034 if (ret < 0) { 7035 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", 7036 vsi->seid, ret); 7037 goto err_vsi; 7038 } 7039 vsi->base_queue = ret; 7040 7041 /* get a VSI from the hardware */ 7042 vsi->uplink_seid = uplink_seid; 7043 ret = i40e_add_vsi(vsi); 7044 if (ret) 7045 goto err_vsi; 7046 7047 switch (vsi->type) { 7048 /* setup the netdev if needed */ 7049 case I40E_VSI_MAIN: 7050 case I40E_VSI_VMDQ2: 7051 ret = i40e_config_netdev(vsi); 7052 if (ret) 7053 goto err_netdev; 7054 ret = register_netdev(vsi->netdev); 7055 if (ret) 7056 goto err_netdev; 7057 vsi->netdev_registered = true; 7058 netif_carrier_off(vsi->netdev); 7059 #ifdef CONFIG_I40E_DCB 7060 /* Setup DCB netlink interface */ 7061 i40e_dcbnl_setup(vsi); 7062 #endif /* CONFIG_I40E_DCB */ 7063 /* fall through */ 7064 7065 case I40E_VSI_FDIR: 7066 /* set up vectors and rings if needed */ 7067 ret = i40e_vsi_setup_vectors(vsi); 7068 if (ret) 7069 goto err_msix; 7070 7071 ret = i40e_alloc_rings(vsi); 7072 if (ret) 7073 goto err_rings; 7074 7075 /* map all of the rings to the q_vectors */ 7076 i40e_vsi_map_rings_to_vectors(vsi); 7077 7078 i40e_vsi_reset_stats(vsi); 7079 break; 7080 7081 default: 7082 /* no netdev or rings for the other VSI types */ 7083 break; 7084 } 7085 7086 return vsi; 7087 7088 err_rings: 7089 i40e_vsi_free_q_vectors(vsi); 7090 err_msix: 7091 if (vsi->netdev_registered) { 7092 vsi->netdev_registered = false; 7093 unregister_netdev(vsi->netdev); 7094 free_netdev(vsi->netdev); 7095 vsi->netdev = NULL; 7096 } 7097 err_netdev: 7098 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 7099 err_vsi: 7100 i40e_vsi_clear(vsi); 7101 err_alloc: 7102 return NULL; 7103 } 7104 7105 /** 7106 * i40e_veb_get_bw_info - Query VEB BW information 7107 * @veb: the veb to query 7108 * 7109 * Query the Tx scheduler BW configuration data for given VEB 7110 **/ 7111 static int i40e_veb_get_bw_info(struct i40e_veb *veb) 7112 { 7113 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; 7114 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; 7115 struct i40e_pf *pf = veb->pf; 7116 struct i40e_hw *hw = &pf->hw; 7117 u32 tc_bw_max; 7118 int ret = 0; 7119 int i; 7120 7121 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, 7122 &bw_data, NULL); 7123 if (ret) { 7124 dev_info(&pf->pdev->dev, 7125 "query veb bw config failed, aq_err=%d\n", 7126 hw->aq.asq_last_status); 7127 goto out; 7128 } 7129 7130 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, 7131 &ets_data, NULL); 7132 if (ret) { 7133 dev_info(&pf->pdev->dev, 7134 "query veb bw ets config failed, aq_err=%d\n", 7135 hw->aq.asq_last_status); 7136 goto out; 7137 } 7138 7139 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); 7140 veb->bw_max_quanta = ets_data.tc_bw_max; 7141 veb->is_abs_credits = bw_data.absolute_credits_enable; 7142 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | 7143 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); 7144 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 7145 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; 7146 veb->bw_tc_limit_credits[i] = 7147 le16_to_cpu(bw_data.tc_bw_limits[i]); 7148 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); 7149 } 7150 7151 out: 7152 return ret; 7153 } 7154 7155 /** 7156 * i40e_veb_mem_alloc - Allocates the next 
available struct veb in the PF
7157  * @pf: board private structure
7158  *
7159  * On error: returns error code (negative)
7160  * On success: returns veb index in PF (positive)
7161  **/
7162 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
7163 {
7164 	int ret = -ENOENT;
7165 	struct i40e_veb *veb;
7166 	int i;
7167 
7168 	/* Need to protect the allocation of switch elements at the PF level */
7169 	mutex_lock(&pf->switch_mutex);
7170 
7171 	/* VEB list may be fragmented if VEB creation/destruction has
7172 	 * been happening.  We can afford to do a quick scan to look
7173 	 * for any free slots in the list.
7174 	 *
7175 	 * find next empty veb slot, looping back around if necessary
7176 	 */
7177 	i = 0;
7178 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
7179 		i++;
7180 	if (i >= I40E_MAX_VEB) {
7181 		ret = -ENOMEM;
7182 		goto err_alloc_veb;  /* out of VEB slots! */
7183 	}
7184 
7185 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
7186 	if (!veb) {
7187 		ret = -ENOMEM;
7188 		goto err_alloc_veb;
7189 	}
7190 	veb->pf = pf;
7191 	veb->idx = i;
7192 	veb->enabled_tc = 1;
7193 
7194 	pf->veb[i] = veb;
7195 	ret = i;
7196 err_alloc_veb:
7197 	mutex_unlock(&pf->switch_mutex);
7198 	return ret;
7199 }
7200 
7201 /**
7202  * i40e_switch_branch_release - Delete a branch of the switch tree
7203  * @branch: where to start deleting
7204  *
7205  * This uses recursion to find the tips of the branch to be
7206  * removed, deleting until we get back to and can delete this VEB.
7207  **/
7208 static void i40e_switch_branch_release(struct i40e_veb *branch)
7209 {
7210 	struct i40e_pf *pf = branch->pf;
7211 	u16 branch_seid = branch->seid;
7212 	u16 veb_idx = branch->idx;
7213 	int i;
7214 
7215 	/* release any VEBs on this VEB - RECURSION */
7216 	for (i = 0; i < I40E_MAX_VEB; i++) {
7217 		if (!pf->veb[i])
7218 			continue;
7219 		if (pf->veb[i]->uplink_seid == branch->seid)
7220 			i40e_switch_branch_release(pf->veb[i]);
7221 	}
7222 
7223 	/* Release the VSIs on this VEB, but not the owner VSI.
7224 	 *
7225 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
7226 	 *       the VEB itself, so don't use (*branch) after this loop.
7227 	 */
7228 	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7229 		if (!pf->vsi[i])
7230 			continue;
7231 		if (pf->vsi[i]->uplink_seid == branch_seid &&
7232 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
7233 			i40e_vsi_release(pf->vsi[i]);
7234 		}
7235 	}
7236 
7237 	/* There's one corner case where the VEB might not have been
7238 	 * removed, so double check it here and remove it if needed.
7239 	 * This case happens if the veb was created from the debugfs
7240 	 * commands and no VSIs were added to it.
7241 */ 7242 if (pf->veb[veb_idx]) 7243 i40e_veb_release(pf->veb[veb_idx]); 7244 } 7245 7246 /** 7247 * i40e_veb_clear - remove veb struct 7248 * @veb: the veb to remove 7249 **/ 7250 static void i40e_veb_clear(struct i40e_veb *veb) 7251 { 7252 if (!veb) 7253 return; 7254 7255 if (veb->pf) { 7256 struct i40e_pf *pf = veb->pf; 7257 7258 mutex_lock(&pf->switch_mutex); 7259 if (pf->veb[veb->idx] == veb) 7260 pf->veb[veb->idx] = NULL; 7261 mutex_unlock(&pf->switch_mutex); 7262 } 7263 7264 kfree(veb); 7265 } 7266 7267 /** 7268 * i40e_veb_release - Delete a VEB and free its resources 7269 * @veb: the VEB being removed 7270 **/ 7271 void i40e_veb_release(struct i40e_veb *veb) 7272 { 7273 struct i40e_vsi *vsi = NULL; 7274 struct i40e_pf *pf; 7275 int i, n = 0; 7276 7277 pf = veb->pf; 7278 7279 /* find the remaining VSI and check for extras */ 7280 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7281 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 7282 n++; 7283 vsi = pf->vsi[i]; 7284 } 7285 } 7286 if (n != 1) { 7287 dev_info(&pf->pdev->dev, 7288 "can't remove VEB %d with %d VSIs left\n", 7289 veb->seid, n); 7290 return; 7291 } 7292 7293 /* move the remaining VSI to uplink veb */ 7294 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 7295 if (veb->uplink_seid) { 7296 vsi->uplink_seid = veb->uplink_seid; 7297 if (veb->uplink_seid == pf->mac_seid) 7298 vsi->veb_idx = I40E_NO_VEB; 7299 else 7300 vsi->veb_idx = veb->veb_idx; 7301 } else { 7302 /* floating VEB */ 7303 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 7304 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 7305 } 7306 7307 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 7308 i40e_veb_clear(veb); 7309 7310 return; 7311 } 7312 7313 /** 7314 * i40e_add_veb - create the VEB in the switch 7315 * @veb: the VEB to be instantiated 7316 * @vsi: the controlling VSI 7317 **/ 7318 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 7319 { 7320 bool is_default = false; 7321 bool is_cloud = false; 7322 int ret; 7323 7324 /* get a VEB from the hardware */ 7325 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, 7326 veb->enabled_tc, is_default, 7327 is_cloud, &veb->seid, NULL); 7328 if (ret) { 7329 dev_info(&veb->pf->pdev->dev, 7330 "couldn't add VEB, err %d, aq_err %d\n", 7331 ret, veb->pf->hw.aq.asq_last_status); 7332 return -EPERM; 7333 } 7334 7335 /* get statistics counter */ 7336 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, 7337 &veb->stats_idx, NULL, NULL, NULL); 7338 if (ret) { 7339 dev_info(&veb->pf->pdev->dev, 7340 "couldn't get VEB statistics idx, err %d, aq_err %d\n", 7341 ret, veb->pf->hw.aq.asq_last_status); 7342 return -EPERM; 7343 } 7344 ret = i40e_veb_get_bw_info(veb); 7345 if (ret) { 7346 dev_info(&veb->pf->pdev->dev, 7347 "couldn't get VEB bw info, err %d, aq_err %d\n", 7348 ret, veb->pf->hw.aq.asq_last_status); 7349 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); 7350 return -ENOENT; 7351 } 7352 7353 vsi->uplink_seid = veb->seid; 7354 vsi->veb_idx = veb->idx; 7355 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 7356 7357 return 0; 7358 } 7359 7360 /** 7361 * i40e_veb_setup - Set up a VEB 7362 * @pf: board private structure 7363 * @flags: VEB setup flags 7364 * @uplink_seid: the switch element to link to 7365 * @vsi_seid: the initial VSI seid 7366 * @enabled_tc: Enabled TC bit-map 7367 * 7368 * This allocates the sw VEB structure and links it into the switch 7369 * It is possible and legal for this to be a duplicate of an already 7370 * existing VEB. 
It is also possible for both uplink and vsi seids 7371 * to be zero, in order to create a floating VEB. 7372 * 7373 * Returns pointer to the successfully allocated VEB sw struct on 7374 * success, otherwise returns NULL on failure. 7375 **/ 7376 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 7377 u16 uplink_seid, u16 vsi_seid, 7378 u8 enabled_tc) 7379 { 7380 struct i40e_veb *veb, *uplink_veb = NULL; 7381 int vsi_idx, veb_idx; 7382 int ret; 7383 7384 /* if one seid is 0, the other must be 0 to create a floating relay */ 7385 if ((uplink_seid == 0 || vsi_seid == 0) && 7386 (uplink_seid + vsi_seid != 0)) { 7387 dev_info(&pf->pdev->dev, 7388 "one, not both seid's are 0: uplink=%d vsi=%d\n", 7389 uplink_seid, vsi_seid); 7390 return NULL; 7391 } 7392 7393 /* make sure there is such a vsi and uplink */ 7394 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++) 7395 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 7396 break; 7397 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) { 7398 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 7399 vsi_seid); 7400 return NULL; 7401 } 7402 7403 if (uplink_seid && uplink_seid != pf->mac_seid) { 7404 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 7405 if (pf->veb[veb_idx] && 7406 pf->veb[veb_idx]->seid == uplink_seid) { 7407 uplink_veb = pf->veb[veb_idx]; 7408 break; 7409 } 7410 } 7411 if (!uplink_veb) { 7412 dev_info(&pf->pdev->dev, 7413 "uplink seid %d not found\n", uplink_seid); 7414 return NULL; 7415 } 7416 } 7417 7418 /* get veb sw struct */ 7419 veb_idx = i40e_veb_mem_alloc(pf); 7420 if (veb_idx < 0) 7421 goto err_alloc; 7422 veb = pf->veb[veb_idx]; 7423 veb->flags = flags; 7424 veb->uplink_seid = uplink_seid; 7425 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 7426 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 7427 7428 /* create the VEB in the switch */ 7429 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 7430 if (ret) 7431 goto err_veb; 7432 7433 return veb; 7434 7435 err_veb: 7436 i40e_veb_clear(veb); 7437 err_alloc: 7438 return NULL; 7439 } 7440 7441 /** 7442 * i40e_setup_pf_switch_element - set pf vars based on switch type 7443 * @pf: board private structure 7444 * @ele: element we are building info from 7445 * @num_reported: total number of elements 7446 * @printconfig: should we print the contents 7447 * 7448 * helper function to assist in extracting a few useful SEID values. 7449 **/ 7450 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 7451 struct i40e_aqc_switch_config_element_resp *ele, 7452 u16 num_reported, bool printconfig) 7453 { 7454 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 7455 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 7456 u8 element_type = ele->element_type; 7457 u16 seid = le16_to_cpu(ele->seid); 7458 7459 if (printconfig) 7460 dev_info(&pf->pdev->dev, 7461 "type=%d seid=%d uplink=%d downlink=%d\n", 7462 element_type, seid, uplink_seid, downlink_seid); 7463 7464 switch (element_type) { 7465 case I40E_SWITCH_ELEMENT_TYPE_MAC: 7466 pf->mac_seid = seid; 7467 break; 7468 case I40E_SWITCH_ELEMENT_TYPE_VEB: 7469 /* Main VEB? 
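		 * (i.e., a VEB whose uplink is the MAC itself)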
*/ 7470 if (uplink_seid != pf->mac_seid) 7471 break; 7472 if (pf->lan_veb == I40E_NO_VEB) { 7473 int v; 7474 7475 /* find existing or else empty VEB */ 7476 for (v = 0; v < I40E_MAX_VEB; v++) { 7477 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 7478 pf->lan_veb = v; 7479 break; 7480 } 7481 } 7482 if (pf->lan_veb == I40E_NO_VEB) { 7483 v = i40e_veb_mem_alloc(pf); 7484 if (v < 0) 7485 break; 7486 pf->lan_veb = v; 7487 } 7488 } 7489 7490 pf->veb[pf->lan_veb]->seid = seid; 7491 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 7492 pf->veb[pf->lan_veb]->pf = pf; 7493 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 7494 break; 7495 case I40E_SWITCH_ELEMENT_TYPE_VSI: 7496 if (num_reported != 1) 7497 break; 7498 /* This is immediately after a reset so we can assume this is 7499 * the PF's VSI 7500 */ 7501 pf->mac_seid = uplink_seid; 7502 pf->pf_seid = downlink_seid; 7503 pf->main_vsi_seid = seid; 7504 if (printconfig) 7505 dev_info(&pf->pdev->dev, 7506 "pf_seid=%d main_vsi_seid=%d\n", 7507 pf->pf_seid, pf->main_vsi_seid); 7508 break; 7509 case I40E_SWITCH_ELEMENT_TYPE_PF: 7510 case I40E_SWITCH_ELEMENT_TYPE_VF: 7511 case I40E_SWITCH_ELEMENT_TYPE_EMP: 7512 case I40E_SWITCH_ELEMENT_TYPE_BMC: 7513 case I40E_SWITCH_ELEMENT_TYPE_PE: 7514 case I40E_SWITCH_ELEMENT_TYPE_PA: 7515 /* ignore these for now */ 7516 break; 7517 default: 7518 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 7519 element_type, seid); 7520 break; 7521 } 7522 } 7523 7524 /** 7525 * i40e_fetch_switch_configuration - Get switch config from firmware 7526 * @pf: board private structure 7527 * @printconfig: should we print the contents 7528 * 7529 * Get the current switch configuration from the device and 7530 * extract a few useful SEID values. 7531 **/ 7532 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) 7533 { 7534 struct i40e_aqc_get_switch_config_resp *sw_config; 7535 u16 next_seid = 0; 7536 int ret = 0; 7537 u8 *aq_buf; 7538 int i; 7539 7540 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); 7541 if (!aq_buf) 7542 return -ENOMEM; 7543 7544 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 7545 do { 7546 u16 num_reported, num_total; 7547 7548 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, 7549 I40E_AQ_LARGE_BUF, 7550 &next_seid, NULL); 7551 if (ret) { 7552 dev_info(&pf->pdev->dev, 7553 "get switch config failed %d aq_err=%x\n", 7554 ret, pf->hw.aq.asq_last_status); 7555 kfree(aq_buf); 7556 return -ENOENT; 7557 } 7558 7559 num_reported = le16_to_cpu(sw_config->header.num_reported); 7560 num_total = le16_to_cpu(sw_config->header.num_total); 7561 7562 if (printconfig) 7563 dev_info(&pf->pdev->dev, 7564 "header: %d reported %d total\n", 7565 num_reported, num_total); 7566 7567 if (num_reported) { 7568 int sz = sizeof(*sw_config) * num_reported; 7569 7570 kfree(pf->sw_config); 7571 pf->sw_config = kzalloc(sz, GFP_KERNEL); 7572 if (pf->sw_config) 7573 memcpy(pf->sw_config, sw_config, sz); 7574 } 7575 7576 for (i = 0; i < num_reported; i++) { 7577 struct i40e_aqc_switch_config_element_resp *ele = 7578 &sw_config->element[i]; 7579 7580 i40e_setup_pf_switch_element(pf, ele, num_reported, 7581 printconfig); 7582 } 7583 } while (next_seid != 0); 7584 7585 kfree(aq_buf); 7586 return ret; 7587 } 7588 7589 /** 7590 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset 7591 * @pf: board private structure 7592 * @reinit: if the Main VSI needs to re-initialized. 
7593 * 7594 * Returns 0 on success, negative value on failure 7595 **/ 7596 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) 7597 { 7598 u32 rxfc = 0, txfc = 0, rxfc_reg; 7599 int ret; 7600 7601 /* find out what's out there already */ 7602 ret = i40e_fetch_switch_configuration(pf, false); 7603 if (ret) { 7604 dev_info(&pf->pdev->dev, 7605 "couldn't fetch switch config, err %d, aq_err %d\n", 7606 ret, pf->hw.aq.asq_last_status); 7607 return ret; 7608 } 7609 i40e_pf_reset_stats(pf); 7610 7611 /* first time setup */ 7612 if (pf->lan_vsi == I40E_NO_VSI || reinit) { 7613 struct i40e_vsi *vsi = NULL; 7614 u16 uplink_seid; 7615 7616 /* Set up the PF VSI associated with the PF's main VSI 7617 * that is already in the HW switch 7618 */ 7619 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 7620 uplink_seid = pf->veb[pf->lan_veb]->seid; 7621 else 7622 uplink_seid = pf->mac_seid; 7623 if (pf->lan_vsi == I40E_NO_VSI) 7624 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); 7625 else if (reinit) 7626 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); 7627 if (!vsi) { 7628 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); 7629 i40e_fdir_teardown(pf); 7630 return -EAGAIN; 7631 } 7632 } else { 7633 /* force a reset of TC and queue layout configurations */ 7634 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 7635 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 7636 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 7637 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 7638 } 7639 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); 7640 7641 i40e_fdir_sb_setup(pf); 7642 7643 /* Setup static PF queue filter control settings */ 7644 ret = i40e_setup_pf_filter_control(pf); 7645 if (ret) { 7646 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", 7647 ret); 7648 /* Failure here should not stop continuing other steps */ 7649 } 7650 7651 /* enable RSS in the HW, even for only one queue, as the stack can use 7652 * the hash 7653 */ 7654 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) 7655 i40e_config_rss(pf); 7656 7657 /* fill in link information and enable LSE reporting */ 7658 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); 7659 i40e_link_event(pf); 7660 7661 /* Initialize user-specific link properties */ 7662 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & 7663 I40E_AQ_AN_COMPLETED) ? 
true : false); 7664 /* requested_mode is set in probe or by ethtool */ 7665 if (!pf->fc_autoneg_status) 7666 goto no_autoneg; 7667 7668 if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) && 7669 (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)) 7670 pf->hw.fc.current_mode = I40E_FC_FULL; 7671 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) 7672 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE; 7673 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) 7674 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE; 7675 else 7676 pf->hw.fc.current_mode = I40E_FC_NONE; 7677 7678 /* sync the flow control settings with the auto-neg values */ 7679 switch (pf->hw.fc.current_mode) { 7680 case I40E_FC_FULL: 7681 txfc = 1; 7682 rxfc = 1; 7683 break; 7684 case I40E_FC_TX_PAUSE: 7685 txfc = 1; 7686 rxfc = 0; 7687 break; 7688 case I40E_FC_RX_PAUSE: 7689 txfc = 0; 7690 rxfc = 1; 7691 break; 7692 case I40E_FC_NONE: 7693 case I40E_FC_DEFAULT: 7694 txfc = 0; 7695 rxfc = 0; 7696 break; 7697 case I40E_FC_PFC: 7698 /* TBD */ 7699 break; 7700 /* no default case, we have to handle all possibilities here */ 7701 } 7702 7703 wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT); 7704 7705 rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) & 7706 ~I40E_PRTDCB_MFLCN_RFCE_MASK; 7707 rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT); 7708 7709 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg); 7710 7711 goto fc_complete; 7712 7713 no_autoneg: 7714 /* disable L2 flow control, user can turn it on if they wish */ 7715 wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0); 7716 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) & 7717 ~I40E_PRTDCB_MFLCN_RFCE_MASK); 7718 7719 fc_complete: 7720 i40e_ptp_init(pf); 7721 7722 return ret; 7723 } 7724 7725 /** 7726 * i40e_determine_queue_usage - Work out queue distribution 7727 * @pf: board private structure 7728 **/ 7729 static void i40e_determine_queue_usage(struct i40e_pf *pf) 7730 { 7731 int queues_left; 7732 7733 pf->num_lan_qps = 0; 7734 7735 /* Find the max queues to be put into basic use. We'll always be 7736 * using TC0, whether or not DCB is running, and TC0 will get the 7737 * big RSS set. 7738 */ 7739 queues_left = pf->hw.func_caps.num_tx_qp; 7740 7741 if ((queues_left == 1) || 7742 !(pf->flags & I40E_FLAG_MSIX_ENABLED) || 7743 !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED | 7744 I40E_FLAG_DCB_ENABLED))) { 7745 /* one qp for PF, no queues for anything else */ 7746 queues_left = 0; 7747 pf->rss_size = pf->num_lan_qps = 1; 7748 7749 /* make sure all the fancies are disabled */ 7750 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 7751 I40E_FLAG_FD_SB_ENABLED | 7752 I40E_FLAG_FD_ATR_ENABLED | 7753 I40E_FLAG_DCB_ENABLED | 7754 I40E_FLAG_SRIOV_ENABLED | 7755 I40E_FLAG_VMDQ_ENABLED); 7756 } else { 7757 /* Not enough queues for all TCs */ 7758 if ((pf->flags & I40E_FLAG_DCB_ENABLED) && 7759 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 7760 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 7761 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 7762 } 7763 pf->num_lan_qps = pf->rss_size_max; 7764 queues_left -= pf->num_lan_qps; 7765 } 7766 7767 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7768 if (queues_left > 1) { 7769 queues_left -= 1; /* save 1 queue for FD */ 7770 } else { 7771 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7772 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a pf's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow Director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
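
/* The flags test above uses the usual bitmask idiom: OR the feature bits
 * of interest together, and a single AND answers "is any of these set?".
 * A standalone illustration with example bit values (not the driver's):
 */
#define SK_FLAG_FD_SB	(1u << 0)
#define SK_FLAG_FD_ATR	(1u << 1)

static bool fdir_wanted_sketch(unsigned int flags)
{
	/* true when sideband or ATR Flow Director (or both) is enabled */
	return (flags & (SK_FLAG_FD_SB | SK_FLAG_FD_ATR)) != 0;
}
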
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a pf identified by a pci_dev structure.
 * The OS initialization, configuring of the pf private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_driver_version dv;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 link_status;
	int err = 0;
	u32 len;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* coherent mask for the same size will always succeed if
		 * dma_set_mask does
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else {
		err = -EIO;
		dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
		goto err_dma;
	}

	/* set up pci connections */
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, &pf->state);

	hw = &pf->hw;
	hw->back = pf;
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 (unsigned int)pci_resource_len(pdev, 0), err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	pf->instance = pfs_found;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
	snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
		 "%s-pf%d:misc",
		 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
	if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
		>> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
		dev_info(&pdev->dev,
			 "warning: NVM version not supported, supported version: %02x.%02x\n",
			 I40E_CURRENT_NVM_VERSION_HI,
			 I40E_CURRENT_NVM_VERSION_LO);
	}
	if (err) {
		dev_info(&pdev->dev,
			 "init_adminq failed: %d expecting API %02x.%02x\n",
			 err,
			 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
		goto err_pf_reset;
	}

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}
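
	/* A note on ordering (added commentary): everything up to this
	 * point has talked to the device only through BAR0 registers and
	 * the Admin Queue. The LAN HMC (Host Memory Cache) calls above
	 * hand the hardware its host-memory backing for Tx/Rx queue
	 * contexts, so they must complete before any VSIs or queues are
	 * set up below.
	 */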
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		goto err_init_dcb;
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
	pf->link_check_timeout = jiffies;

	/* WoL defaults to disabled */
	pf->wol_en = false;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	i40e_init_interrupt_scheme(pf);

	/* Set up the *vsi struct based on the number of VSIs in the HW,
	 * and set up our local tracking of the MAIN PF vsi.
	 */
	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
	pf->vsi = kzalloc(len, GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}

	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, &pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		u32 val;

		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);
	}

	pfs_found++;

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* Get the negotiated link width and speed from PCI config space */
	pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);

	i40e_set_pci_config_data(hw, link_status);

	dev_info(&pdev->dev, "PCI Express: %s %s\n",
		 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
		  hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
		  hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
		  "Unknown"),
		 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
		  hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
		  hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
		  hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
		  "Unknown"));

	if (hw->bus.width < i40e_bus_width_pcie_x8 ||
	    hw->bus.speed < i40e_bus_speed_8000) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
		dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
	}

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
#ifdef CONFIG_I40E_DCB
err_init_dcb:
#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
err_sw_init:
err_adminq_setup:
	(void)i40e_shutdown_adminq(hw);
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
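
/* The error path above is the standard kernel "goto unwind" ladder: each
 * failure jumps to the label that releases everything acquired so far,
 * and the labels run in reverse order of acquisition. A minimal
 * standalone sketch of the same shape (stub resources, not the driver's):
 */
static int sk_acquire_a(void) { return 0; }
static int sk_acquire_b(void) { return 0; }
static void sk_release_a(void) { }

static int sk_setup(void)
{
	int err;

	err = sk_acquire_a();
	if (err)
		goto err_a;		/* nothing to undo yet */
	err = sk_acquire_b();
	if (err)
		goto err_b;		/* undo A, then fall out */
	return 0;

err_b:
	sk_release_a();
err_a:
	return err;
}
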
"Speed 8.0GT/s" : 8097 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 8098 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : 8099 "Unknown"), 8100 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : 8101 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : 8102 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : 8103 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" : 8104 "Unknown")); 8105 8106 if (hw->bus.width < i40e_bus_width_pcie_x8 || 8107 hw->bus.speed < i40e_bus_speed_8000) { 8108 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 8109 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 8110 } 8111 8112 return 0; 8113 8114 /* Unwind what we've done if something failed in the setup */ 8115 err_vsis: 8116 set_bit(__I40E_DOWN, &pf->state); 8117 i40e_clear_interrupt_scheme(pf); 8118 kfree(pf->vsi); 8119 err_switch_setup: 8120 i40e_reset_interrupt_capability(pf); 8121 del_timer_sync(&pf->service_timer); 8122 #ifdef CONFIG_I40E_DCB 8123 err_init_dcb: 8124 #endif /* CONFIG_I40E_DCB */ 8125 err_mac_addr: 8126 err_configure_lan_hmc: 8127 (void)i40e_shutdown_lan_hmc(hw); 8128 err_init_lan_hmc: 8129 kfree(pf->qp_pile); 8130 kfree(pf->irq_pile); 8131 err_sw_init: 8132 err_adminq_setup: 8133 (void)i40e_shutdown_adminq(hw); 8134 err_pf_reset: 8135 iounmap(hw->hw_addr); 8136 err_ioremap: 8137 kfree(pf); 8138 err_pf_alloc: 8139 pci_disable_pcie_error_reporting(pdev); 8140 pci_release_selected_regions(pdev, 8141 pci_select_bars(pdev, IORESOURCE_MEM)); 8142 err_pci_reg: 8143 err_dma: 8144 pci_disable_device(pdev); 8145 return err; 8146 } 8147 8148 /** 8149 * i40e_remove - Device removal routine 8150 * @pdev: PCI device information struct 8151 * 8152 * i40e_remove is called by the PCI subsystem to alert the driver 8153 * that is should release a PCI device. This could be caused by a 8154 * Hot-Plug event, or because the driver is going to be removed from 8155 * memory. 8156 **/ 8157 static void i40e_remove(struct pci_dev *pdev) 8158 { 8159 struct i40e_pf *pf = pci_get_drvdata(pdev); 8160 i40e_status ret_code; 8161 u32 reg; 8162 int i; 8163 8164 i40e_dbg_pf_exit(pf); 8165 8166 i40e_ptp_stop(pf); 8167 8168 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 8169 i40e_free_vfs(pf); 8170 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; 8171 } 8172 8173 /* no more scheduling of any task */ 8174 set_bit(__I40E_DOWN, &pf->state); 8175 del_timer_sync(&pf->service_timer); 8176 cancel_work_sync(&pf->service_task); 8177 8178 i40e_fdir_teardown(pf); 8179 8180 /* If there is a switch structure or any orphans, remove them. 8181 * This will leave only the PF's VSI remaining. 8182 */ 8183 for (i = 0; i < I40E_MAX_VEB; i++) { 8184 if (!pf->veb[i]) 8185 continue; 8186 8187 if (pf->veb[i]->uplink_seid == pf->mac_seid || 8188 pf->veb[i]->uplink_seid == 0) 8189 i40e_switch_branch_release(pf->veb[i]); 8190 } 8191 8192 /* Now we can shutdown the PF's VSI, just before we kill 8193 * adminq and hmc. 
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the channel state indicating the kind of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
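
/* A standalone sketch of the AER recovery handshake these callbacks
 * implement: the PCI core reports the error, the driver quiesces and
 * asks for a slot reset, and after the reset it reports whether the
 * device looks sane again (the readable-register check performed in
 * slot_reset below). Types here are local to the example, not the
 * kernel's.
 */
enum sk_ers_result { SK_ERS_RECOVERED, SK_ERS_DISCONNECT };

static enum sk_ers_result sk_after_slot_reset(bool dev_enabled,
					      u32 reset_pending_reg)
{
	/* device re-enabled and no reset still latched -> recovered */
	if (dev_enabled && reset_pending_reg == 0)
		return SK_ERS_RECOVERED;
	return SK_ERS_DISCONNECT;
}
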
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_info(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: the PM state being requested
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
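
/* Shutdown and suspend above arm wake-up identically: the wake filter
 * registers are written only when WoL is enabled, and 0 otherwise. A
 * standalone sketch of that decision (illustrative mask value, not the
 * real register definition):
 */
static u32 sk_wake_filter(bool wol_enabled)
{
	const u32 magic_pkt_mask = 1u << 1;	/* stand-in for WUFC MAG */

	/* 0 disarms wake-up entirely; otherwise arm magic-packet wake */
	return wol_enabled ? magic_pkt_mask : 0;
}
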
/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"%s: Cannot enable PCI device from suspend\n",
			__func__);
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);