/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 36
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
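/* Usage note (illustrative, not part of the driver): the "debug" parameter
 * can be raised at module load time for more verbose driver messages, e.g.
 *
 *	# modprobe i40e debug=16
 *
 * With the default of -1 the driver keeps its built-in default verbosity.
 */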
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
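/* Sketch of how the OS-independent shared code consumes the helpers above,
 * assuming the usual i40e_osdep.h-style wrappers; names and sizes here are
 * illustrative and error handling is trimmed:
 *
 *	struct i40e_dma_mem ring_mem;
 *
 *	if (i40e_allocate_dma_mem_d(hw, &ring_mem, ring_size, 4096))
 *		return I40E_ERR_NO_MEMORY;
 *	... hardware is handed ring_mem.pa, the driver writes via ring_mem.va ...
 *	i40e_free_dma_mem_d(hw, &ring_mem);
 */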
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
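/* Worked example (illustrative): with num_entries = 8 and a pile laid out
 * as [used used free free free used free free], the call
 *
 *	base = i40e_get_lump(pf, pile, 3, id);
 *
 * skips entries 0-1, finds three free entries starting at index 2, marks
 * entries 2-4 with (id | I40E_PILE_VALID_BIT), sets search_hint to 5 and
 * returns base = 2.  A later i40e_put_lump(pile, 2, id) clears exactly
 * those three entries (it stops at entry 5, which carries a different id)
 * and rewinds search_hint back to 2.
 */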
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN, &vsi->state);
		i40e_down(vsi);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
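/* Escalation example (timing per the code above): the first timeout runs
 * the level-0 VSI reinit; each further timeout reported within 20 seconds
 * (HZ * 20) of the last recovery escalates to a PF reset, then a core
 * reset, then a global reset.  A timeout arriving outside that window
 * starts over at level 0.
 */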
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		/* the Rx ring sits right behind its paired Tx ring in
		 * the same per-queue-pair allocation
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
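/* Rollover example for the 48-bit math above (values illustrative): with
 * *offset = 0xFFFFFFFFF000 and a wrapped reading of new_data = 0x100,
 * new_data < *offset, so
 *
 *	*stat = (0x100 + ((u64)1 << 48)) - 0xFFFFFFFFF000 = 0x1100
 *
 * i.e. the counter advanced by 0x1100 across the wrap; the final mask
 * keeps the result within 48 bits.
 */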
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}
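/* Rationale for disarming the hang check here and in the PFC variant below:
 * a received XOFF legitimately stalls transmit queues, so a ring that may
 * have been paused must not be flagged as hung.  Clearing
 * __I40E_HANG_CHECK_ARMED makes the Tx hang detector start over.
 */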
/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u32 val;
	int i;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);

		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors + nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);

		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);

		val = rd32(hw, I40E_PRTPM_EEE_STAT);
		nsd->tx_lpi_status =
			       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
				I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
		nsd->rx_lpi_status =
			       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
				I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
		i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
				   pf->stat_offsets_loaded,
				   &osd->tx_lpi_count, &nsd->tx_lpi_count);
		i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
				   pf->stat_offsets_loaded,
				   &osd->rx_lpi_count, &nsd->rx_lpi_count);
	}

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		memcpy(f->macaddr, macaddr, ETH_ALEN);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
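/* Reference-counting example for the add/del pair above (illustrative):
 * if both the netdev path and a VF add the same MAC/VLAN, f->counter is 2
 * with f->is_vf and f->is_netdev both set.  An i40e_del_filter() from just
 * one owner clears that owner's flag and drops one count; only when the
 * counter reaches zero is the filter marked for removal from firmware.
 */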
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the power-of-2 of the number of queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
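/* qmap worked example (illustrative): a TC starting at queue offset 8 with
 * qcount = 4 yields pow = 2 (1 << 2 == 4), so the mapping word is
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the hardware sees "4 queues starting at queue 8" for that TC.
 */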
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
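/* Hand-off note: the two flags set above are consumed asynchronously.
 * I40E_VSI_FLAG_FILTER_CHANGED makes i40e_sync_filters_subtask() pick this
 * VSI up, while I40E_FLAG_FILTER_SYNC tells the PF service task that any
 * filter work is pending at all.
 */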
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list) {
			/* don't leave the CONFIG_BUSY bit set on error */
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			memcpy(del_list[num_del].mac_addr,
			       f->macaddr, ETH_ALEN);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				/* clear the whole buffer before reuse,
				 * not just the first element
				 */
				memset(del_list, 0, filter_list_len *
				       sizeof(*del_list));

				if (aq_ret)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
						    del_list, num_del, NULL);
			num_del = 0;

			if (aq_ret)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			memcpy(add_list[num_add].mac_addr,
			       f->macaddr, ETH_ALEN);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, filter_list_len *
				       sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}
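/* Frame-size example for the check above (illustrative): new_mtu = 1500
 * gives max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes;
 * a jumbo MTU of 9000 gives 9018, which must still fit I40E_MAX_RXBUFFER.
 */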
/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}
a "tag" -1 does exist) and if so replace the -1 "tag" 1847 * with 0, so we now accept untagged and specified tagged traffic 1848 * (and not any taged and untagged) 1849 */ 1850 if (vid > 0) { 1851 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, 1852 I40E_VLAN_ANY, 1853 is_vf, is_netdev)) { 1854 i40e_del_filter(vsi, vsi->netdev->dev_addr, 1855 I40E_VLAN_ANY, is_vf, is_netdev); 1856 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0, 1857 is_vf, is_netdev); 1858 if (!add_f) { 1859 dev_info(&vsi->back->pdev->dev, 1860 "Could not add filter 0 for %pM\n", 1861 vsi->netdev->dev_addr); 1862 return -ENOMEM; 1863 } 1864 } 1865 } 1866 1867 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ 1868 if (vid > 0 && !vsi->info.pvid) { 1869 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1870 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, 1871 is_vf, is_netdev)) { 1872 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, 1873 is_vf, is_netdev); 1874 add_f = i40e_add_filter(vsi, f->macaddr, 1875 0, is_vf, is_netdev); 1876 if (!add_f) { 1877 dev_info(&vsi->back->pdev->dev, 1878 "Could not add filter 0 for %pM\n", 1879 f->macaddr); 1880 return -ENOMEM; 1881 } 1882 } 1883 } 1884 } 1885 1886 if (test_bit(__I40E_DOWN, &vsi->back->state) || 1887 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 1888 return 0; 1889 1890 return i40e_sync_vsi_filters(vsi); 1891 } 1892 1893 /** 1894 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan 1895 * @vsi: the vsi being configured 1896 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 1897 * 1898 * Return: 0 on success or negative otherwise 1899 **/ 1900 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 1901 { 1902 struct net_device *netdev = vsi->netdev; 1903 struct i40e_mac_filter *f, *add_f; 1904 bool is_vf, is_netdev; 1905 int filter_count = 0; 1906 1907 is_vf = (vsi->type == I40E_VSI_SRIOV); 1908 is_netdev = !!(netdev); 1909 1910 if (is_netdev) 1911 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); 1912 1913 list_for_each_entry(f, &vsi->mac_filter_list, list) 1914 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); 1915 1916 /* go through all the filters for this VSI and if there is only 1917 * vid == 0 it means there are no other filters, so vid 0 must 1918 * be replaced with -1. 
This signifies that we should from now 1919 * on accept any traffic (with any tag present, or untagged) 1920 */ 1921 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1922 if (is_netdev) { 1923 if (f->vlan && 1924 ether_addr_equal(netdev->dev_addr, f->macaddr)) 1925 filter_count++; 1926 } 1927 1928 if (f->vlan) 1929 filter_count++; 1930 } 1931 1932 if (!filter_count && is_netdev) { 1933 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); 1934 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 1935 is_vf, is_netdev); 1936 if (!f) { 1937 dev_info(&vsi->back->pdev->dev, 1938 "Could not add filter %d for %pM\n", 1939 I40E_VLAN_ANY, netdev->dev_addr); 1940 return -ENOMEM; 1941 } 1942 } 1943 1944 if (!filter_count) { 1945 list_for_each_entry(f, &vsi->mac_filter_list, list) { 1946 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); 1947 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, 1948 is_vf, is_netdev); 1949 if (!add_f) { 1950 dev_info(&vsi->back->pdev->dev, 1951 "Could not add filter %d for %pM\n", 1952 I40E_VLAN_ANY, f->macaddr); 1953 return -ENOMEM; 1954 } 1955 } 1956 } 1957 1958 if (test_bit(__I40E_DOWN, &vsi->back->state) || 1959 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 1960 return 0; 1961 1962 return i40e_sync_vsi_filters(vsi); 1963 } 1964 1965 /** 1966 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1967 * @netdev: network interface to be adjusted 1968 * @vid: vlan id to be added 1969 * 1970 * net_device_ops implementation for adding vlan ids 1971 **/ 1972 static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1973 __always_unused __be16 proto, u16 vid) 1974 { 1975 struct i40e_netdev_priv *np = netdev_priv(netdev); 1976 struct i40e_vsi *vsi = np->vsi; 1977 int ret = 0; 1978 1979 if (vid > 4095) 1980 return -EINVAL; 1981 1982 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 1983 1984 /* If the network stack called us with vid = 0 then 1985 * it is asking to receive priority tagged packets with 1986 * vlan id 0. Our HW receives them by default when configured 1987 * to receive untagged packets so there is no need to add an 1988 * extra filter for vlan 0 tagged packets. 
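	 *
	 * (For reference: 802.1p priority-tagged frames carry VLAN ID 0,
	 * so a VSI set up to receive untagged packets already receives
	 * them without any extra filter entry.)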
	 */
	if (vid)
		ret = i40e_vsi_add_vlan(vsi, vid);

	if (!ret && (vid < VLAN_N_VID))
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}

/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status aq_ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just restore the VLAN stripping configuration to its non-PVID default
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			i40e_free_tx_resources(vsi->tx_rings[i]);
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context and its registers
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
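 *
 * Note (illustrative numbers): the head-writeback area is placed
 * immediately after the descriptor ring, so e.g. a 512-entry ring of
 * 16-byte descriptors puts head_wb_addr at ring->dma + 512 * 16.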
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* initialize XPS */
	if (ring->q_vector && ring->netdev &&
	    vsi->tc_config.numtc <= 1 &&
	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2)
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
	else
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);

	/* cache the tail register offset for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
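 *
 * Note: the buffer sizes written into the context are expressed in the
 * chip's granules (the I40E_RXQ_CTX_*BUFF_SHIFT units), which is why
 * i40e_vsi_configure_rx() aligns rx_buf_len and rx_hdr_len beforehand.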
2260 **/ 2261 static int i40e_configure_rx_ring(struct i40e_ring *ring) 2262 { 2263 struct i40e_vsi *vsi = ring->vsi; 2264 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; 2265 u16 pf_q = vsi->base_queue + ring->queue_index; 2266 struct i40e_hw *hw = &vsi->back->hw; 2267 struct i40e_hmc_obj_rxq rx_ctx; 2268 i40e_status err = 0; 2269 2270 ring->state = 0; 2271 2272 /* clear the context structure first */ 2273 memset(&rx_ctx, 0, sizeof(rx_ctx)); 2274 2275 ring->rx_buf_len = vsi->rx_buf_len; 2276 ring->rx_hdr_len = vsi->rx_hdr_len; 2277 2278 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; 2279 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; 2280 2281 rx_ctx.base = (ring->dma / 128); 2282 rx_ctx.qlen = ring->count; 2283 2284 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { 2285 set_ring_16byte_desc_enabled(ring); 2286 rx_ctx.dsize = 0; 2287 } else { 2288 rx_ctx.dsize = 1; 2289 } 2290 2291 rx_ctx.dtype = vsi->dtype; 2292 if (vsi->dtype) { 2293 set_ring_ps_enabled(ring); 2294 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | 2295 I40E_RX_SPLIT_IP | 2296 I40E_RX_SPLIT_TCP_UDP | 2297 I40E_RX_SPLIT_SCTP; 2298 } else { 2299 rx_ctx.hsplit_0 = 0; 2300 } 2301 2302 rx_ctx.rxmax = min_t(u16, vsi->max_frame, 2303 (chain_len * ring->rx_buf_len)); 2304 rx_ctx.tphrdesc_ena = 1; 2305 rx_ctx.tphwdesc_ena = 1; 2306 rx_ctx.tphdata_ena = 1; 2307 rx_ctx.tphhead_ena = 1; 2308 if (hw->revision_id == 0) 2309 rx_ctx.lrxqthresh = 0; 2310 else 2311 rx_ctx.lrxqthresh = 2; 2312 rx_ctx.crcstrip = 1; 2313 rx_ctx.l2tsel = 1; 2314 rx_ctx.showiv = 1; 2315 2316 /* clear the context in the HMC */ 2317 err = i40e_clear_lan_rx_queue_context(hw, pf_q); 2318 if (err) { 2319 dev_info(&vsi->back->pdev->dev, 2320 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2321 ring->queue_index, pf_q, err); 2322 return -ENOMEM; 2323 } 2324 2325 /* set the context in the HMC */ 2326 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); 2327 if (err) { 2328 dev_info(&vsi->back->pdev->dev, 2329 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", 2330 ring->queue_index, pf_q, err); 2331 return -ENOMEM; 2332 } 2333 2334 /* cache tail for quicker writes, and clear the reg before use */ 2335 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); 2336 writel(0, ring->tail); 2337 2338 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); 2339 2340 return 0; 2341 } 2342 2343 /** 2344 * i40e_vsi_configure_tx - Configure the VSI for Tx 2345 * @vsi: VSI structure describing this set of rings and resources 2346 * 2347 * Configure the Tx VSI for operation. 2348 **/ 2349 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) 2350 { 2351 int err = 0; 2352 u16 i; 2353 2354 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) 2355 err = i40e_configure_tx_ring(vsi->tx_rings[i]); 2356 2357 return err; 2358 } 2359 2360 /** 2361 * i40e_vsi_configure_rx - Configure the VSI for Rx 2362 * @vsi: the VSI being configured 2363 * 2364 * Configure the Rx VSI for operation. 
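 *
 * Worked example (assuming a 9000 byte MTU): max_frame = 9000 +
 * ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 9022 bytes, which the
 * single-buffer (1BUF) path also uses as rx_buf_len before rounding
 * it up to the chip's buffer granularity.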
2365 **/ 2366 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) 2367 { 2368 int err = 0; 2369 u16 i; 2370 2371 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) 2372 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN 2373 + ETH_FCS_LEN + VLAN_HLEN; 2374 else 2375 vsi->max_frame = I40E_RXBUFFER_2048; 2376 2377 /* figure out correct receive buffer length */ 2378 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | 2379 I40E_FLAG_RX_PS_ENABLED)) { 2380 case I40E_FLAG_RX_1BUF_ENABLED: 2381 vsi->rx_hdr_len = 0; 2382 vsi->rx_buf_len = vsi->max_frame; 2383 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; 2384 break; 2385 case I40E_FLAG_RX_PS_ENABLED: 2386 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2387 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2388 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; 2389 break; 2390 default: 2391 vsi->rx_hdr_len = I40E_RX_HDR_SIZE; 2392 vsi->rx_buf_len = I40E_RXBUFFER_2048; 2393 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; 2394 break; 2395 } 2396 2397 /* round up for the chip's needs */ 2398 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, 2399 (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); 2400 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, 2401 (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); 2402 2403 /* set up individual rings */ 2404 for (i = 0; i < vsi->num_queue_pairs && !err; i++) 2405 err = i40e_configure_rx_ring(vsi->rx_rings[i]); 2406 2407 return err; 2408 } 2409 2410 /** 2411 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC 2412 * @vsi: ptr to the VSI 2413 **/ 2414 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 2415 { 2416 u16 qoffset, qcount; 2417 int i, n; 2418 2419 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2420 return; 2421 2422 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2423 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2424 continue; 2425 2426 qoffset = vsi->tc_config.tc_info[n].qoffset; 2427 qcount = vsi->tc_config.tc_info[n].qcount; 2428 for (i = qoffset; i < (qoffset + qcount); i++) { 2429 struct i40e_ring *rx_ring = vsi->rx_rings[i]; 2430 struct i40e_ring *tx_ring = vsi->tx_rings[i]; 2431 rx_ring->dcb_tc = n; 2432 tx_ring->dcb_tc = n; 2433 } 2434 } 2435 } 2436 2437 /** 2438 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI 2439 * @vsi: ptr to the VSI 2440 **/ 2441 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) 2442 { 2443 if (vsi->netdev) 2444 i40e_set_rx_mode(vsi->netdev); 2445 } 2446 2447 /** 2448 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters 2449 * @vsi: Pointer to the targeted VSI 2450 * 2451 * This function replays the hlist on the hw where all the SB Flow Director 2452 * filters were saved. 
2453 **/ 2454 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) 2455 { 2456 struct i40e_fdir_filter *filter; 2457 struct i40e_pf *pf = vsi->back; 2458 struct hlist_node *node; 2459 2460 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 2461 return; 2462 2463 hlist_for_each_entry_safe(filter, node, 2464 &pf->fdir_filter_list, fdir_node) { 2465 i40e_add_del_fdir(vsi, filter, true); 2466 } 2467 } 2468 2469 /** 2470 * i40e_vsi_configure - Set up the VSI for action 2471 * @vsi: the VSI being configured 2472 **/ 2473 static int i40e_vsi_configure(struct i40e_vsi *vsi) 2474 { 2475 int err; 2476 2477 i40e_set_vsi_rx_mode(vsi); 2478 i40e_restore_vlan(vsi); 2479 i40e_vsi_config_dcb_rings(vsi); 2480 err = i40e_vsi_configure_tx(vsi); 2481 if (!err) 2482 err = i40e_vsi_configure_rx(vsi); 2483 2484 return err; 2485 } 2486 2487 /** 2488 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW 2489 * @vsi: the VSI being configured 2490 **/ 2491 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) 2492 { 2493 struct i40e_pf *pf = vsi->back; 2494 struct i40e_q_vector *q_vector; 2495 struct i40e_hw *hw = &pf->hw; 2496 u16 vector; 2497 int i, q; 2498 u32 val; 2499 u32 qp; 2500 2501 /* The interrupt indexing is offset by 1 in the PFINT_ITRn 2502 * and PFINT_LNKLSTn registers, e.g.: 2503 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) 2504 */ 2505 qp = vsi->base_queue; 2506 vector = vsi->base_vector; 2507 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { 2508 q_vector = vsi->q_vectors[i]; 2509 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2510 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2511 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), 2512 q_vector->rx.itr); 2513 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2514 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2515 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), 2516 q_vector->tx.itr); 2517 2518 /* Linked list for the queuepairs assigned to this vector */ 2519 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); 2520 for (q = 0; q < q_vector->num_ringpairs; q++) { 2521 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2522 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2523 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 2524 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| 2525 (I40E_QUEUE_TYPE_TX 2526 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); 2527 2528 wr32(hw, I40E_QINT_RQCTL(qp), val); 2529 2530 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2531 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2532 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 2533 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| 2534 (I40E_QUEUE_TYPE_RX 2535 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2536 2537 /* Terminate the linked list */ 2538 if (q == (q_vector->num_ringpairs - 1)) 2539 val |= (I40E_QUEUE_END_OF_LIST 2540 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2541 2542 wr32(hw, I40E_QINT_TQCTL(qp), val); 2543 qp++; 2544 } 2545 } 2546 2547 i40e_flush(hw); 2548 } 2549 2550 /** 2551 * i40e_enable_misc_int_causes - enable the non-queue interrupts 2552 * @hw: ptr to the hardware info 2553 **/ 2554 static void i40e_enable_misc_int_causes(struct i40e_hw *hw) 2555 { 2556 u32 val; 2557 2558 /* clear things first */ 2559 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 2560 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 2561 2562 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 2563 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 2564 I40E_PFINT_ICR0_ENA_GRST_MASK | 2565 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2566 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2567 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | 2568 
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | 2569 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2570 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2571 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2572 2573 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2574 2575 /* SW_ITR_IDX = 0, but don't change INTENA */ 2576 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 2577 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 2578 2579 /* OTHER_ITR_IDX = 0 */ 2580 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2581 } 2582 2583 /** 2584 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW 2585 * @vsi: the VSI being configured 2586 **/ 2587 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) 2588 { 2589 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; 2590 struct i40e_pf *pf = vsi->back; 2591 struct i40e_hw *hw = &pf->hw; 2592 u32 val; 2593 2594 /* set the ITR configuration */ 2595 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); 2596 q_vector->rx.latency_range = I40E_LOW_LATENCY; 2597 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); 2598 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); 2599 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2600 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2601 2602 i40e_enable_misc_int_causes(hw); 2603 2604 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2605 wr32(hw, I40E_PFINT_LNKLST0, 0); 2606 2607 /* Associate the queue pair to the vector and enable the queue int */ 2608 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2609 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2610 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2611 2612 wr32(hw, I40E_QINT_RQCTL(0), val); 2613 2614 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 2615 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 2616 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); 2617 2618 wr32(hw, I40E_QINT_TQCTL(0), val); 2619 i40e_flush(hw); 2620 } 2621 2622 /** 2623 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 2624 * @pf: board private structure 2625 **/ 2626 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) 2627 { 2628 struct i40e_hw *hw = &pf->hw; 2629 2630 wr32(hw, I40E_PFINT_DYN_CTL0, 2631 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2632 i40e_flush(hw); 2633 } 2634 2635 /** 2636 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 2637 * @pf: board private structure 2638 **/ 2639 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2640 { 2641 struct i40e_hw *hw = &pf->hw; 2642 u32 val; 2643 2644 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 2645 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2646 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 2647 2648 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2649 i40e_flush(hw); 2650 } 2651 2652 /** 2653 * i40e_irq_dynamic_enable - Enable default interrupt generation settings 2654 * @vsi: pointer to a vsi 2655 * @vector: enable a particular Hw Interrupt vector 2656 **/ 2657 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) 2658 { 2659 struct i40e_pf *pf = vsi->back; 2660 struct i40e_hw *hw = &pf->hw; 2661 u32 val; 2662 2663 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2664 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2665 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2666 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2667 /* skip the flush */ 2668 } 2669 2670 /** 2671 * i40e_msix_clean_rings - MSIX mode Interrupt Handler 2672 * @irq: interrupt number 2673 * @data: pointer to a q_vector 2674 **/ 2675 static irqreturn_t i40e_msix_clean_rings(int irq, void *data) 2676 { 2677 struct i40e_q_vector 
*q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(pf->msix_entries[base + vector].vector,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "%s: request_irq failed, error: %d\n",
				 __func__, err);
			goto free_queue_irqs;
		}
		/* assign the mask for this irq */
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      &q_vector->affinity_mask);
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      NULL);
		/* free with the same cookie that was passed to request_irq */
		free_irq(pf->msix_entries[base + vector].vector,
			 vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
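		/* MSI/Legacy mode routes all causes through ICR0,
		 * so re-arming that single register is enough here
		 */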
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}

/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
			prttsyn_stat &=
~I40E_PRTTSYN_STAT_0_TXTIME_MASK; 2903 } 2904 2905 wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat); 2906 } 2907 2908 /* If a critical error is pending we have no choice but to reset the 2909 * device. 2910 * Report and mask out any remaining unexpected interrupts. 2911 */ 2912 icr0_remaining = icr0 & ena_mask; 2913 if (icr0_remaining) { 2914 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", 2915 icr0_remaining); 2916 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 2917 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 2918 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 2919 dev_info(&pf->pdev->dev, "device will be reset\n"); 2920 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2921 i40e_service_event_schedule(pf); 2922 } 2923 ena_mask &= ~icr0_remaining; 2924 } 2925 ret = IRQ_HANDLED; 2926 2927 enable_intr: 2928 /* re-enable interrupt causes */ 2929 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 2930 if (!test_bit(__I40E_DOWN, &pf->state)) { 2931 i40e_service_event_schedule(pf); 2932 i40e_irq_dynamic_enable_icr0(pf); 2933 } 2934 2935 return ret; 2936 } 2937 2938 /** 2939 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes 2940 * @tx_ring: tx ring to clean 2941 * @budget: how many cleans we're allowed 2942 * 2943 * Returns true if there's any budget left (e.g. the clean is finished) 2944 **/ 2945 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) 2946 { 2947 struct i40e_vsi *vsi = tx_ring->vsi; 2948 u16 i = tx_ring->next_to_clean; 2949 struct i40e_tx_buffer *tx_buf; 2950 struct i40e_tx_desc *tx_desc; 2951 2952 tx_buf = &tx_ring->tx_bi[i]; 2953 tx_desc = I40E_TX_DESC(tx_ring, i); 2954 i -= tx_ring->count; 2955 2956 do { 2957 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 2958 2959 /* if next_to_watch is not set then there is no work pending */ 2960 if (!eop_desc) 2961 break; 2962 2963 /* prevent any other reads prior to eop_desc */ 2964 read_barrier_depends(); 2965 2966 /* if the descriptor isn't done, no work yet to do */ 2967 if (!(eop_desc->cmd_type_offset_bsz & 2968 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) 2969 break; 2970 2971 /* clear next_to_watch to prevent false hangs */ 2972 tx_buf->next_to_watch = NULL; 2973 2974 /* unmap skb header data */ 2975 dma_unmap_single(tx_ring->dev, 2976 dma_unmap_addr(tx_buf, dma), 2977 dma_unmap_len(tx_buf, len), 2978 DMA_TO_DEVICE); 2979 2980 dma_unmap_len_set(tx_buf, len, 0); 2981 2982 2983 /* move to the next desc and buffer to clean */ 2984 tx_buf++; 2985 tx_desc++; 2986 i++; 2987 if (unlikely(!i)) { 2988 i -= tx_ring->count; 2989 tx_buf = tx_ring->tx_bi; 2990 tx_desc = I40E_TX_DESC(tx_ring, 0); 2991 } 2992 2993 /* update budget accounting */ 2994 budget--; 2995 } while (likely(budget)); 2996 2997 i += tx_ring->count; 2998 tx_ring->next_to_clean = i; 2999 3000 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 3001 i40e_irq_dynamic_enable(vsi, 3002 tx_ring->q_vector->v_idx + vsi->base_vector); 3003 } 3004 return budget > 0; 3005 } 3006 3007 /** 3008 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring 3009 * @irq: interrupt number 3010 * @data: pointer to a q_vector 3011 **/ 3012 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) 3013 { 3014 struct i40e_q_vector *q_vector = data; 3015 struct i40e_vsi *vsi; 3016 3017 if (!q_vector->tx.ring) 3018 return IRQ_HANDLED; 3019 3020 vsi = q_vector->tx.ring->vsi; 3021 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); 3022 3023 return IRQ_HANDLED; 3024 } 3025 3026 /** 3027 * i40e_map_vector_to_qp - Assigns the 
queue pair to the vector 3028 * @vsi: the VSI being configured 3029 * @v_idx: vector index 3030 * @qp_idx: queue pair index 3031 **/ 3032 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) 3033 { 3034 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3035 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; 3036 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; 3037 3038 tx_ring->q_vector = q_vector; 3039 tx_ring->next = q_vector->tx.ring; 3040 q_vector->tx.ring = tx_ring; 3041 q_vector->tx.count++; 3042 3043 rx_ring->q_vector = q_vector; 3044 rx_ring->next = q_vector->rx.ring; 3045 q_vector->rx.ring = rx_ring; 3046 q_vector->rx.count++; 3047 } 3048 3049 /** 3050 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors 3051 * @vsi: the VSI being configured 3052 * 3053 * This function maps descriptor rings to the queue-specific vectors 3054 * we were allotted through the MSI-X enabling code. Ideally, we'd have 3055 * one vector per queue pair, but on a constrained vector budget, we 3056 * group the queue pairs as "efficiently" as possible. 3057 **/ 3058 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) 3059 { 3060 int qp_remaining = vsi->num_queue_pairs; 3061 int q_vectors = vsi->num_q_vectors; 3062 int num_ringpairs; 3063 int v_start = 0; 3064 int qp_idx = 0; 3065 3066 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to 3067 * group them so there are multiple queues per vector. 3068 */ 3069 for (; v_start < q_vectors && qp_remaining; v_start++) { 3070 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; 3071 3072 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); 3073 3074 q_vector->num_ringpairs = num_ringpairs; 3075 3076 q_vector->rx.count = 0; 3077 q_vector->tx.count = 0; 3078 q_vector->rx.ring = NULL; 3079 q_vector->tx.ring = NULL; 3080 3081 while (num_ringpairs--) { 3082 map_vector_to_qp(vsi, v_start, qp_idx); 3083 qp_idx++; 3084 qp_remaining--; 3085 } 3086 } 3087 } 3088 3089 /** 3090 * i40e_vsi_request_irq - Request IRQ from the OS 3091 * @vsi: the VSI being configured 3092 * @basename: name for the vector 3093 **/ 3094 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) 3095 { 3096 struct i40e_pf *pf = vsi->back; 3097 int err; 3098 3099 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 3100 err = i40e_vsi_request_irq_msix(vsi, basename); 3101 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 3102 err = request_irq(pf->pdev->irq, i40e_intr, 0, 3103 pf->misc_int_name, pf); 3104 else 3105 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 3106 pf->misc_int_name, pf); 3107 3108 if (err) 3109 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 3110 3111 return err; 3112 } 3113 3114 #ifdef CONFIG_NET_POLL_CONTROLLER 3115 /** 3116 * i40e_netpoll - A Polling 'interrupt'handler 3117 * @netdev: network interface device structure 3118 * 3119 * This is used by netconsole to send skbs without having to re-enable 3120 * interrupts. It's not called while the normal interrupt routine is executing. 
3121 **/ 3122 static void i40e_netpoll(struct net_device *netdev) 3123 { 3124 struct i40e_netdev_priv *np = netdev_priv(netdev); 3125 struct i40e_vsi *vsi = np->vsi; 3126 struct i40e_pf *pf = vsi->back; 3127 int i; 3128 3129 /* if interface is down do nothing */ 3130 if (test_bit(__I40E_DOWN, &vsi->state)) 3131 return; 3132 3133 pf->flags |= I40E_FLAG_IN_NETPOLL; 3134 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3135 for (i = 0; i < vsi->num_q_vectors; i++) 3136 i40e_msix_clean_rings(0, vsi->q_vectors[i]); 3137 } else { 3138 i40e_intr(pf->pdev->irq, netdev); 3139 } 3140 pf->flags &= ~I40E_FLAG_IN_NETPOLL; 3141 } 3142 #endif 3143 3144 /** 3145 * i40e_vsi_control_tx - Start or stop a VSI's rings 3146 * @vsi: the VSI being configured 3147 * @enable: start or stop the rings 3148 **/ 3149 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) 3150 { 3151 struct i40e_pf *pf = vsi->back; 3152 struct i40e_hw *hw = &pf->hw; 3153 int i, j, pf_q; 3154 u32 tx_reg; 3155 3156 pf_q = vsi->base_queue; 3157 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3158 for (j = 0; j < 50; j++) { 3159 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3160 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == 3161 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) 3162 break; 3163 usleep_range(1000, 2000); 3164 } 3165 /* Skip if the queue is already in the requested state */ 3166 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3167 continue; 3168 if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3169 continue; 3170 3171 /* turn on/off the queue */ 3172 if (enable) { 3173 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3174 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; 3175 } else { 3176 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3177 } 3178 3179 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); 3180 3181 /* wait for the change to finish */ 3182 for (j = 0; j < 10; j++) { 3183 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3184 if (enable) { 3185 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3186 break; 3187 } else { 3188 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3189 break; 3190 } 3191 3192 udelay(10); 3193 } 3194 if (j >= 10) { 3195 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n", 3196 pf_q, (enable ? "en" : "dis")); 3197 return -ETIMEDOUT; 3198 } 3199 } 3200 3201 if (hw->revision_id == 0) 3202 mdelay(50); 3203 3204 return 0; 3205 } 3206 3207 /** 3208 * i40e_vsi_control_rx - Start or stop a VSI's rings 3209 * @vsi: the VSI being configured 3210 * @enable: start or stop the rings 3211 **/ 3212 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) 3213 { 3214 struct i40e_pf *pf = vsi->back; 3215 struct i40e_hw *hw = &pf->hw; 3216 int i, j, pf_q; 3217 u32 rx_reg; 3218 3219 pf_q = vsi->base_queue; 3220 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3221 for (j = 0; j < 50; j++) { 3222 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3223 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == 3224 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) 3225 break; 3226 usleep_range(1000, 2000); 3227 } 3228 3229 if (enable) { 3230 /* is STAT set ? */ 3231 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) 3232 continue; 3233 } else { 3234 /* is !STAT set ? 
 */
			if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
				continue;
		}

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		for (j = 0; j < 10; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));

			if (enable) {
				if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
					break;
			} else {
				if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
					break;
			}

			udelay(10);
		}
		if (j >= 10) {
			dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
				 pf_q, (enable ? "en" : "dis"));
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: start (true) or stop (false) the rings
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	if (request) {
		ret = i40e_vsi_control_rx(vsi, request);
		if (ret)
			return ret;
		ret = i40e_vsi_control_tx(vsi, request);
	} else {
		/* Ignore return value, we need to shutdown whatever we can */
		i40e_vsi_control_tx(vsi, request);
		i40e_vsi_control_rx(vsi, request);
	}

	return ret;
}

/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx. To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
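			 *
			 * (Illustrative walk, with FIRSTQ_INDX = n:
			 *  RQCTL(n).nextq -> TQCTL(n).nextq -> ...
			 *  until a nextq of I40E_QUEUE_END_OF_LIST
			 *  terminates the list.)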
3330 */ 3331 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); 3332 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3333 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3334 val |= I40E_QUEUE_END_OF_LIST 3335 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3336 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); 3337 3338 while (qp != I40E_QUEUE_END_OF_LIST) { 3339 u32 next; 3340 3341 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3342 3343 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3344 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3345 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3346 I40E_QINT_RQCTL_INTEVENT_MASK); 3347 3348 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3349 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3350 3351 wr32(hw, I40E_QINT_RQCTL(qp), val); 3352 3353 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3354 3355 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) 3356 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; 3357 3358 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3359 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3360 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3361 I40E_QINT_TQCTL_INTEVENT_MASK); 3362 3363 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3364 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3365 3366 wr32(hw, I40E_QINT_TQCTL(qp), val); 3367 qp = next; 3368 } 3369 } 3370 } else { 3371 free_irq(pf->pdev->irq, pf); 3372 3373 val = rd32(hw, I40E_PFINT_LNKLST0); 3374 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) 3375 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; 3376 val |= I40E_QUEUE_END_OF_LIST 3377 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; 3378 wr32(hw, I40E_PFINT_LNKLST0, val); 3379 3380 val = rd32(hw, I40E_QINT_RQCTL(qp)); 3381 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | 3382 I40E_QINT_RQCTL_MSIX0_INDX_MASK | 3383 I40E_QINT_RQCTL_CAUSE_ENA_MASK | 3384 I40E_QINT_RQCTL_INTEVENT_MASK); 3385 3386 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | 3387 I40E_QINT_RQCTL_NEXTQ_INDX_MASK); 3388 3389 wr32(hw, I40E_QINT_RQCTL(qp), val); 3390 3391 val = rd32(hw, I40E_QINT_TQCTL(qp)); 3392 3393 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | 3394 I40E_QINT_TQCTL_MSIX0_INDX_MASK | 3395 I40E_QINT_TQCTL_CAUSE_ENA_MASK | 3396 I40E_QINT_TQCTL_INTEVENT_MASK); 3397 3398 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | 3399 I40E_QINT_TQCTL_NEXTQ_INDX_MASK); 3400 3401 wr32(hw, I40E_QINT_TQCTL(qp), val); 3402 } 3403 } 3404 3405 /** 3406 * i40e_free_q_vector - Free memory allocated for specific interrupt vector 3407 * @vsi: the VSI being configured 3408 * @v_idx: Index of vector to be freed 3409 * 3410 * This function frees the memory allocated to the q_vector. In addition if 3411 * NAPI is enabled it will delete any references to the NAPI struct prior 3412 * to freeing the q_vector. 3413 **/ 3414 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) 3415 { 3416 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; 3417 struct i40e_ring *ring; 3418 3419 if (!q_vector) 3420 return; 3421 3422 /* disassociate q_vector from rings */ 3423 i40e_for_each_ring(ring, q_vector->tx) 3424 ring->q_vector = NULL; 3425 3426 i40e_for_each_ring(ring, q_vector->rx) 3427 ring->q_vector = NULL; 3428 3429 /* only VSI w/ an associated netdev is set up w/ NAPI */ 3430 if (vsi->netdev) 3431 netif_napi_del(&q_vector->napi); 3432 3433 vsi->q_vectors[v_idx] = NULL; 3434 3435 kfree_rcu(q_vector, rcu); 3436 } 3437 3438 /** 3439 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors 3440 * @vsi: the VSI being un-configured 3441 * 3442 * This frees the memory allocated to the q_vectors and 3443 * deletes references to the NAPI struct. 
3444 **/ 3445 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) 3446 { 3447 int v_idx; 3448 3449 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) 3450 i40e_free_q_vector(vsi, v_idx); 3451 } 3452 3453 /** 3454 * i40e_reset_interrupt_capability - Disable interrupt setup in OS 3455 * @pf: board private structure 3456 **/ 3457 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) 3458 { 3459 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ 3460 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 3461 pci_disable_msix(pf->pdev); 3462 kfree(pf->msix_entries); 3463 pf->msix_entries = NULL; 3464 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { 3465 pci_disable_msi(pf->pdev); 3466 } 3467 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); 3468 } 3469 3470 /** 3471 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings 3472 * @pf: board private structure 3473 * 3474 * We go through and clear interrupt specific resources and reset the structure 3475 * to pre-load conditions 3476 **/ 3477 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) 3478 { 3479 int i; 3480 3481 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3482 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 3483 if (pf->vsi[i]) 3484 i40e_vsi_free_q_vectors(pf->vsi[i]); 3485 i40e_reset_interrupt_capability(pf); 3486 } 3487 3488 /** 3489 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI 3490 * @vsi: the VSI being configured 3491 **/ 3492 static void i40e_napi_enable_all(struct i40e_vsi *vsi) 3493 { 3494 int q_idx; 3495 3496 if (!vsi->netdev) 3497 return; 3498 3499 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3500 napi_enable(&vsi->q_vectors[q_idx]->napi); 3501 } 3502 3503 /** 3504 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI 3505 * @vsi: the VSI being configured 3506 **/ 3507 static void i40e_napi_disable_all(struct i40e_vsi *vsi) 3508 { 3509 int q_idx; 3510 3511 if (!vsi->netdev) 3512 return; 3513 3514 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3515 napi_disable(&vsi->q_vectors[q_idx]->napi); 3516 } 3517 3518 /** 3519 * i40e_quiesce_vsi - Pause a given VSI 3520 * @vsi: the VSI being paused 3521 **/ 3522 static void i40e_quiesce_vsi(struct i40e_vsi *vsi) 3523 { 3524 if (test_bit(__I40E_DOWN, &vsi->state)) 3525 return; 3526 3527 set_bit(__I40E_NEEDS_RESTART, &vsi->state); 3528 if (vsi->netdev && netif_running(vsi->netdev)) { 3529 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3530 } else { 3531 set_bit(__I40E_DOWN, &vsi->state); 3532 i40e_down(vsi); 3533 } 3534 } 3535 3536 /** 3537 * i40e_unquiesce_vsi - Resume a given VSI 3538 * @vsi: the VSI being resumed 3539 **/ 3540 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) 3541 { 3542 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) 3543 return; 3544 3545 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 3546 if (vsi->netdev && netif_running(vsi->netdev)) 3547 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3548 else 3549 i40e_up(vsi); /* this clears the DOWN bit */ 3550 } 3551 3552 /** 3553 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF 3554 * @pf: the PF 3555 **/ 3556 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) 3557 { 3558 int v; 3559 3560 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3561 if (pf->vsi[v]) 3562 i40e_quiesce_vsi(pf->vsi[v]); 3563 } 3564 } 3565 3566 /** 3567 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF 3568 * @pf: the PF 3569 **/ 3570 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) 3571 { 3572 int v; 3573 
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = 0;
	int i;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and use the traffic class index to get the
	 * number of traffic classes enabled
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
			num_tc = dcbcfg->etscfg.prioritytable[i];
	}

	/* Traffic class index starts from zero so
	 * increment to return the actual count
	 */
	return num_tc + 1;
}

/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= 1 << i;

	return enabled_tc;
}

/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always in single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* In MFP mode return the count of TCs enabled for this PF */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		enabled_tc = pf->hw.func_caps.enabled_tcmap;
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i))
				num_tc++;
		}
		return num_tc;
	}

	/* In SFP mode all TCs are enabled on the port */
	return i40e_dcb_get_num_tc(dcbcfg);
}

/**
 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
 * @pf: PF being queried
 *
 * Return a bitmap for first enabled traffic class for this PF.
 **/
static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
{
	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
	u8 i = 0;

	if (!enabled_tc)
		return 0x1; /* TC0 */

	/* Find the first enabled TC */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			break;
	}

	return 1 << i;
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
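 *
 * (e.g. a returned map of 0x3 means TC0 and TC1 are enabled)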
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return i40e_pf_get_default_tc(pf);

	/* MFP mode will have enabled TCs set by FW */
	if (pf->flags & I40E_FLAG_MFP_ENABLED)
		return pf->hw.func_caps.enabled_tcmap;

	/* In SFP mode the PF is enabled for all TCs on the port */
	return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
						  NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status aq_ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
					  NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
- Setup the netdev TC configuration 3794 * @vsi: the VSI being configured 3795 * @enabled_tc: TC map to be enabled 3796 * 3797 **/ 3798 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) 3799 { 3800 struct net_device *netdev = vsi->netdev; 3801 struct i40e_pf *pf = vsi->back; 3802 struct i40e_hw *hw = &pf->hw; 3803 u8 netdev_tc = 0; 3804 int i; 3805 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; 3806 3807 if (!netdev) 3808 return; 3809 3810 if (!enabled_tc) { 3811 netdev_reset_tc(netdev); 3812 return; 3813 } 3814 3815 /* Set up actual enabled TCs on the VSI */ 3816 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) 3817 return; 3818 3819 /* set per TC queues for the VSI */ 3820 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 3821 /* Only set TC queues for enabled tcs 3822 * 3823 * e.g. For a VSI that has TC0 and TC3 enabled the 3824 * enabled_tc bitmap would be 0x00001001; the driver 3825 * will set the numtc for netdev as 2 that will be 3826 * referenced by the netdev layer as TC 0 and 1. 3827 */ 3828 if (vsi->tc_config.enabled_tc & (1 << i)) 3829 netdev_set_tc_queue(netdev, 3830 vsi->tc_config.tc_info[i].netdev_tc, 3831 vsi->tc_config.tc_info[i].qcount, 3832 vsi->tc_config.tc_info[i].qoffset); 3833 } 3834 3835 /* Assign UP2TC map for the VSI */ 3836 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 3837 /* Get the actual TC# for the UP */ 3838 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; 3839 /* Get the mapped netdev TC# for the UP */ 3840 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; 3841 netdev_set_prio_tc_map(netdev, i, netdev_tc); 3842 } 3843 } 3844 3845 /** 3846 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map 3847 * @vsi: the VSI being configured 3848 * @ctxt: the ctxt buffer returned from AQ VSI update param command 3849 **/ 3850 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, 3851 struct i40e_vsi_context *ctxt) 3852 { 3853 /* copy just the sections touched not the entire info 3854 * since not all sections are valid as returned by 3855 * update vsi params 3856 */ 3857 vsi->info.mapping_flags = ctxt->info.mapping_flags; 3858 memcpy(&vsi->info.queue_mapping, 3859 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); 3860 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, 3861 sizeof(vsi->info.tc_mapping)); 3862 } 3863 3864 /** 3865 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map 3866 * @vsi: VSI to be configured 3867 * @enabled_tc: TC bitmap 3868 * 3869 * This configures a particular VSI for TCs that are mapped to the 3870 * given TC bitmap. It uses default bandwidth share for TCs across 3871 * VSIs to configure TC for a particular VSI. 3872 * 3873 * NOTE: 3874 * It is expected that the VSI queues have been quisced before calling 3875 * this function. 
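 *
 * A typical caller pattern, as a sketch (i40e_quiesce_vsi() and
 * i40e_unquiesce_vsi() are the helpers used by i40e_setup_tc() below):
 *
 *	i40e_quiesce_vsi(vsi);
 *	ret = i40e_vsi_config_tc(vsi, enabled_tc);
 *	i40e_unquiesce_vsi(vsi);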
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "veb bw config failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components as possible */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components as possible */
		} else {
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	if (pf->hw.func_caps.npar_enable)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX is in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;
			pf->flags |= I40E_FLAG_DCB_ENABLED;
		}
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		netdev_info(vsi->netdev, "NIC Link is Up\n");
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR)
		i40e_fdir_filter_restore(vsi);
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
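 * Callers typically write the new value (e.g. netdev->mtu) first and
 * then call this; the down/up cycle re-reads the configuration when
 * the rings are rebuilt.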
4126 **/ 4127 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) 4128 { 4129 struct i40e_pf *pf = vsi->back; 4130 4131 WARN_ON(in_interrupt()); 4132 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) 4133 usleep_range(1000, 2000); 4134 i40e_down(vsi); 4135 4136 /* Give a VF some time to respond to the reset. The 4137 * two second wait is based upon the watchdog cycle in 4138 * the VF driver. 4139 */ 4140 if (vsi->type == I40E_VSI_SRIOV) 4141 msleep(2000); 4142 i40e_up(vsi); 4143 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 4144 } 4145 4146 /** 4147 * i40e_up - Bring the connection back up after being down 4148 * @vsi: the VSI being configured 4149 **/ 4150 int i40e_up(struct i40e_vsi *vsi) 4151 { 4152 int err; 4153 4154 err = i40e_vsi_configure(vsi); 4155 if (!err) 4156 err = i40e_up_complete(vsi); 4157 4158 return err; 4159 } 4160 4161 /** 4162 * i40e_down - Shutdown the connection processing 4163 * @vsi: the VSI being stopped 4164 **/ 4165 void i40e_down(struct i40e_vsi *vsi) 4166 { 4167 int i; 4168 4169 /* It is assumed that the caller of this function 4170 * sets the vsi->state __I40E_DOWN bit. 4171 */ 4172 if (vsi->netdev) { 4173 netif_carrier_off(vsi->netdev); 4174 netif_tx_disable(vsi->netdev); 4175 } 4176 i40e_vsi_disable_irq(vsi); 4177 i40e_vsi_control_rings(vsi, false); 4178 i40e_napi_disable_all(vsi); 4179 4180 for (i = 0; i < vsi->num_queue_pairs; i++) { 4181 i40e_clean_tx_ring(vsi->tx_rings[i]); 4182 i40e_clean_rx_ring(vsi->rx_rings[i]); 4183 } 4184 } 4185 4186 /** 4187 * i40e_setup_tc - configure multiple traffic classes 4188 * @netdev: net device to configure 4189 * @tc: number of traffic classes to enable 4190 **/ 4191 static int i40e_setup_tc(struct net_device *netdev, u8 tc) 4192 { 4193 struct i40e_netdev_priv *np = netdev_priv(netdev); 4194 struct i40e_vsi *vsi = np->vsi; 4195 struct i40e_pf *pf = vsi->back; 4196 u8 enabled_tc = 0; 4197 int ret = -EINVAL; 4198 int i; 4199 4200 /* Check if DCB enabled to continue */ 4201 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { 4202 netdev_info(netdev, "DCB is not enabled for adapter\n"); 4203 goto exit; 4204 } 4205 4206 /* Check if MFP enabled */ 4207 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4208 netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); 4209 goto exit; 4210 } 4211 4212 /* Check whether tc count is within enabled limit */ 4213 if (tc > i40e_pf_get_num_tc(pf)) { 4214 netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); 4215 goto exit; 4216 } 4217 4218 /* Generate TC map for number of tc requested */ 4219 for (i = 0; i < tc; i++) 4220 enabled_tc |= (1 << i); 4221 4222 /* Requesting same TC configuration as already enabled */ 4223 if (enabled_tc == vsi->tc_config.enabled_tc) 4224 return 0; 4225 4226 /* Quiesce VSI queues */ 4227 i40e_quiesce_vsi(vsi); 4228 4229 /* Configure VSI for enabled TCs */ 4230 ret = i40e_vsi_config_tc(vsi, enabled_tc); 4231 if (ret) { 4232 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", 4233 vsi->seid); 4234 goto exit; 4235 } 4236 4237 /* Unquiesce VSI */ 4238 i40e_unquiesce_vsi(vsi); 4239 4240 exit: 4241 return ret; 4242 } 4243 4244 /** 4245 * i40e_open - Called when a network interface is made active 4246 * @netdev: network interface device structure 4247 * 4248 * The open entry point is called when a network interface is made 4249 * active by the system (IFF_UP). 
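 * (This is the driver's ndo_open callback, reached for example via
 * "ip link set <dev> up".)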
 * At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

#ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
#endif

	return 0;
}

/**
 * i40e_vsi_open - bring up a VSI
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[IFNAMSIZ];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (!vsi->netdev) {
		err = -EINVAL;
		goto err_setup_rx;
	}
	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
	err = i40e_vsi_request_irq(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
	if (err)
		goto err_set_queues;

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return err;
}

/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to pf
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	pf->fdir_pf_active_filters = 0;
}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
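 * All of the resources acquired in i40e_open()/i40e_vsi_open() (rings,
 * IRQs) are released here in roughly the reverse order.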
4373 * 4374 * Returns 0, this is not allowed to fail 4375 **/ 4376 static int i40e_close(struct net_device *netdev) 4377 { 4378 struct i40e_netdev_priv *np = netdev_priv(netdev); 4379 struct i40e_vsi *vsi = np->vsi; 4380 4381 if (test_and_set_bit(__I40E_DOWN, &vsi->state)) 4382 return 0; 4383 4384 i40e_down(vsi); 4385 i40e_vsi_free_irq(vsi); 4386 4387 i40e_vsi_free_tx_resources(vsi); 4388 i40e_vsi_free_rx_resources(vsi); 4389 4390 return 0; 4391 } 4392 4393 /** 4394 * i40e_do_reset - Start a PF or Core Reset sequence 4395 * @pf: board private structure 4396 * @reset_flags: which reset is requested 4397 * 4398 * The essential difference in resets is that the PF Reset 4399 * doesn't clear the packet buffers, doesn't reset the PE 4400 * firmware, and doesn't bother the other PFs on the chip. 4401 **/ 4402 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) 4403 { 4404 u32 val; 4405 4406 WARN_ON(in_interrupt()); 4407 4408 /* do the biggest reset indicated */ 4409 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { 4410 4411 /* Request a Global Reset 4412 * 4413 * This will start the chip's countdown to the actual full 4414 * chip reset event, and a warning interrupt to be sent 4415 * to all PFs, including the requestor. Our handler 4416 * for the warning interrupt will deal with the shutdown 4417 * and recovery of the switch setup. 4418 */ 4419 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 4420 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4421 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 4422 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4423 4424 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { 4425 4426 /* Request a Core Reset 4427 * 4428 * Same as Global Reset, except does *not* include the MAC/PHY 4429 */ 4430 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 4431 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4432 val |= I40E_GLGEN_RTRIG_CORER_MASK; 4433 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4434 i40e_flush(&pf->hw); 4435 4436 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { 4437 4438 /* Request a Firmware Reset 4439 * 4440 * Same as Global reset, plus restarting the 4441 * embedded firmware engine. 4442 */ 4443 /* enable EMP Reset */ 4444 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); 4445 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; 4446 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); 4447 4448 /* force the reset */ 4449 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4450 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; 4451 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4452 i40e_flush(&pf->hw); 4453 4454 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { 4455 4456 /* Request a PF Reset 4457 * 4458 * Resets only the PF-specific registers 4459 * 4460 * This goes directly to the tear-down and rebuild of 4461 * the switch, since we need to do all the recovery as 4462 * for the Core Reset. 
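		 *
		 * As the comments above indicate, the resets nest in scope
		 * as GlobalR > CoreR > PFR, while EMPR additionally
		 * restarts the embedded firmware.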
4463 */ 4464 dev_dbg(&pf->pdev->dev, "PFR requested\n"); 4465 i40e_handle_reset_warning(pf); 4466 4467 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { 4468 int v; 4469 4470 /* Find the VSI(s) that requested a re-init */ 4471 dev_info(&pf->pdev->dev, 4472 "VSI reinit requested\n"); 4473 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4474 struct i40e_vsi *vsi = pf->vsi[v]; 4475 if (vsi != NULL && 4476 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 4477 i40e_vsi_reinit_locked(pf->vsi[v]); 4478 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); 4479 } 4480 } 4481 4482 /* no further action needed, so return now */ 4483 return; 4484 } else { 4485 dev_info(&pf->pdev->dev, 4486 "bad reset request 0x%08x\n", reset_flags); 4487 return; 4488 } 4489 } 4490 4491 #ifdef CONFIG_I40E_DCB 4492 /** 4493 * i40e_dcb_need_reconfig - Check if DCB needs reconfig 4494 * @pf: board private structure 4495 * @old_cfg: current DCB config 4496 * @new_cfg: new DCB config 4497 **/ 4498 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 4499 struct i40e_dcbx_config *old_cfg, 4500 struct i40e_dcbx_config *new_cfg) 4501 { 4502 bool need_reconfig = false; 4503 4504 /* Check if ETS configuration has changed */ 4505 if (memcmp(&new_cfg->etscfg, 4506 &old_cfg->etscfg, 4507 sizeof(new_cfg->etscfg))) { 4508 /* If Priority Table has changed reconfig is needed */ 4509 if (memcmp(&new_cfg->etscfg.prioritytable, 4510 &old_cfg->etscfg.prioritytable, 4511 sizeof(new_cfg->etscfg.prioritytable))) { 4512 need_reconfig = true; 4513 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); 4514 } 4515 4516 if (memcmp(&new_cfg->etscfg.tcbwtable, 4517 &old_cfg->etscfg.tcbwtable, 4518 sizeof(new_cfg->etscfg.tcbwtable))) 4519 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 4520 4521 if (memcmp(&new_cfg->etscfg.tsatable, 4522 &old_cfg->etscfg.tsatable, 4523 sizeof(new_cfg->etscfg.tsatable))) 4524 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); 4525 } 4526 4527 /* Check if PFC configuration has changed */ 4528 if (memcmp(&new_cfg->pfc, 4529 &old_cfg->pfc, 4530 sizeof(new_cfg->pfc))) { 4531 need_reconfig = true; 4532 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); 4533 } 4534 4535 /* Check if APP Table has changed */ 4536 if (memcmp(&new_cfg->app, 4537 &old_cfg->app, 4538 sizeof(new_cfg->app))) { 4539 need_reconfig = true; 4540 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); 4541 } 4542 4543 return need_reconfig; 4544 } 4545 4546 /** 4547 * i40e_handle_lldp_event - Handle LLDP Change MIB event 4548 * @pf: board private structure 4549 * @e: event info posted on ARQ 4550 **/ 4551 static int i40e_handle_lldp_event(struct i40e_pf *pf, 4552 struct i40e_arq_event_info *e) 4553 { 4554 struct i40e_aqc_lldp_get_mib *mib = 4555 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; 4556 struct i40e_hw *hw = &pf->hw; 4557 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; 4558 struct i40e_dcbx_config tmp_dcbx_cfg; 4559 bool need_reconfig = false; 4560 int ret = 0; 4561 u8 type; 4562 4563 /* Ignore if event is not for Nearest Bridge */ 4564 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) 4565 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 4566 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) 4567 return ret; 4568 4569 /* Check MIB Type and return if event for Remote MIB update */ 4570 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; 4571 if (type == I40E_AQ_LLDP_MIB_REMOTE) { 4572 /* Update the remote cached instance and return */ 4573 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, 4574 
					      I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
					      &hw->remote_dcbx_config);
		goto exit;
	}

	/* Convert/store the DCBX data from LLDPDU temporarily */
	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
	ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
	if (ret) {
		/* Error in LLDPDU parsing return */
		dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);

	/* Overwrite the new configuration */
	*dcbx_cfg = tmp_dcbx_cfg;

	if (!need_reconfig)
		goto exit;

	/* Reconfiguration needed; quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Apply the configuration changes to the VEB/VSIs */
	i40e_dcb_reconfigure(pf);

	i40e_pf_unquiesce_all_vsi(pf);
exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */

/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
 * @pf: board private structure
 **/
int i40e_get_current_fd_count(struct i40e_pf *pf)
{
	int val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	u32 fcnt_prog, fcnt_avail;

	/* Check if FD SB or ATR was auto-disabled and if there is enough room
	 * to re-enable
	 */
	if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
	    (pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;
	fcnt_prog = i40e_get_current_fd_count(pf);
	fcnt_avail = pf->hw.fdir_shared_filter_count +
					       pf->fdir_pf_filter_count;
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}
}

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
		return;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state))
		return;
	i40e_fdir_check_and_reenable(pf);

	if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
	    (pf->flags & I40E_FLAG_FD_SB_ENABLED))
		pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
		break;

	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ...
now the local VSIs */ 4801 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4802 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 4803 i40e_vsi_link_event(pf->vsi[i], link_up); 4804 } 4805 4806 /** 4807 * i40e_link_event - Update netif_carrier status 4808 * @pf: board private structure 4809 **/ 4810 static void i40e_link_event(struct i40e_pf *pf) 4811 { 4812 bool new_link, old_link; 4813 4814 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); 4815 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 4816 4817 if (new_link == old_link) 4818 return; 4819 4820 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) 4821 netdev_info(pf->vsi[pf->lan_vsi]->netdev, 4822 "NIC Link is %s\n", (new_link ? "Up" : "Down")); 4823 4824 /* Notify the base of the switch tree connected to 4825 * the link. Floating VEBs are not notified. 4826 */ 4827 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 4828 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); 4829 else 4830 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); 4831 4832 if (pf->vf) 4833 i40e_vc_notify_link_state(pf); 4834 4835 if (pf->flags & I40E_FLAG_PTP) 4836 i40e_ptp_set_increment(pf); 4837 } 4838 4839 /** 4840 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts 4841 * @pf: board private structure 4842 * 4843 * Set the per-queue flags to request a check for stuck queues in the irq 4844 * clean functions, then force interrupts to be sure the irq clean is called. 4845 **/ 4846 static void i40e_check_hang_subtask(struct i40e_pf *pf) 4847 { 4848 int i, v; 4849 4850 /* If we're down or resetting, just bail */ 4851 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4852 return; 4853 4854 /* for each VSI/netdev 4855 * for each Tx queue 4856 * set the check flag 4857 * for each q_vector 4858 * force an interrupt 4859 */ 4860 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4861 struct i40e_vsi *vsi = pf->vsi[v]; 4862 int armed = 0; 4863 4864 if (!pf->vsi[v] || 4865 test_bit(__I40E_DOWN, &vsi->state) || 4866 (vsi->netdev && !netif_carrier_ok(vsi->netdev))) 4867 continue; 4868 4869 for (i = 0; i < vsi->num_queue_pairs; i++) { 4870 set_check_for_tx_hang(vsi->tx_rings[i]); 4871 if (test_bit(__I40E_HANG_CHECK_ARMED, 4872 &vsi->tx_rings[i]->state)) 4873 armed++; 4874 } 4875 4876 if (armed) { 4877 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { 4878 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, 4879 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 4880 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); 4881 } else { 4882 u16 vec = vsi->base_vector - 1; 4883 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 4884 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); 4885 for (i = 0; i < vsi->num_q_vectors; i++, vec++) 4886 wr32(&vsi->back->hw, 4887 I40E_PFINT_DYN_CTLN(vec), val); 4888 } 4889 i40e_flush(&vsi->back->hw); 4890 } 4891 } 4892 } 4893 4894 /** 4895 * i40e_watchdog_subtask - Check and bring link up 4896 * @pf: board private structure 4897 **/ 4898 static void i40e_watchdog_subtask(struct i40e_pf *pf) 4899 { 4900 int i; 4901 4902 /* if interface is down do nothing */ 4903 if (test_bit(__I40E_DOWN, &pf->state) || 4904 test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4905 return; 4906 4907 /* Update the stats for active netdevs so the network stack 4908 * can look at updated numbers whenever it cares to 4909 */ 4910 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4911 if (pf->vsi[i] && pf->vsi[i]->netdev) 4912 i40e_update_stats(pf->vsi[i]); 4913 4914 /* Update the stats for the active switching components */ 4915 for (i = 0; i < I40E_MAX_VEB; i++) 4916 
if (pf->veb[i]) 4917 i40e_update_veb_stats(pf->veb[i]); 4918 4919 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); 4920 } 4921 4922 /** 4923 * i40e_reset_subtask - Set up for resetting the device and driver 4924 * @pf: board private structure 4925 **/ 4926 static void i40e_reset_subtask(struct i40e_pf *pf) 4927 { 4928 u32 reset_flags = 0; 4929 4930 rtnl_lock(); 4931 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { 4932 reset_flags |= (1 << __I40E_REINIT_REQUESTED); 4933 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); 4934 } 4935 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { 4936 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); 4937 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 4938 } 4939 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { 4940 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); 4941 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); 4942 } 4943 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { 4944 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); 4945 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); 4946 } 4947 4948 /* If there's a recovery already waiting, it takes 4949 * precedence before starting a new reset sequence. 4950 */ 4951 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { 4952 i40e_handle_reset_warning(pf); 4953 goto unlock; 4954 } 4955 4956 /* If we're already down or resetting, just bail */ 4957 if (reset_flags && 4958 !test_bit(__I40E_DOWN, &pf->state) && 4959 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) 4960 i40e_do_reset(pf, reset_flags); 4961 4962 unlock: 4963 rtnl_unlock(); 4964 } 4965 4966 /** 4967 * i40e_handle_link_event - Handle link event 4968 * @pf: board private structure 4969 * @e: event info posted on ARQ 4970 **/ 4971 static void i40e_handle_link_event(struct i40e_pf *pf, 4972 struct i40e_arq_event_info *e) 4973 { 4974 struct i40e_hw *hw = &pf->hw; 4975 struct i40e_aqc_get_link_status *status = 4976 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 4977 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 4978 4979 /* save off old link status information */ 4980 memcpy(&pf->hw.phy.link_info_old, hw_link_info, 4981 sizeof(pf->hw.phy.link_info_old)); 4982 4983 /* update link status */ 4984 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; 4985 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; 4986 hw_link_info->link_info = status->link_info; 4987 hw_link_info->an_info = status->an_info; 4988 hw_link_info->ext_info = status->ext_info; 4989 hw_link_info->lse_enable = 4990 le16_to_cpu(status->command_flags) & 4991 I40E_AQ_LSE_ENABLE; 4992 4993 /* process the event */ 4994 i40e_link_event(pf); 4995 4996 /* Do a new status request to re-enable LSE reporting 4997 * and load new status information into the hw struct, 4998 * then see if the status changed while processing the 4999 * initial event. 
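	 * The second i40e_link_event() call below is deliberate, not
	 * redundant: it is what performs that recheck.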
5000 */ 5001 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); 5002 i40e_link_event(pf); 5003 } 5004 5005 /** 5006 * i40e_clean_adminq_subtask - Clean the AdminQ rings 5007 * @pf: board private structure 5008 **/ 5009 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) 5010 { 5011 struct i40e_arq_event_info event; 5012 struct i40e_hw *hw = &pf->hw; 5013 u16 pending, i = 0; 5014 i40e_status ret; 5015 u16 opcode; 5016 u32 val; 5017 5018 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)) 5019 return; 5020 5021 event.msg_size = I40E_MAX_AQ_BUF_SIZE; 5022 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 5023 if (!event.msg_buf) 5024 return; 5025 5026 do { 5027 event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */ 5028 ret = i40e_clean_arq_element(hw, &event, &pending); 5029 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) { 5030 dev_info(&pf->pdev->dev, "No ARQ event found\n"); 5031 break; 5032 } else if (ret) { 5033 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); 5034 break; 5035 } 5036 5037 opcode = le16_to_cpu(event.desc.opcode); 5038 switch (opcode) { 5039 5040 case i40e_aqc_opc_get_link_status: 5041 i40e_handle_link_event(pf, &event); 5042 break; 5043 case i40e_aqc_opc_send_msg_to_pf: 5044 ret = i40e_vc_process_vf_msg(pf, 5045 le16_to_cpu(event.desc.retval), 5046 le32_to_cpu(event.desc.cookie_high), 5047 le32_to_cpu(event.desc.cookie_low), 5048 event.msg_buf, 5049 event.msg_size); 5050 break; 5051 case i40e_aqc_opc_lldp_update_mib: 5052 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 5053 #ifdef CONFIG_I40E_DCB 5054 rtnl_lock(); 5055 ret = i40e_handle_lldp_event(pf, &event); 5056 rtnl_unlock(); 5057 #endif /* CONFIG_I40E_DCB */ 5058 break; 5059 case i40e_aqc_opc_event_lan_overflow: 5060 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 5061 i40e_handle_lan_overflow_event(pf, &event); 5062 break; 5063 case i40e_aqc_opc_send_msg_to_peer: 5064 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); 5065 break; 5066 default: 5067 dev_info(&pf->pdev->dev, 5068 "ARQ Error: Unknown event 0x%04x received\n", 5069 opcode); 5070 break; 5071 } 5072 } while (pending && (i++ < pf->adminq_work_limit)); 5073 5074 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); 5075 /* re-enable Admin queue interrupt cause */ 5076 val = rd32(hw, I40E_PFINT_ICR0_ENA); 5077 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 5078 wr32(hw, I40E_PFINT_ICR0_ENA, val); 5079 i40e_flush(hw); 5080 5081 kfree(event.msg_buf); 5082 } 5083 5084 /** 5085 * i40e_verify_eeprom - make sure eeprom is good to use 5086 * @pf: board private structure 5087 **/ 5088 static void i40e_verify_eeprom(struct i40e_pf *pf) 5089 { 5090 int err; 5091 5092 err = i40e_diag_eeprom_test(&pf->hw); 5093 if (err) { 5094 /* retry in case of garbage read */ 5095 err = i40e_diag_eeprom_test(&pf->hw); 5096 if (err) { 5097 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", 5098 err); 5099 set_bit(__I40E_BAD_EEPROM, &pf->state); 5100 } 5101 } 5102 5103 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { 5104 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); 5105 clear_bit(__I40E_BAD_EEPROM, &pf->state); 5106 } 5107 } 5108 5109 /** 5110 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it 5111 * @veb: pointer to the VEB instance 5112 * 5113 * This is a recursive function that first builds the attached VSIs then 5114 * recurses in to build the next layer of VEB. 
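 * (The recursion depth is bounded by the VEB hierarchy, at most
 * I40E_MAX_VEB levels, so stack usage stays small.)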
We track the connections 5115 * through our own index numbers because the seid's from the HW could 5116 * change across the reset. 5117 **/ 5118 static int i40e_reconstitute_veb(struct i40e_veb *veb) 5119 { 5120 struct i40e_vsi *ctl_vsi = NULL; 5121 struct i40e_pf *pf = veb->pf; 5122 int v, veb_idx; 5123 int ret; 5124 5125 /* build VSI that owns this VEB, temporarily attached to base VEB */ 5126 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) { 5127 if (pf->vsi[v] && 5128 pf->vsi[v]->veb_idx == veb->idx && 5129 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 5130 ctl_vsi = pf->vsi[v]; 5131 break; 5132 } 5133 } 5134 if (!ctl_vsi) { 5135 dev_info(&pf->pdev->dev, 5136 "missing owner VSI for veb_idx %d\n", veb->idx); 5137 ret = -ENOENT; 5138 goto end_reconstitute; 5139 } 5140 if (ctl_vsi != pf->vsi[pf->lan_vsi]) 5141 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 5142 ret = i40e_add_vsi(ctl_vsi); 5143 if (ret) { 5144 dev_info(&pf->pdev->dev, 5145 "rebuild of owner VSI failed: %d\n", ret); 5146 goto end_reconstitute; 5147 } 5148 i40e_vsi_reset_stats(ctl_vsi); 5149 5150 /* create the VEB in the switch and move the VSI onto the VEB */ 5151 ret = i40e_add_veb(veb, ctl_vsi); 5152 if (ret) 5153 goto end_reconstitute; 5154 5155 /* create the remaining VSIs attached to this VEB */ 5156 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 5157 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 5158 continue; 5159 5160 if (pf->vsi[v]->veb_idx == veb->idx) { 5161 struct i40e_vsi *vsi = pf->vsi[v]; 5162 vsi->uplink_seid = veb->seid; 5163 ret = i40e_add_vsi(vsi); 5164 if (ret) { 5165 dev_info(&pf->pdev->dev, 5166 "rebuild of vsi_idx %d failed: %d\n", 5167 v, ret); 5168 goto end_reconstitute; 5169 } 5170 i40e_vsi_reset_stats(vsi); 5171 } 5172 } 5173 5174 /* create any VEBs attached to this VEB - RECURSION */ 5175 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 5176 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { 5177 pf->veb[veb_idx]->uplink_seid = veb->seid; 5178 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); 5179 if (ret) 5180 break; 5181 } 5182 } 5183 5184 end_reconstitute: 5185 return ret; 5186 } 5187 5188 /** 5189 * i40e_get_capabilities - get info about the HW 5190 * @pf: the PF struct 5191 **/ 5192 static int i40e_get_capabilities(struct i40e_pf *pf) 5193 { 5194 struct i40e_aqc_list_capabilities_element_resp *cap_buf; 5195 u16 data_size; 5196 int buf_len; 5197 int err; 5198 5199 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 5200 do { 5201 cap_buf = kzalloc(buf_len, GFP_KERNEL); 5202 if (!cap_buf) 5203 return -ENOMEM; 5204 5205 /* this loads the data into the hw struct for us */ 5206 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, 5207 &data_size, 5208 i40e_aqc_opc_list_func_capabilities, 5209 NULL); 5210 /* data loaded, buffer no longer needed */ 5211 kfree(cap_buf); 5212 5213 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 5214 /* retry with a larger buffer */ 5215 buf_len = data_size; 5216 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 5217 dev_info(&pf->pdev->dev, 5218 "capability discovery failed: aq=%d\n", 5219 pf->hw.aq.asq_last_status); 5220 return -ENODEV; 5221 } 5222 } while (err); 5223 5224 /* increment MSI-X count because current FW skips one */ 5225 pf->hw.func_caps.num_msix_vectors++; 5226 5227 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || 5228 (pf->hw.aq.fw_maj_ver < 2)) { 5229 pf->hw.func_caps.num_msix_vectors++; 5230 pf->hw.func_caps.num_msix_vectors_vf++; 5231 } 5232 5233 
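	/* With I40E_DEBUG_USER set in hw.debug_mask, dump the discovered
	 * function capabilities below; this is an easy way to verify what
	 * the firmware actually granted this PF.
	 */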
if (pf->hw.debug_mask & I40E_DEBUG_USER) 5234 dev_info(&pf->pdev->dev, 5235 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 5236 pf->hw.pf_id, pf->hw.func_caps.num_vfs, 5237 pf->hw.func_caps.num_msix_vectors, 5238 pf->hw.func_caps.num_msix_vectors_vf, 5239 pf->hw.func_caps.fd_filters_guaranteed, 5240 pf->hw.func_caps.fd_filters_best_effort, 5241 pf->hw.func_caps.num_tx_qp, 5242 pf->hw.func_caps.num_vsis); 5243 5244 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ 5245 + pf->hw.func_caps.num_vfs) 5246 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { 5247 dev_info(&pf->pdev->dev, 5248 "got num_vsis %d, setting num_vsis to %d\n", 5249 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); 5250 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; 5251 } 5252 5253 return 0; 5254 } 5255 5256 static int i40e_vsi_clear(struct i40e_vsi *vsi); 5257 5258 /** 5259 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband 5260 * @pf: board private structure 5261 **/ 5262 static void i40e_fdir_sb_setup(struct i40e_pf *pf) 5263 { 5264 struct i40e_vsi *vsi; 5265 bool new_vsi = false; 5266 int err, i; 5267 5268 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 5269 return; 5270 5271 /* find existing VSI and see if it needs configuring */ 5272 vsi = NULL; 5273 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5274 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5275 vsi = pf->vsi[i]; 5276 break; 5277 } 5278 } 5279 5280 /* create a new VSI if none exists */ 5281 if (!vsi) { 5282 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, 5283 pf->vsi[pf->lan_vsi]->seid, 0); 5284 if (!vsi) { 5285 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); 5286 goto err_vsi; 5287 } 5288 new_vsi = true; 5289 } 5290 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); 5291 5292 err = i40e_vsi_setup_tx_resources(vsi); 5293 if (err) 5294 goto err_setup_tx; 5295 err = i40e_vsi_setup_rx_resources(vsi); 5296 if (err) 5297 goto err_setup_rx; 5298 5299 if (new_vsi) { 5300 char int_name[IFNAMSIZ + 9]; 5301 err = i40e_vsi_configure(vsi); 5302 if (err) 5303 goto err_setup_rx; 5304 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", 5305 dev_driver_string(&pf->pdev->dev)); 5306 err = i40e_vsi_request_irq(vsi, int_name); 5307 if (err) 5308 goto err_setup_rx; 5309 err = i40e_up_complete(vsi); 5310 if (err) 5311 goto err_up_complete; 5312 clear_bit(__I40E_NEEDS_RESTART, &vsi->state); 5313 } 5314 5315 return; 5316 5317 err_up_complete: 5318 i40e_down(vsi); 5319 i40e_vsi_free_irq(vsi); 5320 err_setup_rx: 5321 i40e_vsi_free_rx_resources(vsi); 5322 err_setup_tx: 5323 i40e_vsi_free_tx_resources(vsi); 5324 err_vsi: 5325 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 5326 i40e_vsi_clear(vsi); 5327 } 5328 5329 /** 5330 * i40e_fdir_teardown - release the Flow Director resources 5331 * @pf: board private structure 5332 **/ 5333 static void i40e_fdir_teardown(struct i40e_pf *pf) 5334 { 5335 int i; 5336 5337 i40e_fdir_filter_exit(pf); 5338 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5339 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5340 i40e_vsi_release(pf->vsi[i]); 5341 break; 5342 } 5343 } 5344 } 5345 5346 /** 5347 * i40e_prep_for_reset - prep for the core to reset 5348 * @pf: board private structure 5349 * 5350 * Close up the VFs and other things in prep for pf Reset. 
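 * On success the admin queue and the LAN HMC have been shut down, so
 * almost nothing in the HW can be touched until i40e_reset_and_rebuild()
 * brings them back.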
 **/
static int i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return 0;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	if (i40e_check_asq_alive(hw))
		i40e_vc_notify_reset(pf);

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	ret = i40e_shutdown_lan_hmc(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
	}
	return ret;
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_driver_version dv;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret)
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto end_core_reset;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto end_core_reset;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
		i40e_verify_eeprom(pf);
	}

	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
		goto end_core_reset;
	}
#endif /* CONFIG_I40E_DCB */

	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
5463 */ 5464 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 5465 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); 5466 /* find the one VEB connected to the MAC, and find orphans */ 5467 for (v = 0; v < I40E_MAX_VEB; v++) { 5468 if (!pf->veb[v]) 5469 continue; 5470 5471 if (pf->veb[v]->uplink_seid == pf->mac_seid || 5472 pf->veb[v]->uplink_seid == 0) { 5473 ret = i40e_reconstitute_veb(pf->veb[v]); 5474 5475 if (!ret) 5476 continue; 5477 5478 /* If Main VEB failed, we're in deep doodoo, 5479 * so give up rebuilding the switch and set up 5480 * for minimal rebuild of PF VSI. 5481 * If orphan failed, we'll report the error 5482 * but try to keep going. 5483 */ 5484 if (pf->veb[v]->uplink_seid == pf->mac_seid) { 5485 dev_info(&pf->pdev->dev, 5486 "rebuild of switch failed: %d, will try to set up simple PF connection\n", 5487 ret); 5488 pf->vsi[pf->lan_vsi]->uplink_seid 5489 = pf->mac_seid; 5490 break; 5491 } else if (pf->veb[v]->uplink_seid == 0) { 5492 dev_info(&pf->pdev->dev, 5493 "rebuild of orphan VEB failed: %d\n", 5494 ret); 5495 } 5496 } 5497 } 5498 } 5499 5500 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { 5501 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); 5502 /* no VEB, so rebuild only the Main VSI */ 5503 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); 5504 if (ret) { 5505 dev_info(&pf->pdev->dev, 5506 "rebuild of Main VSI failed: %d\n", ret); 5507 goto end_core_reset; 5508 } 5509 } 5510 5511 /* reinit the misc interrupt */ 5512 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 5513 ret = i40e_setup_misc_vector(pf); 5514 5515 /* restart the VSIs that were rebuilt and running before the reset */ 5516 i40e_pf_unquiesce_all_vsi(pf); 5517 5518 if (pf->num_alloc_vfs) { 5519 for (v = 0; v < pf->num_alloc_vfs; v++) 5520 i40e_reset_vf(&pf->vf[v], true); 5521 } 5522 5523 /* tell the firmware that we're starting */ 5524 dv.major_version = DRV_VERSION_MAJOR; 5525 dv.minor_version = DRV_VERSION_MINOR; 5526 dv.build_version = DRV_VERSION_BUILD; 5527 dv.subbuild_version = 0; 5528 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 5529 5530 dev_info(&pf->pdev->dev, "reset complete\n"); 5531 5532 end_core_reset: 5533 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 5534 } 5535 5536 /** 5537 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild 5538 * @pf: board private structure 5539 * 5540 * Close up the VFs and other things in prep for a Core Reset, 5541 * then get ready to rebuild the world. 
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	i40e_status ret;

	ret = i40e_prep_for_reset(pf);
	if (!ret)
		i40e_reset_and_rebuild(pf, false);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the pf structure
 *
 * Called from the MDD irq handler to identify possibly malicious VFs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
				>> I40E_GL_MDET_TX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
				>> I40E_GL_MDET_TX_EVENT_SHIFT;
		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
				>> I40E_GL_MDET_TX_QUEUE_SHIFT;
		dev_info(&pf->pdev->dev,
			 "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
			 event, queue, func);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
				>> I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
				>> I40E_GL_MDET_RX_EVENT_SHIFT;
		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
				>> I40E_GL_MDET_RX_QUEUE_SHIFT;
		dev_info(&pf->pdev->dev,
			 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
			 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

#ifdef CONFIG_I40E_VXLAN
/**
 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
	const int vxlan_hdr_qwords = 4;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 filter_index;
	__be16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;

	for (i = 0; i <
I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 5653 if (pf->pending_vxlan_bitmap & (1 << i)) { 5654 pf->pending_vxlan_bitmap &= ~(1 << i); 5655 port = pf->vxlan_ports[i]; 5656 ret = port ? 5657 i40e_aq_add_udp_tunnel(hw, ntohs(port), 5658 vxlan_hdr_qwords, 5659 I40E_AQC_TUNNEL_TYPE_VXLAN, 5660 &filter_index, NULL) 5661 : i40e_aq_del_udp_tunnel(hw, i, NULL); 5662 5663 if (ret) { 5664 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n", 5665 port ? "adding" : "deleting", 5666 ntohs(port), port ? i : i); 5667 5668 pf->vxlan_ports[i] = 0; 5669 } else { 5670 dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n", 5671 port ? "Added" : "Deleted", 5672 ntohs(port), port ? i : filter_index); 5673 } 5674 } 5675 } 5676 } 5677 5678 #endif 5679 /** 5680 * i40e_service_task - Run the driver's async subtasks 5681 * @work: pointer to work_struct containing our data 5682 **/ 5683 static void i40e_service_task(struct work_struct *work) 5684 { 5685 struct i40e_pf *pf = container_of(work, 5686 struct i40e_pf, 5687 service_task); 5688 unsigned long start_time = jiffies; 5689 5690 i40e_reset_subtask(pf); 5691 i40e_handle_mdd_event(pf); 5692 i40e_vc_process_vflr_event(pf); 5693 i40e_watchdog_subtask(pf); 5694 i40e_fdir_reinit_subtask(pf); 5695 i40e_check_hang_subtask(pf); 5696 i40e_sync_filters_subtask(pf); 5697 #ifdef CONFIG_I40E_VXLAN 5698 i40e_sync_vxlan_filters_subtask(pf); 5699 #endif 5700 i40e_clean_adminq_subtask(pf); 5701 5702 i40e_service_event_complete(pf); 5703 5704 /* If the tasks have taken longer than one timer cycle or there 5705 * is more work to be done, reschedule the service task now 5706 * rather than wait for the timer to tick again. 5707 */ 5708 if (time_after(jiffies, (start_time + pf->service_timer_period)) || 5709 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || 5710 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || 5711 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) 5712 i40e_service_event_schedule(pf); 5713 } 5714 5715 /** 5716 * i40e_service_timer - timer callback 5717 * @data: pointer to PF struct 5718 **/ 5719 static void i40e_service_timer(unsigned long data) 5720 { 5721 struct i40e_pf *pf = (struct i40e_pf *)data; 5722 5723 mod_timer(&pf->service_timer, 5724 round_jiffies(jiffies + pf->service_timer_period)); 5725 i40e_service_event_schedule(pf); 5726 } 5727 5728 /** 5729 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI 5730 * @vsi: the VSI being configured 5731 **/ 5732 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) 5733 { 5734 struct i40e_pf *pf = vsi->back; 5735 5736 switch (vsi->type) { 5737 case I40E_VSI_MAIN: 5738 vsi->alloc_queue_pairs = pf->num_lan_qps; 5739 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 5740 I40E_REQ_DESCRIPTOR_MULTIPLE); 5741 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 5742 vsi->num_q_vectors = pf->num_lan_msix; 5743 else 5744 vsi->num_q_vectors = 1; 5745 5746 break; 5747 5748 case I40E_VSI_FDIR: 5749 vsi->alloc_queue_pairs = 1; 5750 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, 5751 I40E_REQ_DESCRIPTOR_MULTIPLE); 5752 vsi->num_q_vectors = 1; 5753 break; 5754 5755 case I40E_VSI_VMDQ2: 5756 vsi->alloc_queue_pairs = pf->num_vmdq_qps; 5757 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 5758 I40E_REQ_DESCRIPTOR_MULTIPLE); 5759 vsi->num_q_vectors = pf->num_vmdq_msix; 5760 break; 5761 5762 case I40E_VSI_SRIOV: 5763 vsi->alloc_queue_pairs = pf->num_vf_qps; 5764 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, 5765 I40E_REQ_DESCRIPTOR_MULTIPLE); 5766 break; 5767 5768 default: 
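		/* any new VSI type must add its own case above */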
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the VSI
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
		i++;
	if (i >= pf->hw.func_caps.num_vsis) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
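/* Editor's note -- illustrative sketch, not driver code: the slot search in
 * i40e_vsi_mem_alloc() is a circular scan starting at pf->next_vsi. A compact
 * equivalent of the two while loops above, assuming the same num_vsis bound:
 *
 *	for (n = 0; n < num_vsis; n++) {
 *		i = (pf->next_vsi + n) % num_vsis;
 *		if (!pf->vsi[i])
 *			break;		// first free slot, wrapping around
 *	}
 *
 * Keeping next_vsi just past the last allocation makes back-to-back
 * allocations O(1) in the common case.
 */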
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: pointer to the VSI
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the pf for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		struct i40e_ring *tx_ring;
		struct i40e_ring *rx_ring;

		/* allocate space for both Tx and Rx in one shot */
		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
		if (!tx_ring)
			goto err_out;

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;
		vsi->tx_rings[i] = tx_ring;

		rx_ring = &tx_ring[1];
		rx_ring->queue_index = i;
		rx_ring->reg_idx = vsi->base_queue + i;
		rx_ring->ring_active = false;
		rx_ring->vsi = vsi;
		rx_ring->netdev = vsi->netdev;
		rx_ring->dev = &pf->pdev->dev;
		rx_ring->count = vsi->num_desc;
		rx_ring->size = 0;
		rx_ring->dcb_tc = 0;
		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
			set_ring_16byte_desc_enabled(rx_ring);
		else
			clear_ring_16byte_desc_enabled(rx_ring);
		vsi->rx_rings[i] = rx_ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
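/* Editor's note -- illustrative sketch, not driver code: each queue pair is
 * carved out of one allocation, so the Tx and Rx rings of pair i are adjacent
 * in memory:
 *
 *	kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL)
 *	    [0] Tx ring  -> vsi->tx_rings[i]
 *	    [1] Rx ring  -> vsi->rx_rings[i] == &vsi->tx_rings[i][1]
 *
 * This is why i40e_vsi_clear_rings() frees only tx_rings[i]: the Rx ring is
 * part of the same block and must never be freed separately.
 */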
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	pf->num_msix_entries = vectors;

	return vectors;
}

/**
 * i40e_init_msix - Setup the MSI-X capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSI-X vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	i40e_status err = 0;
	struct i40e_hw *hw = &pf->hw;
	int v_budget, i;
	int vec;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request is composed of:
	 *   - 1 for the "other" cause, e.g. Admin Queue events
	 *   - the number of LAN queue pairs
	 *       - queues being used for RSS: we don't need as many as
	 *         max_rss_size vectors, so we use rss_size instead, since
	 *         that is governed by the number of CPUs in the system
	 *         (this assumes symmetric Tx/Rx pairing)
	 *   - the number of VMDq pairs
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
6086 */ 6087 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); 6088 pf->num_vmdq_msix = pf->num_vmdq_qps; 6089 v_budget = 1 + pf->num_lan_msix; 6090 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); 6091 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 6092 v_budget++; 6093 6094 /* Scale down if necessary, and the rings will share vectors */ 6095 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); 6096 6097 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 6098 GFP_KERNEL); 6099 if (!pf->msix_entries) 6100 return -ENOMEM; 6101 6102 for (i = 0; i < v_budget; i++) 6103 pf->msix_entries[i].entry = i; 6104 vec = i40e_reserve_msix_vectors(pf, v_budget); 6105 if (vec < I40E_MIN_MSIX) { 6106 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 6107 kfree(pf->msix_entries); 6108 pf->msix_entries = NULL; 6109 return -ENODEV; 6110 6111 } else if (vec == I40E_MIN_MSIX) { 6112 /* Adjust for minimal MSIX use */ 6113 dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n"); 6114 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 6115 pf->num_vmdq_vsis = 0; 6116 pf->num_vmdq_qps = 0; 6117 pf->num_vmdq_msix = 0; 6118 pf->num_lan_qps = 1; 6119 pf->num_lan_msix = 1; 6120 6121 } else if (vec != v_budget) { 6122 /* Scale vector usage down */ 6123 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 6124 vec--; /* reserve the misc vector */ 6125 6126 /* partition out the remaining vectors */ 6127 switch (vec) { 6128 case 2: 6129 pf->num_vmdq_vsis = 1; 6130 pf->num_lan_msix = 1; 6131 break; 6132 case 3: 6133 pf->num_vmdq_vsis = 1; 6134 pf->num_lan_msix = 2; 6135 break; 6136 default: 6137 pf->num_lan_msix = min_t(int, (vec / 2), 6138 pf->num_lan_qps); 6139 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), 6140 I40E_DEFAULT_NUM_VMDQ_VSI); 6141 break; 6142 } 6143 } 6144 6145 return err; 6146 } 6147 6148 /** 6149 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector 6150 * @vsi: the VSI being configured 6151 * @v_idx: index of the vector in the vsi struct 6152 * 6153 * We allocate one q_vector. If allocation fails we return -ENOMEM. 6154 **/ 6155 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) 6156 { 6157 struct i40e_q_vector *q_vector; 6158 6159 /* allocate q_vector */ 6160 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); 6161 if (!q_vector) 6162 return -ENOMEM; 6163 6164 q_vector->vsi = vsi; 6165 q_vector->v_idx = v_idx; 6166 cpumask_set_cpu(v_idx, &q_vector->affinity_mask); 6167 if (vsi->netdev) 6168 netif_napi_add(vsi->netdev, &q_vector->napi, 6169 i40e_napi_poll, vsi->work_limit); 6170 6171 q_vector->rx.latency_range = I40E_LOW_LATENCY; 6172 q_vector->tx.latency_range = I40E_LOW_LATENCY; 6173 6174 /* tie q_vector and vsi together */ 6175 vsi->q_vectors[v_idx] = q_vector; 6176 6177 return 0; 6178 } 6179 6180 /** 6181 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors 6182 * @vsi: the VSI being configured 6183 * 6184 * We allocate one q_vector per queue interrupt. If allocation fails we 6185 * return -ENOMEM. 
6186 **/ 6187 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) 6188 { 6189 struct i40e_pf *pf = vsi->back; 6190 int v_idx, num_q_vectors; 6191 int err; 6192 6193 /* if not MSIX, give the one vector only to the LAN VSI */ 6194 if (pf->flags & I40E_FLAG_MSIX_ENABLED) 6195 num_q_vectors = vsi->num_q_vectors; 6196 else if (vsi == pf->vsi[pf->lan_vsi]) 6197 num_q_vectors = 1; 6198 else 6199 return -EINVAL; 6200 6201 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { 6202 err = i40e_vsi_alloc_q_vector(vsi, v_idx); 6203 if (err) 6204 goto err_out; 6205 } 6206 6207 return 0; 6208 6209 err_out: 6210 while (v_idx--) 6211 i40e_free_q_vector(vsi, v_idx); 6212 6213 return err; 6214 } 6215 6216 /** 6217 * i40e_init_interrupt_scheme - Determine proper interrupt scheme 6218 * @pf: board private structure to initialize 6219 **/ 6220 static void i40e_init_interrupt_scheme(struct i40e_pf *pf) 6221 { 6222 int err = 0; 6223 6224 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 6225 err = i40e_init_msix(pf); 6226 if (err) { 6227 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | 6228 I40E_FLAG_RSS_ENABLED | 6229 I40E_FLAG_DCB_ENABLED | 6230 I40E_FLAG_SRIOV_ENABLED | 6231 I40E_FLAG_FD_SB_ENABLED | 6232 I40E_FLAG_FD_ATR_ENABLED | 6233 I40E_FLAG_VMDQ_ENABLED); 6234 6235 /* rework the queue expectations without MSIX */ 6236 i40e_determine_queue_usage(pf); 6237 } 6238 } 6239 6240 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 6241 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 6242 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); 6243 err = pci_enable_msi(pf->pdev); 6244 if (err) { 6245 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); 6246 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 6247 } 6248 } 6249 6250 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 6251 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); 6252 6253 /* track first vector for misc interrupts */ 6254 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 6255 } 6256 6257 /** 6258 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events 6259 * @pf: board private structure 6260 * 6261 * This sets up the handler for MSIX 0, which is used to manage the 6262 * non-queue interrupts, e.g. AdminQ and errors. This is not used 6263 * when in MSI or Legacy interrupt mode. 
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->misc_int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->misc_int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(hw);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	/* Set of random keys generated using kernel random number generator */
	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
				0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
				0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
				0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
	struct i40e_hw *hw = &pf->hw;
	u32 lut = 0;
	int i, j;
	u64 hena;

	/* Fill out hash function seed */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= I40E_DEFAULT_RSS_HENA;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {

		/* The assumption is that lan qp count will be the highest
		 * qp count for any PF VSI that needs RSS.
		 * If multiple VSIs need RSS support, all the qp counts
		 * for those VSIs should be a power of 2 for RSS to work.
		 * If LAN VSI is the only consumer for RSS then this requirement
		 * is not necessary.
		 */
		if (j == pf->rss_size)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
			((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);

	return 0;
}
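/* Editor's note -- illustrative worked example, not driver code: the LUT loop
 * above packs four 8-bit entries into each 32-bit HLUT register, the oldest
 * entry ending up in the high byte. With a hypothetical pf->rss_size of 4,
 * j cycles 0,1,2,3 and the first register is built as:
 *
 *	i=0: lut = 0x00000000	i=2: lut = 0x00000102
 *	i=1: lut = 0x00000001	i=3: lut = 0x00010203 -> wr32(HLUT(0))
 *
 * so every LUT byte names a queue index 0..3 in round robin; the hardware
 * picks a byte based on the packet's RSS hash to select the Rx queue.
 */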
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
 * count, which may differ from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	queue_count = min_t(int, queue_count, pf->rss_size_max);
	queue_count = rounddown_pow_of_two(queue_count);

	if (queue_count != pf->rss_size) {
		i40e_prep_for_reset(pf);

		pf->rss_size = queue_count;

		i40e_reset_and_rebuild(pf, true);
		i40e_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
	return pf->rss_size;
}

/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED    |
		    I40E_FLAG_RX_1BUF_ENABLED;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
		pf->rss_size = rounddown_pow_of_two(pf->rss_size);
	} else {
		pf->rss_size = 1;
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		} else {
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		}
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
	}

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;
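	/* Editor's note -- illustrative sketch, not driver code: the two
	 * "pile" trackers allocated below are a header plus a trailing array
	 * of u16 entries, sized in a single kzalloc. For a hypothetical
	 * 128-queue function:
	 *
	 *	size = sizeof(struct i40e_lump_tracking) + sizeof(u16) * 128;
	 *
	 * so list[0..127] sits directly after num_entries/search_hint in the
	 * same allocation.
	 */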
6461 /* set up queue assignment tracking */ 6462 size = sizeof(struct i40e_lump_tracking) 6463 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); 6464 pf->qp_pile = kzalloc(size, GFP_KERNEL); 6465 if (!pf->qp_pile) { 6466 err = -ENOMEM; 6467 goto sw_init_done; 6468 } 6469 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; 6470 pf->qp_pile->search_hint = 0; 6471 6472 /* set up vector assignment tracking */ 6473 size = sizeof(struct i40e_lump_tracking) 6474 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); 6475 pf->irq_pile = kzalloc(size, GFP_KERNEL); 6476 if (!pf->irq_pile) { 6477 kfree(pf->qp_pile); 6478 err = -ENOMEM; 6479 goto sw_init_done; 6480 } 6481 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; 6482 pf->irq_pile->search_hint = 0; 6483 6484 mutex_init(&pf->switch_mutex); 6485 6486 sw_init_done: 6487 return err; 6488 } 6489 6490 /** 6491 * i40e_set_ntuple - set the ntuple feature flag and take action 6492 * @pf: board private structure to initialize 6493 * @features: the feature set that the stack is suggesting 6494 * 6495 * returns a bool to indicate if reset needs to happen 6496 **/ 6497 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) 6498 { 6499 bool need_reset = false; 6500 6501 /* Check if Flow Director n-tuple support was enabled or disabled. If 6502 * the state changed, we need to reset. 6503 */ 6504 if (features & NETIF_F_NTUPLE) { 6505 /* Enable filters and mark for reset */ 6506 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 6507 need_reset = true; 6508 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 6509 } else { 6510 /* turn off filters, mark for reset and clear SW filter list */ 6511 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 6512 need_reset = true; 6513 i40e_fdir_filter_exit(pf); 6514 } 6515 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 6516 /* if ATR was disabled it can be re-enabled. 
*/ 6517 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) 6518 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 6519 } 6520 return need_reset; 6521 } 6522 6523 /** 6524 * i40e_set_features - set the netdev feature flags 6525 * @netdev: ptr to the netdev being adjusted 6526 * @features: the feature set that the stack is suggesting 6527 **/ 6528 static int i40e_set_features(struct net_device *netdev, 6529 netdev_features_t features) 6530 { 6531 struct i40e_netdev_priv *np = netdev_priv(netdev); 6532 struct i40e_vsi *vsi = np->vsi; 6533 struct i40e_pf *pf = vsi->back; 6534 bool need_reset; 6535 6536 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6537 i40e_vlan_stripping_enable(vsi); 6538 else 6539 i40e_vlan_stripping_disable(vsi); 6540 6541 need_reset = i40e_set_ntuple(pf, features); 6542 6543 if (need_reset) 6544 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); 6545 6546 return 0; 6547 } 6548 6549 #ifdef CONFIG_I40E_VXLAN 6550 /** 6551 * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port 6552 * @pf: board private structure 6553 * @port: The UDP port to look up 6554 * 6555 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found 6556 **/ 6557 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) 6558 { 6559 u8 i; 6560 6561 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 6562 if (pf->vxlan_ports[i] == port) 6563 return i; 6564 } 6565 6566 return i; 6567 } 6568 6569 /** 6570 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up 6571 * @netdev: This physical port's netdev 6572 * @sa_family: Socket Family that VXLAN is notifying us about 6573 * @port: New UDP port number that VXLAN started listening to 6574 **/ 6575 static void i40e_add_vxlan_port(struct net_device *netdev, 6576 sa_family_t sa_family, __be16 port) 6577 { 6578 struct i40e_netdev_priv *np = netdev_priv(netdev); 6579 struct i40e_vsi *vsi = np->vsi; 6580 struct i40e_pf *pf = vsi->back; 6581 u8 next_idx; 6582 u8 idx; 6583 6584 if (sa_family == AF_INET6) 6585 return; 6586 6587 idx = i40e_get_vxlan_port_idx(pf, port); 6588 6589 /* Check if port already exists */ 6590 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 6591 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); 6592 return; 6593 } 6594 6595 /* Now check if there is space to add the new port */ 6596 next_idx = i40e_get_vxlan_port_idx(pf, 0); 6597 6598 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 6599 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", 6600 ntohs(port)); 6601 return; 6602 } 6603 6604 /* New port: add it and mark its index in the bitmap */ 6605 pf->vxlan_ports[next_idx] = port; 6606 pf->pending_vxlan_bitmap |= (1 << next_idx); 6607 6608 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 6609 } 6610 6611 /** 6612 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away 6613 * @netdev: This physical port's netdev 6614 * @sa_family: Socket Family that VXLAN is notifying us about 6615 * @port: UDP port number that VXLAN stopped listening to 6616 **/ 6617 static void i40e_del_vxlan_port(struct net_device *netdev, 6618 sa_family_t sa_family, __be16 port) 6619 { 6620 struct i40e_netdev_priv *np = netdev_priv(netdev); 6621 struct i40e_vsi *vsi = np->vsi; 6622 struct i40e_pf *pf = vsi->back; 6623 u8 idx; 6624 6625 if (sa_family == AF_INET6) 6626 return; 6627 6628 idx = i40e_get_vxlan_port_idx(pf, port); 6629 6630 /* Check if port already exists */ 6631 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 6632 /* if port exists, set it to 0 (mark for deletion) 6633 * and make 
it pending 6634 */ 6635 pf->vxlan_ports[idx] = 0; 6636 6637 pf->pending_vxlan_bitmap |= (1 << idx); 6638 6639 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 6640 } else { 6641 netdev_warn(netdev, "Port %d was not found, not deleting\n", 6642 ntohs(port)); 6643 } 6644 } 6645 6646 #endif 6647 static const struct net_device_ops i40e_netdev_ops = { 6648 .ndo_open = i40e_open, 6649 .ndo_stop = i40e_close, 6650 .ndo_start_xmit = i40e_lan_xmit_frame, 6651 .ndo_get_stats64 = i40e_get_netdev_stats_struct, 6652 .ndo_set_rx_mode = i40e_set_rx_mode, 6653 .ndo_validate_addr = eth_validate_addr, 6654 .ndo_set_mac_address = i40e_set_mac, 6655 .ndo_change_mtu = i40e_change_mtu, 6656 .ndo_do_ioctl = i40e_ioctl, 6657 .ndo_tx_timeout = i40e_tx_timeout, 6658 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, 6659 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, 6660 #ifdef CONFIG_NET_POLL_CONTROLLER 6661 .ndo_poll_controller = i40e_netpoll, 6662 #endif 6663 .ndo_setup_tc = i40e_setup_tc, 6664 .ndo_set_features = i40e_set_features, 6665 .ndo_set_vf_mac = i40e_ndo_set_vf_mac, 6666 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 6667 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, 6668 .ndo_get_vf_config = i40e_ndo_get_vf_config, 6669 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 6670 #ifdef CONFIG_I40E_VXLAN 6671 .ndo_add_vxlan_port = i40e_add_vxlan_port, 6672 .ndo_del_vxlan_port = i40e_del_vxlan_port, 6673 #endif 6674 }; 6675 6676 /** 6677 * i40e_config_netdev - Setup the netdev flags 6678 * @vsi: the VSI being configured 6679 * 6680 * Returns 0 on success, negative value on failure 6681 **/ 6682 static int i40e_config_netdev(struct i40e_vsi *vsi) 6683 { 6684 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 6685 struct i40e_pf *pf = vsi->back; 6686 struct i40e_hw *hw = &pf->hw; 6687 struct i40e_netdev_priv *np; 6688 struct net_device *netdev; 6689 u8 mac_addr[ETH_ALEN]; 6690 int etherdev_size; 6691 6692 etherdev_size = sizeof(struct i40e_netdev_priv); 6693 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); 6694 if (!netdev) 6695 return -ENOMEM; 6696 6697 vsi->netdev = netdev; 6698 np = netdev_priv(netdev); 6699 np->vsi = vsi; 6700 6701 netdev->hw_enc_features |= NETIF_F_IP_CSUM | 6702 NETIF_F_GSO_UDP_TUNNEL | 6703 NETIF_F_TSO; 6704 6705 netdev->features = NETIF_F_SG | 6706 NETIF_F_IP_CSUM | 6707 NETIF_F_SCTP_CSUM | 6708 NETIF_F_HIGHDMA | 6709 NETIF_F_GSO_UDP_TUNNEL | 6710 NETIF_F_HW_VLAN_CTAG_TX | 6711 NETIF_F_HW_VLAN_CTAG_RX | 6712 NETIF_F_HW_VLAN_CTAG_FILTER | 6713 NETIF_F_IPV6_CSUM | 6714 NETIF_F_TSO | 6715 NETIF_F_TSO6 | 6716 NETIF_F_RXCSUM | 6717 NETIF_F_NTUPLE | 6718 NETIF_F_RXHASH | 6719 0; 6720 6721 /* copy netdev features into list of user selectable features */ 6722 netdev->hw_features |= netdev->features; 6723 6724 if (vsi->type == I40E_VSI_MAIN) { 6725 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 6726 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); 6727 } else { 6728 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 6729 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 6730 pf->vsi[pf->lan_vsi]->netdev->name); 6731 random_ether_addr(mac_addr); 6732 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); 6733 } 6734 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); 6735 6736 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); 6737 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN); 6738 /* vlan gets same features (except vlan offload) 6739 * after any tweaks for specific VSI types 6740 */ 6741 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | 6742 NETIF_F_HW_VLAN_CTAG_RX 
						       NETIF_F_HW_VLAN_CTAG_RX |
						    NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
	return;
}

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get pf vsi config, err %d, aq_err %d\n",
				 ret, pf->hw.aq.asq_last_status);
			return -ENOENT;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, aq_err=%d\n",
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
6838 */ 6839 ret = i40e_vsi_config_tc(vsi, enabled_tc); 6840 if (ret) { 6841 dev_info(&pf->pdev->dev, 6842 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", 6843 enabled_tc, ret, 6844 pf->hw.aq.asq_last_status); 6845 ret = -ENOENT; 6846 } 6847 } 6848 break; 6849 6850 case I40E_VSI_FDIR: 6851 ctxt.pf_num = hw->pf_id; 6852 ctxt.vf_num = 0; 6853 ctxt.uplink_seid = vsi->uplink_seid; 6854 ctxt.connection_type = 0x1; /* regular data port */ 6855 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 6856 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 6857 break; 6858 6859 case I40E_VSI_VMDQ2: 6860 ctxt.pf_num = hw->pf_id; 6861 ctxt.vf_num = 0; 6862 ctxt.uplink_seid = vsi->uplink_seid; 6863 ctxt.connection_type = 0x1; /* regular data port */ 6864 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; 6865 6866 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6867 6868 /* This VSI is connected to VEB so the switch_id 6869 * should be set to zero by default. 6870 */ 6871 ctxt.info.switch_id = 0; 6872 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); 6873 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6874 6875 /* Setup the VSI tx/rx queue map for TC0 only for now */ 6876 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 6877 break; 6878 6879 case I40E_VSI_SRIOV: 6880 ctxt.pf_num = hw->pf_id; 6881 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 6882 ctxt.uplink_seid = vsi->uplink_seid; 6883 ctxt.connection_type = 0x1; /* regular data port */ 6884 ctxt.flags = I40E_AQ_VSI_TYPE_VF; 6885 6886 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 6887 6888 /* This VSI is connected to VEB so the switch_id 6889 * should be set to zero by default. 6890 */ 6891 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 6892 6893 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 6894 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 6895 /* Setup the VSI tx/rx queue map for TC0 only for now */ 6896 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 6897 break; 6898 6899 default: 6900 return -ENODEV; 6901 } 6902 6903 if (vsi->type != I40E_VSI_MAIN) { 6904 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); 6905 if (ret) { 6906 dev_info(&vsi->back->pdev->dev, 6907 "add vsi failed, aq_err=%d\n", 6908 vsi->back->hw.aq.asq_last_status); 6909 ret = -ENOENT; 6910 goto err; 6911 } 6912 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 6913 vsi->info.valid_sections = 0; 6914 vsi->seid = ctxt.seid; 6915 vsi->id = ctxt.vsi_number; 6916 } 6917 6918 /* If macvlan filters already exist, force them to get loaded */ 6919 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { 6920 f->changed = true; 6921 f_count++; 6922 } 6923 if (f_count) { 6924 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; 6925 pf->flags |= I40E_FLAG_FILTER_SYNC; 6926 } 6927 6928 /* Update VSI BW information */ 6929 ret = i40e_vsi_get_bw_info(vsi); 6930 if (ret) { 6931 dev_info(&pf->pdev->dev, 6932 "couldn't get vsi bw info, err %d, aq_err %d\n", 6933 ret, pf->hw.aq.asq_last_status); 6934 /* VSI is already added so not tearing that up */ 6935 ret = 0; 6936 } 6937 6938 err: 6939 return ret; 6940 } 6941 6942 /** 6943 * i40e_vsi_release - Delete a VSI and free its resources 6944 * @vsi: the VSI being removed 6945 * 6946 * Returns 0 on success or < 0 on error 6947 **/ 6948 int i40e_vsi_release(struct i40e_vsi *vsi) 6949 { 6950 struct i40e_mac_filter *f, *ftmp; 6951 struct i40e_veb *veb = NULL; 6952 struct i40e_pf *pf; 6953 u16 
uplink_seid; 6954 int i, n; 6955 6956 pf = vsi->back; 6957 6958 /* release of a VEB-owner or last VSI is not allowed */ 6959 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { 6960 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", 6961 vsi->seid, vsi->uplink_seid); 6962 return -ENODEV; 6963 } 6964 if (vsi == pf->vsi[pf->lan_vsi] && 6965 !test_bit(__I40E_DOWN, &pf->state)) { 6966 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 6967 return -ENODEV; 6968 } 6969 6970 uplink_seid = vsi->uplink_seid; 6971 if (vsi->type != I40E_VSI_SRIOV) { 6972 if (vsi->netdev_registered) { 6973 vsi->netdev_registered = false; 6974 if (vsi->netdev) { 6975 /* results in a call to i40e_close() */ 6976 unregister_netdev(vsi->netdev); 6977 } 6978 } else { 6979 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 6980 i40e_down(vsi); 6981 i40e_vsi_free_irq(vsi); 6982 i40e_vsi_free_tx_resources(vsi); 6983 i40e_vsi_free_rx_resources(vsi); 6984 } 6985 i40e_vsi_disable_irq(vsi); 6986 } 6987 6988 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) 6989 i40e_del_filter(vsi, f->macaddr, f->vlan, 6990 f->is_vf, f->is_netdev); 6991 i40e_sync_vsi_filters(vsi); 6992 6993 i40e_vsi_delete(vsi); 6994 i40e_vsi_free_q_vectors(vsi); 6995 if (vsi->netdev) { 6996 free_netdev(vsi->netdev); 6997 vsi->netdev = NULL; 6998 } 6999 i40e_vsi_clear_rings(vsi); 7000 i40e_vsi_clear(vsi); 7001 7002 /* If this was the last thing on the VEB, except for the 7003 * controlling VSI, remove the VEB, which puts the controlling 7004 * VSI onto the next level down in the switch. 7005 * 7006 * Well, okay, there's one more exception here: don't remove 7007 * the orphan VEBs yet. We'll wait for an explicit remove request 7008 * from up the network stack. 7009 */ 7010 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7011 if (pf->vsi[i] && 7012 pf->vsi[i]->uplink_seid == uplink_seid && 7013 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 7014 n++; /* count the VSIs */ 7015 } 7016 } 7017 for (i = 0; i < I40E_MAX_VEB; i++) { 7018 if (!pf->veb[i]) 7019 continue; 7020 if (pf->veb[i]->uplink_seid == uplink_seid) 7021 n++; /* count the VEBs */ 7022 if (pf->veb[i]->seid == uplink_seid) 7023 veb = pf->veb[i]; 7024 } 7025 if (n == 0 && veb && veb->uplink_seid != 0) 7026 i40e_veb_release(veb); 7027 7028 return 0; 7029 } 7030 7031 /** 7032 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI 7033 * @vsi: ptr to the VSI 7034 * 7035 * This should only be called after i40e_vsi_mem_alloc() which allocates the 7036 * corresponding SW VSI structure and initializes num_queue_pairs for the 7037 * newly allocated VSI. 
7038 * 7039 * Returns 0 on success or negative on failure 7040 **/ 7041 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) 7042 { 7043 int ret = -ENOENT; 7044 struct i40e_pf *pf = vsi->back; 7045 7046 if (vsi->q_vectors[0]) { 7047 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", 7048 vsi->seid); 7049 return -EEXIST; 7050 } 7051 7052 if (vsi->base_vector) { 7053 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 7054 vsi->seid, vsi->base_vector); 7055 return -EEXIST; 7056 } 7057 7058 ret = i40e_vsi_alloc_q_vectors(vsi); 7059 if (ret) { 7060 dev_info(&pf->pdev->dev, 7061 "failed to allocate %d q_vector for VSI %d, ret=%d\n", 7062 vsi->num_q_vectors, vsi->seid, ret); 7063 vsi->num_q_vectors = 0; 7064 goto vector_setup_out; 7065 } 7066 7067 if (vsi->num_q_vectors) 7068 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 7069 vsi->num_q_vectors, vsi->idx); 7070 if (vsi->base_vector < 0) { 7071 dev_info(&pf->pdev->dev, 7072 "failed to get queue tracking for VSI %d, err=%d\n", 7073 vsi->seid, vsi->base_vector); 7074 i40e_vsi_free_q_vectors(vsi); 7075 ret = -ENOENT; 7076 goto vector_setup_out; 7077 } 7078 7079 vector_setup_out: 7080 return ret; 7081 } 7082 7083 /** 7084 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI 7085 * @vsi: pointer to the vsi. 7086 * 7087 * This re-allocates a vsi's queue resources. 7088 * 7089 * Returns pointer to the successfully allocated and configured VSI sw struct 7090 * on success, otherwise returns NULL on failure. 7091 **/ 7092 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) 7093 { 7094 struct i40e_pf *pf = vsi->back; 7095 u8 enabled_tc; 7096 int ret; 7097 7098 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 7099 i40e_vsi_clear_rings(vsi); 7100 7101 i40e_vsi_free_arrays(vsi, false); 7102 i40e_set_num_rings_in_vsi(vsi); 7103 ret = i40e_vsi_alloc_arrays(vsi, false); 7104 if (ret) 7105 goto err_vsi; 7106 7107 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); 7108 if (ret < 0) { 7109 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", 7110 vsi->seid, ret); 7111 goto err_vsi; 7112 } 7113 vsi->base_queue = ret; 7114 7115 /* Update the FW view of the VSI. Force a reset of TC and queue 7116 * layout configurations. 7117 */ 7118 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 7119 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 7120 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 7121 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 7122 7123 /* assign it some queues */ 7124 ret = i40e_alloc_rings(vsi); 7125 if (ret) 7126 goto err_rings; 7127 7128 /* map all of the rings to the q_vectors */ 7129 i40e_vsi_map_rings_to_vectors(vsi); 7130 return vsi; 7131 7132 err_rings: 7133 i40e_vsi_free_q_vectors(vsi); 7134 if (vsi->netdev_registered) { 7135 vsi->netdev_registered = false; 7136 unregister_netdev(vsi->netdev); 7137 free_netdev(vsi->netdev); 7138 vsi->netdev = NULL; 7139 } 7140 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); 7141 err_vsi: 7142 i40e_vsi_clear(vsi); 7143 return NULL; 7144 } 7145 7146 /** 7147 * i40e_vsi_setup - Set up a VSI by a given type 7148 * @pf: board private structure 7149 * @type: VSI type 7150 * @uplink_seid: the switch element to link to 7151 * @param1: usage depends upon VSI type. For VF types, indicates VF id 7152 * 7153 * This allocates the sw VSI structure and its queue resources, then add a VSI 7154 * to the identified VEB. 
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);

		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
			    vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
			 vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}

/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
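/* Editor's note -- illustrative worked example, not driver code: the two
 * little-endian 16-bit tc_bw_max words are glued into one 32-bit value that
 * carries a 4-bit quanta field per traffic class, of which only the low
 * 3 bits are kept. With a hypothetical tc_bw_max of 0x00325041:
 *
 *	TC0: (0x00325041 >> 0)  & 0x7 == 1
 *	TC1: (0x00325041 >> 4)  & 0x7 == 4
 *	TC2: (0x00325041 >> 8)  & 0x7 == 0
 *	TC3: (0x00325041 >> 12) & 0x7 == 5
 */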
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
7445 */ 7446 if (pf->veb[veb_idx]) 7447 i40e_veb_release(pf->veb[veb_idx]); 7448 } 7449 7450 /** 7451 * i40e_veb_clear - remove veb struct 7452 * @veb: the veb to remove 7453 **/ 7454 static void i40e_veb_clear(struct i40e_veb *veb) 7455 { 7456 if (!veb) 7457 return; 7458 7459 if (veb->pf) { 7460 struct i40e_pf *pf = veb->pf; 7461 7462 mutex_lock(&pf->switch_mutex); 7463 if (pf->veb[veb->idx] == veb) 7464 pf->veb[veb->idx] = NULL; 7465 mutex_unlock(&pf->switch_mutex); 7466 } 7467 7468 kfree(veb); 7469 } 7470 7471 /** 7472 * i40e_veb_release - Delete a VEB and free its resources 7473 * @veb: the VEB being removed 7474 **/ 7475 void i40e_veb_release(struct i40e_veb *veb) 7476 { 7477 struct i40e_vsi *vsi = NULL; 7478 struct i40e_pf *pf; 7479 int i, n = 0; 7480 7481 pf = veb->pf; 7482 7483 /* find the remaining VSI and check for extras */ 7484 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7485 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 7486 n++; 7487 vsi = pf->vsi[i]; 7488 } 7489 } 7490 if (n != 1) { 7491 dev_info(&pf->pdev->dev, 7492 "can't remove VEB %d with %d VSIs left\n", 7493 veb->seid, n); 7494 return; 7495 } 7496 7497 /* move the remaining VSI to uplink veb */ 7498 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; 7499 if (veb->uplink_seid) { 7500 vsi->uplink_seid = veb->uplink_seid; 7501 if (veb->uplink_seid == pf->mac_seid) 7502 vsi->veb_idx = I40E_NO_VEB; 7503 else 7504 vsi->veb_idx = veb->veb_idx; 7505 } else { 7506 /* floating VEB */ 7507 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; 7508 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; 7509 } 7510 7511 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); 7512 i40e_veb_clear(veb); 7513 7514 return; 7515 } 7516 7517 /** 7518 * i40e_add_veb - create the VEB in the switch 7519 * @veb: the VEB to be instantiated 7520 * @vsi: the controlling VSI 7521 **/ 7522 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 7523 { 7524 bool is_default = false; 7525 bool is_cloud = false; 7526 int ret; 7527 7528 /* get a VEB from the hardware */ 7529 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, 7530 veb->enabled_tc, is_default, 7531 is_cloud, &veb->seid, NULL); 7532 if (ret) { 7533 dev_info(&veb->pf->pdev->dev, 7534 "couldn't add VEB, err %d, aq_err %d\n", 7535 ret, veb->pf->hw.aq.asq_last_status); 7536 return -EPERM; 7537 } 7538 7539 /* get statistics counter */ 7540 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, 7541 &veb->stats_idx, NULL, NULL, NULL); 7542 if (ret) { 7543 dev_info(&veb->pf->pdev->dev, 7544 "couldn't get VEB statistics idx, err %d, aq_err %d\n", 7545 ret, veb->pf->hw.aq.asq_last_status); 7546 return -EPERM; 7547 } 7548 ret = i40e_veb_get_bw_info(veb); 7549 if (ret) { 7550 dev_info(&veb->pf->pdev->dev, 7551 "couldn't get VEB bw info, err %d, aq_err %d\n", 7552 ret, veb->pf->hw.aq.asq_last_status); 7553 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); 7554 return -ENOENT; 7555 } 7556 7557 vsi->uplink_seid = veb->seid; 7558 vsi->veb_idx = veb->idx; 7559 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; 7560 7561 return 0; 7562 } 7563 7564 /** 7565 * i40e_veb_setup - Set up a VEB 7566 * @pf: board private structure 7567 * @flags: VEB setup flags 7568 * @uplink_seid: the switch element to link to 7569 * @vsi_seid: the initial VSI seid 7570 * @enabled_tc: Enabled TC bit-map 7571 * 7572 * This allocates the sw VEB structure and links it into the switch 7573 * It is possible and legal for this to be a duplicate of an already 7574 * existing VEB. 
It is also possible for both uplink and vsi seids 7575 * to be zero, in order to create a floating VEB. 7576 * 7577 * Returns pointer to the successfully allocated VEB sw struct on 7578 * success, otherwise returns NULL on failure. 7579 **/ 7580 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, 7581 u16 uplink_seid, u16 vsi_seid, 7582 u8 enabled_tc) 7583 { 7584 struct i40e_veb *veb, *uplink_veb = NULL; 7585 int vsi_idx, veb_idx; 7586 int ret; 7587 7588 /* if one seid is 0, the other must be 0 to create a floating relay */ 7589 if ((uplink_seid == 0 || vsi_seid == 0) && 7590 (uplink_seid + vsi_seid != 0)) { 7591 dev_info(&pf->pdev->dev, 7592 "one, not both seid's are 0: uplink=%d vsi=%d\n", 7593 uplink_seid, vsi_seid); 7594 return NULL; 7595 } 7596 7597 /* make sure there is such a vsi and uplink */ 7598 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++) 7599 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 7600 break; 7601 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) { 7602 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 7603 vsi_seid); 7604 return NULL; 7605 } 7606 7607 if (uplink_seid && uplink_seid != pf->mac_seid) { 7608 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { 7609 if (pf->veb[veb_idx] && 7610 pf->veb[veb_idx]->seid == uplink_seid) { 7611 uplink_veb = pf->veb[veb_idx]; 7612 break; 7613 } 7614 } 7615 if (!uplink_veb) { 7616 dev_info(&pf->pdev->dev, 7617 "uplink seid %d not found\n", uplink_seid); 7618 return NULL; 7619 } 7620 } 7621 7622 /* get veb sw struct */ 7623 veb_idx = i40e_veb_mem_alloc(pf); 7624 if (veb_idx < 0) 7625 goto err_alloc; 7626 veb = pf->veb[veb_idx]; 7627 veb->flags = flags; 7628 veb->uplink_seid = uplink_seid; 7629 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); 7630 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); 7631 7632 /* create the VEB in the switch */ 7633 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); 7634 if (ret) 7635 goto err_veb; 7636 7637 return veb; 7638 7639 err_veb: 7640 i40e_veb_clear(veb); 7641 err_alloc: 7642 return NULL; 7643 } 7644 7645 /** 7646 * i40e_setup_pf_switch_element - set pf vars based on switch type 7647 * @pf: board private structure 7648 * @ele: element we are building info from 7649 * @num_reported: total number of elements 7650 * @printconfig: should we print the contents 7651 * 7652 * helper function to assist in extracting a few useful SEID values. 7653 **/ 7654 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, 7655 struct i40e_aqc_switch_config_element_resp *ele, 7656 u16 num_reported, bool printconfig) 7657 { 7658 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); 7659 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); 7660 u8 element_type = ele->element_type; 7661 u16 seid = le16_to_cpu(ele->seid); 7662 7663 if (printconfig) 7664 dev_info(&pf->pdev->dev, 7665 "type=%d seid=%d uplink=%d downlink=%d\n", 7666 element_type, seid, uplink_seid, downlink_seid); 7667 7668 switch (element_type) { 7669 case I40E_SWITCH_ELEMENT_TYPE_MAC: 7670 pf->mac_seid = seid; 7671 break; 7672 case I40E_SWITCH_ELEMENT_TYPE_VEB: 7673 /* Main VEB? 
*/ 7674 if (uplink_seid != pf->mac_seid) 7675 break; 7676 if (pf->lan_veb == I40E_NO_VEB) { 7677 int v; 7678 7679 /* find existing or else empty VEB */ 7680 for (v = 0; v < I40E_MAX_VEB; v++) { 7681 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { 7682 pf->lan_veb = v; 7683 break; 7684 } 7685 } 7686 if (pf->lan_veb == I40E_NO_VEB) { 7687 v = i40e_veb_mem_alloc(pf); 7688 if (v < 0) 7689 break; 7690 pf->lan_veb = v; 7691 } 7692 } 7693 7694 pf->veb[pf->lan_veb]->seid = seid; 7695 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; 7696 pf->veb[pf->lan_veb]->pf = pf; 7697 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; 7698 break; 7699 case I40E_SWITCH_ELEMENT_TYPE_VSI: 7700 if (num_reported != 1) 7701 break; 7702 /* This is immediately after a reset so we can assume this is 7703 * the PF's VSI 7704 */ 7705 pf->mac_seid = uplink_seid; 7706 pf->pf_seid = downlink_seid; 7707 pf->main_vsi_seid = seid; 7708 if (printconfig) 7709 dev_info(&pf->pdev->dev, 7710 "pf_seid=%d main_vsi_seid=%d\n", 7711 pf->pf_seid, pf->main_vsi_seid); 7712 break; 7713 case I40E_SWITCH_ELEMENT_TYPE_PF: 7714 case I40E_SWITCH_ELEMENT_TYPE_VF: 7715 case I40E_SWITCH_ELEMENT_TYPE_EMP: 7716 case I40E_SWITCH_ELEMENT_TYPE_BMC: 7717 case I40E_SWITCH_ELEMENT_TYPE_PE: 7718 case I40E_SWITCH_ELEMENT_TYPE_PA: 7719 /* ignore these for now */ 7720 break; 7721 default: 7722 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", 7723 element_type, seid); 7724 break; 7725 } 7726 } 7727 7728 /** 7729 * i40e_fetch_switch_configuration - Get switch config from firmware 7730 * @pf: board private structure 7731 * @printconfig: should we print the contents 7732 * 7733 * Get the current switch configuration from the device and 7734 * extract a few useful SEID values. 7735 **/ 7736 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) 7737 { 7738 struct i40e_aqc_get_switch_config_resp *sw_config; 7739 u16 next_seid = 0; 7740 int ret = 0; 7741 u8 *aq_buf; 7742 int i; 7743 7744 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); 7745 if (!aq_buf) 7746 return -ENOMEM; 7747 7748 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 7749 do { 7750 u16 num_reported, num_total; 7751 7752 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, 7753 I40E_AQ_LARGE_BUF, 7754 &next_seid, NULL); 7755 if (ret) { 7756 dev_info(&pf->pdev->dev, 7757 "get switch config failed %d aq_err=%x\n", 7758 ret, pf->hw.aq.asq_last_status); 7759 kfree(aq_buf); 7760 return -ENOENT; 7761 } 7762 7763 num_reported = le16_to_cpu(sw_config->header.num_reported); 7764 num_total = le16_to_cpu(sw_config->header.num_total); 7765 7766 if (printconfig) 7767 dev_info(&pf->pdev->dev, 7768 "header: %d reported %d total\n", 7769 num_reported, num_total); 7770 7771 if (num_reported) { 7772 int sz = sizeof(*sw_config) * num_reported; 7773 7774 kfree(pf->sw_config); 7775 pf->sw_config = kzalloc(sz, GFP_KERNEL); 7776 if (pf->sw_config) 7777 memcpy(pf->sw_config, sw_config, sz); 7778 } 7779 7780 for (i = 0; i < num_reported; i++) { 7781 struct i40e_aqc_switch_config_element_resp *ele = 7782 &sw_config->element[i]; 7783 7784 i40e_setup_pf_switch_element(pf, ele, num_reported, 7785 printconfig); 7786 } 7787 } while (next_seid != 0); 7788 7789 kfree(aq_buf); 7790 return ret; 7791 } 7792 7793 /** 7794 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset 7795 * @pf: board private structure 7796 * @reinit: if the Main VSI needs to be re-initialized.
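 *
 * A minimal call sketch (illustrative only; the reinit=true caller is
 * assumed to be the reset-recovery path, which is not shown here):
 *
 *	i40e_setup_pf_switch(pf, false);	first-time setup from probe
 *	i40e_setup_pf_switch(pf, true);		rebuild the main VSI after reset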
7797 * 7798 * Returns 0 on success, negative value on failure 7799 **/ 7800 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) 7801 { 7802 u32 rxfc = 0, txfc = 0, rxfc_reg; 7803 int ret; 7804 7805 /* find out what's out there already */ 7806 ret = i40e_fetch_switch_configuration(pf, false); 7807 if (ret) { 7808 dev_info(&pf->pdev->dev, 7809 "couldn't fetch switch config, err %d, aq_err %d\n", 7810 ret, pf->hw.aq.asq_last_status); 7811 return ret; 7812 } 7813 i40e_pf_reset_stats(pf); 7814 7815 /* first time setup */ 7816 if (pf->lan_vsi == I40E_NO_VSI || reinit) { 7817 struct i40e_vsi *vsi = NULL; 7818 u16 uplink_seid; 7819 7820 /* Set up the PF VSI associated with the PF's main VSI 7821 * that is already in the HW switch 7822 */ 7823 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) 7824 uplink_seid = pf->veb[pf->lan_veb]->seid; 7825 else 7826 uplink_seid = pf->mac_seid; 7827 if (pf->lan_vsi == I40E_NO_VSI) 7828 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); 7829 else if (reinit) 7830 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); 7831 if (!vsi) { 7832 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); 7833 i40e_fdir_teardown(pf); 7834 return -EAGAIN; 7835 } 7836 } else { 7837 /* force a reset of TC and queue layout configurations */ 7838 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; 7839 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; 7840 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; 7841 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); 7842 } 7843 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); 7844 7845 i40e_fdir_sb_setup(pf); 7846 7847 /* Setup static PF queue filter control settings */ 7848 ret = i40e_setup_pf_filter_control(pf); 7849 if (ret) { 7850 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", 7851 ret); 7852 /* Failure here should not stop continuing other steps */ 7853 } 7854 7855 /* enable RSS in the HW, even for only one queue, as the stack can use 7856 * the hash 7857 */ 7858 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) 7859 i40e_config_rss(pf); 7860 7861 /* fill in link information and enable LSE reporting */ 7862 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); 7863 i40e_link_event(pf); 7864 7865 /* Initialize user-specific link properties */ 7866 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & 7867 I40E_AQ_AN_COMPLETED) ? 
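/* Illustrative truth table for the auto-negotiated pause handling just
	 * below; this restates the if/else chain and adds no behavior:
	 *
	 *	LINK_PAUSE_TX	LINK_PAUSE_RX	fc.current_mode
	 *	     1		      1		I40E_FC_FULL
	 *	     1		      0		I40E_FC_TX_PAUSE
	 *	     0		      1		I40E_FC_RX_PAUSE
	 *	     0		      0		I40E_FC_NONE
	 */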
true : false); 7868 /* requested_mode is set in probe or by ethtool */ 7869 if (!pf->fc_autoneg_status) 7870 goto no_autoneg; 7871 7872 if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) && 7873 (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)) 7874 pf->hw.fc.current_mode = I40E_FC_FULL; 7875 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) 7876 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE; 7877 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) 7878 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE; 7879 else 7880 pf->hw.fc.current_mode = I40E_FC_NONE; 7881 7882 /* sync the flow control settings with the auto-neg values */ 7883 switch (pf->hw.fc.current_mode) { 7884 case I40E_FC_FULL: 7885 txfc = 1; 7886 rxfc = 1; 7887 break; 7888 case I40E_FC_TX_PAUSE: 7889 txfc = 1; 7890 rxfc = 0; 7891 break; 7892 case I40E_FC_RX_PAUSE: 7893 txfc = 0; 7894 rxfc = 1; 7895 break; 7896 case I40E_FC_NONE: 7897 case I40E_FC_DEFAULT: 7898 txfc = 0; 7899 rxfc = 0; 7900 break; 7901 case I40E_FC_PFC: 7902 /* TBD */ 7903 break; 7904 /* no default case, we have to handle all possibilities here */ 7905 } 7906 7907 wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT); 7908 7909 rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) & 7910 ~I40E_PRTDCB_MFLCN_RFCE_MASK; 7911 rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT); 7912 7913 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg); 7914 7915 goto fc_complete; 7916 7917 no_autoneg: 7918 /* disable L2 flow control, user can turn it on if they wish */ 7919 wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0); 7920 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) & 7921 ~I40E_PRTDCB_MFLCN_RFCE_MASK); 7922 7923 fc_complete: 7924 i40e_ptp_init(pf); 7925 7926 return ret; 7927 } 7928 7929 /** 7930 * i40e_determine_queue_usage - Work out queue distribution 7931 * @pf: board private structure 7932 **/ 7933 static void i40e_determine_queue_usage(struct i40e_pf *pf) 7934 { 7935 int queues_left; 7936 7937 pf->num_lan_qps = 0; 7938 7939 /* Find the max queues to be put into basic use. We'll always be 7940 * using TC0, whether or not DCB is running, and TC0 will get the 7941 * big RSS set. 7942 */ 7943 queues_left = pf->hw.func_caps.num_tx_qp; 7944 7945 if ((queues_left == 1) || 7946 !(pf->flags & I40E_FLAG_MSIX_ENABLED) || 7947 !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED | 7948 I40E_FLAG_DCB_ENABLED))) { 7949 /* one qp for PF, no queues for anything else */ 7950 queues_left = 0; 7951 pf->rss_size = pf->num_lan_qps = 1; 7952 7953 /* make sure all the fancies are disabled */ 7954 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 7955 I40E_FLAG_FD_SB_ENABLED | 7956 I40E_FLAG_FD_ATR_ENABLED | 7957 I40E_FLAG_DCB_ENABLED | 7958 I40E_FLAG_SRIOV_ENABLED | 7959 I40E_FLAG_VMDQ_ENABLED); 7960 } else { 7961 /* Not enough queues for all TCs */ 7962 if ((pf->flags & I40E_FLAG_DCB_ENABLED) && 7963 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 7964 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 7965 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 7966 } 7967 pf->num_lan_qps = pf->rss_size_max; 7968 queues_left -= pf->num_lan_qps; 7969 } 7970 7971 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7972 if (queues_left > 1) { 7973 queues_left -= 1; /* save 1 queue for FD */ 7974 } else { 7975 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7976 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n"); 7977 } 7978 } 7979 7980 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 7981 pf->num_vf_qps && pf->num_req_vfs && queues_left) { 7982 pf->num_req_vfs = min_t(int, pf->num_req_vfs, 7983 (queues_left / pf->num_vf_qps)); 7984 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); 7985 } 7986 7987 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && 7988 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { 7989 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, 7990 (queues_left / pf->num_vmdq_qps)); 7991 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); 7992 } 7993 7994 pf->queues_left = queues_left; 7995 return; 7996 } 7997 7998 /** 7999 * i40e_setup_pf_filter_control - Setup PF static filter control 8000 * @pf: PF to be setup 8001 * 8002 * i40e_setup_pf_filter_control sets up a pf's initial filter control 8003 * settings. If PE/FCoE are enabled then it will also set the per PF 8004 * based filter sizes required for them. It also enables Flow director, 8005 * ethertype and macvlan type filter settings for the pf. 8006 * 8007 * Returns 0 on success, negative on failure 8008 **/ 8009 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) 8010 { 8011 struct i40e_filter_control_settings *settings = &pf->filter_settings; 8012 8013 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; 8014 8015 /* Flow Director is enabled */ 8016 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) 8017 settings->enable_fdir = true; 8018 8019 /* Ethtype and MACVLAN filters enabled for PF */ 8020 settings->enable_ethtype = true; 8021 settings->enable_macvlan = true; 8022 8023 if (i40e_set_filter_control(&pf->hw, settings)) 8024 return -ENOENT; 8025 8026 return 0; 8027 } 8028 8029 #define INFO_STRING_LEN 255 8030 static void i40e_print_features(struct i40e_pf *pf) 8031 { 8032 struct i40e_hw *hw = &pf->hw; 8033 char *buf, *string; 8034 8035 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); 8036 if (!string) { 8037 dev_err(&pf->pdev->dev, "Features string allocation failed\n"); 8038 return; 8039 } 8040 8041 buf = string; 8042 8043 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); 8044 #ifdef CONFIG_PCI_IOV 8045 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); 8046 #endif 8047 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, 8048 pf->vsi[pf->lan_vsi]->num_queue_pairs); 8049 8050 if (pf->flags & I40E_FLAG_RSS_ENABLED) 8051 buf += sprintf(buf, "RSS "); 8052 buf += sprintf(buf, "FDir "); 8053 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 8054 buf += sprintf(buf, "ATR "); 8055 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 8056 buf += sprintf(buf, "NTUPLE "); 8057 if (pf->flags & I40E_FLAG_DCB_ENABLED) 8058 buf += sprintf(buf, "DCB "); 8059 if (pf->flags & I40E_FLAG_PTP) 8060 buf += sprintf(buf, "PTP "); 8061 8062 BUG_ON(buf > (string + INFO_STRING_LEN)); 8063 dev_info(&pf->pdev->dev, "%s\n", string); 8064 kfree(string); 8065 } 8066 8067 /** 8068 * i40e_probe - Device initialization routine 8069 * @pdev: PCI device information struct 8070 * @ent: entry in i40e_pci_tbl 8071 * 8072 * i40e_probe initializes a pf identified by a pci_dev structure. 8073 * The OS initialization, configuring of the pf private structure, 8074 * and a hardware reset occur. 
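 *
 * Reader's guide to the major stages below (a summary of this function's
 * body, not a normative sequence):
 *	enable PCI device and DMA mask -> map BAR0 registers -> PF reset ->
 *	admin queue init -> capabilities and HMC setup -> switch setup ->
 *	misc interrupt vector -> service timer and task start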
8075 * 8076 * Returns 0 on success, negative on failure 8077 **/ 8078 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 8079 { 8080 struct i40e_driver_version dv; 8081 struct i40e_pf *pf; 8082 struct i40e_hw *hw; 8083 static u16 pfs_found; 8084 u16 link_status; 8085 int err = 0; 8086 u32 len; 8087 8088 err = pci_enable_device_mem(pdev); 8089 if (err) 8090 return err; 8091 8092 /* set up for high or low dma */ 8093 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 8094 if (err) { 8095 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 8096 if (err) { 8097 dev_err(&pdev->dev, 8098 "DMA configuration failed: 0x%x\n", err); 8099 goto err_dma; 8100 } 8101 } 8102 8103 /* set up pci connections */ 8104 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 8105 IORESOURCE_MEM), i40e_driver_name); 8106 if (err) { 8107 dev_info(&pdev->dev, 8108 "pci_request_selected_regions failed %d\n", err); 8109 goto err_pci_reg; 8110 } 8111 8112 pci_enable_pcie_error_reporting(pdev); 8113 pci_set_master(pdev); 8114 8115 /* Now that we have a PCI connection, we need to do the 8116 * low level device setup. This is primarily setting up 8117 * the Admin Queue structures and then querying for the 8118 * device's current profile information. 8119 */ 8120 pf = kzalloc(sizeof(*pf), GFP_KERNEL); 8121 if (!pf) { 8122 err = -ENOMEM; 8123 goto err_pf_alloc; 8124 } 8125 pf->next_vsi = 0; 8126 pf->pdev = pdev; 8127 set_bit(__I40E_DOWN, &pf->state); 8128 8129 hw = &pf->hw; 8130 hw->back = pf; 8131 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 8132 pci_resource_len(pdev, 0)); 8133 if (!hw->hw_addr) { 8134 err = -EIO; 8135 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", 8136 (unsigned int)pci_resource_start(pdev, 0), 8137 (unsigned int)pci_resource_len(pdev, 0), err); 8138 goto err_ioremap; 8139 } 8140 hw->vendor_id = pdev->vendor; 8141 hw->device_id = pdev->device; 8142 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 8143 hw->subsystem_vendor_id = pdev->subsystem_vendor; 8144 hw->subsystem_device_id = pdev->subsystem_device; 8145 hw->bus.device = PCI_SLOT(pdev->devfn); 8146 hw->bus.func = PCI_FUNC(pdev->devfn); 8147 pf->instance = pfs_found; 8148 8149 /* do a special CORER for clearing PXE mode once at init */ 8150 if (hw->revision_id == 0 && 8151 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { 8152 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 8153 i40e_flush(hw); 8154 msleep(200); 8155 pf->corer_count++; 8156 8157 i40e_clear_pxe_mode(hw); 8158 } 8159 8160 /* Reset here to make sure all is clean and to define PF 'n' */ 8161 err = i40e_pf_reset(hw); 8162 if (err) { 8163 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); 8164 goto err_pf_reset; 8165 } 8166 pf->pfr_count++; 8167 8168 hw->aq.num_arq_entries = I40E_AQ_LEN; 8169 hw->aq.num_asq_entries = I40E_AQ_LEN; 8170 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; 8171 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 8172 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 8173 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, 8174 "%s-pf%d:misc", 8175 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id); 8176 8177 err = i40e_init_shared_code(hw); 8178 if (err) { 8179 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); 8180 goto err_pf_reset; 8181 } 8182 8183 /* set up a default setting for link flow control */ 8184 pf->hw.fc.requested_mode = I40E_FC_NONE; 8185 8186 err = i40e_init_adminq(hw); 8187 dev_info(&pdev->dev, "%s\n", 
i40e_fw_version_str(hw)); 8188 if (err) { 8189 dev_info(&pdev->dev, 8190 "init_adminq failed: %d expecting API %02x.%02x\n", 8191 err, 8192 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR); 8193 goto err_pf_reset; 8194 } 8195 8196 i40e_verify_eeprom(pf); 8197 8198 i40e_clear_pxe_mode(hw); 8199 err = i40e_get_capabilities(pf); 8200 if (err) 8201 goto err_adminq_setup; 8202 8203 err = i40e_sw_init(pf); 8204 if (err) { 8205 dev_info(&pdev->dev, "sw_init failed: %d\n", err); 8206 goto err_sw_init; 8207 } 8208 8209 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 8210 hw->func_caps.num_rx_qp, 8211 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); 8212 if (err) { 8213 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); 8214 goto err_init_lan_hmc; 8215 } 8216 8217 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 8218 if (err) { 8219 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); 8220 err = -ENOENT; 8221 goto err_configure_lan_hmc; 8222 } 8223 8224 i40e_get_mac_addr(hw, hw->mac.addr); 8225 if (!is_valid_ether_addr(hw->mac.addr)) { 8226 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 8227 err = -EIO; 8228 goto err_mac_addr; 8229 } 8230 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); 8231 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); 8232 8233 pci_set_drvdata(pdev, pf); 8234 pci_save_state(pdev); 8235 #ifdef CONFIG_I40E_DCB 8236 err = i40e_init_pf_dcb(pf); 8237 if (err) { 8238 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); 8239 pf->flags &= ~I40E_FLAG_DCB_ENABLED; 8240 goto err_init_dcb; 8241 } 8242 #endif /* CONFIG_I40E_DCB */ 8243 8244 /* set up periodic task facility */ 8245 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); 8246 pf->service_timer_period = HZ; 8247 8248 INIT_WORK(&pf->service_task, i40e_service_task); 8249 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 8250 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; 8251 pf->link_check_timeout = jiffies; 8252 8253 /* WoL defaults to disabled */ 8254 pf->wol_en = false; 8255 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 8256 8257 /* set up the main switch operations */ 8258 i40e_determine_queue_usage(pf); 8259 i40e_init_interrupt_scheme(pf); 8260 8261 /* Set up the *vsi struct based on the number of VSIs in the HW, 8262 * and set up our local tracking of the MAIN PF vsi. 8263 */ 8264 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis; 8265 pf->vsi = kzalloc(len, GFP_KERNEL); 8266 if (!pf->vsi) { 8267 err = -ENOMEM; 8268 goto err_switch_setup; 8269 } 8270 8271 err = i40e_setup_pf_switch(pf, false); 8272 if (err) { 8273 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 8274 goto err_vsis; 8275 } 8276 8277 /* The main driver is (mostly) up and happy. We need to set this state 8278 * before setting up the misc vector or we get a race and the vector 8279 * ends up disabled forever. 8280 */ 8281 clear_bit(__I40E_DOWN, &pf->state); 8282 8283 /* In case of MSIX we are going to setup the misc vector right here 8284 * to handle admin queue events etc. In case of legacy and MSI 8285 * the misc functionality and queue processing is combined in 8286 * the same vector and that gets setup at open. 
8287 */ 8288 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 8289 err = i40e_setup_misc_vector(pf); 8290 if (err) { 8291 dev_info(&pdev->dev, 8292 "setup of misc vector failed: %d\n", err); 8293 goto err_vsis; 8294 } 8295 } 8296 8297 /* prep for VF support */ 8298 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && 8299 (pf->flags & I40E_FLAG_MSIX_ENABLED) && 8300 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { 8301 u32 val; 8302 8303 /* disable link interrupts for VFs */ 8304 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); 8305 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 8306 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 8307 i40e_flush(hw); 8308 8309 if (pci_num_vf(pdev)) { 8310 dev_info(&pdev->dev, 8311 "Active VFs found, allocating resources.\n"); 8312 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); 8313 if (err) 8314 dev_info(&pdev->dev, 8315 "Error %d allocating resources for existing VFs\n", 8316 err); 8317 } 8318 } 8319 8320 pfs_found++; 8321 8322 i40e_dbg_pf_init(pf); 8323 8324 /* tell the firmware that we're starting */ 8325 dv.major_version = DRV_VERSION_MAJOR; 8326 dv.minor_version = DRV_VERSION_MINOR; 8327 dv.build_version = DRV_VERSION_BUILD; 8328 dv.subbuild_version = 0; 8329 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 8330 8331 /* since everything's happy, start the service_task timer */ 8332 mod_timer(&pf->service_timer, 8333 round_jiffies(jiffies + pf->service_timer_period)); 8334 8335 /* Get the negotiated link width and speed from PCI config space */ 8336 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); 8337 8338 i40e_set_pci_config_data(hw, link_status); 8339 8340 dev_info(&pdev->dev, "PCI-Express: %s %s\n", 8341 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : 8342 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 8343 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : 8344 "Unknown"), 8345 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : 8346 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : 8347 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : 8348 hw->bus.width == i40e_bus_width_pcie_x1 ? 
"Width x1" : 8349 "Unknown")); 8350 8351 if (hw->bus.width < i40e_bus_width_pcie_x8 || 8352 hw->bus.speed < i40e_bus_speed_8000) { 8353 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 8354 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 8355 } 8356 8357 /* print a string summarizing features */ 8358 i40e_print_features(pf); 8359 8360 return 0; 8361 8362 /* Unwind what we've done if something failed in the setup */ 8363 err_vsis: 8364 set_bit(__I40E_DOWN, &pf->state); 8365 i40e_clear_interrupt_scheme(pf); 8366 kfree(pf->vsi); 8367 err_switch_setup: 8368 i40e_reset_interrupt_capability(pf); 8369 del_timer_sync(&pf->service_timer); 8370 #ifdef CONFIG_I40E_DCB 8371 err_init_dcb: 8372 #endif /* CONFIG_I40E_DCB */ 8373 err_mac_addr: 8374 err_configure_lan_hmc: 8375 (void)i40e_shutdown_lan_hmc(hw); 8376 err_init_lan_hmc: 8377 kfree(pf->qp_pile); 8378 kfree(pf->irq_pile); 8379 err_sw_init: 8380 err_adminq_setup: 8381 (void)i40e_shutdown_adminq(hw); 8382 err_pf_reset: 8383 iounmap(hw->hw_addr); 8384 err_ioremap: 8385 kfree(pf); 8386 err_pf_alloc: 8387 pci_disable_pcie_error_reporting(pdev); 8388 pci_release_selected_regions(pdev, 8389 pci_select_bars(pdev, IORESOURCE_MEM)); 8390 err_pci_reg: 8391 err_dma: 8392 pci_disable_device(pdev); 8393 return err; 8394 } 8395 8396 /** 8397 * i40e_remove - Device removal routine 8398 * @pdev: PCI device information struct 8399 * 8400 * i40e_remove is called by the PCI subsystem to alert the driver 8401 * that is should release a PCI device. This could be caused by a 8402 * Hot-Plug event, or because the driver is going to be removed from 8403 * memory. 8404 **/ 8405 static void i40e_remove(struct pci_dev *pdev) 8406 { 8407 struct i40e_pf *pf = pci_get_drvdata(pdev); 8408 i40e_status ret_code; 8409 u32 reg; 8410 int i; 8411 8412 i40e_dbg_pf_exit(pf); 8413 8414 i40e_ptp_stop(pf); 8415 8416 /* no more scheduling of any task */ 8417 set_bit(__I40E_DOWN, &pf->state); 8418 del_timer_sync(&pf->service_timer); 8419 cancel_work_sync(&pf->service_task); 8420 8421 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 8422 i40e_free_vfs(pf); 8423 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; 8424 } 8425 8426 i40e_fdir_teardown(pf); 8427 8428 /* If there is a switch structure or any orphans, remove them. 8429 * This will leave only the PF's VSI remaining. 8430 */ 8431 for (i = 0; i < I40E_MAX_VEB; i++) { 8432 if (!pf->veb[i]) 8433 continue; 8434 8435 if (pf->veb[i]->uplink_seid == pf->mac_seid || 8436 pf->veb[i]->uplink_seid == 0) 8437 i40e_switch_branch_release(pf->veb[i]); 8438 } 8439 8440 /* Now we can shutdown the PF's VSI, just before we kill 8441 * adminq and hmc. 
8442 */ 8443 if (pf->vsi[pf->lan_vsi]) 8444 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 8445 8446 i40e_stop_misc_vector(pf); 8447 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 8448 synchronize_irq(pf->msix_entries[0].vector); 8449 free_irq(pf->msix_entries[0].vector, pf); 8450 } 8451 8452 /* shutdown and destroy the HMC */ 8453 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 8454 if (ret_code) 8455 dev_warn(&pdev->dev, 8456 "Failed to destroy the HMC resources: %d\n", ret_code); 8457 8458 /* shutdown the adminq */ 8459 ret_code = i40e_shutdown_adminq(&pf->hw); 8460 if (ret_code) 8461 dev_warn(&pdev->dev, 8462 "Failed to destroy the Admin Queue resources: %d\n", 8463 ret_code); 8464 8465 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 8466 i40e_clear_interrupt_scheme(pf); 8467 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 8468 if (pf->vsi[i]) { 8469 i40e_vsi_clear_rings(pf->vsi[i]); 8470 i40e_vsi_clear(pf->vsi[i]); 8471 pf->vsi[i] = NULL; 8472 } 8473 } 8474 8475 for (i = 0; i < I40E_MAX_VEB; i++) { 8476 kfree(pf->veb[i]); 8477 pf->veb[i] = NULL; 8478 } 8479 8480 kfree(pf->qp_pile); 8481 kfree(pf->irq_pile); 8482 kfree(pf->sw_config); 8483 kfree(pf->vsi); 8484 8485 /* force a PF reset to clean anything leftover */ 8486 reg = rd32(&pf->hw, I40E_PFGEN_CTRL); 8487 wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); 8488 i40e_flush(&pf->hw); 8489 8490 iounmap(pf->hw.hw_addr); 8491 kfree(pf); 8492 pci_release_selected_regions(pdev, 8493 pci_select_bars(pdev, IORESOURCE_MEM)); 8494 8495 pci_disable_pcie_error_reporting(pdev); 8496 pci_disable_device(pdev); 8497 } 8498 8499 /** 8500 * i40e_pci_error_detected - warning that something funky happened in PCI land 8501 * @pdev: PCI device information struct 8502 * 8503 * Called to warn that something happened and the error handling steps 8504 * are in progress. Allows the driver to quiesce things, be ready for 8505 * remediation. 8506 **/ 8507 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, 8508 enum pci_channel_state error) 8509 { 8510 struct i40e_pf *pf = pci_get_drvdata(pdev); 8511 8512 dev_info(&pdev->dev, "%s: error %d\n", __func__, error); 8513 8514 /* shutdown all operations */ 8515 if (!test_bit(__I40E_SUSPENDED, &pf->state)) { 8516 rtnl_lock(); 8517 i40e_prep_for_reset(pf); 8518 rtnl_unlock(); 8519 } 8520 8521 /* Request a slot reset */ 8522 return PCI_ERS_RESULT_NEED_RESET; 8523 } 8524 8525 /** 8526 * i40e_pci_error_slot_reset - a PCI slot reset just happened 8527 * @pdev: PCI device information struct 8528 * 8529 * Called to find if the driver can work with the device now that 8530 * the pci slot has been reset. If a basic connection seems good 8531 * (registers are readable and have sane content) then return a 8532 * happy little PCI_ERS_RESULT_xxx. 
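 *
 * As a sketch, the "sane content" check below amounts to (a paraphrase
 * of the function body, not additional behavior):
 *
 *	reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
 *	result = reg ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;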
8533 **/ 8534 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) 8535 { 8536 struct i40e_pf *pf = pci_get_drvdata(pdev); 8537 pci_ers_result_t result; 8538 int err; 8539 u32 reg; 8540 8541 dev_info(&pdev->dev, "%s\n", __func__); 8542 if (pci_enable_device_mem(pdev)) { 8543 dev_info(&pdev->dev, 8544 "Cannot re-enable PCI device after reset.\n"); 8545 result = PCI_ERS_RESULT_DISCONNECT; 8546 } else { 8547 pci_set_master(pdev); 8548 pci_restore_state(pdev); 8549 pci_save_state(pdev); 8550 pci_wake_from_d3(pdev, false); 8551 8552 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); 8553 if (reg == 0) 8554 result = PCI_ERS_RESULT_RECOVERED; 8555 else 8556 result = PCI_ERS_RESULT_DISCONNECT; 8557 } 8558 8559 err = pci_cleanup_aer_uncorrect_error_status(pdev); 8560 if (err) { 8561 dev_info(&pdev->dev, 8562 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", 8563 err); 8564 /* non-fatal, continue */ 8565 } 8566 8567 return result; 8568 } 8569 8570 /** 8571 * i40e_pci_error_resume - restart operations after PCI error recovery 8572 * @pdev: PCI device information struct 8573 * 8574 * Called to allow the driver to bring things back up after PCI error 8575 * and/or reset recovery has finished. 8576 **/ 8577 static void i40e_pci_error_resume(struct pci_dev *pdev) 8578 { 8579 struct i40e_pf *pf = pci_get_drvdata(pdev); 8580 8581 dev_info(&pdev->dev, "%s\n", __func__); 8582 if (test_bit(__I40E_SUSPENDED, &pf->state)) 8583 return; 8584 8585 rtnl_lock(); 8586 i40e_handle_reset_warning(pf); 8587 rtnl_unlock(); 8588 } 8589 8590 /** 8591 * i40e_shutdown - PCI callback for shutting down 8592 * @pdev: PCI device information struct 8593 **/ 8594 static void i40e_shutdown(struct pci_dev *pdev) 8595 { 8596 struct i40e_pf *pf = pci_get_drvdata(pdev); 8597 struct i40e_hw *hw = &pf->hw; 8598 8599 set_bit(__I40E_SUSPENDED, &pf->state); 8600 set_bit(__I40E_DOWN, &pf->state); 8601 rtnl_lock(); 8602 i40e_prep_for_reset(pf); 8603 rtnl_unlock(); 8604 8605 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 8606 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 8607 8608 if (system_state == SYSTEM_POWER_OFF) { 8609 pci_wake_from_d3(pdev, pf->wol_en); 8610 pci_set_power_state(pdev, PCI_D3hot); 8611 } 8612 } 8613 8614 #ifdef CONFIG_PM 8615 /** 8616 * i40e_suspend - PCI callback for moving to D3 8617 * @pdev: PCI device information struct 8618 **/ 8619 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) 8620 { 8621 struct i40e_pf *pf = pci_get_drvdata(pdev); 8622 struct i40e_hw *hw = &pf->hw; 8623 8624 set_bit(__I40E_SUSPENDED, &pf->state); 8625 set_bit(__I40E_DOWN, &pf->state); 8626 rtnl_lock(); 8627 i40e_prep_for_reset(pf); 8628 rtnl_unlock(); 8629 8630 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 8631 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 8632 8633 pci_wake_from_d3(pdev, pf->wol_en); 8634 pci_set_power_state(pdev, PCI_D3hot); 8635 8636 return 0; 8637 } 8638 8639 /** 8640 * i40e_resume - PCI callback for waking up from D3 8641 * @pdev: PCI device information struct 8642 **/ 8643 static int i40e_resume(struct pci_dev *pdev) 8644 { 8645 struct i40e_pf *pf = pci_get_drvdata(pdev); 8646 int err; 8647 8648 pci_set_power_state(pdev, PCI_D0); 8649 pci_restore_state(pdev); 8650 /* pci_restore_state() clears dev->state_saved, so 8651 * call pci_save_state() again to restore it.
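 Without the extra pci_save_state(), a later
	 * pci_restore_state() (for example from the AER slot-reset handler
	 * above) would see state_saved == false and restore nothing.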
8652 */ 8653 pci_save_state(pdev); 8654 8655 err = pci_enable_device_mem(pdev); 8656 if (err) { 8657 dev_err(&pdev->dev, 8658 "%s: Cannot enable PCI device from suspend\n", 8659 __func__); 8660 return err; 8661 } 8662 pci_set_master(pdev); 8663 8664 /* no wakeup events while running */ 8665 pci_wake_from_d3(pdev, false); 8666 8667 /* handling the reset will rebuild the device state */ 8668 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { 8669 clear_bit(__I40E_DOWN, &pf->state); 8670 rtnl_lock(); 8671 i40e_reset_and_rebuild(pf, false); 8672 rtnl_unlock(); 8673 } 8674 8675 return 0; 8676 } 8677 8678 #endif 8679 static const struct pci_error_handlers i40e_err_handler = { 8680 .error_detected = i40e_pci_error_detected, 8681 .slot_reset = i40e_pci_error_slot_reset, 8682 .resume = i40e_pci_error_resume, 8683 }; 8684 8685 static struct pci_driver i40e_driver = { 8686 .name = i40e_driver_name, 8687 .id_table = i40e_pci_tbl, 8688 .probe = i40e_probe, 8689 .remove = i40e_remove, 8690 #ifdef CONFIG_PM 8691 .suspend = i40e_suspend, 8692 .resume = i40e_resume, 8693 #endif 8694 .shutdown = i40e_shutdown, 8695 .err_handler = &i40e_err_handler, 8696 .sriov_configure = i40e_pci_sriov_configure, 8697 }; 8698 8699 /** 8700 * i40e_init_module - Driver registration routine 8701 * 8702 * i40e_init_module is the first routine called when the driver is 8703 * loaded. All it does is register with the PCI subsystem. 8704 **/ 8705 static int __init i40e_init_module(void) 8706 { 8707 pr_info("%s: %s - version %s\n", i40e_driver_name, 8708 i40e_driver_string, i40e_driver_version_str); 8709 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); 8710 i40e_dbg_init(); 8711 return pci_register_driver(&i40e_driver); 8712 } 8713 module_init(i40e_init_module); 8714 8715 /** 8716 * i40e_exit_module - Driver exit cleanup routine 8717 * 8718 * i40e_exit_module is called just before the driver is removed 8719 * from memory. 8720 **/ 8721 static void __exit i40e_exit_module(void) 8722 { 8723 pci_unregister_driver(&i40e_driver); 8724 i40e_dbg_exit(); 8725 } 8726 module_exit(i40e_exit_module); 8727
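/*
 * Lifecycle sketch (illustrative; it summarizes how the PCI core is
 * expected to drive the i40e_driver callbacks registered above, and is
 * not code executed from this file):
 *
 *	module_init  -> pci_register_driver   -> .probe per matching device
 *	suspend/resume and .shutdown as power-management events arrive
 *	.err_handler callbacks when a PCI channel error is detected
 *	module_exit  -> pci_unregister_driver -> .remove per device
 */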