// SPDX-License-Identifier: GPL-2.0
/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/aer.h>

#include "fm10k.h"

static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};

/*
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

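/* Reviewer note (explanatory, not kernel-doc): a read from a surprise-
 * removed PCIe device returns all ones. Since a register may legitimately
 * read as ~0, fm10k_read_reg() above only treats the value as a removal
 * indication when register 0 also reads as all ones (or when reg itself is
 * 0). The detach it performs is undone by fm10k_detach_subtask() once the
 * register space becomes readable again.
 */
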
static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

/**
 * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
 * @interface: fm10k private interface structure
 *
 * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
 * started immediately, request that it be restarted when possible.
 */
void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
{
	/* Avoid processing the MAC/VLAN queue when the service task is
	 * disabled, or when we're resetting the device.
	 */
	if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
		clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);
		/* We delay the actual start of execution in order to allow
		 * multiple MAC/VLAN updates to accumulate before handling
		 * them, and to allow some time to let the mailbox drain
		 * between runs.
		 */
		queue_delayed_work(fm10k_workqueue,
				   &interface->macvlan_task, 10);
	} else {
		set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
	}
}

/**
 * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Wait until the MAC/VLAN queue task has stopped, and cancel any future
 * requests.
 */
static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
{
	/* Disable the MAC/VLAN work item */
	set_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* Wait until any current invocation has stopped */
	cancel_delayed_work_sync(&interface->macvlan_task);

	/* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
	 * However, it may not be unset if the MAC/VLAN task never actually
	 * got a chance to run. Since we've canceled the task here, and it
	 * cannot be rescheduled right now, we need to ensure the scheduled
	 * bit gets unset.
	 */
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
}

/**
 * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
 * the MAC/VLAN work monitor.
 */
static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
{
	/* Re-enable the MAC/VLAN work item */
	clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* We might have received a MAC/VLAN request while disabled. If so,
	 * kick off the queue now.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
		clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
		queue_work(fm10k_workqueue, &interface->service_task);
	} else {
		set_bit(__FM10K_SERVICE_REQUEST, interface->state);
	}
}

static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);

	/* If a service event was requested since we started, immediately
	 * re-schedule now. This ensures we don't drop a request until the
	 * next timer event.
	 */
	if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
		fm10k_service_event_schedule(interface);
}

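/* Reviewer note: the service task and the MAC/VLAN task share the same
 * three-bit scheduling protocol. Sketched with the SERVICE bits
 * (illustrative only):
 *
 *	DISABLE set, or SCHED already set: record REQUEST, do not queue
 *	otherwise: set SCHED, clear REQUEST, queue the work item
 *
 * On completion the task clears SCHED and, if REQUEST was recorded in the
 * meantime, re-schedules itself immediately, so no request is ever lost.
 */
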
static void fm10k_stop_service_event(struct fm10k_intfc *interface)
{
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);
	cancel_work_sync(&interface->service_task);

	/* It's possible that cancel_work_sync stopped the service task from
	 * running before it could actually start. In this case the
	 * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
	 * the service task cannot be running at this point, we need to clear
	 * the scheduled bit, as otherwise the service task may never be
	 * restarted.
	 */
	clear_bit(__FM10K_SERVICE_SCHED, interface->state);
}

static void fm10k_start_service_event(struct fm10k_intfc *interface)
{
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @t: pointer to timer data
 **/
static void fm10k_service_timer(struct timer_list *t)
{
	struct fm10k_intfc *interface = from_timer(interface, t,
						   service_timer);

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

/**
 * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
 * @interface: fm10k private data structure
 *
 * This function prepares for a device reset by shutting down as much as we
 * can. It does nothing and returns false if __FM10K_RESETTING was already set
 * prior to calling this function. It returns true if it actually did work.
 */
static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(netdev);

	/* Nothing to do if a reset is already in progress */
	if (test_and_set_bit(__FM10K_RESETTING, interface->state))
		return false;

	/* As the MAC/VLAN task will be accessing registers it must not be
	 * running while we reset. Although the task will not be scheduled
	 * once we start resetting, it may already be running.
	 */
	fm10k_stop_macvlan_task(interface);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	rtnl_unlock();

	return true;
}

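/* Reviewer sketch (illustrative): fm10k_prepare_for_reset() and
 * fm10k_handle_reset() are used as a pair, exactly as fm10k_reset_subtask()
 * does below. A false return means another thread owns the reset, in which
 * case fm10k_handle_reset() must not be called:
 *
 *	if (fm10k_prepare_for_reset(interface))
 *		err = fm10k_handle_reset(interface);
 */
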
static int fm10k_handle_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(!test_bit(__FM10K_RESETTING, interface->state));

	rtnl_lock();

	pci_set_master(interface->pdev);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = fm10k_init_queueing_scheme(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"init_queueing_scheme failed: %d\n", err);
		goto reinit_err;
	}

	/* re-associate interrupts */
	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = fm10k_hw_ready(interface);
	if (err)
		goto err_open;

	/* update hardware address for VFs if perm_addr has changed */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
	if (err)
		goto err_open;

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	fm10k_resume_macvlan_task(interface);

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
reinit_err:
	netif_device_detach(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, interface->state);

	return err;
}

static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	u32 __iomem *hw_addr;
	u32 value;
	int err;

	/* do nothing if netdev is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	/* We've lost the PCIe register space, and can no longer access the
	 * device. Shut everything except the detach subtask down and prepare
	 * to reset the device in case we recover. If we actually prepare for
	 * reset, indicate that we're detached.
	 */
	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_DETACHED, interface->state);

	/* check the real address space to see if we've recovered */
	hw_addr = READ_ONCE(interface->uc_addr);
	value = readl(hw_addr);
	if (~value) {
		/* Make sure the reset was initiated because we detached,
		 * otherwise we might race with a different reset flow.
		 */
		if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
					interface->state))
			return;

		/* Restore the hardware address */
		interface->hw.hw_addr = interface->uc_addr;

		/* PCIe link has been restored, and the device is active
		 * again. Restore everything and reset the device.
		 */
		err = fm10k_handle_reset(interface);
		if (err) {
			netdev_err(netdev, "Unable to reset device: %d\n",
				   err);
			interface->hw.hw_addr = NULL;
			return;
		}

		/* Re-attach the netdev */
		netif_device_attach(netdev);
		netdev_warn(netdev,
			    "PCIe link restored, device now attached\n");
		return;
	}
}

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	int err;

	if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
				interface->flags))
		return;

	/* If another thread has already prepared to reset the device, we
	 * should not attempt to handle a reset here, since we'd race with
	 * that thread. This may happen if we suspend the device or if the
	 * PCIe link is lost. In this case, we'll just ignore the RESET
	 * request, as it will (eventually) be taken care of when the thread
	 * which actually started the reset is finished.
	 */
	if (!fm10k_prepare_for_reset(interface))
		return;

	netdev_err(interface->netdev, "Reset interface\n");

	err = fm10k_handle_reset(interface);
	if (err)
		dev_err(&interface->pdev->dev,
			"fm10k_handle_reset failed: %d\n", err);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, interface->state);
	}

	if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* If we're resetting, bail out */
	if (test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

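/* Reviewer note on fm10k_update_stats() below: exclusion is provided by the
 * __FM10K_UPDATING_STATS bit rather than a lock. A caller that must block
 * stats collection entirely spins on the bit, as fm10k_down() does:
 *
 *	while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
 *		usleep_range(1000, 2000);
 *
 * The per-ring loops also snapshot ring pointers with READ_ONCE() and
 * tolerate NULL, since rings may be replaced while the interface is being
 * reconfigured.
 */
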
/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
	u64 rx_link_errors = 0;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* ensure only one thread updates stats at a time */
	if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		return;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);

		if (!tx_ring)
			continue;

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	interface->hw_csum_tx_good = hw_csum_tx_good;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);

		if (!rx_ring)
			continue;

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
		rx_switch_errors += rx_ring->rx_stats.switch_errors;
		rx_drops += rx_ring->rx_stats.drops;
		rx_pp_errors += rx_ring->rx_stats.pp_errors;
		rx_link_errors += rx_ring->rx_stats.link_errors;
		rx_length_errors += rx_ring->rx_stats.length_errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->hw_csum_rx_good = hw_csum_rx_good;
	interface->rx_switch_errors = rx_switch_errors;
	interface->rx_drops = rx_drops;
	interface->rx_pp_errors = rx_pp_errors;
	interface->rx_link_errors = rx_link_errors;
	interface->rx_length_errors = rx_length_errors;

	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < hw->mac.max_queues; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = rx_errors;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;

	clear_bit(__FM10K_UPDATING_STATS, interface->state);
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface: pointer to the device interface structure
 *
 * This function serves two purposes. First, it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Second, it sets the
 * bits needed to check for Tx hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}

/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* Check whether we're detached first */
	fm10k_detach_subtask(interface);

	/* tasks run even when interface is down */
	fm10k_mbx_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

/**
 * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
 * @work: pointer to work_struct containing our data
 *
 * This work item handles sending MAC/VLAN updates to the switch manager. When
 * the interface is up, it will attempt to queue mailbox messages to the
 * switch manager requesting updates for MAC/VLAN pairs. If the Tx FIFO of the
 * mailbox is full, it will reschedule itself to try again in a short while.
 * This ensures that the driver does not overload the switch mailbox with too
 * many simultaneous requests, causing an unnecessary reset.
 **/
static void fm10k_macvlan_task(struct work_struct *work)
{
	struct fm10k_macvlan_request *item;
	struct fm10k_intfc *interface;
	struct delayed_work *dwork;
	struct list_head *requests;
	struct fm10k_hw *hw;
	unsigned long flags;

	dwork = to_delayed_work(work);
	interface = container_of(dwork, struct fm10k_intfc, macvlan_task);
	hw = &interface->hw;
	requests = &interface->macvlan_requests;

	do {
		/* Pop the first item off the list */
		spin_lock_irqsave(&interface->macvlan_lock, flags);
		item = list_first_entry_or_null(requests,
						struct fm10k_macvlan_request,
						list);
		if (item)
			list_del_init(&item->list);

		spin_unlock_irqrestore(&interface->macvlan_lock, flags);

		/* We have no more items to process */
		if (!item)
			goto done;

		fm10k_mbx_lock(interface);

		/* Check that we have plenty of space to send the message. We
		 * want to ensure that the mailbox stays low enough to avoid a
		 * change in the host state, otherwise we may see spurious
		 * link up / link down notifications.
		 */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
			hw->mbx.ops.process(hw, &hw->mbx);
			set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
			fm10k_mbx_unlock(interface);

			/* Put the request back on the list */
			spin_lock_irqsave(&interface->macvlan_lock, flags);
			list_add(&item->list, requests);
			spin_unlock_irqrestore(&interface->macvlan_lock,
					       flags);
			break;
		}

		switch (item->type) {
		case FM10K_MC_MAC_REQUEST:
			hw->mac.ops.update_mc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set);
			break;
		case FM10K_UC_MAC_REQUEST:
			hw->mac.ops.update_uc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set,
						   0);
			break;
		case FM10K_VLAN_REQUEST:
			hw->mac.ops.update_vlan(hw,
						item->vlan.vid,
						item->vlan.vsi,
						item->set);
			break;
		default:
			break;
		}

		fm10k_mbx_unlock(interface);

		/* Free the item now that we've sent the update */
		kfree(item);
	} while (true);

done:
	WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state));

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);

	/* If a MAC/VLAN request was scheduled since we started, we should
	 * re-schedule. However, there is no reason to re-schedule if there is
	 * no work to do.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}

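/* Reviewer note: the producer side of interface->macvlan_requests lives
 * elsewhere in the driver (the fm10k_queue_mac_request() and
 * fm10k_queue_vlan_request() helpers in fm10k_netdev.c, if I have the
 * layout right); producers append under macvlan_lock and then call
 * fm10k_macvlan_schedule(). When the mailbox is too full, the loop above
 * re-queues the in-flight item at the head of the list with list_add(), so
 * ordering is preserved across retries.
 */
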
/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}

/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}

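/* Reviewer note: ring base addresses are 64-bit DMA addresses split across
 * two 32-bit registers (TDBAL/TDBAH here, RDBAL/RDBAH for Rx below), and
 * TDLEN takes the ring size in bytes. The poll in fm10k_enable_tx_ring()
 * gives the queue roughly 10-20 ms in total (ten iterations of
 * usleep_range(1000, 2000)) to report TXDCTL.ENABLE before warning.
 */
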
/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);

	/* poll here to verify that Tx rings are now enabled */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
}

/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl &= ~FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & BIT(ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl |= FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}

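/* Reviewer note: drop-on-empty is enabled only for rings whose priority
 * class has pause disabled (the rx_pause/pfc_en bit for ring->qos_pc is
 * clear). A paused class must hold frames rather than drop them; a class
 * without pause drops on descriptor exhaustion so one starved queue cannot
 * back pressure the switch. fm10k_update_rx_drop_en() below re-applies the
 * same policy at runtime when pause configuration changes.
 */
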
/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;

#endif
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & BIT(ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}

/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}

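/* Reviewer note: the _l fields of struct fm10k_dglort_cfg are bit widths,
 * not counts, which is why they are derived with fls(). For example, an RSS
 * mask of 0xf (16 queues) gives rss_l = 4, i.e. four DGLORT bits select the
 * RSS queue; pc_l and queue_l encode the priority-class and queue-index
 * widths the same way.
 */
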
/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}

static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	/* service upstream mailbox */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}

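/* Reviewer note: a single ITR write both re-enables the vector and programs
 * its moderation delay. FM10K_MBX_INT_DELAY is a nominal 20us which is
 * right-shifted by hw->mac.itr_scale because, as I understand it, the ITR
 * time base varies with the PCIe link speed the device trained at. The PF
 * mailbox handler below ends with the same write against FM10K_ITR.
 */
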
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fm10k_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
void fm10k_netpoll(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, interface->state))
		return;

	for (i = 0; i < interface->num_q_vectors; i++)
		fm10k_msix_clean_rings(0, interface->q_vector[i]);
}

#endif
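/* Reviewer note: FM10K_ERR_MSG() below expands a fault constant into a case
 * label whose body stringifies the constant's name with the preprocessor
 * '#' operator, e.g.
 *
 *	FM10K_ERR_MSG(PCA_NO_FAULT);
 * becomes
 *	case (PCA_NO_FAULT): error = "PCA_NO_FAULT"; break;
 *
 * which keeps the fault tables in fm10k_handle_fault() compact.
 */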
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}

static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}

static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
	for (q = 255;;) {
		if (maxholdq & BIT(31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				interface->rx_overrun_vf++;
			}
		}

		maxholdq *= 2;
		if (!maxholdq)
			q &= ~(32 - 1);

		if (!q)
			break;

		if (q-- % 32)
			continue;

		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}

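/* Reviewer note on fm10k_reset_drop_on_empty() above: the loop walks queues
 * 255 down to 0 using one MAXHOLDQ register per block of 32 queues, highest
 * block (register 7) first. The current queue's status always sits in bit
 * 31; "maxholdq *= 2" shifts the next lower queue's bit into place, and
 * once the word is zero "q &= ~(32 - 1)" skips the rest of the block. Each
 * register with bits set is written back as read, which appears to be a
 * write-one-to-clear of the latched indications. PF-owned queues get RXDCTL
 * reprogrammed to re-enable queues the hardware disabled on overrun; queues
 * that belong to VFs are only counted.
 */
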
static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;
	s32 err = 0;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes */
	if (fm10k_mbx_trylock(interface)) {
		err = mbx->ops.process(hw, mbx);
		/* handle VFLRE events */
		fm10k_iov_event(interface);
		fm10k_mbx_unlock(interface);
	}

	if (err == FM10K_ERR_RESET_REQUESTED)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = true;

	/* validate host state, and handle VF mailboxes in the service task */
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	return IRQ_HANDLED;
}

void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int itr_reg;

	/* no mailbox IRQ to free if MSI-X is not enabled */
	if (!interface->msix_entries)
		return;

	entry = &interface->msix_entries[FM10K_MBX_VECTOR];

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}

static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	bool vlan_override = hw->mac.vlan_override;
	u16 default_vid = hw->mac.default_vid;
	struct fm10k_intfc *interface;
	s32 err;

	err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* MAC was changed so we need reset */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* VLAN override was changed, or default VLAN changed */
	if ((vlan_override != hw->mac.vlan_override) ||
	    (default_vid != hw->mac.default_vid))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	struct pci_dev *pdev;

	interface = container_of(hw, struct fm10k_intfc, hw);
	pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u\n",
		**results & FM10K_TLV_ID_MASK);

	return 0;
}

static const struct fm10k_msg_data vf_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

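/* Reviewer note: vf_mbx_data above (and pf_mbx_data below) are TLV dispatch
 * tables. Each FM10K_*_HANDLER() entry pairs a message ID with a callback,
 * the mailbox process() op walks the table for each received message, and
 * the FM10K_TLV_MSG_ERROR_HANDLER entry is the catch-all that routes
 * unrecognized IDs to fm10k_mbx_error().
 */
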
static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* map all of the interrupt sources */
	fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u32 dglort_map = hw->mac.dglort_map;
	s32 err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	err = fm10k_msg_err_pf(hw, results, mbx);
	if (!err && hw->swapi.status) {
		/* force link down for a reasonable delay */
		interface->link_down_event = jiffies + (2 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

		fm10k_service_event_schedule(interface);

		/* prevent overloading kernel message buffer */
		if (interface->lport_map_failed)
			return 0;

		interface->lport_map_failed = true;

		if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
			dev_warn(&interface->pdev->dev,
				 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
		dev_warn(&interface->pdev->dev,
			 "request logical port map failed: %d\n",
			 hw->swapi.status);

		return 0;
	}

	err = fm10k_msg_lport_map_pf(hw, results, mbx);
	if (err)
		return err;

	interface->lport_map_failed = false;

	/* we need to reset if port count was just updated */
	if (dglort_map != hw->mac.dglort_map)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}

static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VLAN ID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* check to see if this belongs to one of the VFs */
	err = fm10k_iov_update_pvid(interface, glort, pvid);
	if (!err)
		return 0;

	/* we need to reset if default VLAN was just updated */
	if (pvid != hw->mac.default_vid)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	hw->mac.default_vid = pvid;

	return 0;
}

static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};

static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
	u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}

int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* enable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf)
		err = fm10k_mbx_request_irq_pf(interface);
	else
		err = fm10k_mbx_request_irq_vf(interface);
	if (err)
		return err;

	/* connect mailbox */
	err = hw->mbx.ops.connect(hw, &hw->mbx);

	/* if the mailbox failed to connect, then free IRQ */
	if (err)
		fm10k_mbx_free_irq(interface);

	return err;
}

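/* Reviewer note on MSI-X layout: the slot FM10K_MBX_VECTOR carries the
 * mailbox and all "other" causes, and queue vectors start at offset
 * NON_Q_VECTORS(hw) in msix_entries. That is why fm10k_qv_free_irq() and
 * fm10k_qv_request_irq() below index the table from NON_Q_VECTORS(hw), and
 * why fm10k_configure_tx_ring()/fm10k_configure_rx_ring() add the same
 * offset when mapping a ring's TXINT/RXINT to its q_vector.
 */
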
/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}

/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	entry = &interface->msix_entries[NON_Q_VECTORS(hw)];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}

		/* Assign ITR register to q_vector */
		q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
				&interface->uc_addr[FM10K_ITR(entry->entry)] :
				&interface->uc_addr[FM10K_VFITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSI-X interrupt: %d\n",
				  err);
			goto err_out;
		}

		/* assign the affinity mask for this IRQ */
		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}
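/* The err_out path above is the standard acquire-in-a-loop, unwind-in-
 * reverse idiom: on failure at vector N, every vector below N is torn down
 * in reverse order, leaving the interface exactly as it was found. The
 * same pattern in isolation (acquire/release are hypothetical helpers):
 *
 *	for (i = 0; i < n; i++) {
 *		err = acquire(&res[i]);
 *		if (err)
 *			goto unwind;
 *	}
 *	return 0;
 * unwind:
 *	while (i--)
 *		release(&res[i]);
 *	return err;
 */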
void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* enable statistics capture again */
	clear_bit(__FM10K_UPDATING_STATS, interface->state);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer now */
	hw->mac.get_host_state = true;
	mod_timer(&interface->service_timer, jiffies);
}

static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err, i = 0, count = 0;

	/* signal that we are down to the interrupt handler and service task */
	if (test_and_set_bit(__FM10K_DOWN, interface->state))
		return;

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* prevent updating statistics while we're down */
	while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		usleep_range(1000, 2000);

	/* skip waiting for Tx DMA if we lost PCIe link */
	if (FM10K_REMOVED(hw->hw_addr))
		goto skip_tx_dma_drain;

	/* In some rare circumstances it can take a while for Tx queues to
	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
	 * until the Tx queues have emptied, or until the retries are
	 * exhausted. If we fail to clear within the retry loop, we will
	 * issue a warning indicating that Tx DMA is probably hung. Note
	 * that this means we call .stop_hw() twice, but this shouldn't
	 * cause any problems.
	 */
	err = hw->mac.ops.stop_hw(hw);
	if (err != FM10K_ERR_REQUESTS_PENDING)
		goto skip_tx_dma_drain;

#define TX_DMA_DRAIN_RETRIES 25
	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
		usleep_range(10000, 20000);

		/* start checking at the last ring to have pending Tx */
		for (; i < interface->num_tx_queues; i++)
			if (fm10k_get_tx_pending(interface->tx_ring[i], false))
				break;

		/* if all the queues are drained, we can break now */
		if (i == interface->num_tx_queues)
			break;
	}

	if (count >= TX_DMA_DRAIN_RETRIES)
		dev_err(&interface->pdev->dev,
			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
			count);
skip_tx_dma_drain:
	/* Disable DMA engine for Tx/Rx */
	err = hw->mac.ops.stop_hw(hw);
	if (err == FM10K_ERR_REQUESTS_PENDING)
		dev_err(&interface->pdev->dev,
			"hw was not shut down gracefully due to pending requests\n");
	else if (err)
		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
	fm10k_clean_all_rx_rings(interface);
}
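/* The Tx drain in fm10k_down() is bounded polling: sleep, rescan starting
 * from the last ring known to still have work (i persists across retries),
 * and give up after TX_DMA_DRAIN_RETRIES attempts. The idiom in isolation
 * (pending() is a hypothetical predicate):
 *
 *	int tries = TX_DMA_DRAIN_RETRIES, i = 0;
 *
 *	while (tries--) {
 *		usleep_range(10000, 20000);
 *		for (; i < n; i++)
 *			if (pending(i))
 *				break;
 *		if (i == n)
 *			break;
 *	}
 */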
/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI device ID entry
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	u32 rss_key[FM10K_RSSRK_SIZE];
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Setup IOV handlers */
	if (fi->iov_ops)
		memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
		return err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Set upper limit on IOV VFs that can be allocated */
	pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);

	/* Start with a random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address, defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.addr);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* initialize DCBNL interface */
	fm10k_dcbnl_set_ops(netdev);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_TX_ITR_DEFAULT;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* initialize UDP port lists */
	INIT_LIST_HEAD(&interface->vxlan_port);
	INIT_LIST_HEAD(&interface->geneve_port);

	/* Initialize the MAC/VLAN queue */
	INIT_LIST_HEAD(&interface->macvlan_requests);

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	memcpy(interface->rssrk, rss_key, sizeof(rss_key));

	/* Initialize the mailbox lock */
	spin_lock_init(&interface->mbx_lock);
	spin_lock_init(&interface->macvlan_lock);

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, interface->state);
	set_bit(__FM10K_UPDATING_STATS, interface->state);

	return 0;
}
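/* fm10k_sw_init() caps the RSS spread at the smaller of the hardware limit
 * and the number of online CPUs. A worked example with illustrative values:
 * with FM10K_MAX_RSS_INDICES of 16 on an 8-CPU system,
 *
 *	rss = min_t(int, 16, num_online_cpus());
 *
 * yields rss == 8, so the RSS indirection table only distributes flows
 * across eight Rx queues.
 */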
/**
 * fm10k_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in fm10k_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * fm10k_probe initializes an interface identified by a pci_dev structure.
 * The OS initialization, configuring of the interface private structure,
 * and a hardware reset occur.
 **/
static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct fm10k_intfc *interface;
	int err;

	if (pdev->error_state != pci_channel_io_normal) {
		dev_err(&pdev->dev,
			"PCI device still in an error state. Unable to load...\n");
		return -EIO;
	}

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"PCI enable device failed: %d\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: %d\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, fm10k_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed: %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_netdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	interface = netdev_priv(netdev);
	pci_set_drvdata(pdev, interface);

	interface->netdev = netdev;
	interface->pdev = pdev;

	interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
				     FM10K_UC_ADDR_SIZE);
	if (!interface->uc_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	err = fm10k_sw_init(interface, ent);
	if (err)
		goto err_sw_init;

	/* enable debugfs support */
	fm10k_dbg_intfc_init(interface);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_sw_init;

	/* the mbx interrupt might attempt to schedule the service task, so we
	 * must ensure it is disabled since we haven't yet requested the timer
	 * or work item.
	 */
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_interrupt;

	/* final check of hardware state before registering the interface */
	err = fm10k_hw_ready(interface);
	if (err)
		goto err_register;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* stop all the transmit queues from transmitting until link is up */
	netif_tx_stop_all_queues(netdev);

	/* Initialize the service timer and service task late in order to
	 * avoid cleanup issues.
	 */
	timer_setup(&interface->service_timer, fm10k_service_timer, 0);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* Setup the MAC/VLAN queue */
	INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);

	/* kick off service timer now, even when interface is down */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	/* print warning for non-optimal configurations */
	pcie_print_link_status(interface->pdev);

	/* report MAC address for logging */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);

	/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
	fm10k_iov_configure(pdev, 0);

	/* clear the service task disable bit and kick off the service task */
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);

	return 0;

err_register:
	fm10k_mbx_free_irq(interface);
err_mbx_interrupt:
	fm10k_clear_queueing_scheme(interface);
err_sw_init:
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
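/* Each err_* label in fm10k_probe() releases one resource and then falls
 * through to the next label, so a jump from any failure point unwinds
 * everything acquired so far in reverse order. Condensed map of each label
 * to the release it performs before falling through:
 *
 *	err_register        -> fm10k_mbx_free_irq()
 *	err_mbx_interrupt   -> fm10k_clear_queueing_scheme()
 *	err_sw_init         -> iounmap()
 *	err_ioremap         -> free_netdev()
 *	err_alloc_netdev    -> pci_release_mem_regions()
 *	err_pci_reg/err_dma -> pci_disable_device()
 */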
/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	del_timer_sync(&interface->service_timer);

	fm10k_stop_service_event(interface);
	fm10k_stop_macvlan_task(interface);

	/* Remove all pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* release VFs */
	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
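/* fm10k_remove() is essentially fm10k_probe() run backwards: deferred work
 * is stopped first so nothing re-arms mid-teardown, then the netdev is
 * unregistered, and only then are IRQs, mappings, and PCI resources
 * released. Condensed mirror of the two paths:
 *
 *	probe:  ioremap -> sw_init -> request_irq -> register_netdev
 *	remove: unregister_netdev -> free_irq -> iounmap -> disable_device
 */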
static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
{
	/* the watchdog task reads from registers, which might appear like
	 * a surprise remove if the PCIe device is disabled while we're
	 * stopped. We stop the watchdog task until after we resume software
	 * activity.
	 *
	 * Note that the MAC/VLAN task will be stopped as part of preparing
	 * for reset, so we don't need to handle it here.
	 */
	fm10k_stop_service_event(interface);

	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_SUSPENDED, interface->state);
}

static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Even if we didn't properly prepare for reset in
	 * fm10k_prepare_suspend, we'll attempt to resume anyway.
	 */
	if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
		dev_warn(&interface->pdev->dev,
			 "Device was shut down as part of suspend... Attempting to recover\n");

	/* reset statistics starting values */
	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

	err = fm10k_handle_reset(interface);
	if (err)
		return err;

	/* assume the host is not ready, to prevent a race with the watchdog
	 * in case we actually don't have a connection to the switch
	 */
	interface->host_ready = false;
	fm10k_watchdog_host_not_ready(interface);

	/* force link to stay down for a second to prevent link flutter */
	interface->link_down_event = jiffies + (HZ);
	set_bit(__FM10K_LINK_DOWN, interface->state);

	/* restart the service task */
	fm10k_start_service_event(interface);

	/* Restart the MAC/VLAN request queue in case of outstanding events */
	fm10k_macvlan_schedule(interface);

	return err;
}
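/* fm10k_prepare_suspend() and fm10k_handle_resume() are shared by system
 * suspend, function reset, and the PCI error handlers below, so every
 * "device went away" path recovers through a single code path. Sketch of
 * the expected pairing:
 *
 *	fm10k_prepare_suspend(interface);
 *	... power transition or function reset ...
 *	err = fm10k_handle_resume(interface);
 *	if (err)
 *		dev_warn(&pdev->dev, "resume failed: %d\n", err);
 */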
/**
 * fm10k_resume - Generic PM resume hook
 * @dev: generic device structure
 *
 * Generic PM hook used when waking the device from a low power state after
 * suspend or hibernation. This function does not need to handle lower PCIe
 * device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_resume(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* refresh hw_addr in case it was dropped */
	hw->hw_addr = interface->uc_addr;

	err = fm10k_handle_resume(interface);
	if (err)
		return err;

	netif_device_attach(netdev);

	return 0;
}

/**
 * fm10k_suspend - Generic PM suspend hook
 * @dev: generic device structure
 *
 * Generic PM hook used when setting the device into a low power state for
 * system suspend or hibernation. This function does not need to handle lower
 * PCIe device state as the stack takes care of that for us.
 **/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	fm10k_prepare_suspend(interface);

	return 0;
}

/**
 * fm10k_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	fm10k_prepare_suspend(interface);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * fm10k_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result;

	if (pci_reenable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		/* After a second error, pci->state_saved is false; this
		 * resets it so EEH doesn't break.
		 */
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
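/* During AER recovery the callbacks run in a fixed order, with the return
 * value of error_detected() steering the flow. Condensed sequence as
 * implemented here:
 *
 *	error_detected() -> detach netdev, prepare suspend,
 *	                    return PCI_ERS_RESULT_NEED_RESET
 *	slot_reset()     -> re-enable device, restore state,
 *	                    return PCI_ERS_RESULT_RECOVERED
 *	io_resume()      -> fm10k_handle_resume(), attach netdev
 */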
/**
 * fm10k_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void fm10k_io_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;
	int err;

	err = fm10k_handle_resume(interface);
	if (err)
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
	else
		netif_device_attach(netdev);
}

/**
 * fm10k_io_reset_prepare - called when PCI function is about to be reset
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the PCI function is about to be reset,
 * allowing the device driver to prepare for it.
 */
static void fm10k_io_reset_prepare(struct pci_dev *pdev)
{
	/* warn in case we have any active VF devices */
	if (pci_num_vf(pdev))
		dev_warn(&pdev->dev,
			 "PCIe FLR may cause issues for any active VF devices\n");
	fm10k_prepare_suspend(pci_get_drvdata(pdev));
}

/**
 * fm10k_io_reset_done - called when PCI function has finished resetting
 * @pdev: Pointer to PCI device
 *
 * This callback is called just after the PCI function is reset, such as via
 * /sys/class/net/<enpX>/device/reset or similar.
 */
static void fm10k_io_reset_done(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	int err = fm10k_handle_resume(interface);

	if (err) {
		dev_warn(&pdev->dev,
			 "%s failed: %d\n", __func__, err);
		netif_device_detach(interface->netdev);
	}
}

static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
	.reset_prepare = fm10k_io_reset_prepare,
	.reset_done = fm10k_io_reset_done,
};

static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);

static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
	.driver = {
		.pm		= &fm10k_pm_ops,
	},
	.sriov_configure	= fm10k_iov_configure,
	.err_handler		= &fm10k_err_handler
};

/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}

/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}
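/* These two wrappers are intended as the only entry points the module glue
 * needs. A sketch of the expected call sites (the init/exit naming here is
 * illustrative, patterned on the driver's main file):
 *
 *	static int __init fm10k_init_module(void)
 *	{
 *		return fm10k_register_pci_driver();
 *	}
 *	module_init(fm10k_init_module);
 *
 *	static void __exit fm10k_exit_module(void)
 *	{
 *		fm10k_unregister_pci_driver();
 *	}
 *	module_exit(fm10k_exit_module);
 */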