15a6681e2SEdward Cree /**************************************************************************** 25a6681e2SEdward Cree * Driver for Solarflare network controllers and boards 35a6681e2SEdward Cree * Copyright 2005-2006 Fen Systems Ltd. 45a6681e2SEdward Cree * Copyright 2005-2013 Solarflare Communications Inc. 55a6681e2SEdward Cree * 65a6681e2SEdward Cree * This program is free software; you can redistribute it and/or modify it 75a6681e2SEdward Cree * under the terms of the GNU General Public License version 2 as published 85a6681e2SEdward Cree * by the Free Software Foundation, incorporated herein by reference. 95a6681e2SEdward Cree */ 105a6681e2SEdward Cree 115a6681e2SEdward Cree #include <linux/module.h> 125a6681e2SEdward Cree #include <linux/pci.h> 135a6681e2SEdward Cree #include <linux/netdevice.h> 145a6681e2SEdward Cree #include <linux/etherdevice.h> 155a6681e2SEdward Cree #include <linux/delay.h> 165a6681e2SEdward Cree #include <linux/notifier.h> 175a6681e2SEdward Cree #include <linux/ip.h> 185a6681e2SEdward Cree #include <linux/tcp.h> 195a6681e2SEdward Cree #include <linux/in.h> 205a6681e2SEdward Cree #include <linux/ethtool.h> 215a6681e2SEdward Cree #include <linux/topology.h> 225a6681e2SEdward Cree #include <linux/gfp.h> 235a6681e2SEdward Cree #include <linux/aer.h> 245a6681e2SEdward Cree #include <linux/interrupt.h> 255a6681e2SEdward Cree #include "net_driver.h" 265a6681e2SEdward Cree #include "efx.h" 275a6681e2SEdward Cree #include "nic.h" 285a6681e2SEdward Cree #include "selftest.h" 295a6681e2SEdward Cree 305a6681e2SEdward Cree #include "workarounds.h" 315a6681e2SEdward Cree 325a6681e2SEdward Cree /************************************************************************** 335a6681e2SEdward Cree * 345a6681e2SEdward Cree * Type name strings 355a6681e2SEdward Cree * 365a6681e2SEdward Cree ************************************************************************** 375a6681e2SEdward Cree */ 385a6681e2SEdward Cree 395a6681e2SEdward Cree /* Loopback mode 
names (see LOOPBACK_MODE()) */ 405a6681e2SEdward Cree const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX; 415a6681e2SEdward Cree const char *const ef4_loopback_mode_names[] = { 425a6681e2SEdward Cree [LOOPBACK_NONE] = "NONE", 435a6681e2SEdward Cree [LOOPBACK_DATA] = "DATAPATH", 445a6681e2SEdward Cree [LOOPBACK_GMAC] = "GMAC", 455a6681e2SEdward Cree [LOOPBACK_XGMII] = "XGMII", 465a6681e2SEdward Cree [LOOPBACK_XGXS] = "XGXS", 475a6681e2SEdward Cree [LOOPBACK_XAUI] = "XAUI", 485a6681e2SEdward Cree [LOOPBACK_GMII] = "GMII", 495a6681e2SEdward Cree [LOOPBACK_SGMII] = "SGMII", 505a6681e2SEdward Cree [LOOPBACK_XGBR] = "XGBR", 515a6681e2SEdward Cree [LOOPBACK_XFI] = "XFI", 525a6681e2SEdward Cree [LOOPBACK_XAUI_FAR] = "XAUI_FAR", 535a6681e2SEdward Cree [LOOPBACK_GMII_FAR] = "GMII_FAR", 545a6681e2SEdward Cree [LOOPBACK_SGMII_FAR] = "SGMII_FAR", 555a6681e2SEdward Cree [LOOPBACK_XFI_FAR] = "XFI_FAR", 565a6681e2SEdward Cree [LOOPBACK_GPHY] = "GPHY", 575a6681e2SEdward Cree [LOOPBACK_PHYXS] = "PHYXS", 585a6681e2SEdward Cree [LOOPBACK_PCS] = "PCS", 595a6681e2SEdward Cree [LOOPBACK_PMAPMD] = "PMA/PMD", 605a6681e2SEdward Cree [LOOPBACK_XPORT] = "XPORT", 615a6681e2SEdward Cree [LOOPBACK_XGMII_WS] = "XGMII_WS", 625a6681e2SEdward Cree [LOOPBACK_XAUI_WS] = "XAUI_WS", 635a6681e2SEdward Cree [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", 645a6681e2SEdward Cree [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", 655a6681e2SEdward Cree [LOOPBACK_GMII_WS] = "GMII_WS", 665a6681e2SEdward Cree [LOOPBACK_XFI_WS] = "XFI_WS", 675a6681e2SEdward Cree [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", 685a6681e2SEdward Cree [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 695a6681e2SEdward Cree }; 705a6681e2SEdward Cree 715a6681e2SEdward Cree const unsigned int ef4_reset_type_max = RESET_TYPE_MAX; 725a6681e2SEdward Cree const char *const ef4_reset_type_names[] = { 735a6681e2SEdward Cree [RESET_TYPE_INVISIBLE] = "INVISIBLE", 745a6681e2SEdward Cree [RESET_TYPE_ALL] = "ALL", 755a6681e2SEdward Cree [RESET_TYPE_RECOVER_OR_ALL] = 
"RECOVER_OR_ALL", 765a6681e2SEdward Cree [RESET_TYPE_WORLD] = "WORLD", 775a6681e2SEdward Cree [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", 785a6681e2SEdward Cree [RESET_TYPE_DATAPATH] = "DATAPATH", 795a6681e2SEdward Cree [RESET_TYPE_DISABLE] = "DISABLE", 805a6681e2SEdward Cree [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 815a6681e2SEdward Cree [RESET_TYPE_INT_ERROR] = "INT_ERROR", 825a6681e2SEdward Cree [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 835a6681e2SEdward Cree [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", 845a6681e2SEdward Cree [RESET_TYPE_TX_SKIP] = "TX_SKIP", 855a6681e2SEdward Cree }; 865a6681e2SEdward Cree 875a6681e2SEdward Cree /* Reset workqueue. If any NIC has a hardware failure then a reset will be 885a6681e2SEdward Cree * queued onto this work queue. This is not a per-nic work queue, because 895a6681e2SEdward Cree * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised. 905a6681e2SEdward Cree */ 915a6681e2SEdward Cree static struct workqueue_struct *reset_workqueue; 925a6681e2SEdward Cree 935a6681e2SEdward Cree /* How often and how many times to poll for a reset while waiting for a 945a6681e2SEdward Cree * BIST that another function started to complete. 955a6681e2SEdward Cree */ 965a6681e2SEdward Cree #define BIST_WAIT_DELAY_MS 100 975a6681e2SEdward Cree #define BIST_WAIT_DELAY_COUNT 100 985a6681e2SEdward Cree 995a6681e2SEdward Cree /************************************************************************** 1005a6681e2SEdward Cree * 1015a6681e2SEdward Cree * Configurable values 1025a6681e2SEdward Cree * 1035a6681e2SEdward Cree *************************************************************************/ 1045a6681e2SEdward Cree 1055a6681e2SEdward Cree /* 1065a6681e2SEdward Cree * Use separate channels for TX and RX events 1075a6681e2SEdward Cree * 1085a6681e2SEdward Cree * Set this to 1 to use separate channels for TX and RX. It allows us 1095a6681e2SEdward Cree * to control interrupt affinity separately for TX and RX. 
1105a6681e2SEdward Cree * 1115a6681e2SEdward Cree * This is only used in MSI-X interrupt mode 1125a6681e2SEdward Cree */ 1135a6681e2SEdward Cree bool ef4_separate_tx_channels; 1145a6681e2SEdward Cree module_param(ef4_separate_tx_channels, bool, 0444); 1155a6681e2SEdward Cree MODULE_PARM_DESC(ef4_separate_tx_channels, 1165a6681e2SEdward Cree "Use separate channels for TX and RX"); 1175a6681e2SEdward Cree 1185a6681e2SEdward Cree /* This is the weight assigned to each of the (per-channel) virtual 1195a6681e2SEdward Cree * NAPI devices. 1205a6681e2SEdward Cree */ 1215a6681e2SEdward Cree static int napi_weight = 64; 1225a6681e2SEdward Cree 1235a6681e2SEdward Cree /* This is the time (in jiffies) between invocations of the hardware 1245a6681e2SEdward Cree * monitor. 1255a6681e2SEdward Cree * On Falcon-based NICs, this will: 1265a6681e2SEdward Cree * - Check the on-board hardware monitor; 1275a6681e2SEdward Cree * - Poll the link state and reconfigure the hardware as necessary. 1285a6681e2SEdward Cree * On Siena-based NICs for power systems with EEH support, this will give EEH a 1295a6681e2SEdward Cree * chance to start. 1305a6681e2SEdward Cree */ 1315a6681e2SEdward Cree static unsigned int ef4_monitor_interval = 1 * HZ; 1325a6681e2SEdward Cree 1335a6681e2SEdward Cree /* Initial interrupt moderation settings. They can be modified after 1345a6681e2SEdward Cree * module load with ethtool. 1355a6681e2SEdward Cree * 1365a6681e2SEdward Cree * The default for RX should strike a balance between increasing the 1375a6681e2SEdward Cree * round-trip latency and reducing overhead. 1385a6681e2SEdward Cree */ 1395a6681e2SEdward Cree static unsigned int rx_irq_mod_usec = 60; 1405a6681e2SEdward Cree 1415a6681e2SEdward Cree /* Initial interrupt moderation settings. They can be modified after 1425a6681e2SEdward Cree * module load with ethtool. 
1435a6681e2SEdward Cree * 1445a6681e2SEdward Cree * This default is chosen to ensure that a 10G link does not go idle 1455a6681e2SEdward Cree * while a TX queue is stopped after it has become full. A queue is 1465a6681e2SEdward Cree * restarted when it drops below half full. The time this takes (assuming 1475a6681e2SEdward Cree * worst case 3 descriptors per packet and 1024 descriptors) is 1485a6681e2SEdward Cree * 512 / 3 * 1.2 = 205 usec. 1495a6681e2SEdward Cree */ 1505a6681e2SEdward Cree static unsigned int tx_irq_mod_usec = 150; 1515a6681e2SEdward Cree 1525a6681e2SEdward Cree /* This is the first interrupt mode to try out of: 1535a6681e2SEdward Cree * 0 => MSI-X 1545a6681e2SEdward Cree * 1 => MSI 1555a6681e2SEdward Cree * 2 => legacy 1565a6681e2SEdward Cree */ 1575a6681e2SEdward Cree static unsigned int interrupt_mode; 1585a6681e2SEdward Cree 1595a6681e2SEdward Cree /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), 1605a6681e2SEdward Cree * i.e. the number of CPUs among which we may distribute simultaneous 1615a6681e2SEdward Cree * interrupt handling. 1625a6681e2SEdward Cree * 1635a6681e2SEdward Cree * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. 1645a6681e2SEdward Cree * The default (0) means to assign an interrupt to each core. 
1655a6681e2SEdward Cree */ 1665a6681e2SEdward Cree static unsigned int rss_cpus; 1675a6681e2SEdward Cree module_param(rss_cpus, uint, 0444); 1685a6681e2SEdward Cree MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 1695a6681e2SEdward Cree 1705a6681e2SEdward Cree static bool phy_flash_cfg; 1715a6681e2SEdward Cree module_param(phy_flash_cfg, bool, 0644); 1725a6681e2SEdward Cree MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); 1735a6681e2SEdward Cree 1745a6681e2SEdward Cree static unsigned irq_adapt_low_thresh = 8000; 1755a6681e2SEdward Cree module_param(irq_adapt_low_thresh, uint, 0644); 1765a6681e2SEdward Cree MODULE_PARM_DESC(irq_adapt_low_thresh, 1775a6681e2SEdward Cree "Threshold score for reducing IRQ moderation"); 1785a6681e2SEdward Cree 1795a6681e2SEdward Cree static unsigned irq_adapt_high_thresh = 16000; 1805a6681e2SEdward Cree module_param(irq_adapt_high_thresh, uint, 0644); 1815a6681e2SEdward Cree MODULE_PARM_DESC(irq_adapt_high_thresh, 1825a6681e2SEdward Cree "Threshold score for increasing IRQ moderation"); 1835a6681e2SEdward Cree 1845a6681e2SEdward Cree static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 1855a6681e2SEdward Cree NETIF_MSG_LINK | NETIF_MSG_IFDOWN | 1865a6681e2SEdward Cree NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | 1875a6681e2SEdward Cree NETIF_MSG_TX_ERR | NETIF_MSG_HW); 1885a6681e2SEdward Cree module_param(debug, uint, 0); 1895a6681e2SEdward Cree MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); 1905a6681e2SEdward Cree 1915a6681e2SEdward Cree /************************************************************************** 1925a6681e2SEdward Cree * 1935a6681e2SEdward Cree * Utility functions and prototypes 1945a6681e2SEdward Cree * 1955a6681e2SEdward Cree *************************************************************************/ 1965a6681e2SEdward Cree 1975a6681e2SEdward Cree static int ef4_soft_enable_interrupts(struct ef4_nic *efx); 1985a6681e2SEdward Cree static void 
ef4_soft_disable_interrupts(struct ef4_nic *efx); 1995a6681e2SEdward Cree static void ef4_remove_channel(struct ef4_channel *channel); 2005a6681e2SEdward Cree static void ef4_remove_channels(struct ef4_nic *efx); 2015a6681e2SEdward Cree static const struct ef4_channel_type ef4_default_channel_type; 2025a6681e2SEdward Cree static void ef4_remove_port(struct ef4_nic *efx); 2035a6681e2SEdward Cree static void ef4_init_napi_channel(struct ef4_channel *channel); 2045a6681e2SEdward Cree static void ef4_fini_napi(struct ef4_nic *efx); 2055a6681e2SEdward Cree static void ef4_fini_napi_channel(struct ef4_channel *channel); 2065a6681e2SEdward Cree static void ef4_fini_struct(struct ef4_nic *efx); 2075a6681e2SEdward Cree static void ef4_start_all(struct ef4_nic *efx); 2085a6681e2SEdward Cree static void ef4_stop_all(struct ef4_nic *efx); 2095a6681e2SEdward Cree 2105a6681e2SEdward Cree #define EF4_ASSERT_RESET_SERIALISED(efx) \ 2115a6681e2SEdward Cree do { \ 2125a6681e2SEdward Cree if ((efx->state == STATE_READY) || \ 2135a6681e2SEdward Cree (efx->state == STATE_RECOVERY) || \ 2145a6681e2SEdward Cree (efx->state == STATE_DISABLED)) \ 2155a6681e2SEdward Cree ASSERT_RTNL(); \ 2165a6681e2SEdward Cree } while (0) 2175a6681e2SEdward Cree 2185a6681e2SEdward Cree static int ef4_check_disabled(struct ef4_nic *efx) 2195a6681e2SEdward Cree { 2205a6681e2SEdward Cree if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { 2215a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 2225a6681e2SEdward Cree "device is disabled due to earlier errors\n"); 2235a6681e2SEdward Cree return -EIO; 2245a6681e2SEdward Cree } 2255a6681e2SEdward Cree return 0; 2265a6681e2SEdward Cree } 2275a6681e2SEdward Cree 2285a6681e2SEdward Cree /************************************************************************** 2295a6681e2SEdward Cree * 2305a6681e2SEdward Cree * Event queue processing 2315a6681e2SEdward Cree * 2325a6681e2SEdward Cree 
*************************************************************************/ 2335a6681e2SEdward Cree 2345a6681e2SEdward Cree /* Process channel's event queue 2355a6681e2SEdward Cree * 2365a6681e2SEdward Cree * This function is responsible for processing the event queue of a 2375a6681e2SEdward Cree * single channel. The caller must guarantee that this function will 2385a6681e2SEdward Cree * never be concurrently called more than once on the same channel, 2395a6681e2SEdward Cree * though different channels may be being processed concurrently. 2405a6681e2SEdward Cree */ 2415a6681e2SEdward Cree static int ef4_process_channel(struct ef4_channel *channel, int budget) 2425a6681e2SEdward Cree { 2435a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 2445a6681e2SEdward Cree int spent; 2455a6681e2SEdward Cree 2465a6681e2SEdward Cree if (unlikely(!channel->enabled)) 2475a6681e2SEdward Cree return 0; 2485a6681e2SEdward Cree 2495a6681e2SEdward Cree ef4_for_each_channel_tx_queue(tx_queue, channel) { 2505a6681e2SEdward Cree tx_queue->pkts_compl = 0; 2515a6681e2SEdward Cree tx_queue->bytes_compl = 0; 2525a6681e2SEdward Cree } 2535a6681e2SEdward Cree 2545a6681e2SEdward Cree spent = ef4_nic_process_eventq(channel, budget); 2555a6681e2SEdward Cree if (spent && ef4_channel_has_rx_queue(channel)) { 2565a6681e2SEdward Cree struct ef4_rx_queue *rx_queue = 2575a6681e2SEdward Cree ef4_channel_get_rx_queue(channel); 2585a6681e2SEdward Cree 2595a6681e2SEdward Cree ef4_rx_flush_packet(channel); 2605a6681e2SEdward Cree ef4_fast_push_rx_descriptors(rx_queue, true); 2615a6681e2SEdward Cree } 2625a6681e2SEdward Cree 2635a6681e2SEdward Cree /* Update BQL */ 2645a6681e2SEdward Cree ef4_for_each_channel_tx_queue(tx_queue, channel) { 2655a6681e2SEdward Cree if (tx_queue->bytes_compl) { 2665a6681e2SEdward Cree netdev_tx_completed_queue(tx_queue->core_txq, 2675a6681e2SEdward Cree tx_queue->pkts_compl, tx_queue->bytes_compl); 2685a6681e2SEdward Cree } 2695a6681e2SEdward Cree } 2705a6681e2SEdward Cree 
2715a6681e2SEdward Cree return spent; 2725a6681e2SEdward Cree } 2735a6681e2SEdward Cree 2745a6681e2SEdward Cree /* NAPI poll handler 2755a6681e2SEdward Cree * 2765a6681e2SEdward Cree * NAPI guarantees serialisation of polls of the same device, which 2775a6681e2SEdward Cree * provides the guarantee required by ef4_process_channel(). 2785a6681e2SEdward Cree */ 2795a6681e2SEdward Cree static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel) 2805a6681e2SEdward Cree { 2815a6681e2SEdward Cree int step = efx->irq_mod_step_us; 2825a6681e2SEdward Cree 2835a6681e2SEdward Cree if (channel->irq_mod_score < irq_adapt_low_thresh) { 2845a6681e2SEdward Cree if (channel->irq_moderation_us > step) { 2855a6681e2SEdward Cree channel->irq_moderation_us -= step; 2865a6681e2SEdward Cree efx->type->push_irq_moderation(channel); 2875a6681e2SEdward Cree } 2885a6681e2SEdward Cree } else if (channel->irq_mod_score > irq_adapt_high_thresh) { 2895a6681e2SEdward Cree if (channel->irq_moderation_us < 2905a6681e2SEdward Cree efx->irq_rx_moderation_us) { 2915a6681e2SEdward Cree channel->irq_moderation_us += step; 2925a6681e2SEdward Cree efx->type->push_irq_moderation(channel); 2935a6681e2SEdward Cree } 2945a6681e2SEdward Cree } 2955a6681e2SEdward Cree 2965a6681e2SEdward Cree channel->irq_count = 0; 2975a6681e2SEdward Cree channel->irq_mod_score = 0; 2985a6681e2SEdward Cree } 2995a6681e2SEdward Cree 3005a6681e2SEdward Cree static int ef4_poll(struct napi_struct *napi, int budget) 3015a6681e2SEdward Cree { 3025a6681e2SEdward Cree struct ef4_channel *channel = 3035a6681e2SEdward Cree container_of(napi, struct ef4_channel, napi_str); 3045a6681e2SEdward Cree struct ef4_nic *efx = channel->efx; 3055a6681e2SEdward Cree int spent; 3065a6681e2SEdward Cree 3075a6681e2SEdward Cree if (!ef4_channel_lock_napi(channel)) 3085a6681e2SEdward Cree return budget; 3095a6681e2SEdward Cree 3105a6681e2SEdward Cree netif_vdbg(efx, intr, efx->net_dev, 3115a6681e2SEdward Cree "channel %d NAPI poll 
executing on CPU %d\n", 3125a6681e2SEdward Cree channel->channel, raw_smp_processor_id()); 3135a6681e2SEdward Cree 3145a6681e2SEdward Cree spent = ef4_process_channel(channel, budget); 3155a6681e2SEdward Cree 3165a6681e2SEdward Cree if (spent < budget) { 3175a6681e2SEdward Cree if (ef4_channel_has_rx_queue(channel) && 3185a6681e2SEdward Cree efx->irq_rx_adaptive && 3195a6681e2SEdward Cree unlikely(++channel->irq_count == 1000)) { 3205a6681e2SEdward Cree ef4_update_irq_mod(efx, channel); 3215a6681e2SEdward Cree } 3225a6681e2SEdward Cree 3235a6681e2SEdward Cree ef4_filter_rfs_expire(channel); 3245a6681e2SEdward Cree 3255a6681e2SEdward Cree /* There is no race here; although napi_disable() will 3265a6681e2SEdward Cree * only wait for napi_complete(), this isn't a problem 3275a6681e2SEdward Cree * since ef4_nic_eventq_read_ack() will have no effect if 3285a6681e2SEdward Cree * interrupts have already been disabled. 3295a6681e2SEdward Cree */ 3305a6681e2SEdward Cree napi_complete(napi); 3315a6681e2SEdward Cree ef4_nic_eventq_read_ack(channel); 3325a6681e2SEdward Cree } 3335a6681e2SEdward Cree 3345a6681e2SEdward Cree ef4_channel_unlock_napi(channel); 3355a6681e2SEdward Cree return spent; 3365a6681e2SEdward Cree } 3375a6681e2SEdward Cree 3385a6681e2SEdward Cree /* Create event queue 3395a6681e2SEdward Cree * Event queue memory allocations are done only once. If the channel 3405a6681e2SEdward Cree * is reset, the memory buffer will be reused; this guards against 3415a6681e2SEdward Cree * errors during channel reset and also simplifies interrupt handling. 
3425a6681e2SEdward Cree */ 3435a6681e2SEdward Cree static int ef4_probe_eventq(struct ef4_channel *channel) 3445a6681e2SEdward Cree { 3455a6681e2SEdward Cree struct ef4_nic *efx = channel->efx; 3465a6681e2SEdward Cree unsigned long entries; 3475a6681e2SEdward Cree 3485a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, 3495a6681e2SEdward Cree "chan %d create event queue\n", channel->channel); 3505a6681e2SEdward Cree 3515a6681e2SEdward Cree /* Build an event queue with room for one event per tx and rx buffer, 3525a6681e2SEdward Cree * plus some extra for link state events and MCDI completions. */ 3535a6681e2SEdward Cree entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); 3545a6681e2SEdward Cree EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE); 3555a6681e2SEdward Cree channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1; 3565a6681e2SEdward Cree 3575a6681e2SEdward Cree return ef4_nic_probe_eventq(channel); 3585a6681e2SEdward Cree } 3595a6681e2SEdward Cree 3605a6681e2SEdward Cree /* Prepare channel's event queue */ 3615a6681e2SEdward Cree static int ef4_init_eventq(struct ef4_channel *channel) 3625a6681e2SEdward Cree { 3635a6681e2SEdward Cree struct ef4_nic *efx = channel->efx; 3645a6681e2SEdward Cree int rc; 3655a6681e2SEdward Cree 3665a6681e2SEdward Cree EF4_WARN_ON_PARANOID(channel->eventq_init); 3675a6681e2SEdward Cree 3685a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, 3695a6681e2SEdward Cree "chan %d init event queue\n", channel->channel); 3705a6681e2SEdward Cree 3715a6681e2SEdward Cree rc = ef4_nic_init_eventq(channel); 3725a6681e2SEdward Cree if (rc == 0) { 3735a6681e2SEdward Cree efx->type->push_irq_moderation(channel); 3745a6681e2SEdward Cree channel->eventq_read_ptr = 0; 3755a6681e2SEdward Cree channel->eventq_init = true; 3765a6681e2SEdward Cree } 3775a6681e2SEdward Cree return rc; 3785a6681e2SEdward Cree } 3795a6681e2SEdward Cree 3805a6681e2SEdward Cree /* Enable event queue processing and NAPI */ 3815a6681e2SEdward Cree 
void ef4_start_eventq(struct ef4_channel *channel) 3825a6681e2SEdward Cree { 3835a6681e2SEdward Cree netif_dbg(channel->efx, ifup, channel->efx->net_dev, 3845a6681e2SEdward Cree "chan %d start event queue\n", channel->channel); 3855a6681e2SEdward Cree 3865a6681e2SEdward Cree /* Make sure the NAPI handler sees the enabled flag set */ 3875a6681e2SEdward Cree channel->enabled = true; 3885a6681e2SEdward Cree smp_wmb(); 3895a6681e2SEdward Cree 3905a6681e2SEdward Cree ef4_channel_enable(channel); 3915a6681e2SEdward Cree napi_enable(&channel->napi_str); 3925a6681e2SEdward Cree ef4_nic_eventq_read_ack(channel); 3935a6681e2SEdward Cree } 3945a6681e2SEdward Cree 3955a6681e2SEdward Cree /* Disable event queue processing and NAPI */ 3965a6681e2SEdward Cree void ef4_stop_eventq(struct ef4_channel *channel) 3975a6681e2SEdward Cree { 3985a6681e2SEdward Cree if (!channel->enabled) 3995a6681e2SEdward Cree return; 4005a6681e2SEdward Cree 4015a6681e2SEdward Cree napi_disable(&channel->napi_str); 4025a6681e2SEdward Cree while (!ef4_channel_disable(channel)) 4035a6681e2SEdward Cree usleep_range(1000, 20000); 4045a6681e2SEdward Cree channel->enabled = false; 4055a6681e2SEdward Cree } 4065a6681e2SEdward Cree 4075a6681e2SEdward Cree static void ef4_fini_eventq(struct ef4_channel *channel) 4085a6681e2SEdward Cree { 4095a6681e2SEdward Cree if (!channel->eventq_init) 4105a6681e2SEdward Cree return; 4115a6681e2SEdward Cree 4125a6681e2SEdward Cree netif_dbg(channel->efx, drv, channel->efx->net_dev, 4135a6681e2SEdward Cree "chan %d fini event queue\n", channel->channel); 4145a6681e2SEdward Cree 4155a6681e2SEdward Cree ef4_nic_fini_eventq(channel); 4165a6681e2SEdward Cree channel->eventq_init = false; 4175a6681e2SEdward Cree } 4185a6681e2SEdward Cree 4195a6681e2SEdward Cree static void ef4_remove_eventq(struct ef4_channel *channel) 4205a6681e2SEdward Cree { 4215a6681e2SEdward Cree netif_dbg(channel->efx, drv, channel->efx->net_dev, 4225a6681e2SEdward Cree "chan %d remove event queue\n", 
channel->channel); 4235a6681e2SEdward Cree 4245a6681e2SEdward Cree ef4_nic_remove_eventq(channel); 4255a6681e2SEdward Cree } 4265a6681e2SEdward Cree 4275a6681e2SEdward Cree /************************************************************************** 4285a6681e2SEdward Cree * 4295a6681e2SEdward Cree * Channel handling 4305a6681e2SEdward Cree * 4315a6681e2SEdward Cree *************************************************************************/ 4325a6681e2SEdward Cree 4335a6681e2SEdward Cree /* Allocate and initialise a channel structure. */ 4345a6681e2SEdward Cree static struct ef4_channel * 4355a6681e2SEdward Cree ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel) 4365a6681e2SEdward Cree { 4375a6681e2SEdward Cree struct ef4_channel *channel; 4385a6681e2SEdward Cree struct ef4_rx_queue *rx_queue; 4395a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 4405a6681e2SEdward Cree int j; 4415a6681e2SEdward Cree 4425a6681e2SEdward Cree channel = kzalloc(sizeof(*channel), GFP_KERNEL); 4435a6681e2SEdward Cree if (!channel) 4445a6681e2SEdward Cree return NULL; 4455a6681e2SEdward Cree 4465a6681e2SEdward Cree channel->efx = efx; 4475a6681e2SEdward Cree channel->channel = i; 4485a6681e2SEdward Cree channel->type = &ef4_default_channel_type; 4495a6681e2SEdward Cree 4505a6681e2SEdward Cree for (j = 0; j < EF4_TXQ_TYPES; j++) { 4515a6681e2SEdward Cree tx_queue = &channel->tx_queue[j]; 4525a6681e2SEdward Cree tx_queue->efx = efx; 4535a6681e2SEdward Cree tx_queue->queue = i * EF4_TXQ_TYPES + j; 4545a6681e2SEdward Cree tx_queue->channel = channel; 4555a6681e2SEdward Cree } 4565a6681e2SEdward Cree 4575a6681e2SEdward Cree rx_queue = &channel->rx_queue; 4585a6681e2SEdward Cree rx_queue->efx = efx; 4595a6681e2SEdward Cree setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill, 4605a6681e2SEdward Cree (unsigned long)rx_queue); 4615a6681e2SEdward Cree 4625a6681e2SEdward Cree return channel; 4635a6681e2SEdward Cree } 4645a6681e2SEdward Cree 4655a6681e2SEdward Cree /* 
Allocate and initialise a channel structure, copying parameters 4665a6681e2SEdward Cree * (but not resources) from an old channel structure. 4675a6681e2SEdward Cree */ 4685a6681e2SEdward Cree static struct ef4_channel * 4695a6681e2SEdward Cree ef4_copy_channel(const struct ef4_channel *old_channel) 4705a6681e2SEdward Cree { 4715a6681e2SEdward Cree struct ef4_channel *channel; 4725a6681e2SEdward Cree struct ef4_rx_queue *rx_queue; 4735a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 4745a6681e2SEdward Cree int j; 4755a6681e2SEdward Cree 4765a6681e2SEdward Cree channel = kmalloc(sizeof(*channel), GFP_KERNEL); 4775a6681e2SEdward Cree if (!channel) 4785a6681e2SEdward Cree return NULL; 4795a6681e2SEdward Cree 4805a6681e2SEdward Cree *channel = *old_channel; 4815a6681e2SEdward Cree 4825a6681e2SEdward Cree channel->napi_dev = NULL; 4835a6681e2SEdward Cree INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); 4845a6681e2SEdward Cree channel->napi_str.napi_id = 0; 4855a6681e2SEdward Cree channel->napi_str.state = 0; 4865a6681e2SEdward Cree memset(&channel->eventq, 0, sizeof(channel->eventq)); 4875a6681e2SEdward Cree 4885a6681e2SEdward Cree for (j = 0; j < EF4_TXQ_TYPES; j++) { 4895a6681e2SEdward Cree tx_queue = &channel->tx_queue[j]; 4905a6681e2SEdward Cree if (tx_queue->channel) 4915a6681e2SEdward Cree tx_queue->channel = channel; 4925a6681e2SEdward Cree tx_queue->buffer = NULL; 4935a6681e2SEdward Cree memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); 4945a6681e2SEdward Cree } 4955a6681e2SEdward Cree 4965a6681e2SEdward Cree rx_queue = &channel->rx_queue; 4975a6681e2SEdward Cree rx_queue->buffer = NULL; 4985a6681e2SEdward Cree memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); 4995a6681e2SEdward Cree setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill, 5005a6681e2SEdward Cree (unsigned long)rx_queue); 5015a6681e2SEdward Cree 5025a6681e2SEdward Cree return channel; 5035a6681e2SEdward Cree } 5045a6681e2SEdward Cree 5055a6681e2SEdward Cree static int ef4_probe_channel(struct 
ef4_channel *channel) 5065a6681e2SEdward Cree { 5075a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 5085a6681e2SEdward Cree struct ef4_rx_queue *rx_queue; 5095a6681e2SEdward Cree int rc; 5105a6681e2SEdward Cree 5115a6681e2SEdward Cree netif_dbg(channel->efx, probe, channel->efx->net_dev, 5125a6681e2SEdward Cree "creating channel %d\n", channel->channel); 5135a6681e2SEdward Cree 5145a6681e2SEdward Cree rc = channel->type->pre_probe(channel); 5155a6681e2SEdward Cree if (rc) 5165a6681e2SEdward Cree goto fail; 5175a6681e2SEdward Cree 5185a6681e2SEdward Cree rc = ef4_probe_eventq(channel); 5195a6681e2SEdward Cree if (rc) 5205a6681e2SEdward Cree goto fail; 5215a6681e2SEdward Cree 5225a6681e2SEdward Cree ef4_for_each_channel_tx_queue(tx_queue, channel) { 5235a6681e2SEdward Cree rc = ef4_probe_tx_queue(tx_queue); 5245a6681e2SEdward Cree if (rc) 5255a6681e2SEdward Cree goto fail; 5265a6681e2SEdward Cree } 5275a6681e2SEdward Cree 5285a6681e2SEdward Cree ef4_for_each_channel_rx_queue(rx_queue, channel) { 5295a6681e2SEdward Cree rc = ef4_probe_rx_queue(rx_queue); 5305a6681e2SEdward Cree if (rc) 5315a6681e2SEdward Cree goto fail; 5325a6681e2SEdward Cree } 5335a6681e2SEdward Cree 5345a6681e2SEdward Cree return 0; 5355a6681e2SEdward Cree 5365a6681e2SEdward Cree fail: 5375a6681e2SEdward Cree ef4_remove_channel(channel); 5385a6681e2SEdward Cree return rc; 5395a6681e2SEdward Cree } 5405a6681e2SEdward Cree 5415a6681e2SEdward Cree static void 5425a6681e2SEdward Cree ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len) 5435a6681e2SEdward Cree { 5445a6681e2SEdward Cree struct ef4_nic *efx = channel->efx; 5455a6681e2SEdward Cree const char *type; 5465a6681e2SEdward Cree int number; 5475a6681e2SEdward Cree 5485a6681e2SEdward Cree number = channel->channel; 5495a6681e2SEdward Cree if (efx->tx_channel_offset == 0) { 5505a6681e2SEdward Cree type = ""; 5515a6681e2SEdward Cree } else if (channel->channel < efx->tx_channel_offset) { 5525a6681e2SEdward Cree type = "-rx"; 
5535a6681e2SEdward Cree } else { 5545a6681e2SEdward Cree type = "-tx"; 5555a6681e2SEdward Cree number -= efx->tx_channel_offset; 5565a6681e2SEdward Cree } 5575a6681e2SEdward Cree snprintf(buf, len, "%s%s-%d", efx->name, type, number); 5585a6681e2SEdward Cree } 5595a6681e2SEdward Cree 5605a6681e2SEdward Cree static void ef4_set_channel_names(struct ef4_nic *efx) 5615a6681e2SEdward Cree { 5625a6681e2SEdward Cree struct ef4_channel *channel; 5635a6681e2SEdward Cree 5645a6681e2SEdward Cree ef4_for_each_channel(channel, efx) 5655a6681e2SEdward Cree channel->type->get_name(channel, 5665a6681e2SEdward Cree efx->msi_context[channel->channel].name, 5675a6681e2SEdward Cree sizeof(efx->msi_context[0].name)); 5685a6681e2SEdward Cree } 5695a6681e2SEdward Cree 5705a6681e2SEdward Cree static int ef4_probe_channels(struct ef4_nic *efx) 5715a6681e2SEdward Cree { 5725a6681e2SEdward Cree struct ef4_channel *channel; 5735a6681e2SEdward Cree int rc; 5745a6681e2SEdward Cree 5755a6681e2SEdward Cree /* Restart special buffer allocation */ 5765a6681e2SEdward Cree efx->next_buffer_table = 0; 5775a6681e2SEdward Cree 5785a6681e2SEdward Cree /* Probe channels in reverse, so that any 'extra' channels 5795a6681e2SEdward Cree * use the start of the buffer table. This allows the traffic 5805a6681e2SEdward Cree * channels to be resized without moving them or wasting the 5815a6681e2SEdward Cree * entries before them. 
5825a6681e2SEdward Cree */ 5835a6681e2SEdward Cree ef4_for_each_channel_rev(channel, efx) { 5845a6681e2SEdward Cree rc = ef4_probe_channel(channel); 5855a6681e2SEdward Cree if (rc) { 5865a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 5875a6681e2SEdward Cree "failed to create channel %d\n", 5885a6681e2SEdward Cree channel->channel); 5895a6681e2SEdward Cree goto fail; 5905a6681e2SEdward Cree } 5915a6681e2SEdward Cree } 5925a6681e2SEdward Cree ef4_set_channel_names(efx); 5935a6681e2SEdward Cree 5945a6681e2SEdward Cree return 0; 5955a6681e2SEdward Cree 5965a6681e2SEdward Cree fail: 5975a6681e2SEdward Cree ef4_remove_channels(efx); 5985a6681e2SEdward Cree return rc; 5995a6681e2SEdward Cree } 6005a6681e2SEdward Cree 6015a6681e2SEdward Cree /* Channels are shutdown and reinitialised whilst the NIC is running 6025a6681e2SEdward Cree * to propagate configuration changes (mtu, checksum offload), or 6035a6681e2SEdward Cree * to clear hardware error conditions 6045a6681e2SEdward Cree */ 6055a6681e2SEdward Cree static void ef4_start_datapath(struct ef4_nic *efx) 6065a6681e2SEdward Cree { 6075a6681e2SEdward Cree netdev_features_t old_features = efx->net_dev->features; 6085a6681e2SEdward Cree bool old_rx_scatter = efx->rx_scatter; 6095a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 6105a6681e2SEdward Cree struct ef4_rx_queue *rx_queue; 6115a6681e2SEdward Cree struct ef4_channel *channel; 6125a6681e2SEdward Cree size_t rx_buf_len; 6135a6681e2SEdward Cree 6145a6681e2SEdward Cree /* Calculate the rx buffer allocation parameters required to 6155a6681e2SEdward Cree * support the current MTU, including padding for header 6165a6681e2SEdward Cree * alignment and overruns. 
6175a6681e2SEdward Cree */ 6185a6681e2SEdward Cree efx->rx_dma_len = (efx->rx_prefix_size + 6195a6681e2SEdward Cree EF4_MAX_FRAME_LEN(efx->net_dev->mtu) + 6205a6681e2SEdward Cree efx->type->rx_buffer_padding); 6215a6681e2SEdward Cree rx_buf_len = (sizeof(struct ef4_rx_page_state) + 6225a6681e2SEdward Cree efx->rx_ip_align + efx->rx_dma_len); 6235a6681e2SEdward Cree if (rx_buf_len <= PAGE_SIZE) { 6245a6681e2SEdward Cree efx->rx_scatter = efx->type->always_rx_scatter; 6255a6681e2SEdward Cree efx->rx_buffer_order = 0; 6265a6681e2SEdward Cree } else if (efx->type->can_rx_scatter) { 6275a6681e2SEdward Cree BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES); 6285a6681e2SEdward Cree BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) + 6295a6681e2SEdward Cree 2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE, 6305a6681e2SEdward Cree EF4_RX_BUF_ALIGNMENT) > 6315a6681e2SEdward Cree PAGE_SIZE); 6325a6681e2SEdward Cree efx->rx_scatter = true; 6335a6681e2SEdward Cree efx->rx_dma_len = EF4_RX_USR_BUF_SIZE; 6345a6681e2SEdward Cree efx->rx_buffer_order = 0; 6355a6681e2SEdward Cree } else { 6365a6681e2SEdward Cree efx->rx_scatter = false; 6375a6681e2SEdward Cree efx->rx_buffer_order = get_order(rx_buf_len); 6385a6681e2SEdward Cree } 6395a6681e2SEdward Cree 6405a6681e2SEdward Cree ef4_rx_config_page_split(efx); 6415a6681e2SEdward Cree if (efx->rx_buffer_order) 6425a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, 6435a6681e2SEdward Cree "RX buf len=%u; page order=%u batch=%u\n", 6445a6681e2SEdward Cree efx->rx_dma_len, efx->rx_buffer_order, 6455a6681e2SEdward Cree efx->rx_pages_per_batch); 6465a6681e2SEdward Cree else 6475a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, 6485a6681e2SEdward Cree "RX buf len=%u step=%u bpp=%u; page batch=%u\n", 6495a6681e2SEdward Cree efx->rx_dma_len, efx->rx_page_buf_step, 6505a6681e2SEdward Cree efx->rx_bufs_per_page, efx->rx_pages_per_batch); 6515a6681e2SEdward Cree 6525a6681e2SEdward Cree /* Restore previously fixed features in hw_features and 
remove 6535a6681e2SEdward Cree * features which are fixed now 6545a6681e2SEdward Cree */ 6555a6681e2SEdward Cree efx->net_dev->hw_features |= efx->net_dev->features; 6565a6681e2SEdward Cree efx->net_dev->hw_features &= ~efx->fixed_features; 6575a6681e2SEdward Cree efx->net_dev->features |= efx->fixed_features; 6585a6681e2SEdward Cree if (efx->net_dev->features != old_features) 6595a6681e2SEdward Cree netdev_features_change(efx->net_dev); 6605a6681e2SEdward Cree 6615a6681e2SEdward Cree /* RX filters may also have scatter-enabled flags */ 6625a6681e2SEdward Cree if (efx->rx_scatter != old_rx_scatter) 6635a6681e2SEdward Cree efx->type->filter_update_rx_scatter(efx); 6645a6681e2SEdward Cree 6655a6681e2SEdward Cree /* We must keep at least one descriptor in a TX ring empty. 6665a6681e2SEdward Cree * We could avoid this when the queue size does not exactly 6675a6681e2SEdward Cree * match the hardware ring size, but it's not that important. 6685a6681e2SEdward Cree * Therefore we stop the queue when one more skb might fill 6695a6681e2SEdward Cree * the ring completely. We wake it when half way back to 6705a6681e2SEdward Cree * empty. 
6715a6681e2SEdward Cree */ 6725a6681e2SEdward Cree efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx); 6735a6681e2SEdward Cree efx->txq_wake_thresh = efx->txq_stop_thresh / 2; 6745a6681e2SEdward Cree 6755a6681e2SEdward Cree /* Initialise the channels */ 6765a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 6775a6681e2SEdward Cree ef4_for_each_channel_tx_queue(tx_queue, channel) { 6785a6681e2SEdward Cree ef4_init_tx_queue(tx_queue); 6795a6681e2SEdward Cree atomic_inc(&efx->active_queues); 6805a6681e2SEdward Cree } 6815a6681e2SEdward Cree 6825a6681e2SEdward Cree ef4_for_each_channel_rx_queue(rx_queue, channel) { 6835a6681e2SEdward Cree ef4_init_rx_queue(rx_queue); 6845a6681e2SEdward Cree atomic_inc(&efx->active_queues); 6855a6681e2SEdward Cree ef4_stop_eventq(channel); 6865a6681e2SEdward Cree ef4_fast_push_rx_descriptors(rx_queue, false); 6875a6681e2SEdward Cree ef4_start_eventq(channel); 6885a6681e2SEdward Cree } 6895a6681e2SEdward Cree 6905a6681e2SEdward Cree WARN_ON(channel->rx_pkt_n_frags); 6915a6681e2SEdward Cree } 6925a6681e2SEdward Cree 6935a6681e2SEdward Cree if (netif_device_present(efx->net_dev)) 6945a6681e2SEdward Cree netif_tx_wake_all_queues(efx->net_dev); 6955a6681e2SEdward Cree } 6965a6681e2SEdward Cree 6975a6681e2SEdward Cree static void ef4_stop_datapath(struct ef4_nic *efx) 6985a6681e2SEdward Cree { 6995a6681e2SEdward Cree struct ef4_channel *channel; 7005a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 7015a6681e2SEdward Cree struct ef4_rx_queue *rx_queue; 7025a6681e2SEdward Cree int rc; 7035a6681e2SEdward Cree 7045a6681e2SEdward Cree EF4_ASSERT_RESET_SERIALISED(efx); 7055a6681e2SEdward Cree BUG_ON(efx->port_enabled); 7065a6681e2SEdward Cree 7075a6681e2SEdward Cree /* Stop RX refill */ 7085a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 7095a6681e2SEdward Cree ef4_for_each_channel_rx_queue(rx_queue, channel) 7105a6681e2SEdward Cree rx_queue->refill_enabled = false; 7115a6681e2SEdward Cree } 7125a6681e2SEdward Cree 
7135a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 7145a6681e2SEdward Cree /* RX packet processing is pipelined, so wait for the 7155a6681e2SEdward Cree * NAPI handler to complete. At least event queue 0 7165a6681e2SEdward Cree * might be kept active by non-data events, so don't 7175a6681e2SEdward Cree * use napi_synchronize() but actually disable NAPI 7185a6681e2SEdward Cree * temporarily. 7195a6681e2SEdward Cree */ 7205a6681e2SEdward Cree if (ef4_channel_has_rx_queue(channel)) { 7215a6681e2SEdward Cree ef4_stop_eventq(channel); 7225a6681e2SEdward Cree ef4_start_eventq(channel); 7235a6681e2SEdward Cree } 7245a6681e2SEdward Cree } 7255a6681e2SEdward Cree 7265a6681e2SEdward Cree rc = efx->type->fini_dmaq(efx); 7275a6681e2SEdward Cree if (rc && EF4_WORKAROUND_7803(efx)) { 7285a6681e2SEdward Cree /* Schedule a reset to recover from the flush failure. The 7295a6681e2SEdward Cree * descriptor caches reference memory we're about to free, 7305a6681e2SEdward Cree * but falcon_reconfigure_mac_wrapper() won't reconnect 7315a6681e2SEdward Cree * the MACs because of the pending reset. 
7325a6681e2SEdward Cree */ 7335a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 7345a6681e2SEdward Cree "Resetting to recover from flush failure\n"); 7355a6681e2SEdward Cree ef4_schedule_reset(efx, RESET_TYPE_ALL); 7365a6681e2SEdward Cree } else if (rc) { 7375a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); 7385a6681e2SEdward Cree } else { 7395a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, 7405a6681e2SEdward Cree "successfully flushed all queues\n"); 7415a6681e2SEdward Cree } 7425a6681e2SEdward Cree 7435a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 7445a6681e2SEdward Cree ef4_for_each_channel_rx_queue(rx_queue, channel) 7455a6681e2SEdward Cree ef4_fini_rx_queue(rx_queue); 7465a6681e2SEdward Cree ef4_for_each_possible_channel_tx_queue(tx_queue, channel) 7475a6681e2SEdward Cree ef4_fini_tx_queue(tx_queue); 7485a6681e2SEdward Cree } 7495a6681e2SEdward Cree } 7505a6681e2SEdward Cree 7515a6681e2SEdward Cree static void ef4_remove_channel(struct ef4_channel *channel) 7525a6681e2SEdward Cree { 7535a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 7545a6681e2SEdward Cree struct ef4_rx_queue *rx_queue; 7555a6681e2SEdward Cree 7565a6681e2SEdward Cree netif_dbg(channel->efx, drv, channel->efx->net_dev, 7575a6681e2SEdward Cree "destroy chan %d\n", channel->channel); 7585a6681e2SEdward Cree 7595a6681e2SEdward Cree ef4_for_each_channel_rx_queue(rx_queue, channel) 7605a6681e2SEdward Cree ef4_remove_rx_queue(rx_queue); 7615a6681e2SEdward Cree ef4_for_each_possible_channel_tx_queue(tx_queue, channel) 7625a6681e2SEdward Cree ef4_remove_tx_queue(tx_queue); 7635a6681e2SEdward Cree ef4_remove_eventq(channel); 7645a6681e2SEdward Cree channel->type->post_remove(channel); 7655a6681e2SEdward Cree } 7665a6681e2SEdward Cree 7675a6681e2SEdward Cree static void ef4_remove_channels(struct ef4_nic *efx) 7685a6681e2SEdward Cree { 7695a6681e2SEdward Cree struct ef4_channel *channel; 7705a6681e2SEdward Cree 7715a6681e2SEdward Cree 
ef4_for_each_channel(channel, efx) 7725a6681e2SEdward Cree ef4_remove_channel(channel); 7735a6681e2SEdward Cree } 7745a6681e2SEdward Cree 7755a6681e2SEdward Cree int 7765a6681e2SEdward Cree ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries) 7775a6681e2SEdward Cree { 7785a6681e2SEdward Cree struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel; 7795a6681e2SEdward Cree u32 old_rxq_entries, old_txq_entries; 7805a6681e2SEdward Cree unsigned i, next_buffer_table = 0; 7815a6681e2SEdward Cree int rc, rc2; 7825a6681e2SEdward Cree 7835a6681e2SEdward Cree rc = ef4_check_disabled(efx); 7845a6681e2SEdward Cree if (rc) 7855a6681e2SEdward Cree return rc; 7865a6681e2SEdward Cree 7875a6681e2SEdward Cree /* Not all channels should be reallocated. We must avoid 7885a6681e2SEdward Cree * reallocating their buffer table entries. 7895a6681e2SEdward Cree */ 7905a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 7915a6681e2SEdward Cree struct ef4_rx_queue *rx_queue; 7925a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 7935a6681e2SEdward Cree 7945a6681e2SEdward Cree if (channel->type->copy) 7955a6681e2SEdward Cree continue; 7965a6681e2SEdward Cree next_buffer_table = max(next_buffer_table, 7975a6681e2SEdward Cree channel->eventq.index + 7985a6681e2SEdward Cree channel->eventq.entries); 7995a6681e2SEdward Cree ef4_for_each_channel_rx_queue(rx_queue, channel) 8005a6681e2SEdward Cree next_buffer_table = max(next_buffer_table, 8015a6681e2SEdward Cree rx_queue->rxd.index + 8025a6681e2SEdward Cree rx_queue->rxd.entries); 8035a6681e2SEdward Cree ef4_for_each_channel_tx_queue(tx_queue, channel) 8045a6681e2SEdward Cree next_buffer_table = max(next_buffer_table, 8055a6681e2SEdward Cree tx_queue->txd.index + 8065a6681e2SEdward Cree tx_queue->txd.entries); 8075a6681e2SEdward Cree } 8085a6681e2SEdward Cree 8095a6681e2SEdward Cree ef4_device_detach_sync(efx); 8105a6681e2SEdward Cree ef4_stop_all(efx); 8115a6681e2SEdward Cree ef4_soft_disable_interrupts(efx); 
8125a6681e2SEdward Cree 8135a6681e2SEdward Cree /* Clone channels (where possible) */ 8145a6681e2SEdward Cree memset(other_channel, 0, sizeof(other_channel)); 8155a6681e2SEdward Cree for (i = 0; i < efx->n_channels; i++) { 8165a6681e2SEdward Cree channel = efx->channel[i]; 8175a6681e2SEdward Cree if (channel->type->copy) 8185a6681e2SEdward Cree channel = channel->type->copy(channel); 8195a6681e2SEdward Cree if (!channel) { 8205a6681e2SEdward Cree rc = -ENOMEM; 8215a6681e2SEdward Cree goto out; 8225a6681e2SEdward Cree } 8235a6681e2SEdward Cree other_channel[i] = channel; 8245a6681e2SEdward Cree } 8255a6681e2SEdward Cree 8265a6681e2SEdward Cree /* Swap entry counts and channel pointers */ 8275a6681e2SEdward Cree old_rxq_entries = efx->rxq_entries; 8285a6681e2SEdward Cree old_txq_entries = efx->txq_entries; 8295a6681e2SEdward Cree efx->rxq_entries = rxq_entries; 8305a6681e2SEdward Cree efx->txq_entries = txq_entries; 8315a6681e2SEdward Cree for (i = 0; i < efx->n_channels; i++) { 8325a6681e2SEdward Cree channel = efx->channel[i]; 8335a6681e2SEdward Cree efx->channel[i] = other_channel[i]; 8345a6681e2SEdward Cree other_channel[i] = channel; 8355a6681e2SEdward Cree } 8365a6681e2SEdward Cree 8375a6681e2SEdward Cree /* Restart buffer table allocation */ 8385a6681e2SEdward Cree efx->next_buffer_table = next_buffer_table; 8395a6681e2SEdward Cree 8405a6681e2SEdward Cree for (i = 0; i < efx->n_channels; i++) { 8415a6681e2SEdward Cree channel = efx->channel[i]; 8425a6681e2SEdward Cree if (!channel->type->copy) 8435a6681e2SEdward Cree continue; 8445a6681e2SEdward Cree rc = ef4_probe_channel(channel); 8455a6681e2SEdward Cree if (rc) 8465a6681e2SEdward Cree goto rollback; 8475a6681e2SEdward Cree ef4_init_napi_channel(efx->channel[i]); 8485a6681e2SEdward Cree } 8495a6681e2SEdward Cree 8505a6681e2SEdward Cree out: 8515a6681e2SEdward Cree /* Destroy unused channel structures */ 8525a6681e2SEdward Cree for (i = 0; i < efx->n_channels; i++) { 8535a6681e2SEdward Cree channel = 
other_channel[i]; 8545a6681e2SEdward Cree if (channel && channel->type->copy) { 8555a6681e2SEdward Cree ef4_fini_napi_channel(channel); 8565a6681e2SEdward Cree ef4_remove_channel(channel); 8575a6681e2SEdward Cree kfree(channel); 8585a6681e2SEdward Cree } 8595a6681e2SEdward Cree } 8605a6681e2SEdward Cree 8615a6681e2SEdward Cree rc2 = ef4_soft_enable_interrupts(efx); 8625a6681e2SEdward Cree if (rc2) { 8635a6681e2SEdward Cree rc = rc ? rc : rc2; 8645a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 8655a6681e2SEdward Cree "unable to restart interrupts on channel reallocation\n"); 8665a6681e2SEdward Cree ef4_schedule_reset(efx, RESET_TYPE_DISABLE); 8675a6681e2SEdward Cree } else { 8685a6681e2SEdward Cree ef4_start_all(efx); 8695a6681e2SEdward Cree netif_device_attach(efx->net_dev); 8705a6681e2SEdward Cree } 8715a6681e2SEdward Cree return rc; 8725a6681e2SEdward Cree 8735a6681e2SEdward Cree rollback: 8745a6681e2SEdward Cree /* Swap back */ 8755a6681e2SEdward Cree efx->rxq_entries = old_rxq_entries; 8765a6681e2SEdward Cree efx->txq_entries = old_txq_entries; 8775a6681e2SEdward Cree for (i = 0; i < efx->n_channels; i++) { 8785a6681e2SEdward Cree channel = efx->channel[i]; 8795a6681e2SEdward Cree efx->channel[i] = other_channel[i]; 8805a6681e2SEdward Cree other_channel[i] = channel; 8815a6681e2SEdward Cree } 8825a6681e2SEdward Cree goto out; 8835a6681e2SEdward Cree } 8845a6681e2SEdward Cree 8855a6681e2SEdward Cree void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue) 8865a6681e2SEdward Cree { 8875a6681e2SEdward Cree mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); 8885a6681e2SEdward Cree } 8895a6681e2SEdward Cree 8905a6681e2SEdward Cree static const struct ef4_channel_type ef4_default_channel_type = { 8915a6681e2SEdward Cree .pre_probe = ef4_channel_dummy_op_int, 8925a6681e2SEdward Cree .post_remove = ef4_channel_dummy_op_void, 8935a6681e2SEdward Cree .get_name = ef4_get_channel_name, 8945a6681e2SEdward Cree .copy = ef4_copy_channel, 
8955a6681e2SEdward Cree .keep_eventq = false, 8965a6681e2SEdward Cree }; 8975a6681e2SEdward Cree 8985a6681e2SEdward Cree int ef4_channel_dummy_op_int(struct ef4_channel *channel) 8995a6681e2SEdward Cree { 9005a6681e2SEdward Cree return 0; 9015a6681e2SEdward Cree } 9025a6681e2SEdward Cree 9035a6681e2SEdward Cree void ef4_channel_dummy_op_void(struct ef4_channel *channel) 9045a6681e2SEdward Cree { 9055a6681e2SEdward Cree } 9065a6681e2SEdward Cree 9075a6681e2SEdward Cree /************************************************************************** 9085a6681e2SEdward Cree * 9095a6681e2SEdward Cree * Port handling 9105a6681e2SEdward Cree * 9115a6681e2SEdward Cree **************************************************************************/ 9125a6681e2SEdward Cree 9135a6681e2SEdward Cree /* This ensures that the kernel is kept informed (via 9145a6681e2SEdward Cree * netif_carrier_on/off) of the link status, and also maintains the 9155a6681e2SEdward Cree * link status's stop on the port's TX queue. 9165a6681e2SEdward Cree */ 9175a6681e2SEdward Cree void ef4_link_status_changed(struct ef4_nic *efx) 9185a6681e2SEdward Cree { 9195a6681e2SEdward Cree struct ef4_link_state *link_state = &efx->link_state; 9205a6681e2SEdward Cree 9215a6681e2SEdward Cree /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 9225a6681e2SEdward Cree * that no events are triggered between unregister_netdev() and the 9235a6681e2SEdward Cree * driver unloading. 
A more general condition is that NETDEV_CHANGE 9245a6681e2SEdward Cree * can only be generated between NETDEV_UP and NETDEV_DOWN */ 9255a6681e2SEdward Cree if (!netif_running(efx->net_dev)) 9265a6681e2SEdward Cree return; 9275a6681e2SEdward Cree 9285a6681e2SEdward Cree if (link_state->up != netif_carrier_ok(efx->net_dev)) { 9295a6681e2SEdward Cree efx->n_link_state_changes++; 9305a6681e2SEdward Cree 9315a6681e2SEdward Cree if (link_state->up) 9325a6681e2SEdward Cree netif_carrier_on(efx->net_dev); 9335a6681e2SEdward Cree else 9345a6681e2SEdward Cree netif_carrier_off(efx->net_dev); 9355a6681e2SEdward Cree } 9365a6681e2SEdward Cree 9375a6681e2SEdward Cree /* Status message for kernel log */ 9385a6681e2SEdward Cree if (link_state->up) 9395a6681e2SEdward Cree netif_info(efx, link, efx->net_dev, 9405a6681e2SEdward Cree "link up at %uMbps %s-duplex (MTU %d)\n", 9415a6681e2SEdward Cree link_state->speed, link_state->fd ? "full" : "half", 9425a6681e2SEdward Cree efx->net_dev->mtu); 9435a6681e2SEdward Cree else 9445a6681e2SEdward Cree netif_info(efx, link, efx->net_dev, "link down\n"); 9455a6681e2SEdward Cree } 9465a6681e2SEdward Cree 9475a6681e2SEdward Cree void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising) 9485a6681e2SEdward Cree { 9495a6681e2SEdward Cree efx->link_advertising = advertising; 9505a6681e2SEdward Cree if (advertising) { 9515a6681e2SEdward Cree if (advertising & ADVERTISED_Pause) 9525a6681e2SEdward Cree efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX); 9535a6681e2SEdward Cree else 9545a6681e2SEdward Cree efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX); 9555a6681e2SEdward Cree if (advertising & ADVERTISED_Asym_Pause) 9565a6681e2SEdward Cree efx->wanted_fc ^= EF4_FC_TX; 9575a6681e2SEdward Cree } 9585a6681e2SEdward Cree } 9595a6681e2SEdward Cree 9605a6681e2SEdward Cree void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc) 9615a6681e2SEdward Cree { 9625a6681e2SEdward Cree efx->wanted_fc = wanted_fc; 9635a6681e2SEdward Cree if 
(efx->link_advertising) { 9645a6681e2SEdward Cree if (wanted_fc & EF4_FC_RX) 9655a6681e2SEdward Cree efx->link_advertising |= (ADVERTISED_Pause | 9665a6681e2SEdward Cree ADVERTISED_Asym_Pause); 9675a6681e2SEdward Cree else 9685a6681e2SEdward Cree efx->link_advertising &= ~(ADVERTISED_Pause | 9695a6681e2SEdward Cree ADVERTISED_Asym_Pause); 9705a6681e2SEdward Cree if (wanted_fc & EF4_FC_TX) 9715a6681e2SEdward Cree efx->link_advertising ^= ADVERTISED_Asym_Pause; 9725a6681e2SEdward Cree } 9735a6681e2SEdward Cree } 9745a6681e2SEdward Cree 9755a6681e2SEdward Cree static void ef4_fini_port(struct ef4_nic *efx); 9765a6681e2SEdward Cree 9775a6681e2SEdward Cree /* We assume that efx->type->reconfigure_mac will always try to sync RX 9785a6681e2SEdward Cree * filters and therefore needs to read-lock the filter table against freeing 9795a6681e2SEdward Cree */ 9805a6681e2SEdward Cree void ef4_mac_reconfigure(struct ef4_nic *efx) 9815a6681e2SEdward Cree { 9825a6681e2SEdward Cree down_read(&efx->filter_sem); 9835a6681e2SEdward Cree efx->type->reconfigure_mac(efx); 9845a6681e2SEdward Cree up_read(&efx->filter_sem); 9855a6681e2SEdward Cree } 9865a6681e2SEdward Cree 9875a6681e2SEdward Cree /* Push loopback/power/transmit disable settings to the PHY, and reconfigure 9885a6681e2SEdward Cree * the MAC appropriately. All other PHY configuration changes are pushed 9895a6681e2SEdward Cree * through phy_op->set_settings(), and pushed asynchronously to the MAC 9905a6681e2SEdward Cree * through ef4_monitor(). 
9915a6681e2SEdward Cree * 9925a6681e2SEdward Cree * Callers must hold the mac_lock 9935a6681e2SEdward Cree */ 9945a6681e2SEdward Cree int __ef4_reconfigure_port(struct ef4_nic *efx) 9955a6681e2SEdward Cree { 9965a6681e2SEdward Cree enum ef4_phy_mode phy_mode; 9975a6681e2SEdward Cree int rc; 9985a6681e2SEdward Cree 9995a6681e2SEdward Cree WARN_ON(!mutex_is_locked(&efx->mac_lock)); 10005a6681e2SEdward Cree 10015a6681e2SEdward Cree /* Disable PHY transmit in mac level loopbacks */ 10025a6681e2SEdward Cree phy_mode = efx->phy_mode; 10035a6681e2SEdward Cree if (LOOPBACK_INTERNAL(efx)) 10045a6681e2SEdward Cree efx->phy_mode |= PHY_MODE_TX_DISABLED; 10055a6681e2SEdward Cree else 10065a6681e2SEdward Cree efx->phy_mode &= ~PHY_MODE_TX_DISABLED; 10075a6681e2SEdward Cree 10085a6681e2SEdward Cree rc = efx->type->reconfigure_port(efx); 10095a6681e2SEdward Cree 10105a6681e2SEdward Cree if (rc) 10115a6681e2SEdward Cree efx->phy_mode = phy_mode; 10125a6681e2SEdward Cree 10135a6681e2SEdward Cree return rc; 10145a6681e2SEdward Cree } 10155a6681e2SEdward Cree 10165a6681e2SEdward Cree /* Reinitialise the MAC to pick up new PHY settings, even if the port is 10175a6681e2SEdward Cree * disabled. */ 10185a6681e2SEdward Cree int ef4_reconfigure_port(struct ef4_nic *efx) 10195a6681e2SEdward Cree { 10205a6681e2SEdward Cree int rc; 10215a6681e2SEdward Cree 10225a6681e2SEdward Cree EF4_ASSERT_RESET_SERIALISED(efx); 10235a6681e2SEdward Cree 10245a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 10255a6681e2SEdward Cree rc = __ef4_reconfigure_port(efx); 10265a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 10275a6681e2SEdward Cree 10285a6681e2SEdward Cree return rc; 10295a6681e2SEdward Cree } 10305a6681e2SEdward Cree 10315a6681e2SEdward Cree /* Asynchronous work item for changing MAC promiscuity and multicast 10325a6681e2SEdward Cree * hash. Avoid a drain/rx_ingress enable by reconfiguring the current 10335a6681e2SEdward Cree * MAC directly. 
*/ 10345a6681e2SEdward Cree static void ef4_mac_work(struct work_struct *data) 10355a6681e2SEdward Cree { 10365a6681e2SEdward Cree struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work); 10375a6681e2SEdward Cree 10385a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 10395a6681e2SEdward Cree if (efx->port_enabled) 10405a6681e2SEdward Cree ef4_mac_reconfigure(efx); 10415a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 10425a6681e2SEdward Cree } 10435a6681e2SEdward Cree 10445a6681e2SEdward Cree static int ef4_probe_port(struct ef4_nic *efx) 10455a6681e2SEdward Cree { 10465a6681e2SEdward Cree int rc; 10475a6681e2SEdward Cree 10485a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, "create port\n"); 10495a6681e2SEdward Cree 10505a6681e2SEdward Cree if (phy_flash_cfg) 10515a6681e2SEdward Cree efx->phy_mode = PHY_MODE_SPECIAL; 10525a6681e2SEdward Cree 10535a6681e2SEdward Cree /* Connect up MAC/PHY operations table */ 10545a6681e2SEdward Cree rc = efx->type->probe_port(efx); 10555a6681e2SEdward Cree if (rc) 10565a6681e2SEdward Cree return rc; 10575a6681e2SEdward Cree 10585a6681e2SEdward Cree /* Initialise MAC address to permanent address */ 10595a6681e2SEdward Cree ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); 10605a6681e2SEdward Cree 10615a6681e2SEdward Cree return 0; 10625a6681e2SEdward Cree } 10635a6681e2SEdward Cree 10645a6681e2SEdward Cree static int ef4_init_port(struct ef4_nic *efx) 10655a6681e2SEdward Cree { 10665a6681e2SEdward Cree int rc; 10675a6681e2SEdward Cree 10685a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, "init port\n"); 10695a6681e2SEdward Cree 10705a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 10715a6681e2SEdward Cree 10725a6681e2SEdward Cree rc = efx->phy_op->init(efx); 10735a6681e2SEdward Cree if (rc) 10745a6681e2SEdward Cree goto fail1; 10755a6681e2SEdward Cree 10765a6681e2SEdward Cree efx->port_initialized = true; 10775a6681e2SEdward Cree 10785a6681e2SEdward Cree /* Reconfigure the MAC before creating dma 
queues (required for 10795a6681e2SEdward Cree * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ 10805a6681e2SEdward Cree ef4_mac_reconfigure(efx); 10815a6681e2SEdward Cree 10825a6681e2SEdward Cree /* Ensure the PHY advertises the correct flow control settings */ 10835a6681e2SEdward Cree rc = efx->phy_op->reconfigure(efx); 10845a6681e2SEdward Cree if (rc && rc != -EPERM) 10855a6681e2SEdward Cree goto fail2; 10865a6681e2SEdward Cree 10875a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 10885a6681e2SEdward Cree return 0; 10895a6681e2SEdward Cree 10905a6681e2SEdward Cree fail2: 10915a6681e2SEdward Cree efx->phy_op->fini(efx); 10925a6681e2SEdward Cree fail1: 10935a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 10945a6681e2SEdward Cree return rc; 10955a6681e2SEdward Cree } 10965a6681e2SEdward Cree 10975a6681e2SEdward Cree static void ef4_start_port(struct ef4_nic *efx) 10985a6681e2SEdward Cree { 10995a6681e2SEdward Cree netif_dbg(efx, ifup, efx->net_dev, "start port\n"); 11005a6681e2SEdward Cree BUG_ON(efx->port_enabled); 11015a6681e2SEdward Cree 11025a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 11035a6681e2SEdward Cree efx->port_enabled = true; 11045a6681e2SEdward Cree 11055a6681e2SEdward Cree /* Ensure MAC ingress/egress is enabled */ 11065a6681e2SEdward Cree ef4_mac_reconfigure(efx); 11075a6681e2SEdward Cree 11085a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 11095a6681e2SEdward Cree } 11105a6681e2SEdward Cree 11115a6681e2SEdward Cree /* Cancel work for MAC reconfiguration, periodic hardware monitoring 11125a6681e2SEdward Cree * and the async self-test, wait for them to finish and prevent them 11135a6681e2SEdward Cree * being scheduled again. This doesn't cover online resets, which 11145a6681e2SEdward Cree * should only be cancelled when removing the device. 
11155a6681e2SEdward Cree */ 11165a6681e2SEdward Cree static void ef4_stop_port(struct ef4_nic *efx) 11175a6681e2SEdward Cree { 11185a6681e2SEdward Cree netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); 11195a6681e2SEdward Cree 11205a6681e2SEdward Cree EF4_ASSERT_RESET_SERIALISED(efx); 11215a6681e2SEdward Cree 11225a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 11235a6681e2SEdward Cree efx->port_enabled = false; 11245a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 11255a6681e2SEdward Cree 11265a6681e2SEdward Cree /* Serialise against ef4_set_multicast_list() */ 11275a6681e2SEdward Cree netif_addr_lock_bh(efx->net_dev); 11285a6681e2SEdward Cree netif_addr_unlock_bh(efx->net_dev); 11295a6681e2SEdward Cree 11305a6681e2SEdward Cree cancel_delayed_work_sync(&efx->monitor_work); 11315a6681e2SEdward Cree ef4_selftest_async_cancel(efx); 11325a6681e2SEdward Cree cancel_work_sync(&efx->mac_work); 11335a6681e2SEdward Cree } 11345a6681e2SEdward Cree 11355a6681e2SEdward Cree static void ef4_fini_port(struct ef4_nic *efx) 11365a6681e2SEdward Cree { 11375a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); 11385a6681e2SEdward Cree 11395a6681e2SEdward Cree if (!efx->port_initialized) 11405a6681e2SEdward Cree return; 11415a6681e2SEdward Cree 11425a6681e2SEdward Cree efx->phy_op->fini(efx); 11435a6681e2SEdward Cree efx->port_initialized = false; 11445a6681e2SEdward Cree 11455a6681e2SEdward Cree efx->link_state.up = false; 11465a6681e2SEdward Cree ef4_link_status_changed(efx); 11475a6681e2SEdward Cree } 11485a6681e2SEdward Cree 11495a6681e2SEdward Cree static void ef4_remove_port(struct ef4_nic *efx) 11505a6681e2SEdward Cree { 11515a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); 11525a6681e2SEdward Cree 11535a6681e2SEdward Cree efx->type->remove_port(efx); 11545a6681e2SEdward Cree } 11555a6681e2SEdward Cree 11565a6681e2SEdward Cree /************************************************************************** 11575a6681e2SEdward 
Cree * 11585a6681e2SEdward Cree * NIC handling 11595a6681e2SEdward Cree * 11605a6681e2SEdward Cree **************************************************************************/ 11615a6681e2SEdward Cree 11625a6681e2SEdward Cree static LIST_HEAD(ef4_primary_list); 11635a6681e2SEdward Cree static LIST_HEAD(ef4_unassociated_list); 11645a6681e2SEdward Cree 11655a6681e2SEdward Cree static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right) 11665a6681e2SEdward Cree { 11675a6681e2SEdward Cree return left->type == right->type && 11685a6681e2SEdward Cree left->vpd_sn && right->vpd_sn && 11695a6681e2SEdward Cree !strcmp(left->vpd_sn, right->vpd_sn); 11705a6681e2SEdward Cree } 11715a6681e2SEdward Cree 11725a6681e2SEdward Cree static void ef4_associate(struct ef4_nic *efx) 11735a6681e2SEdward Cree { 11745a6681e2SEdward Cree struct ef4_nic *other, *next; 11755a6681e2SEdward Cree 11765a6681e2SEdward Cree if (efx->primary == efx) { 11775a6681e2SEdward Cree /* Adding primary function; look for secondaries */ 11785a6681e2SEdward Cree 11795a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); 11805a6681e2SEdward Cree list_add_tail(&efx->node, &ef4_primary_list); 11815a6681e2SEdward Cree 11825a6681e2SEdward Cree list_for_each_entry_safe(other, next, &ef4_unassociated_list, 11835a6681e2SEdward Cree node) { 11845a6681e2SEdward Cree if (ef4_same_controller(efx, other)) { 11855a6681e2SEdward Cree list_del(&other->node); 11865a6681e2SEdward Cree netif_dbg(other, probe, other->net_dev, 11875a6681e2SEdward Cree "moving to secondary list of %s %s\n", 11885a6681e2SEdward Cree pci_name(efx->pci_dev), 11895a6681e2SEdward Cree efx->net_dev->name); 11905a6681e2SEdward Cree list_add_tail(&other->node, 11915a6681e2SEdward Cree &efx->secondary_list); 11925a6681e2SEdward Cree other->primary = efx; 11935a6681e2SEdward Cree } 11945a6681e2SEdward Cree } 11955a6681e2SEdward Cree } else { 11965a6681e2SEdward Cree /* Adding secondary function; look for primary 
*/ 11975a6681e2SEdward Cree 11985a6681e2SEdward Cree list_for_each_entry(other, &ef4_primary_list, node) { 11995a6681e2SEdward Cree if (ef4_same_controller(efx, other)) { 12005a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, 12015a6681e2SEdward Cree "adding to secondary list of %s %s\n", 12025a6681e2SEdward Cree pci_name(other->pci_dev), 12035a6681e2SEdward Cree other->net_dev->name); 12045a6681e2SEdward Cree list_add_tail(&efx->node, 12055a6681e2SEdward Cree &other->secondary_list); 12065a6681e2SEdward Cree efx->primary = other; 12075a6681e2SEdward Cree return; 12085a6681e2SEdward Cree } 12095a6681e2SEdward Cree } 12105a6681e2SEdward Cree 12115a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, 12125a6681e2SEdward Cree "adding to unassociated list\n"); 12135a6681e2SEdward Cree list_add_tail(&efx->node, &ef4_unassociated_list); 12145a6681e2SEdward Cree } 12155a6681e2SEdward Cree } 12165a6681e2SEdward Cree 12175a6681e2SEdward Cree static void ef4_dissociate(struct ef4_nic *efx) 12185a6681e2SEdward Cree { 12195a6681e2SEdward Cree struct ef4_nic *other, *next; 12205a6681e2SEdward Cree 12215a6681e2SEdward Cree list_del(&efx->node); 12225a6681e2SEdward Cree efx->primary = NULL; 12235a6681e2SEdward Cree 12245a6681e2SEdward Cree list_for_each_entry_safe(other, next, &efx->secondary_list, node) { 12255a6681e2SEdward Cree list_del(&other->node); 12265a6681e2SEdward Cree netif_dbg(other, probe, other->net_dev, 12275a6681e2SEdward Cree "moving to unassociated list\n"); 12285a6681e2SEdward Cree list_add_tail(&other->node, &ef4_unassociated_list); 12295a6681e2SEdward Cree other->primary = NULL; 12305a6681e2SEdward Cree } 12315a6681e2SEdward Cree } 12325a6681e2SEdward Cree 12335a6681e2SEdward Cree /* This configures the PCI device to enable I/O and DMA. 
*/ 12345a6681e2SEdward Cree static int ef4_init_io(struct ef4_nic *efx) 12355a6681e2SEdward Cree { 12365a6681e2SEdward Cree struct pci_dev *pci_dev = efx->pci_dev; 12375a6681e2SEdward Cree dma_addr_t dma_mask = efx->type->max_dma_mask; 12385a6681e2SEdward Cree unsigned int mem_map_size = efx->type->mem_map_size(efx); 12395a6681e2SEdward Cree int rc, bar; 12405a6681e2SEdward Cree 12415a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 12425a6681e2SEdward Cree 12435a6681e2SEdward Cree bar = efx->type->mem_bar; 12445a6681e2SEdward Cree 12455a6681e2SEdward Cree rc = pci_enable_device(pci_dev); 12465a6681e2SEdward Cree if (rc) { 12475a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 12485a6681e2SEdward Cree "failed to enable PCI device\n"); 12495a6681e2SEdward Cree goto fail1; 12505a6681e2SEdward Cree } 12515a6681e2SEdward Cree 12525a6681e2SEdward Cree pci_set_master(pci_dev); 12535a6681e2SEdward Cree 12545a6681e2SEdward Cree /* Set the PCI DMA mask. Try all possibilities from our 12555a6681e2SEdward Cree * genuine mask down to 32 bits, because some architectures 12565a6681e2SEdward Cree * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit 12575a6681e2SEdward Cree * masks event though they reject 46 bit masks. 
12585a6681e2SEdward Cree */ 12595a6681e2SEdward Cree while (dma_mask > 0x7fffffffUL) { 12605a6681e2SEdward Cree rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); 12615a6681e2SEdward Cree if (rc == 0) 12625a6681e2SEdward Cree break; 12635a6681e2SEdward Cree dma_mask >>= 1; 12645a6681e2SEdward Cree } 12655a6681e2SEdward Cree if (rc) { 12665a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 12675a6681e2SEdward Cree "could not find a suitable DMA mask\n"); 12685a6681e2SEdward Cree goto fail2; 12695a6681e2SEdward Cree } 12705a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, 12715a6681e2SEdward Cree "using DMA mask %llx\n", (unsigned long long) dma_mask); 12725a6681e2SEdward Cree 12735a6681e2SEdward Cree efx->membase_phys = pci_resource_start(efx->pci_dev, bar); 12745a6681e2SEdward Cree rc = pci_request_region(pci_dev, bar, "sfc"); 12755a6681e2SEdward Cree if (rc) { 12765a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 12775a6681e2SEdward Cree "request for memory BAR failed\n"); 12785a6681e2SEdward Cree rc = -EIO; 12795a6681e2SEdward Cree goto fail3; 12805a6681e2SEdward Cree } 12815a6681e2SEdward Cree efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); 12825a6681e2SEdward Cree if (!efx->membase) { 12835a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 12845a6681e2SEdward Cree "could not map memory BAR at %llx+%x\n", 12855a6681e2SEdward Cree (unsigned long long)efx->membase_phys, mem_map_size); 12865a6681e2SEdward Cree rc = -ENOMEM; 12875a6681e2SEdward Cree goto fail4; 12885a6681e2SEdward Cree } 12895a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, 12905a6681e2SEdward Cree "memory BAR at %llx+%x (virtual %p)\n", 12915a6681e2SEdward Cree (unsigned long long)efx->membase_phys, mem_map_size, 12925a6681e2SEdward Cree efx->membase); 12935a6681e2SEdward Cree 12945a6681e2SEdward Cree return 0; 12955a6681e2SEdward Cree 12965a6681e2SEdward Cree fail4: 12975a6681e2SEdward Cree pci_release_region(efx->pci_dev, bar); 
12985a6681e2SEdward Cree fail3: 12995a6681e2SEdward Cree efx->membase_phys = 0; 13005a6681e2SEdward Cree fail2: 13015a6681e2SEdward Cree pci_disable_device(efx->pci_dev); 13025a6681e2SEdward Cree fail1: 13035a6681e2SEdward Cree return rc; 13045a6681e2SEdward Cree } 13055a6681e2SEdward Cree 13065a6681e2SEdward Cree static void ef4_fini_io(struct ef4_nic *efx) 13075a6681e2SEdward Cree { 13085a6681e2SEdward Cree int bar; 13095a6681e2SEdward Cree 13105a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); 13115a6681e2SEdward Cree 13125a6681e2SEdward Cree if (efx->membase) { 13135a6681e2SEdward Cree iounmap(efx->membase); 13145a6681e2SEdward Cree efx->membase = NULL; 13155a6681e2SEdward Cree } 13165a6681e2SEdward Cree 13175a6681e2SEdward Cree if (efx->membase_phys) { 13185a6681e2SEdward Cree bar = efx->type->mem_bar; 13195a6681e2SEdward Cree pci_release_region(efx->pci_dev, bar); 13205a6681e2SEdward Cree efx->membase_phys = 0; 13215a6681e2SEdward Cree } 13225a6681e2SEdward Cree 13235a6681e2SEdward Cree /* Don't disable bus-mastering if VFs are assigned */ 13245a6681e2SEdward Cree if (!pci_vfs_assigned(efx->pci_dev)) 13255a6681e2SEdward Cree pci_disable_device(efx->pci_dev); 13265a6681e2SEdward Cree } 13275a6681e2SEdward Cree 13285a6681e2SEdward Cree void ef4_set_default_rx_indir_table(struct ef4_nic *efx) 13295a6681e2SEdward Cree { 13305a6681e2SEdward Cree size_t i; 13315a6681e2SEdward Cree 13325a6681e2SEdward Cree for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) 13335a6681e2SEdward Cree efx->rx_indir_table[i] = 13345a6681e2SEdward Cree ethtool_rxfh_indir_default(i, efx->rss_spread); 13355a6681e2SEdward Cree } 13365a6681e2SEdward Cree 13375a6681e2SEdward Cree static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx) 13385a6681e2SEdward Cree { 13395a6681e2SEdward Cree cpumask_var_t thread_mask; 13405a6681e2SEdward Cree unsigned int count; 13415a6681e2SEdward Cree int cpu; 13425a6681e2SEdward Cree 13435a6681e2SEdward Cree if (rss_cpus) { 
13445a6681e2SEdward Cree count = rss_cpus; 13455a6681e2SEdward Cree } else { 13465a6681e2SEdward Cree if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) { 13475a6681e2SEdward Cree netif_warn(efx, probe, efx->net_dev, 13485a6681e2SEdward Cree "RSS disabled due to allocation failure\n"); 13495a6681e2SEdward Cree return 1; 13505a6681e2SEdward Cree } 13515a6681e2SEdward Cree 13525a6681e2SEdward Cree count = 0; 13535a6681e2SEdward Cree for_each_online_cpu(cpu) { 13545a6681e2SEdward Cree if (!cpumask_test_cpu(cpu, thread_mask)) { 13555a6681e2SEdward Cree ++count; 13565a6681e2SEdward Cree cpumask_or(thread_mask, thread_mask, 13575a6681e2SEdward Cree topology_sibling_cpumask(cpu)); 13585a6681e2SEdward Cree } 13595a6681e2SEdward Cree } 13605a6681e2SEdward Cree 13615a6681e2SEdward Cree free_cpumask_var(thread_mask); 13625a6681e2SEdward Cree } 13635a6681e2SEdward Cree 13645a6681e2SEdward Cree return count; 13655a6681e2SEdward Cree } 13665a6681e2SEdward Cree 13675a6681e2SEdward Cree /* Probe the number and type of interrupts we are able to obtain, and 13685a6681e2SEdward Cree * the resulting numbers of channels and RX queues. 
13695a6681e2SEdward Cree */ 13705a6681e2SEdward Cree static int ef4_probe_interrupts(struct ef4_nic *efx) 13715a6681e2SEdward Cree { 13725a6681e2SEdward Cree unsigned int extra_channels = 0; 13735a6681e2SEdward Cree unsigned int i, j; 13745a6681e2SEdward Cree int rc; 13755a6681e2SEdward Cree 13765a6681e2SEdward Cree for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) 13775a6681e2SEdward Cree if (efx->extra_channel_type[i]) 13785a6681e2SEdward Cree ++extra_channels; 13795a6681e2SEdward Cree 13805a6681e2SEdward Cree if (efx->interrupt_mode == EF4_INT_MODE_MSIX) { 13815a6681e2SEdward Cree struct msix_entry xentries[EF4_MAX_CHANNELS]; 13825a6681e2SEdward Cree unsigned int n_channels; 13835a6681e2SEdward Cree 13845a6681e2SEdward Cree n_channels = ef4_wanted_parallelism(efx); 13855a6681e2SEdward Cree if (ef4_separate_tx_channels) 13865a6681e2SEdward Cree n_channels *= 2; 13875a6681e2SEdward Cree n_channels += extra_channels; 13885a6681e2SEdward Cree n_channels = min(n_channels, efx->max_channels); 13895a6681e2SEdward Cree 13905a6681e2SEdward Cree for (i = 0; i < n_channels; i++) 13915a6681e2SEdward Cree xentries[i].entry = i; 13925a6681e2SEdward Cree rc = pci_enable_msix_range(efx->pci_dev, 13935a6681e2SEdward Cree xentries, 1, n_channels); 13945a6681e2SEdward Cree if (rc < 0) { 13955a6681e2SEdward Cree /* Fall back to single channel MSI */ 13965a6681e2SEdward Cree efx->interrupt_mode = EF4_INT_MODE_MSI; 13975a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 13985a6681e2SEdward Cree "could not enable MSI-X\n"); 13995a6681e2SEdward Cree } else if (rc < n_channels) { 14005a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 14015a6681e2SEdward Cree "WARNING: Insufficient MSI-X vectors" 14025a6681e2SEdward Cree " available (%d < %u).\n", rc, n_channels); 14035a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 14045a6681e2SEdward Cree "WARNING: Performance may be reduced.\n"); 14055a6681e2SEdward Cree n_channels = rc; 14065a6681e2SEdward Cree } 14075a6681e2SEdward Cree 
14085a6681e2SEdward Cree if (rc > 0) { 14095a6681e2SEdward Cree efx->n_channels = n_channels; 14105a6681e2SEdward Cree if (n_channels > extra_channels) 14115a6681e2SEdward Cree n_channels -= extra_channels; 14125a6681e2SEdward Cree if (ef4_separate_tx_channels) { 14135a6681e2SEdward Cree efx->n_tx_channels = min(max(n_channels / 2, 14145a6681e2SEdward Cree 1U), 14155a6681e2SEdward Cree efx->max_tx_channels); 14165a6681e2SEdward Cree efx->n_rx_channels = max(n_channels - 14175a6681e2SEdward Cree efx->n_tx_channels, 14185a6681e2SEdward Cree 1U); 14195a6681e2SEdward Cree } else { 14205a6681e2SEdward Cree efx->n_tx_channels = min(n_channels, 14215a6681e2SEdward Cree efx->max_tx_channels); 14225a6681e2SEdward Cree efx->n_rx_channels = n_channels; 14235a6681e2SEdward Cree } 14245a6681e2SEdward Cree for (i = 0; i < efx->n_channels; i++) 14255a6681e2SEdward Cree ef4_get_channel(efx, i)->irq = 14265a6681e2SEdward Cree xentries[i].vector; 14275a6681e2SEdward Cree } 14285a6681e2SEdward Cree } 14295a6681e2SEdward Cree 14305a6681e2SEdward Cree /* Try single interrupt MSI */ 14315a6681e2SEdward Cree if (efx->interrupt_mode == EF4_INT_MODE_MSI) { 14325a6681e2SEdward Cree efx->n_channels = 1; 14335a6681e2SEdward Cree efx->n_rx_channels = 1; 14345a6681e2SEdward Cree efx->n_tx_channels = 1; 14355a6681e2SEdward Cree rc = pci_enable_msi(efx->pci_dev); 14365a6681e2SEdward Cree if (rc == 0) { 14375a6681e2SEdward Cree ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq; 14385a6681e2SEdward Cree } else { 14395a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 14405a6681e2SEdward Cree "could not enable MSI\n"); 14415a6681e2SEdward Cree efx->interrupt_mode = EF4_INT_MODE_LEGACY; 14425a6681e2SEdward Cree } 14435a6681e2SEdward Cree } 14445a6681e2SEdward Cree 14455a6681e2SEdward Cree /* Assume legacy interrupts */ 14465a6681e2SEdward Cree if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) { 14475a6681e2SEdward Cree efx->n_channels = 1 + (ef4_separate_tx_channels ? 
1 : 0); 14485a6681e2SEdward Cree efx->n_rx_channels = 1; 14495a6681e2SEdward Cree efx->n_tx_channels = 1; 14505a6681e2SEdward Cree efx->legacy_irq = efx->pci_dev->irq; 14515a6681e2SEdward Cree } 14525a6681e2SEdward Cree 14535a6681e2SEdward Cree /* Assign extra channels if possible */ 14545a6681e2SEdward Cree j = efx->n_channels; 14555a6681e2SEdward Cree for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) { 14565a6681e2SEdward Cree if (!efx->extra_channel_type[i]) 14575a6681e2SEdward Cree continue; 14585a6681e2SEdward Cree if (efx->interrupt_mode != EF4_INT_MODE_MSIX || 14595a6681e2SEdward Cree efx->n_channels <= extra_channels) { 14605a6681e2SEdward Cree efx->extra_channel_type[i]->handle_no_channel(efx); 14615a6681e2SEdward Cree } else { 14625a6681e2SEdward Cree --j; 14635a6681e2SEdward Cree ef4_get_channel(efx, j)->type = 14645a6681e2SEdward Cree efx->extra_channel_type[i]; 14655a6681e2SEdward Cree } 14665a6681e2SEdward Cree } 14675a6681e2SEdward Cree 14685a6681e2SEdward Cree efx->rss_spread = efx->n_rx_channels; 14695a6681e2SEdward Cree 14705a6681e2SEdward Cree return 0; 14715a6681e2SEdward Cree } 14725a6681e2SEdward Cree 14735a6681e2SEdward Cree static int ef4_soft_enable_interrupts(struct ef4_nic *efx) 14745a6681e2SEdward Cree { 14755a6681e2SEdward Cree struct ef4_channel *channel, *end_channel; 14765a6681e2SEdward Cree int rc; 14775a6681e2SEdward Cree 14785a6681e2SEdward Cree BUG_ON(efx->state == STATE_DISABLED); 14795a6681e2SEdward Cree 14805a6681e2SEdward Cree efx->irq_soft_enabled = true; 14815a6681e2SEdward Cree smp_wmb(); 14825a6681e2SEdward Cree 14835a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 14845a6681e2SEdward Cree if (!channel->type->keep_eventq) { 14855a6681e2SEdward Cree rc = ef4_init_eventq(channel); 14865a6681e2SEdward Cree if (rc) 14875a6681e2SEdward Cree goto fail; 14885a6681e2SEdward Cree } 14895a6681e2SEdward Cree ef4_start_eventq(channel); 14905a6681e2SEdward Cree } 14915a6681e2SEdward Cree 14925a6681e2SEdward Cree return 0; 
14935a6681e2SEdward Cree fail: 14945a6681e2SEdward Cree end_channel = channel; 14955a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 14965a6681e2SEdward Cree if (channel == end_channel) 14975a6681e2SEdward Cree break; 14985a6681e2SEdward Cree ef4_stop_eventq(channel); 14995a6681e2SEdward Cree if (!channel->type->keep_eventq) 15005a6681e2SEdward Cree ef4_fini_eventq(channel); 15015a6681e2SEdward Cree } 15025a6681e2SEdward Cree 15035a6681e2SEdward Cree return rc; 15045a6681e2SEdward Cree } 15055a6681e2SEdward Cree 15065a6681e2SEdward Cree static void ef4_soft_disable_interrupts(struct ef4_nic *efx) 15075a6681e2SEdward Cree { 15085a6681e2SEdward Cree struct ef4_channel *channel; 15095a6681e2SEdward Cree 15105a6681e2SEdward Cree if (efx->state == STATE_DISABLED) 15115a6681e2SEdward Cree return; 15125a6681e2SEdward Cree 15135a6681e2SEdward Cree efx->irq_soft_enabled = false; 15145a6681e2SEdward Cree smp_wmb(); 15155a6681e2SEdward Cree 15165a6681e2SEdward Cree if (efx->legacy_irq) 15175a6681e2SEdward Cree synchronize_irq(efx->legacy_irq); 15185a6681e2SEdward Cree 15195a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 15205a6681e2SEdward Cree if (channel->irq) 15215a6681e2SEdward Cree synchronize_irq(channel->irq); 15225a6681e2SEdward Cree 15235a6681e2SEdward Cree ef4_stop_eventq(channel); 15245a6681e2SEdward Cree if (!channel->type->keep_eventq) 15255a6681e2SEdward Cree ef4_fini_eventq(channel); 15265a6681e2SEdward Cree } 15275a6681e2SEdward Cree } 15285a6681e2SEdward Cree 15295a6681e2SEdward Cree static int ef4_enable_interrupts(struct ef4_nic *efx) 15305a6681e2SEdward Cree { 15315a6681e2SEdward Cree struct ef4_channel *channel, *end_channel; 15325a6681e2SEdward Cree int rc; 15335a6681e2SEdward Cree 15345a6681e2SEdward Cree BUG_ON(efx->state == STATE_DISABLED); 15355a6681e2SEdward Cree 15365a6681e2SEdward Cree if (efx->eeh_disabled_legacy_irq) { 15375a6681e2SEdward Cree enable_irq(efx->legacy_irq); 15385a6681e2SEdward Cree efx->eeh_disabled_legacy_irq = 
false; 15395a6681e2SEdward Cree } 15405a6681e2SEdward Cree 15415a6681e2SEdward Cree efx->type->irq_enable_master(efx); 15425a6681e2SEdward Cree 15435a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 15445a6681e2SEdward Cree if (channel->type->keep_eventq) { 15455a6681e2SEdward Cree rc = ef4_init_eventq(channel); 15465a6681e2SEdward Cree if (rc) 15475a6681e2SEdward Cree goto fail; 15485a6681e2SEdward Cree } 15495a6681e2SEdward Cree } 15505a6681e2SEdward Cree 15515a6681e2SEdward Cree rc = ef4_soft_enable_interrupts(efx); 15525a6681e2SEdward Cree if (rc) 15535a6681e2SEdward Cree goto fail; 15545a6681e2SEdward Cree 15555a6681e2SEdward Cree return 0; 15565a6681e2SEdward Cree 15575a6681e2SEdward Cree fail: 15585a6681e2SEdward Cree end_channel = channel; 15595a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 15605a6681e2SEdward Cree if (channel == end_channel) 15615a6681e2SEdward Cree break; 15625a6681e2SEdward Cree if (channel->type->keep_eventq) 15635a6681e2SEdward Cree ef4_fini_eventq(channel); 15645a6681e2SEdward Cree } 15655a6681e2SEdward Cree 15665a6681e2SEdward Cree efx->type->irq_disable_non_ev(efx); 15675a6681e2SEdward Cree 15685a6681e2SEdward Cree return rc; 15695a6681e2SEdward Cree } 15705a6681e2SEdward Cree 15715a6681e2SEdward Cree static void ef4_disable_interrupts(struct ef4_nic *efx) 15725a6681e2SEdward Cree { 15735a6681e2SEdward Cree struct ef4_channel *channel; 15745a6681e2SEdward Cree 15755a6681e2SEdward Cree ef4_soft_disable_interrupts(efx); 15765a6681e2SEdward Cree 15775a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 15785a6681e2SEdward Cree if (channel->type->keep_eventq) 15795a6681e2SEdward Cree ef4_fini_eventq(channel); 15805a6681e2SEdward Cree } 15815a6681e2SEdward Cree 15825a6681e2SEdward Cree efx->type->irq_disable_non_ev(efx); 15835a6681e2SEdward Cree } 15845a6681e2SEdward Cree 15855a6681e2SEdward Cree static void ef4_remove_interrupts(struct ef4_nic *efx) 15865a6681e2SEdward Cree { 15875a6681e2SEdward Cree struct 
ef4_channel *channel; 15885a6681e2SEdward Cree 15895a6681e2SEdward Cree /* Remove MSI/MSI-X interrupts */ 15905a6681e2SEdward Cree ef4_for_each_channel(channel, efx) 15915a6681e2SEdward Cree channel->irq = 0; 15925a6681e2SEdward Cree pci_disable_msi(efx->pci_dev); 15935a6681e2SEdward Cree pci_disable_msix(efx->pci_dev); 15945a6681e2SEdward Cree 15955a6681e2SEdward Cree /* Remove legacy interrupt */ 15965a6681e2SEdward Cree efx->legacy_irq = 0; 15975a6681e2SEdward Cree } 15985a6681e2SEdward Cree 15995a6681e2SEdward Cree static void ef4_set_channels(struct ef4_nic *efx) 16005a6681e2SEdward Cree { 16015a6681e2SEdward Cree struct ef4_channel *channel; 16025a6681e2SEdward Cree struct ef4_tx_queue *tx_queue; 16035a6681e2SEdward Cree 16045a6681e2SEdward Cree efx->tx_channel_offset = 16055a6681e2SEdward Cree ef4_separate_tx_channels ? 16065a6681e2SEdward Cree efx->n_channels - efx->n_tx_channels : 0; 16075a6681e2SEdward Cree 16085a6681e2SEdward Cree /* We need to mark which channels really have RX and TX 16095a6681e2SEdward Cree * queues, and adjust the TX queue numbers if we have separate 16105a6681e2SEdward Cree * RX-only and TX-only channels. 
16115a6681e2SEdward Cree */ 16125a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 16135a6681e2SEdward Cree if (channel->channel < efx->n_rx_channels) 16145a6681e2SEdward Cree channel->rx_queue.core_index = channel->channel; 16155a6681e2SEdward Cree else 16165a6681e2SEdward Cree channel->rx_queue.core_index = -1; 16175a6681e2SEdward Cree 16185a6681e2SEdward Cree ef4_for_each_channel_tx_queue(tx_queue, channel) 16195a6681e2SEdward Cree tx_queue->queue -= (efx->tx_channel_offset * 16205a6681e2SEdward Cree EF4_TXQ_TYPES); 16215a6681e2SEdward Cree } 16225a6681e2SEdward Cree } 16235a6681e2SEdward Cree 16245a6681e2SEdward Cree static int ef4_probe_nic(struct ef4_nic *efx) 16255a6681e2SEdward Cree { 16265a6681e2SEdward Cree int rc; 16275a6681e2SEdward Cree 16285a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); 16295a6681e2SEdward Cree 16305a6681e2SEdward Cree /* Carry out hardware-type specific initialisation */ 16315a6681e2SEdward Cree rc = efx->type->probe(efx); 16325a6681e2SEdward Cree if (rc) 16335a6681e2SEdward Cree return rc; 16345a6681e2SEdward Cree 16355a6681e2SEdward Cree do { 16365a6681e2SEdward Cree if (!efx->max_channels || !efx->max_tx_channels) { 16375a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 16385a6681e2SEdward Cree "Insufficient resources to allocate" 16395a6681e2SEdward Cree " any channels\n"); 16405a6681e2SEdward Cree rc = -ENOSPC; 16415a6681e2SEdward Cree goto fail1; 16425a6681e2SEdward Cree } 16435a6681e2SEdward Cree 16445a6681e2SEdward Cree /* Determine the number of channels and queues by trying 16455a6681e2SEdward Cree * to hook in MSI-X interrupts. 
16465a6681e2SEdward Cree */ 16475a6681e2SEdward Cree rc = ef4_probe_interrupts(efx); 16485a6681e2SEdward Cree if (rc) 16495a6681e2SEdward Cree goto fail1; 16505a6681e2SEdward Cree 16515a6681e2SEdward Cree ef4_set_channels(efx); 16525a6681e2SEdward Cree 16535a6681e2SEdward Cree /* dimension_resources can fail with EAGAIN */ 16545a6681e2SEdward Cree rc = efx->type->dimension_resources(efx); 16555a6681e2SEdward Cree if (rc != 0 && rc != -EAGAIN) 16565a6681e2SEdward Cree goto fail2; 16575a6681e2SEdward Cree 16585a6681e2SEdward Cree if (rc == -EAGAIN) 16595a6681e2SEdward Cree /* try again with new max_channels */ 16605a6681e2SEdward Cree ef4_remove_interrupts(efx); 16615a6681e2SEdward Cree 16625a6681e2SEdward Cree } while (rc == -EAGAIN); 16635a6681e2SEdward Cree 16645a6681e2SEdward Cree if (efx->n_channels > 1) 16655a6681e2SEdward Cree netdev_rss_key_fill(&efx->rx_hash_key, 16665a6681e2SEdward Cree sizeof(efx->rx_hash_key)); 16675a6681e2SEdward Cree ef4_set_default_rx_indir_table(efx); 16685a6681e2SEdward Cree 16695a6681e2SEdward Cree netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); 16705a6681e2SEdward Cree netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); 16715a6681e2SEdward Cree 16725a6681e2SEdward Cree /* Initialise the interrupt moderation settings */ 16735a6681e2SEdward Cree efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); 16745a6681e2SEdward Cree ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, 16755a6681e2SEdward Cree true); 16765a6681e2SEdward Cree 16775a6681e2SEdward Cree return 0; 16785a6681e2SEdward Cree 16795a6681e2SEdward Cree fail2: 16805a6681e2SEdward Cree ef4_remove_interrupts(efx); 16815a6681e2SEdward Cree fail1: 16825a6681e2SEdward Cree efx->type->remove(efx); 16835a6681e2SEdward Cree return rc; 16845a6681e2SEdward Cree } 16855a6681e2SEdward Cree 16865a6681e2SEdward Cree static void ef4_remove_nic(struct ef4_nic *efx) 16875a6681e2SEdward Cree { 16885a6681e2SEdward Cree netif_dbg(efx, 
drv, efx->net_dev, "destroying NIC\n"); 16895a6681e2SEdward Cree 16905a6681e2SEdward Cree ef4_remove_interrupts(efx); 16915a6681e2SEdward Cree efx->type->remove(efx); 16925a6681e2SEdward Cree } 16935a6681e2SEdward Cree 16945a6681e2SEdward Cree static int ef4_probe_filters(struct ef4_nic *efx) 16955a6681e2SEdward Cree { 16965a6681e2SEdward Cree int rc; 16975a6681e2SEdward Cree 16985a6681e2SEdward Cree spin_lock_init(&efx->filter_lock); 16995a6681e2SEdward Cree init_rwsem(&efx->filter_sem); 17005a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 17015a6681e2SEdward Cree down_write(&efx->filter_sem); 17025a6681e2SEdward Cree rc = efx->type->filter_table_probe(efx); 17035a6681e2SEdward Cree if (rc) 17045a6681e2SEdward Cree goto out_unlock; 17055a6681e2SEdward Cree 17065a6681e2SEdward Cree #ifdef CONFIG_RFS_ACCEL 17075a6681e2SEdward Cree if (efx->type->offload_features & NETIF_F_NTUPLE) { 17085a6681e2SEdward Cree struct ef4_channel *channel; 17095a6681e2SEdward Cree int i, success = 1; 17105a6681e2SEdward Cree 17115a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 17125a6681e2SEdward Cree channel->rps_flow_id = 17135a6681e2SEdward Cree kcalloc(efx->type->max_rx_ip_filters, 17145a6681e2SEdward Cree sizeof(*channel->rps_flow_id), 17155a6681e2SEdward Cree GFP_KERNEL); 17165a6681e2SEdward Cree if (!channel->rps_flow_id) 17175a6681e2SEdward Cree success = 0; 17185a6681e2SEdward Cree else 17195a6681e2SEdward Cree for (i = 0; 17205a6681e2SEdward Cree i < efx->type->max_rx_ip_filters; 17215a6681e2SEdward Cree ++i) 17225a6681e2SEdward Cree channel->rps_flow_id[i] = 17235a6681e2SEdward Cree RPS_FLOW_ID_INVALID; 17245a6681e2SEdward Cree } 17255a6681e2SEdward Cree 17265a6681e2SEdward Cree if (!success) { 17275a6681e2SEdward Cree ef4_for_each_channel(channel, efx) 17285a6681e2SEdward Cree kfree(channel->rps_flow_id); 17295a6681e2SEdward Cree efx->type->filter_table_remove(efx); 17305a6681e2SEdward Cree rc = -ENOMEM; 17315a6681e2SEdward Cree goto out_unlock; 17325a6681e2SEdward 
Cree } 17335a6681e2SEdward Cree 17345a6681e2SEdward Cree efx->rps_expire_index = efx->rps_expire_channel = 0; 17355a6681e2SEdward Cree } 17365a6681e2SEdward Cree #endif 17375a6681e2SEdward Cree out_unlock: 17385a6681e2SEdward Cree up_write(&efx->filter_sem); 17395a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 17405a6681e2SEdward Cree return rc; 17415a6681e2SEdward Cree } 17425a6681e2SEdward Cree 17435a6681e2SEdward Cree static void ef4_remove_filters(struct ef4_nic *efx) 17445a6681e2SEdward Cree { 17455a6681e2SEdward Cree #ifdef CONFIG_RFS_ACCEL 17465a6681e2SEdward Cree struct ef4_channel *channel; 17475a6681e2SEdward Cree 17485a6681e2SEdward Cree ef4_for_each_channel(channel, efx) 17495a6681e2SEdward Cree kfree(channel->rps_flow_id); 17505a6681e2SEdward Cree #endif 17515a6681e2SEdward Cree down_write(&efx->filter_sem); 17525a6681e2SEdward Cree efx->type->filter_table_remove(efx); 17535a6681e2SEdward Cree up_write(&efx->filter_sem); 17545a6681e2SEdward Cree } 17555a6681e2SEdward Cree 17565a6681e2SEdward Cree static void ef4_restore_filters(struct ef4_nic *efx) 17575a6681e2SEdward Cree { 17585a6681e2SEdward Cree down_read(&efx->filter_sem); 17595a6681e2SEdward Cree efx->type->filter_table_restore(efx); 17605a6681e2SEdward Cree up_read(&efx->filter_sem); 17615a6681e2SEdward Cree } 17625a6681e2SEdward Cree 17635a6681e2SEdward Cree /************************************************************************** 17645a6681e2SEdward Cree * 17655a6681e2SEdward Cree * NIC startup/shutdown 17665a6681e2SEdward Cree * 17675a6681e2SEdward Cree *************************************************************************/ 17685a6681e2SEdward Cree 17695a6681e2SEdward Cree static int ef4_probe_all(struct ef4_nic *efx) 17705a6681e2SEdward Cree { 17715a6681e2SEdward Cree int rc; 17725a6681e2SEdward Cree 17735a6681e2SEdward Cree rc = ef4_probe_nic(efx); 17745a6681e2SEdward Cree if (rc) { 17755a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); 
17765a6681e2SEdward Cree goto fail1; 17775a6681e2SEdward Cree } 17785a6681e2SEdward Cree 17795a6681e2SEdward Cree rc = ef4_probe_port(efx); 17805a6681e2SEdward Cree if (rc) { 17815a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, "failed to create port\n"); 17825a6681e2SEdward Cree goto fail2; 17835a6681e2SEdward Cree } 17845a6681e2SEdward Cree 17855a6681e2SEdward Cree BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT); 17865a6681e2SEdward Cree if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) { 17875a6681e2SEdward Cree rc = -EINVAL; 17885a6681e2SEdward Cree goto fail3; 17895a6681e2SEdward Cree } 17905a6681e2SEdward Cree efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE; 17915a6681e2SEdward Cree 17925a6681e2SEdward Cree rc = ef4_probe_filters(efx); 17935a6681e2SEdward Cree if (rc) { 17945a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 17955a6681e2SEdward Cree "failed to create filter tables\n"); 17965a6681e2SEdward Cree goto fail4; 17975a6681e2SEdward Cree } 17985a6681e2SEdward Cree 17995a6681e2SEdward Cree rc = ef4_probe_channels(efx); 18005a6681e2SEdward Cree if (rc) 18015a6681e2SEdward Cree goto fail5; 18025a6681e2SEdward Cree 18035a6681e2SEdward Cree return 0; 18045a6681e2SEdward Cree 18055a6681e2SEdward Cree fail5: 18065a6681e2SEdward Cree ef4_remove_filters(efx); 18075a6681e2SEdward Cree fail4: 18085a6681e2SEdward Cree fail3: 18095a6681e2SEdward Cree ef4_remove_port(efx); 18105a6681e2SEdward Cree fail2: 18115a6681e2SEdward Cree ef4_remove_nic(efx); 18125a6681e2SEdward Cree fail1: 18135a6681e2SEdward Cree return rc; 18145a6681e2SEdward Cree } 18155a6681e2SEdward Cree 18165a6681e2SEdward Cree /* If the interface is supposed to be running but is not, start 18175a6681e2SEdward Cree * the hardware and software data path, regular activity for the port 18185a6681e2SEdward Cree * (MAC statistics, link polling, etc.) and schedule the port to be 18195a6681e2SEdward Cree * reconfigured. Interrupts must already be enabled. 
This function 18205a6681e2SEdward Cree * is safe to call multiple times, so long as the NIC is not disabled. 18215a6681e2SEdward Cree * Requires the RTNL lock. 18225a6681e2SEdward Cree */ 18235a6681e2SEdward Cree static void ef4_start_all(struct ef4_nic *efx) 18245a6681e2SEdward Cree { 18255a6681e2SEdward Cree EF4_ASSERT_RESET_SERIALISED(efx); 18265a6681e2SEdward Cree BUG_ON(efx->state == STATE_DISABLED); 18275a6681e2SEdward Cree 18285a6681e2SEdward Cree /* Check that it is appropriate to restart the interface. All 18295a6681e2SEdward Cree * of these flags are safe to read under just the rtnl lock */ 18305a6681e2SEdward Cree if (efx->port_enabled || !netif_running(efx->net_dev) || 18315a6681e2SEdward Cree efx->reset_pending) 18325a6681e2SEdward Cree return; 18335a6681e2SEdward Cree 18345a6681e2SEdward Cree ef4_start_port(efx); 18355a6681e2SEdward Cree ef4_start_datapath(efx); 18365a6681e2SEdward Cree 18375a6681e2SEdward Cree /* Start the hardware monitor if there is one */ 18385a6681e2SEdward Cree if (efx->type->monitor != NULL) 18395a6681e2SEdward Cree queue_delayed_work(efx->workqueue, &efx->monitor_work, 18405a6681e2SEdward Cree ef4_monitor_interval); 18415a6681e2SEdward Cree 18425a6681e2SEdward Cree efx->type->start_stats(efx); 18435a6681e2SEdward Cree efx->type->pull_stats(efx); 18445a6681e2SEdward Cree spin_lock_bh(&efx->stats_lock); 18455a6681e2SEdward Cree efx->type->update_stats(efx, NULL, NULL); 18465a6681e2SEdward Cree spin_unlock_bh(&efx->stats_lock); 18475a6681e2SEdward Cree } 18485a6681e2SEdward Cree 18495a6681e2SEdward Cree /* Quiesce the hardware and software data path, and regular activity 18505a6681e2SEdward Cree * for the port without bringing the link down. Safe to call multiple 18515a6681e2SEdward Cree * times with the NIC in almost any state, but interrupts should be 18525a6681e2SEdward Cree * enabled. Requires the RTNL lock. 
18535a6681e2SEdward Cree */ 18545a6681e2SEdward Cree static void ef4_stop_all(struct ef4_nic *efx) 18555a6681e2SEdward Cree { 18565a6681e2SEdward Cree EF4_ASSERT_RESET_SERIALISED(efx); 18575a6681e2SEdward Cree 18585a6681e2SEdward Cree /* port_enabled can be read safely under the rtnl lock */ 18595a6681e2SEdward Cree if (!efx->port_enabled) 18605a6681e2SEdward Cree return; 18615a6681e2SEdward Cree 18625a6681e2SEdward Cree /* update stats before we go down so we can accurately count 18635a6681e2SEdward Cree * rx_nodesc_drops 18645a6681e2SEdward Cree */ 18655a6681e2SEdward Cree efx->type->pull_stats(efx); 18665a6681e2SEdward Cree spin_lock_bh(&efx->stats_lock); 18675a6681e2SEdward Cree efx->type->update_stats(efx, NULL, NULL); 18685a6681e2SEdward Cree spin_unlock_bh(&efx->stats_lock); 18695a6681e2SEdward Cree efx->type->stop_stats(efx); 18705a6681e2SEdward Cree ef4_stop_port(efx); 18715a6681e2SEdward Cree 18725a6681e2SEdward Cree /* Stop the kernel transmit interface. This is only valid if 18735a6681e2SEdward Cree * the device is stopped or detached; otherwise the watchdog 18745a6681e2SEdward Cree * may fire immediately. 
18755a6681e2SEdward Cree */ 18765a6681e2SEdward Cree WARN_ON(netif_running(efx->net_dev) && 18775a6681e2SEdward Cree netif_device_present(efx->net_dev)); 18785a6681e2SEdward Cree netif_tx_disable(efx->net_dev); 18795a6681e2SEdward Cree 18805a6681e2SEdward Cree ef4_stop_datapath(efx); 18815a6681e2SEdward Cree } 18825a6681e2SEdward Cree 18835a6681e2SEdward Cree static void ef4_remove_all(struct ef4_nic *efx) 18845a6681e2SEdward Cree { 18855a6681e2SEdward Cree ef4_remove_channels(efx); 18865a6681e2SEdward Cree ef4_remove_filters(efx); 18875a6681e2SEdward Cree ef4_remove_port(efx); 18885a6681e2SEdward Cree ef4_remove_nic(efx); 18895a6681e2SEdward Cree } 18905a6681e2SEdward Cree 18915a6681e2SEdward Cree /************************************************************************** 18925a6681e2SEdward Cree * 18935a6681e2SEdward Cree * Interrupt moderation 18945a6681e2SEdward Cree * 18955a6681e2SEdward Cree **************************************************************************/ 18965a6681e2SEdward Cree unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs) 18975a6681e2SEdward Cree { 18985a6681e2SEdward Cree if (usecs == 0) 18995a6681e2SEdward Cree return 0; 19005a6681e2SEdward Cree if (usecs * 1000 < efx->timer_quantum_ns) 19015a6681e2SEdward Cree return 1; /* never round down to 0 */ 19025a6681e2SEdward Cree return usecs * 1000 / efx->timer_quantum_ns; 19035a6681e2SEdward Cree } 19045a6681e2SEdward Cree 19055a6681e2SEdward Cree unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks) 19065a6681e2SEdward Cree { 19075a6681e2SEdward Cree /* We must round up when converting ticks to microseconds 19085a6681e2SEdward Cree * because we round down when converting the other way. 
19095a6681e2SEdward Cree */ 19105a6681e2SEdward Cree return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); 19115a6681e2SEdward Cree } 19125a6681e2SEdward Cree 19135a6681e2SEdward Cree /* Set interrupt moderation parameters */ 19145a6681e2SEdward Cree int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs, 19155a6681e2SEdward Cree unsigned int rx_usecs, bool rx_adaptive, 19165a6681e2SEdward Cree bool rx_may_override_tx) 19175a6681e2SEdward Cree { 19185a6681e2SEdward Cree struct ef4_channel *channel; 19195a6681e2SEdward Cree unsigned int timer_max_us; 19205a6681e2SEdward Cree 19215a6681e2SEdward Cree EF4_ASSERT_RESET_SERIALISED(efx); 19225a6681e2SEdward Cree 19235a6681e2SEdward Cree timer_max_us = efx->timer_max_ns / 1000; 19245a6681e2SEdward Cree 19255a6681e2SEdward Cree if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) 19265a6681e2SEdward Cree return -EINVAL; 19275a6681e2SEdward Cree 19285a6681e2SEdward Cree if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && 19295a6681e2SEdward Cree !rx_may_override_tx) { 19305a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, "Channels are shared. 
" 19315a6681e2SEdward Cree "RX and TX IRQ moderation must be equal\n"); 19325a6681e2SEdward Cree return -EINVAL; 19335a6681e2SEdward Cree } 19345a6681e2SEdward Cree 19355a6681e2SEdward Cree efx->irq_rx_adaptive = rx_adaptive; 19365a6681e2SEdward Cree efx->irq_rx_moderation_us = rx_usecs; 19375a6681e2SEdward Cree ef4_for_each_channel(channel, efx) { 19385a6681e2SEdward Cree if (ef4_channel_has_rx_queue(channel)) 19395a6681e2SEdward Cree channel->irq_moderation_us = rx_usecs; 19405a6681e2SEdward Cree else if (ef4_channel_has_tx_queues(channel)) 19415a6681e2SEdward Cree channel->irq_moderation_us = tx_usecs; 19425a6681e2SEdward Cree } 19435a6681e2SEdward Cree 19445a6681e2SEdward Cree return 0; 19455a6681e2SEdward Cree } 19465a6681e2SEdward Cree 19475a6681e2SEdward Cree void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs, 19485a6681e2SEdward Cree unsigned int *rx_usecs, bool *rx_adaptive) 19495a6681e2SEdward Cree { 19505a6681e2SEdward Cree *rx_adaptive = efx->irq_rx_adaptive; 19515a6681e2SEdward Cree *rx_usecs = efx->irq_rx_moderation_us; 19525a6681e2SEdward Cree 19535a6681e2SEdward Cree /* If channels are shared between RX and TX, so is IRQ 19545a6681e2SEdward Cree * moderation. Otherwise, IRQ moderation is the same for all 19555a6681e2SEdward Cree * TX channels and is not adaptive. 
19565a6681e2SEdward Cree */ 19575a6681e2SEdward Cree if (efx->tx_channel_offset == 0) { 19585a6681e2SEdward Cree *tx_usecs = *rx_usecs; 19595a6681e2SEdward Cree } else { 19605a6681e2SEdward Cree struct ef4_channel *tx_channel; 19615a6681e2SEdward Cree 19625a6681e2SEdward Cree tx_channel = efx->channel[efx->tx_channel_offset]; 19635a6681e2SEdward Cree *tx_usecs = tx_channel->irq_moderation_us; 19645a6681e2SEdward Cree } 19655a6681e2SEdward Cree } 19665a6681e2SEdward Cree 19675a6681e2SEdward Cree /************************************************************************** 19685a6681e2SEdward Cree * 19695a6681e2SEdward Cree * Hardware monitor 19705a6681e2SEdward Cree * 19715a6681e2SEdward Cree **************************************************************************/ 19725a6681e2SEdward Cree 19735a6681e2SEdward Cree /* Run periodically off the general workqueue */ 19745a6681e2SEdward Cree static void ef4_monitor(struct work_struct *data) 19755a6681e2SEdward Cree { 19765a6681e2SEdward Cree struct ef4_nic *efx = container_of(data, struct ef4_nic, 19775a6681e2SEdward Cree monitor_work.work); 19785a6681e2SEdward Cree 19795a6681e2SEdward Cree netif_vdbg(efx, timer, efx->net_dev, 19805a6681e2SEdward Cree "hardware monitor executing on CPU %d\n", 19815a6681e2SEdward Cree raw_smp_processor_id()); 19825a6681e2SEdward Cree BUG_ON(efx->type->monitor == NULL); 19835a6681e2SEdward Cree 19845a6681e2SEdward Cree /* If the mac_lock is already held then it is likely a port 19855a6681e2SEdward Cree * reconfiguration is already in place, which will likely do 19865a6681e2SEdward Cree * most of the work of monitor() anyway. 
*/ 19875a6681e2SEdward Cree if (mutex_trylock(&efx->mac_lock)) { 19885a6681e2SEdward Cree if (efx->port_enabled) 19895a6681e2SEdward Cree efx->type->monitor(efx); 19905a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 19915a6681e2SEdward Cree } 19925a6681e2SEdward Cree 19935a6681e2SEdward Cree queue_delayed_work(efx->workqueue, &efx->monitor_work, 19945a6681e2SEdward Cree ef4_monitor_interval); 19955a6681e2SEdward Cree } 19965a6681e2SEdward Cree 19975a6681e2SEdward Cree /************************************************************************** 19985a6681e2SEdward Cree * 19995a6681e2SEdward Cree * ioctls 20005a6681e2SEdward Cree * 20015a6681e2SEdward Cree *************************************************************************/ 20025a6681e2SEdward Cree 20035a6681e2SEdward Cree /* Net device ioctl 20045a6681e2SEdward Cree * Context: process, rtnl_lock() held. 20055a6681e2SEdward Cree */ 20065a6681e2SEdward Cree static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 20075a6681e2SEdward Cree { 20085a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 20095a6681e2SEdward Cree struct mii_ioctl_data *data = if_mii(ifr); 20105a6681e2SEdward Cree 20115a6681e2SEdward Cree /* Convert phy_id from older PRTAD/DEVAD format */ 20125a6681e2SEdward Cree if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 20135a6681e2SEdward Cree (data->phy_id & 0xfc00) == 0x0400) 20145a6681e2SEdward Cree data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; 20155a6681e2SEdward Cree 20165a6681e2SEdward Cree return mdio_mii_ioctl(&efx->mdio, data, cmd); 20175a6681e2SEdward Cree } 20185a6681e2SEdward Cree 20195a6681e2SEdward Cree /************************************************************************** 20205a6681e2SEdward Cree * 20215a6681e2SEdward Cree * NAPI interface 20225a6681e2SEdward Cree * 20235a6681e2SEdward Cree **************************************************************************/ 20245a6681e2SEdward Cree 20255a6681e2SEdward Cree static void 
ef4_init_napi_channel(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;

	/* Attach this channel's NAPI context to the net device and
	 * initialise its busy-poll locking state.
	 */
	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       ef4_poll, napi_weight);
	ef4_channel_busy_poll_init(channel);
}

/* Register a NAPI context for every channel */
static void ef4_init_napi(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_init_napi_channel(channel);
}

static void ef4_fini_napi_channel(struct ef4_channel *channel)
{
	/* napi_dev doubles as the "NAPI registered" flag for the channel */
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

/* Unregister every channel's NAPI context */
static void ef4_fini_napi(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will
be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void ef4_netpoll(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;

	/* Kick every channel's event processing as if an IRQ had fired */
	ef4_for_each_channel(channel, efx)
		ef4_schedule_channel(channel);
}

#endif

#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll entry point.  Returns the number of packets
 * received, or an LL_FLUSH_* code if polling could not proceed.
 */
static int ef4_busy_poll(struct napi_struct *napi)
{
	struct ef4_channel *channel =
		container_of(napi, struct ef4_channel, napi_str);
	struct ef4_nic *efx = channel->efx;
	int budget = 4;
	int old_rx_packets, rx_packets;

	if (!netif_running(efx->net_dev))
		return LL_FLUSH_FAILED;

	/* Take the per-channel poll lock; bail if NAPI already owns it */
	if (!ef4_channel_try_lock_poll(channel))
		return LL_FLUSH_BUSY;

	old_rx_packets = channel->rx_queue.rx_packets;
	ef4_process_channel(channel, budget);

	rx_packets = channel->rx_queue.rx_packets - old_rx_packets;

	/* There is no race condition with NAPI here.
	 * NAPI will automatically be rescheduled if it yielded during busy
	 * polling, because it was not able to take the lock and thus returned
	 * the full budget.
	 */
	ef4_channel_unlock_poll(channel);

	return rx_packets;
}
#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int ef4_net_open(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;
	/* Refuse to open while the PHY is in a special (e.g. test) mode */
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	ef4_link_status_changed(efx);

	ef4_start_all(efx);
	ef4_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
21465a6681e2SEdward Cree */ 21475a6681e2SEdward Cree int ef4_net_stop(struct net_device *net_dev) 21485a6681e2SEdward Cree { 21495a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 21505a6681e2SEdward Cree 21515a6681e2SEdward Cree netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 21525a6681e2SEdward Cree raw_smp_processor_id()); 21535a6681e2SEdward Cree 21545a6681e2SEdward Cree /* Stop the device and flush all the channels */ 21555a6681e2SEdward Cree ef4_stop_all(efx); 21565a6681e2SEdward Cree 21575a6681e2SEdward Cree return 0; 21585a6681e2SEdward Cree } 21595a6681e2SEdward Cree 21605a6681e2SEdward Cree /* Context: process, dev_base_lock or RTNL held, non-blocking. */ 21615a6681e2SEdward Cree static struct rtnl_link_stats64 *ef4_net_stats(struct net_device *net_dev, 21625a6681e2SEdward Cree struct rtnl_link_stats64 *stats) 21635a6681e2SEdward Cree { 21645a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 21655a6681e2SEdward Cree 21665a6681e2SEdward Cree spin_lock_bh(&efx->stats_lock); 21675a6681e2SEdward Cree efx->type->update_stats(efx, NULL, stats); 21685a6681e2SEdward Cree spin_unlock_bh(&efx->stats_lock); 21695a6681e2SEdward Cree 21705a6681e2SEdward Cree return stats; 21715a6681e2SEdward Cree } 21725a6681e2SEdward Cree 21735a6681e2SEdward Cree /* Context: netif_tx_lock held, BHs disabled. */ 21745a6681e2SEdward Cree static void ef4_watchdog(struct net_device *net_dev) 21755a6681e2SEdward Cree { 21765a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 21775a6681e2SEdward Cree 21785a6681e2SEdward Cree netif_err(efx, tx_err, efx->net_dev, 21795a6681e2SEdward Cree "TX stuck with port_enabled=%d: resetting channels\n", 21805a6681e2SEdward Cree efx->port_enabled); 21815a6681e2SEdward Cree 21825a6681e2SEdward Cree ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 21835a6681e2SEdward Cree } 21845a6681e2SEdward Cree 21855a6681e2SEdward Cree 21865a6681e2SEdward Cree /* Context: process, rtnl_lock() held. 
*/ 21875a6681e2SEdward Cree static int ef4_change_mtu(struct net_device *net_dev, int new_mtu) 21885a6681e2SEdward Cree { 21895a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 21905a6681e2SEdward Cree int rc; 21915a6681e2SEdward Cree 21925a6681e2SEdward Cree rc = ef4_check_disabled(efx); 21935a6681e2SEdward Cree if (rc) 21945a6681e2SEdward Cree return rc; 21955a6681e2SEdward Cree 21965a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 21975a6681e2SEdward Cree 21985a6681e2SEdward Cree ef4_device_detach_sync(efx); 21995a6681e2SEdward Cree ef4_stop_all(efx); 22005a6681e2SEdward Cree 22015a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 22025a6681e2SEdward Cree net_dev->mtu = new_mtu; 22035a6681e2SEdward Cree ef4_mac_reconfigure(efx); 22045a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 22055a6681e2SEdward Cree 22065a6681e2SEdward Cree ef4_start_all(efx); 22075a6681e2SEdward Cree netif_device_attach(efx->net_dev); 22085a6681e2SEdward Cree return 0; 22095a6681e2SEdward Cree } 22105a6681e2SEdward Cree 22115a6681e2SEdward Cree static int ef4_set_mac_address(struct net_device *net_dev, void *data) 22125a6681e2SEdward Cree { 22135a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 22145a6681e2SEdward Cree struct sockaddr *addr = data; 22155a6681e2SEdward Cree u8 *new_addr = addr->sa_data; 22165a6681e2SEdward Cree u8 old_addr[6]; 22175a6681e2SEdward Cree int rc; 22185a6681e2SEdward Cree 22195a6681e2SEdward Cree if (!is_valid_ether_addr(new_addr)) { 22205a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, 22215a6681e2SEdward Cree "invalid ethernet MAC address requested: %pM\n", 22225a6681e2SEdward Cree new_addr); 22235a6681e2SEdward Cree return -EADDRNOTAVAIL; 22245a6681e2SEdward Cree } 22255a6681e2SEdward Cree 22265a6681e2SEdward Cree /* save old address */ 22275a6681e2SEdward Cree ether_addr_copy(old_addr, net_dev->dev_addr); 22285a6681e2SEdward Cree ether_addr_copy(net_dev->dev_addr, new_addr); 
22295a6681e2SEdward Cree if (efx->type->set_mac_address) { 22305a6681e2SEdward Cree rc = efx->type->set_mac_address(efx); 22315a6681e2SEdward Cree if (rc) { 22325a6681e2SEdward Cree ether_addr_copy(net_dev->dev_addr, old_addr); 22335a6681e2SEdward Cree return rc; 22345a6681e2SEdward Cree } 22355a6681e2SEdward Cree } 22365a6681e2SEdward Cree 22375a6681e2SEdward Cree /* Reconfigure the MAC */ 22385a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 22395a6681e2SEdward Cree ef4_mac_reconfigure(efx); 22405a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 22415a6681e2SEdward Cree 22425a6681e2SEdward Cree return 0; 22435a6681e2SEdward Cree } 22445a6681e2SEdward Cree 22455a6681e2SEdward Cree /* Context: netif_addr_lock held, BHs disabled. */ 22465a6681e2SEdward Cree static void ef4_set_rx_mode(struct net_device *net_dev) 22475a6681e2SEdward Cree { 22485a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 22495a6681e2SEdward Cree 22505a6681e2SEdward Cree if (efx->port_enabled) 22515a6681e2SEdward Cree queue_work(efx->workqueue, &efx->mac_work); 22525a6681e2SEdward Cree /* Otherwise ef4_start_port() will do this */ 22535a6681e2SEdward Cree } 22545a6681e2SEdward Cree 22555a6681e2SEdward Cree static int ef4_set_features(struct net_device *net_dev, netdev_features_t data) 22565a6681e2SEdward Cree { 22575a6681e2SEdward Cree struct ef4_nic *efx = netdev_priv(net_dev); 22585a6681e2SEdward Cree int rc; 22595a6681e2SEdward Cree 22605a6681e2SEdward Cree /* If disabling RX n-tuple filtering, clear existing filters */ 22615a6681e2SEdward Cree if (net_dev->features & ~data & NETIF_F_NTUPLE) { 22625a6681e2SEdward Cree rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL); 22635a6681e2SEdward Cree if (rc) 22645a6681e2SEdward Cree return rc; 22655a6681e2SEdward Cree } 22665a6681e2SEdward Cree 22675a6681e2SEdward Cree /* If Rx VLAN filter is changed, update filters via mac_reconfigure */ 22685a6681e2SEdward Cree if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) { 
22695a6681e2SEdward Cree /* ef4_set_rx_mode() will schedule MAC work to update filters 22705a6681e2SEdward Cree * when a new features are finally set in net_dev. 22715a6681e2SEdward Cree */ 22725a6681e2SEdward Cree ef4_set_rx_mode(net_dev); 22735a6681e2SEdward Cree } 22745a6681e2SEdward Cree 22755a6681e2SEdward Cree return 0; 22765a6681e2SEdward Cree } 22775a6681e2SEdward Cree 22785a6681e2SEdward Cree static const struct net_device_ops ef4_netdev_ops = { 22795a6681e2SEdward Cree .ndo_open = ef4_net_open, 22805a6681e2SEdward Cree .ndo_stop = ef4_net_stop, 22815a6681e2SEdward Cree .ndo_get_stats64 = ef4_net_stats, 22825a6681e2SEdward Cree .ndo_tx_timeout = ef4_watchdog, 22835a6681e2SEdward Cree .ndo_start_xmit = ef4_hard_start_xmit, 22845a6681e2SEdward Cree .ndo_validate_addr = eth_validate_addr, 22855a6681e2SEdward Cree .ndo_do_ioctl = ef4_ioctl, 22865a6681e2SEdward Cree .ndo_change_mtu = ef4_change_mtu, 22875a6681e2SEdward Cree .ndo_set_mac_address = ef4_set_mac_address, 22885a6681e2SEdward Cree .ndo_set_rx_mode = ef4_set_rx_mode, 22895a6681e2SEdward Cree .ndo_set_features = ef4_set_features, 22905a6681e2SEdward Cree #ifdef CONFIG_NET_POLL_CONTROLLER 22915a6681e2SEdward Cree .ndo_poll_controller = ef4_netpoll, 22925a6681e2SEdward Cree #endif 22935a6681e2SEdward Cree .ndo_setup_tc = ef4_setup_tc, 22945a6681e2SEdward Cree #ifdef CONFIG_NET_RX_BUSY_POLL 22955a6681e2SEdward Cree .ndo_busy_poll = ef4_busy_poll, 22965a6681e2SEdward Cree #endif 22975a6681e2SEdward Cree #ifdef CONFIG_RFS_ACCEL 22985a6681e2SEdward Cree .ndo_rx_flow_steer = ef4_filter_rfs, 22995a6681e2SEdward Cree #endif 23005a6681e2SEdward Cree }; 23015a6681e2SEdward Cree 23025a6681e2SEdward Cree static void ef4_update_name(struct ef4_nic *efx) 23035a6681e2SEdward Cree { 23045a6681e2SEdward Cree strcpy(efx->name, efx->net_dev->name); 23055a6681e2SEdward Cree ef4_mtd_rename(efx); 23065a6681e2SEdward Cree ef4_set_channel_names(efx); 23075a6681e2SEdward Cree } 23085a6681e2SEdward Cree 23095a6681e2SEdward Cree 
static int ef4_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	/* Only react to renames of net devices that belong to this driver */
	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
	    event == NETDEV_CHANGENAME)
		ef4_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block ef4_netdev_notifier = {
	.notifier_call = ef4_netdev_event,
};

/* sysfs attribute: expose the PHY type as a bare integer */
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

static int ef4_register_netdev(struct ef4_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct ef4_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef4_netdev_ops;
	net_dev->ethtool_ops = &ef4_ethtool_ops;
	net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
	net_dev->min_mtu = EF4_MIN_MTU;
	net_dev->max_mtu = EF4_MAX_MTU;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested.  If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef4_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Expose each TX queue to the core TX layer */
	ef4_for_each_channel(channel, efx) {
		struct ef4_tx_queue *tx_queue;
		ef4_for_each_channel_tx_queue(tx_queue, channel)
			ef4_init_tx_queue_core_txq(tx_queue);
	}

	ef4_associate(efx);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}
	return 0;

fail_registered:
	rtnl_lock();
	ef4_dissociate(efx);
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void ef4_unregister_netdev(struct ef4_nic *efx)
{
	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	if (ef4_dev_registered(efx)) {
		/* Fall back to the PCI name for any later log messages */
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
{
	EF4_ASSERT_RESET_SERIALISED(efx);

	ef4_stop_all(efx);
	ef4_disable_interrupts(efx);

	/* mac_lock is deliberately left held; ef4_reset_up() releases it */
	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * ef4_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE.
 */
int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		rc = efx->phy_op->reconfigure(efx);
		if (rc && rc != -EPERM)
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = ef4_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* Reinstall any filters that were lost across the reset */
	down_read(&efx->filter_sem);
	ef4_restore_filters(efx);
	up_read(&efx->filter_sem);

	mutex_unlock(&efx->mac_lock);

	ef4_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);
	return rc;
}

/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int ef4_reset(struct ef4_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	ef4_device_detach_sync(efx);
	ef4_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered.  We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	/* ef4_reset_up() always releases the locks taken by ef4_reset_down() */
	rc2 = ef4_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int ef4_try_recovery(struct ef4_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus.
In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void ef4_reset_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

	pending = ACCESS_ONCE(efx->reset_pending);
	/* Highest-numbered (widest-scope) pending reset type wins */
	method = fls(pending) - 1;

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    ef4_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in ef4_schedule_reset() but it may
	 * have changed by now.  Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)ef4_reset(efx, method);

	rtnl_unlock();
}

void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	/* Map a reset *reason* onto a reset *method* where necessary */
	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_DATAPATH:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set
as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (ACCESS_ONCE(efx->state) != STATE_READY)
		return;

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id ef4_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
/* Dummy op that reports success */
int ef4_port_dummy_op_int(struct ef4_nic *efx)
{
	return 0;
}
/* Dummy op that does nothing */
void ef4_port_dummy_op_void(struct ef4_nic *efx) {}

/* Dummy poll op: never reports a link state change */
static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
{
	return false;
}

static const struct ef4_phy_operations ef4_dummy_phy_operations = {
	.init		 = ef4_port_dummy_op_int,
	.reconfigure	 = ef4_port_dummy_op_int,
	.poll		 = ef4_port_dummy_op_poll,
	.fini		 = ef4_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * ef4_nic (including all sub-structures).
27035a6681e2SEdward Cree */ 27045a6681e2SEdward Cree static int ef4_init_struct(struct ef4_nic *efx, 27055a6681e2SEdward Cree struct pci_dev *pci_dev, struct net_device *net_dev) 27065a6681e2SEdward Cree { 27075a6681e2SEdward Cree int i; 27085a6681e2SEdward Cree 27095a6681e2SEdward Cree /* Initialise common structures */ 27105a6681e2SEdward Cree INIT_LIST_HEAD(&efx->node); 27115a6681e2SEdward Cree INIT_LIST_HEAD(&efx->secondary_list); 27125a6681e2SEdward Cree spin_lock_init(&efx->biu_lock); 27135a6681e2SEdward Cree #ifdef CONFIG_SFC_FALCON_MTD 27145a6681e2SEdward Cree INIT_LIST_HEAD(&efx->mtd_list); 27155a6681e2SEdward Cree #endif 27165a6681e2SEdward Cree INIT_WORK(&efx->reset_work, ef4_reset_work); 27175a6681e2SEdward Cree INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor); 27185a6681e2SEdward Cree INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work); 27195a6681e2SEdward Cree efx->pci_dev = pci_dev; 27205a6681e2SEdward Cree efx->msg_enable = debug; 27215a6681e2SEdward Cree efx->state = STATE_UNINIT; 27225a6681e2SEdward Cree strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 27235a6681e2SEdward Cree 27245a6681e2SEdward Cree efx->net_dev = net_dev; 27255a6681e2SEdward Cree efx->rx_prefix_size = efx->type->rx_prefix_size; 27265a6681e2SEdward Cree efx->rx_ip_align = 27275a6681e2SEdward Cree NET_IP_ALIGN ? 
(efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; 27285a6681e2SEdward Cree efx->rx_packet_hash_offset = 27295a6681e2SEdward Cree efx->type->rx_hash_offset - efx->type->rx_prefix_size; 27305a6681e2SEdward Cree efx->rx_packet_ts_offset = 27315a6681e2SEdward Cree efx->type->rx_ts_offset - efx->type->rx_prefix_size; 27325a6681e2SEdward Cree spin_lock_init(&efx->stats_lock); 27335a6681e2SEdward Cree mutex_init(&efx->mac_lock); 27345a6681e2SEdward Cree efx->phy_op = &ef4_dummy_phy_operations; 27355a6681e2SEdward Cree efx->mdio.dev = net_dev; 27365a6681e2SEdward Cree INIT_WORK(&efx->mac_work, ef4_mac_work); 27375a6681e2SEdward Cree init_waitqueue_head(&efx->flush_wq); 27385a6681e2SEdward Cree 27395a6681e2SEdward Cree for (i = 0; i < EF4_MAX_CHANNELS; i++) { 27405a6681e2SEdward Cree efx->channel[i] = ef4_alloc_channel(efx, i, NULL); 27415a6681e2SEdward Cree if (!efx->channel[i]) 27425a6681e2SEdward Cree goto fail; 27435a6681e2SEdward Cree efx->msi_context[i].efx = efx; 27445a6681e2SEdward Cree efx->msi_context[i].index = i; 27455a6681e2SEdward Cree } 27465a6681e2SEdward Cree 27475a6681e2SEdward Cree /* Higher numbered interrupt modes are less capable! 
*/ 27485a6681e2SEdward Cree efx->interrupt_mode = max(efx->type->max_interrupt_mode, 27495a6681e2SEdward Cree interrupt_mode); 27505a6681e2SEdward Cree 27515a6681e2SEdward Cree /* Would be good to use the net_dev name, but we're too early */ 27525a6681e2SEdward Cree snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", 27535a6681e2SEdward Cree pci_name(pci_dev)); 27545a6681e2SEdward Cree efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); 27555a6681e2SEdward Cree if (!efx->workqueue) 27565a6681e2SEdward Cree goto fail; 27575a6681e2SEdward Cree 27585a6681e2SEdward Cree return 0; 27595a6681e2SEdward Cree 27605a6681e2SEdward Cree fail: 27615a6681e2SEdward Cree ef4_fini_struct(efx); 27625a6681e2SEdward Cree return -ENOMEM; 27635a6681e2SEdward Cree } 27645a6681e2SEdward Cree 27655a6681e2SEdward Cree static void ef4_fini_struct(struct ef4_nic *efx) 27665a6681e2SEdward Cree { 27675a6681e2SEdward Cree int i; 27685a6681e2SEdward Cree 27695a6681e2SEdward Cree for (i = 0; i < EF4_MAX_CHANNELS; i++) 27705a6681e2SEdward Cree kfree(efx->channel[i]); 27715a6681e2SEdward Cree 27725a6681e2SEdward Cree kfree(efx->vpd_sn); 27735a6681e2SEdward Cree 27745a6681e2SEdward Cree if (efx->workqueue) { 27755a6681e2SEdward Cree destroy_workqueue(efx->workqueue); 27765a6681e2SEdward Cree efx->workqueue = NULL; 27775a6681e2SEdward Cree } 27785a6681e2SEdward Cree } 27795a6681e2SEdward Cree 27805a6681e2SEdward Cree void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats) 27815a6681e2SEdward Cree { 27825a6681e2SEdward Cree u64 n_rx_nodesc_trunc = 0; 27835a6681e2SEdward Cree struct ef4_channel *channel; 27845a6681e2SEdward Cree 27855a6681e2SEdward Cree ef4_for_each_channel(channel, efx) 27865a6681e2SEdward Cree n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; 27875a6681e2SEdward Cree stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; 27885a6681e2SEdward Cree stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); 27895a6681e2SEdward Cree } 
27905a6681e2SEdward Cree 27915a6681e2SEdward Cree /************************************************************************** 27925a6681e2SEdward Cree * 27935a6681e2SEdward Cree * PCI interface 27945a6681e2SEdward Cree * 27955a6681e2SEdward Cree **************************************************************************/ 27965a6681e2SEdward Cree 27975a6681e2SEdward Cree /* Main body of final NIC shutdown code 27985a6681e2SEdward Cree * This is called only at module unload (or hotplug removal). 27995a6681e2SEdward Cree */ 28005a6681e2SEdward Cree static void ef4_pci_remove_main(struct ef4_nic *efx) 28015a6681e2SEdward Cree { 28025a6681e2SEdward Cree /* Flush reset_work. It can no longer be scheduled since we 28035a6681e2SEdward Cree * are not READY. 28045a6681e2SEdward Cree */ 28055a6681e2SEdward Cree BUG_ON(efx->state == STATE_READY); 28065a6681e2SEdward Cree cancel_work_sync(&efx->reset_work); 28075a6681e2SEdward Cree 28085a6681e2SEdward Cree ef4_disable_interrupts(efx); 28095a6681e2SEdward Cree ef4_nic_fini_interrupt(efx); 28105a6681e2SEdward Cree ef4_fini_port(efx); 28115a6681e2SEdward Cree efx->type->fini(efx); 28125a6681e2SEdward Cree ef4_fini_napi(efx); 28135a6681e2SEdward Cree ef4_remove_all(efx); 28145a6681e2SEdward Cree } 28155a6681e2SEdward Cree 28165a6681e2SEdward Cree /* Final NIC shutdown 28175a6681e2SEdward Cree * This is called only at module unload (or hotplug removal). A PF can call 28185a6681e2SEdward Cree * this on its VFs to ensure they are unbound first. 
28195a6681e2SEdward Cree */ 28205a6681e2SEdward Cree static void ef4_pci_remove(struct pci_dev *pci_dev) 28215a6681e2SEdward Cree { 28225a6681e2SEdward Cree struct ef4_nic *efx; 28235a6681e2SEdward Cree 28245a6681e2SEdward Cree efx = pci_get_drvdata(pci_dev); 28255a6681e2SEdward Cree if (!efx) 28265a6681e2SEdward Cree return; 28275a6681e2SEdward Cree 28285a6681e2SEdward Cree /* Mark the NIC as fini, then stop the interface */ 28295a6681e2SEdward Cree rtnl_lock(); 28305a6681e2SEdward Cree ef4_dissociate(efx); 28315a6681e2SEdward Cree dev_close(efx->net_dev); 28325a6681e2SEdward Cree ef4_disable_interrupts(efx); 28335a6681e2SEdward Cree efx->state = STATE_UNINIT; 28345a6681e2SEdward Cree rtnl_unlock(); 28355a6681e2SEdward Cree 28365a6681e2SEdward Cree ef4_unregister_netdev(efx); 28375a6681e2SEdward Cree 28385a6681e2SEdward Cree ef4_mtd_remove(efx); 28395a6681e2SEdward Cree 28405a6681e2SEdward Cree ef4_pci_remove_main(efx); 28415a6681e2SEdward Cree 28425a6681e2SEdward Cree ef4_fini_io(efx); 28435a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 28445a6681e2SEdward Cree 28455a6681e2SEdward Cree ef4_fini_struct(efx); 28465a6681e2SEdward Cree free_netdev(efx->net_dev); 28475a6681e2SEdward Cree 28485a6681e2SEdward Cree pci_disable_pcie_error_reporting(pci_dev); 28495a6681e2SEdward Cree }; 28505a6681e2SEdward Cree 28515a6681e2SEdward Cree /* NIC VPD information 28525a6681e2SEdward Cree * Called during probe to display the part number of the 28535a6681e2SEdward Cree * installed NIC. VPD is potentially very large but this should 28545a6681e2SEdward Cree * always appear within the first 512 bytes. 
28555a6681e2SEdward Cree */ 28565a6681e2SEdward Cree #define SFC_VPD_LEN 512 28575a6681e2SEdward Cree static void ef4_probe_vpd_strings(struct ef4_nic *efx) 28585a6681e2SEdward Cree { 28595a6681e2SEdward Cree struct pci_dev *dev = efx->pci_dev; 28605a6681e2SEdward Cree char vpd_data[SFC_VPD_LEN]; 28615a6681e2SEdward Cree ssize_t vpd_size; 28625a6681e2SEdward Cree int ro_start, ro_size, i, j; 28635a6681e2SEdward Cree 28645a6681e2SEdward Cree /* Get the vpd data from the device */ 28655a6681e2SEdward Cree vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); 28665a6681e2SEdward Cree if (vpd_size <= 0) { 28675a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); 28685a6681e2SEdward Cree return; 28695a6681e2SEdward Cree } 28705a6681e2SEdward Cree 28715a6681e2SEdward Cree /* Get the Read only section */ 28725a6681e2SEdward Cree ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); 28735a6681e2SEdward Cree if (ro_start < 0) { 28745a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); 28755a6681e2SEdward Cree return; 28765a6681e2SEdward Cree } 28775a6681e2SEdward Cree 28785a6681e2SEdward Cree ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); 28795a6681e2SEdward Cree j = ro_size; 28805a6681e2SEdward Cree i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 28815a6681e2SEdward Cree if (i + j > vpd_size) 28825a6681e2SEdward Cree j = vpd_size - i; 28835a6681e2SEdward Cree 28845a6681e2SEdward Cree /* Get the Part number */ 28855a6681e2SEdward Cree i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); 28865a6681e2SEdward Cree if (i < 0) { 28875a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, "Part number not found\n"); 28885a6681e2SEdward Cree return; 28895a6681e2SEdward Cree } 28905a6681e2SEdward Cree 28915a6681e2SEdward Cree j = pci_vpd_info_field_size(&vpd_data[i]); 28925a6681e2SEdward Cree i += PCI_VPD_INFO_FLD_HDR_SIZE; 28935a6681e2SEdward Cree if (i + j > vpd_size) { 28945a6681e2SEdward Cree 
netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); 28955a6681e2SEdward Cree return; 28965a6681e2SEdward Cree } 28975a6681e2SEdward Cree 28985a6681e2SEdward Cree netif_info(efx, drv, efx->net_dev, 28995a6681e2SEdward Cree "Part Number : %.*s\n", j, &vpd_data[i]); 29005a6681e2SEdward Cree 29015a6681e2SEdward Cree i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 29025a6681e2SEdward Cree j = ro_size; 29035a6681e2SEdward Cree i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); 29045a6681e2SEdward Cree if (i < 0) { 29055a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); 29065a6681e2SEdward Cree return; 29075a6681e2SEdward Cree } 29085a6681e2SEdward Cree 29095a6681e2SEdward Cree j = pci_vpd_info_field_size(&vpd_data[i]); 29105a6681e2SEdward Cree i += PCI_VPD_INFO_FLD_HDR_SIZE; 29115a6681e2SEdward Cree if (i + j > vpd_size) { 29125a6681e2SEdward Cree netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); 29135a6681e2SEdward Cree return; 29145a6681e2SEdward Cree } 29155a6681e2SEdward Cree 29165a6681e2SEdward Cree efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); 29175a6681e2SEdward Cree if (!efx->vpd_sn) 29185a6681e2SEdward Cree return; 29195a6681e2SEdward Cree 29205a6681e2SEdward Cree snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); 29215a6681e2SEdward Cree } 29225a6681e2SEdward Cree 29235a6681e2SEdward Cree 29245a6681e2SEdward Cree /* Main body of NIC initialisation 29255a6681e2SEdward Cree * This is called at module load (or hotplug insertion, theoretically). 
29265a6681e2SEdward Cree */ 29275a6681e2SEdward Cree static int ef4_pci_probe_main(struct ef4_nic *efx) 29285a6681e2SEdward Cree { 29295a6681e2SEdward Cree int rc; 29305a6681e2SEdward Cree 29315a6681e2SEdward Cree /* Do start-of-day initialisation */ 29325a6681e2SEdward Cree rc = ef4_probe_all(efx); 29335a6681e2SEdward Cree if (rc) 29345a6681e2SEdward Cree goto fail1; 29355a6681e2SEdward Cree 29365a6681e2SEdward Cree ef4_init_napi(efx); 29375a6681e2SEdward Cree 29385a6681e2SEdward Cree rc = efx->type->init(efx); 29395a6681e2SEdward Cree if (rc) { 29405a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 29415a6681e2SEdward Cree "failed to initialise NIC\n"); 29425a6681e2SEdward Cree goto fail3; 29435a6681e2SEdward Cree } 29445a6681e2SEdward Cree 29455a6681e2SEdward Cree rc = ef4_init_port(efx); 29465a6681e2SEdward Cree if (rc) { 29475a6681e2SEdward Cree netif_err(efx, probe, efx->net_dev, 29485a6681e2SEdward Cree "failed to initialise port\n"); 29495a6681e2SEdward Cree goto fail4; 29505a6681e2SEdward Cree } 29515a6681e2SEdward Cree 29525a6681e2SEdward Cree rc = ef4_nic_init_interrupt(efx); 29535a6681e2SEdward Cree if (rc) 29545a6681e2SEdward Cree goto fail5; 29555a6681e2SEdward Cree rc = ef4_enable_interrupts(efx); 29565a6681e2SEdward Cree if (rc) 29575a6681e2SEdward Cree goto fail6; 29585a6681e2SEdward Cree 29595a6681e2SEdward Cree return 0; 29605a6681e2SEdward Cree 29615a6681e2SEdward Cree fail6: 29625a6681e2SEdward Cree ef4_nic_fini_interrupt(efx); 29635a6681e2SEdward Cree fail5: 29645a6681e2SEdward Cree ef4_fini_port(efx); 29655a6681e2SEdward Cree fail4: 29665a6681e2SEdward Cree efx->type->fini(efx); 29675a6681e2SEdward Cree fail3: 29685a6681e2SEdward Cree ef4_fini_napi(efx); 29695a6681e2SEdward Cree ef4_remove_all(efx); 29705a6681e2SEdward Cree fail1: 29715a6681e2SEdward Cree return rc; 29725a6681e2SEdward Cree } 29735a6681e2SEdward Cree 29745a6681e2SEdward Cree /* NIC initialisation 29755a6681e2SEdward Cree * 29765a6681e2SEdward Cree * This is called at 
module load (or hotplug insertion, 29775a6681e2SEdward Cree * theoretically). It sets up PCI mappings, resets the NIC, 29785a6681e2SEdward Cree * sets up and registers the network devices with the kernel and hooks 29795a6681e2SEdward Cree * the interrupt service routine. It does not prepare the device for 29805a6681e2SEdward Cree * transmission; this is left to the first time one of the network 29815a6681e2SEdward Cree * interfaces is brought up (i.e. ef4_net_open). 29825a6681e2SEdward Cree */ 29835a6681e2SEdward Cree static int ef4_pci_probe(struct pci_dev *pci_dev, 29845a6681e2SEdward Cree const struct pci_device_id *entry) 29855a6681e2SEdward Cree { 29865a6681e2SEdward Cree struct net_device *net_dev; 29875a6681e2SEdward Cree struct ef4_nic *efx; 29885a6681e2SEdward Cree int rc; 29895a6681e2SEdward Cree 29905a6681e2SEdward Cree /* Allocate and initialise a struct net_device and struct ef4_nic */ 29915a6681e2SEdward Cree net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES, 29925a6681e2SEdward Cree EF4_MAX_RX_QUEUES); 29935a6681e2SEdward Cree if (!net_dev) 29945a6681e2SEdward Cree return -ENOMEM; 29955a6681e2SEdward Cree efx = netdev_priv(net_dev); 29965a6681e2SEdward Cree efx->type = (const struct ef4_nic_type *) entry->driver_data; 29975a6681e2SEdward Cree efx->fixed_features |= NETIF_F_HIGHDMA; 29985a6681e2SEdward Cree 29995a6681e2SEdward Cree pci_set_drvdata(pci_dev, efx); 30005a6681e2SEdward Cree SET_NETDEV_DEV(net_dev, &pci_dev->dev); 30015a6681e2SEdward Cree rc = ef4_init_struct(efx, pci_dev, net_dev); 30025a6681e2SEdward Cree if (rc) 30035a6681e2SEdward Cree goto fail1; 30045a6681e2SEdward Cree 30055a6681e2SEdward Cree netif_info(efx, probe, efx->net_dev, 30065a6681e2SEdward Cree "Solarflare NIC detected\n"); 30075a6681e2SEdward Cree 30085a6681e2SEdward Cree ef4_probe_vpd_strings(efx); 30095a6681e2SEdward Cree 30105a6681e2SEdward Cree /* Set up basic I/O (BAR mappings etc) */ 30115a6681e2SEdward Cree rc = ef4_init_io(efx); 30125a6681e2SEdward 
Cree if (rc) 30135a6681e2SEdward Cree goto fail2; 30145a6681e2SEdward Cree 30155a6681e2SEdward Cree rc = ef4_pci_probe_main(efx); 30165a6681e2SEdward Cree if (rc) 30175a6681e2SEdward Cree goto fail3; 30185a6681e2SEdward Cree 30195a6681e2SEdward Cree net_dev->features |= (efx->type->offload_features | NETIF_F_SG | 30205a6681e2SEdward Cree NETIF_F_RXCSUM); 30215a6681e2SEdward Cree /* Mask for features that also apply to VLAN devices */ 30225a6681e2SEdward Cree net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | 30235a6681e2SEdward Cree NETIF_F_HIGHDMA | NETIF_F_RXCSUM); 30245a6681e2SEdward Cree 30255a6681e2SEdward Cree net_dev->hw_features = net_dev->features & ~efx->fixed_features; 30265a6681e2SEdward Cree 30275a6681e2SEdward Cree /* Disable VLAN filtering by default. It may be enforced if 30285a6681e2SEdward Cree * the feature is fixed (i.e. VLAN filters are required to 30295a6681e2SEdward Cree * receive VLAN tagged packets due to vPort restrictions). 30305a6681e2SEdward Cree */ 30315a6681e2SEdward Cree net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 30325a6681e2SEdward Cree net_dev->features |= efx->fixed_features; 30335a6681e2SEdward Cree 30345a6681e2SEdward Cree rc = ef4_register_netdev(efx); 30355a6681e2SEdward Cree if (rc) 30365a6681e2SEdward Cree goto fail4; 30375a6681e2SEdward Cree 30385a6681e2SEdward Cree netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 30395a6681e2SEdward Cree 30405a6681e2SEdward Cree /* Try to create MTDs, but allow this to fail */ 30415a6681e2SEdward Cree rtnl_lock(); 30425a6681e2SEdward Cree rc = ef4_mtd_probe(efx); 30435a6681e2SEdward Cree rtnl_unlock(); 30445a6681e2SEdward Cree if (rc && rc != -EPERM) 30455a6681e2SEdward Cree netif_warn(efx, probe, efx->net_dev, 30465a6681e2SEdward Cree "failed to create MTDs (%d)\n", rc); 30475a6681e2SEdward Cree 30485a6681e2SEdward Cree rc = pci_enable_pcie_error_reporting(pci_dev); 30495a6681e2SEdward Cree if (rc && rc != -EINVAL) 30505a6681e2SEdward Cree 
netif_notice(efx, probe, efx->net_dev, 30515a6681e2SEdward Cree "PCIE error reporting unavailable (%d).\n", 30525a6681e2SEdward Cree rc); 30535a6681e2SEdward Cree 30545a6681e2SEdward Cree return 0; 30555a6681e2SEdward Cree 30565a6681e2SEdward Cree fail4: 30575a6681e2SEdward Cree ef4_pci_remove_main(efx); 30585a6681e2SEdward Cree fail3: 30595a6681e2SEdward Cree ef4_fini_io(efx); 30605a6681e2SEdward Cree fail2: 30615a6681e2SEdward Cree ef4_fini_struct(efx); 30625a6681e2SEdward Cree fail1: 30635a6681e2SEdward Cree WARN_ON(rc > 0); 30645a6681e2SEdward Cree netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); 30655a6681e2SEdward Cree free_netdev(net_dev); 30665a6681e2SEdward Cree return rc; 30675a6681e2SEdward Cree } 30685a6681e2SEdward Cree 30695a6681e2SEdward Cree static int ef4_pm_freeze(struct device *dev) 30705a6681e2SEdward Cree { 30715a6681e2SEdward Cree struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 30725a6681e2SEdward Cree 30735a6681e2SEdward Cree rtnl_lock(); 30745a6681e2SEdward Cree 30755a6681e2SEdward Cree if (efx->state != STATE_DISABLED) { 30765a6681e2SEdward Cree efx->state = STATE_UNINIT; 30775a6681e2SEdward Cree 30785a6681e2SEdward Cree ef4_device_detach_sync(efx); 30795a6681e2SEdward Cree 30805a6681e2SEdward Cree ef4_stop_all(efx); 30815a6681e2SEdward Cree ef4_disable_interrupts(efx); 30825a6681e2SEdward Cree } 30835a6681e2SEdward Cree 30845a6681e2SEdward Cree rtnl_unlock(); 30855a6681e2SEdward Cree 30865a6681e2SEdward Cree return 0; 30875a6681e2SEdward Cree } 30885a6681e2SEdward Cree 30895a6681e2SEdward Cree static int ef4_pm_thaw(struct device *dev) 30905a6681e2SEdward Cree { 30915a6681e2SEdward Cree int rc; 30925a6681e2SEdward Cree struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 30935a6681e2SEdward Cree 30945a6681e2SEdward Cree rtnl_lock(); 30955a6681e2SEdward Cree 30965a6681e2SEdward Cree if (efx->state != STATE_DISABLED) { 30975a6681e2SEdward Cree rc = ef4_enable_interrupts(efx); 30985a6681e2SEdward Cree if 
(rc) 30995a6681e2SEdward Cree goto fail; 31005a6681e2SEdward Cree 31015a6681e2SEdward Cree mutex_lock(&efx->mac_lock); 31025a6681e2SEdward Cree efx->phy_op->reconfigure(efx); 31035a6681e2SEdward Cree mutex_unlock(&efx->mac_lock); 31045a6681e2SEdward Cree 31055a6681e2SEdward Cree ef4_start_all(efx); 31065a6681e2SEdward Cree 31075a6681e2SEdward Cree netif_device_attach(efx->net_dev); 31085a6681e2SEdward Cree 31095a6681e2SEdward Cree efx->state = STATE_READY; 31105a6681e2SEdward Cree 31115a6681e2SEdward Cree efx->type->resume_wol(efx); 31125a6681e2SEdward Cree } 31135a6681e2SEdward Cree 31145a6681e2SEdward Cree rtnl_unlock(); 31155a6681e2SEdward Cree 31165a6681e2SEdward Cree /* Reschedule any quenched resets scheduled during ef4_pm_freeze() */ 31175a6681e2SEdward Cree queue_work(reset_workqueue, &efx->reset_work); 31185a6681e2SEdward Cree 31195a6681e2SEdward Cree return 0; 31205a6681e2SEdward Cree 31215a6681e2SEdward Cree fail: 31225a6681e2SEdward Cree rtnl_unlock(); 31235a6681e2SEdward Cree 31245a6681e2SEdward Cree return rc; 31255a6681e2SEdward Cree } 31265a6681e2SEdward Cree 31275a6681e2SEdward Cree static int ef4_pm_poweroff(struct device *dev) 31285a6681e2SEdward Cree { 31295a6681e2SEdward Cree struct pci_dev *pci_dev = to_pci_dev(dev); 31305a6681e2SEdward Cree struct ef4_nic *efx = pci_get_drvdata(pci_dev); 31315a6681e2SEdward Cree 31325a6681e2SEdward Cree efx->type->fini(efx); 31335a6681e2SEdward Cree 31345a6681e2SEdward Cree efx->reset_pending = 0; 31355a6681e2SEdward Cree 31365a6681e2SEdward Cree pci_save_state(pci_dev); 31375a6681e2SEdward Cree return pci_set_power_state(pci_dev, PCI_D3hot); 31385a6681e2SEdward Cree } 31395a6681e2SEdward Cree 31405a6681e2SEdward Cree /* Used for both resume and restore */ 31415a6681e2SEdward Cree static int ef4_pm_resume(struct device *dev) 31425a6681e2SEdward Cree { 31435a6681e2SEdward Cree struct pci_dev *pci_dev = to_pci_dev(dev); 31445a6681e2SEdward Cree struct ef4_nic *efx = pci_get_drvdata(pci_dev); 31455a6681e2SEdward 
Cree int rc; 31465a6681e2SEdward Cree 31475a6681e2SEdward Cree rc = pci_set_power_state(pci_dev, PCI_D0); 31485a6681e2SEdward Cree if (rc) 31495a6681e2SEdward Cree return rc; 31505a6681e2SEdward Cree pci_restore_state(pci_dev); 31515a6681e2SEdward Cree rc = pci_enable_device(pci_dev); 31525a6681e2SEdward Cree if (rc) 31535a6681e2SEdward Cree return rc; 31545a6681e2SEdward Cree pci_set_master(efx->pci_dev); 31555a6681e2SEdward Cree rc = efx->type->reset(efx, RESET_TYPE_ALL); 31565a6681e2SEdward Cree if (rc) 31575a6681e2SEdward Cree return rc; 31585a6681e2SEdward Cree rc = efx->type->init(efx); 31595a6681e2SEdward Cree if (rc) 31605a6681e2SEdward Cree return rc; 31615a6681e2SEdward Cree rc = ef4_pm_thaw(dev); 31625a6681e2SEdward Cree return rc; 31635a6681e2SEdward Cree } 31645a6681e2SEdward Cree 31655a6681e2SEdward Cree static int ef4_pm_suspend(struct device *dev) 31665a6681e2SEdward Cree { 31675a6681e2SEdward Cree int rc; 31685a6681e2SEdward Cree 31695a6681e2SEdward Cree ef4_pm_freeze(dev); 31705a6681e2SEdward Cree rc = ef4_pm_poweroff(dev); 31715a6681e2SEdward Cree if (rc) 31725a6681e2SEdward Cree ef4_pm_resume(dev); 31735a6681e2SEdward Cree return rc; 31745a6681e2SEdward Cree } 31755a6681e2SEdward Cree 31765a6681e2SEdward Cree static const struct dev_pm_ops ef4_pm_ops = { 31775a6681e2SEdward Cree .suspend = ef4_pm_suspend, 31785a6681e2SEdward Cree .resume = ef4_pm_resume, 31795a6681e2SEdward Cree .freeze = ef4_pm_freeze, 31805a6681e2SEdward Cree .thaw = ef4_pm_thaw, 31815a6681e2SEdward Cree .poweroff = ef4_pm_poweroff, 31825a6681e2SEdward Cree .restore = ef4_pm_resume, 31835a6681e2SEdward Cree }; 31845a6681e2SEdward Cree 31855a6681e2SEdward Cree /* A PCI error affecting this device was detected. 31865a6681e2SEdward Cree * At this point MMIO and DMA may be disabled. 31875a6681e2SEdward Cree * Stop the software path and request a slot reset. 
31885a6681e2SEdward Cree */ 31895a6681e2SEdward Cree static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev, 31905a6681e2SEdward Cree enum pci_channel_state state) 31915a6681e2SEdward Cree { 31925a6681e2SEdward Cree pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 31935a6681e2SEdward Cree struct ef4_nic *efx = pci_get_drvdata(pdev); 31945a6681e2SEdward Cree 31955a6681e2SEdward Cree if (state == pci_channel_io_perm_failure) 31965a6681e2SEdward Cree return PCI_ERS_RESULT_DISCONNECT; 31975a6681e2SEdward Cree 31985a6681e2SEdward Cree rtnl_lock(); 31995a6681e2SEdward Cree 32005a6681e2SEdward Cree if (efx->state != STATE_DISABLED) { 32015a6681e2SEdward Cree efx->state = STATE_RECOVERY; 32025a6681e2SEdward Cree efx->reset_pending = 0; 32035a6681e2SEdward Cree 32045a6681e2SEdward Cree ef4_device_detach_sync(efx); 32055a6681e2SEdward Cree 32065a6681e2SEdward Cree ef4_stop_all(efx); 32075a6681e2SEdward Cree ef4_disable_interrupts(efx); 32085a6681e2SEdward Cree 32095a6681e2SEdward Cree status = PCI_ERS_RESULT_NEED_RESET; 32105a6681e2SEdward Cree } else { 32115a6681e2SEdward Cree /* If the interface is disabled we don't want to do anything 32125a6681e2SEdward Cree * with it. 32135a6681e2SEdward Cree */ 32145a6681e2SEdward Cree status = PCI_ERS_RESULT_RECOVERED; 32155a6681e2SEdward Cree } 32165a6681e2SEdward Cree 32175a6681e2SEdward Cree rtnl_unlock(); 32185a6681e2SEdward Cree 32195a6681e2SEdward Cree pci_disable_device(pdev); 32205a6681e2SEdward Cree 32215a6681e2SEdward Cree return status; 32225a6681e2SEdward Cree } 32235a6681e2SEdward Cree 32245a6681e2SEdward Cree /* Fake a successful reset, which will be performed later in ef4_io_resume. 
*/ 32255a6681e2SEdward Cree static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev) 32265a6681e2SEdward Cree { 32275a6681e2SEdward Cree struct ef4_nic *efx = pci_get_drvdata(pdev); 32285a6681e2SEdward Cree pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 32295a6681e2SEdward Cree int rc; 32305a6681e2SEdward Cree 32315a6681e2SEdward Cree if (pci_enable_device(pdev)) { 32325a6681e2SEdward Cree netif_err(efx, hw, efx->net_dev, 32335a6681e2SEdward Cree "Cannot re-enable PCI device after reset.\n"); 32345a6681e2SEdward Cree status = PCI_ERS_RESULT_DISCONNECT; 32355a6681e2SEdward Cree } 32365a6681e2SEdward Cree 32375a6681e2SEdward Cree rc = pci_cleanup_aer_uncorrect_error_status(pdev); 32385a6681e2SEdward Cree if (rc) { 32395a6681e2SEdward Cree netif_err(efx, hw, efx->net_dev, 32405a6681e2SEdward Cree "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); 32415a6681e2SEdward Cree /* Non-fatal error. Continue. */ 32425a6681e2SEdward Cree } 32435a6681e2SEdward Cree 32445a6681e2SEdward Cree return status; 32455a6681e2SEdward Cree } 32465a6681e2SEdward Cree 32475a6681e2SEdward Cree /* Perform the actual reset and resume I/O operations. 
*/ 32485a6681e2SEdward Cree static void ef4_io_resume(struct pci_dev *pdev) 32495a6681e2SEdward Cree { 32505a6681e2SEdward Cree struct ef4_nic *efx = pci_get_drvdata(pdev); 32515a6681e2SEdward Cree int rc; 32525a6681e2SEdward Cree 32535a6681e2SEdward Cree rtnl_lock(); 32545a6681e2SEdward Cree 32555a6681e2SEdward Cree if (efx->state == STATE_DISABLED) 32565a6681e2SEdward Cree goto out; 32575a6681e2SEdward Cree 32585a6681e2SEdward Cree rc = ef4_reset(efx, RESET_TYPE_ALL); 32595a6681e2SEdward Cree if (rc) { 32605a6681e2SEdward Cree netif_err(efx, hw, efx->net_dev, 32615a6681e2SEdward Cree "ef4_reset failed after PCI error (%d)\n", rc); 32625a6681e2SEdward Cree } else { 32635a6681e2SEdward Cree efx->state = STATE_READY; 32645a6681e2SEdward Cree netif_dbg(efx, hw, efx->net_dev, 32655a6681e2SEdward Cree "Done resetting and resuming IO after PCI error.\n"); 32665a6681e2SEdward Cree } 32675a6681e2SEdward Cree 32685a6681e2SEdward Cree out: 32695a6681e2SEdward Cree rtnl_unlock(); 32705a6681e2SEdward Cree } 32715a6681e2SEdward Cree 32725a6681e2SEdward Cree /* For simplicity and reliability, we always require a slot reset and try to 32735a6681e2SEdward Cree * reset the hardware when a pci error affecting the device is detected. 32745a6681e2SEdward Cree * We leave both the link_reset and mmio_enabled callback unimplemented: 32755a6681e2SEdward Cree * with our request for slot reset the mmio_enabled callback will never be 32765a6681e2SEdward Cree * called, and the link_reset callback is not used by AER or EEH mechanisms. 
32775a6681e2SEdward Cree */ 32785a6681e2SEdward Cree static const struct pci_error_handlers ef4_err_handlers = { 32795a6681e2SEdward Cree .error_detected = ef4_io_error_detected, 32805a6681e2SEdward Cree .slot_reset = ef4_io_slot_reset, 32815a6681e2SEdward Cree .resume = ef4_io_resume, 32825a6681e2SEdward Cree }; 32835a6681e2SEdward Cree 32845a6681e2SEdward Cree static struct pci_driver ef4_pci_driver = { 32855a6681e2SEdward Cree .name = KBUILD_MODNAME, 32865a6681e2SEdward Cree .id_table = ef4_pci_table, 32875a6681e2SEdward Cree .probe = ef4_pci_probe, 32885a6681e2SEdward Cree .remove = ef4_pci_remove, 32895a6681e2SEdward Cree .driver.pm = &ef4_pm_ops, 32905a6681e2SEdward Cree .err_handler = &ef4_err_handlers, 32915a6681e2SEdward Cree }; 32925a6681e2SEdward Cree 32935a6681e2SEdward Cree /************************************************************************** 32945a6681e2SEdward Cree * 32955a6681e2SEdward Cree * Kernel module interface 32965a6681e2SEdward Cree * 32975a6681e2SEdward Cree *************************************************************************/ 32985a6681e2SEdward Cree 32995a6681e2SEdward Cree module_param(interrupt_mode, uint, 0444); 33005a6681e2SEdward Cree MODULE_PARM_DESC(interrupt_mode, 33015a6681e2SEdward Cree "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 33025a6681e2SEdward Cree 33035a6681e2SEdward Cree static int __init ef4_init_module(void) 33045a6681e2SEdward Cree { 33055a6681e2SEdward Cree int rc; 33065a6681e2SEdward Cree 33075a6681e2SEdward Cree printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n"); 33085a6681e2SEdward Cree 33095a6681e2SEdward Cree rc = register_netdevice_notifier(&ef4_netdev_notifier); 33105a6681e2SEdward Cree if (rc) 33115a6681e2SEdward Cree goto err_notifier; 33125a6681e2SEdward Cree 33135a6681e2SEdward Cree reset_workqueue = create_singlethread_workqueue("sfc_reset"); 33145a6681e2SEdward Cree if (!reset_workqueue) { 33155a6681e2SEdward Cree rc = -ENOMEM; 33165a6681e2SEdward Cree goto err_reset; 
33175a6681e2SEdward Cree } 33185a6681e2SEdward Cree 33195a6681e2SEdward Cree rc = pci_register_driver(&ef4_pci_driver); 33205a6681e2SEdward Cree if (rc < 0) 33215a6681e2SEdward Cree goto err_pci; 33225a6681e2SEdward Cree 33235a6681e2SEdward Cree return 0; 33245a6681e2SEdward Cree 33255a6681e2SEdward Cree err_pci: 33265a6681e2SEdward Cree destroy_workqueue(reset_workqueue); 33275a6681e2SEdward Cree err_reset: 33285a6681e2SEdward Cree unregister_netdevice_notifier(&ef4_netdev_notifier); 33295a6681e2SEdward Cree err_notifier: 33305a6681e2SEdward Cree return rc; 33315a6681e2SEdward Cree } 33325a6681e2SEdward Cree 33335a6681e2SEdward Cree static void __exit ef4_exit_module(void) 33345a6681e2SEdward Cree { 33355a6681e2SEdward Cree printk(KERN_INFO "Solarflare Falcon driver unloading\n"); 33365a6681e2SEdward Cree 33375a6681e2SEdward Cree pci_unregister_driver(&ef4_pci_driver); 33385a6681e2SEdward Cree destroy_workqueue(reset_workqueue); 33395a6681e2SEdward Cree unregister_netdevice_notifier(&ef4_netdev_notifier); 33405a6681e2SEdward Cree 33415a6681e2SEdward Cree } 33425a6681e2SEdward Cree 33435a6681e2SEdward Cree module_init(ef4_init_module); 33445a6681e2SEdward Cree module_exit(ef4_exit_module); 33455a6681e2SEdward Cree 33465a6681e2SEdward Cree MODULE_AUTHOR("Solarflare Communications and " 33475a6681e2SEdward Cree "Michael Brown <mbrown@fensystems.co.uk>"); 33485a6681e2SEdward Cree MODULE_DESCRIPTION("Solarflare Falcon network driver"); 33495a6681e2SEdward Cree MODULE_LICENSE("GPL"); 33505a6681e2SEdward Cree MODULE_DEVICE_TABLE(pci, ef4_pci_table); 3351