1ae06c70bSJeff Kirsher // SPDX-License-Identifier: GPL-2.0
2b731d079SJacob Keller /* Copyright(c) 2013 - 2019 Intel Corporation. */
3b3890e30SAlexander Duyck
4b3890e30SAlexander Duyck #include <linux/module.h>
5282ccf6eSFlorian Westphal #include <linux/interrupt.h>
6b3890e30SAlexander Duyck
7b3890e30SAlexander Duyck #include "fm10k.h"
8b3890e30SAlexander Duyck
90e7b3644SAlexander Duyck static const struct fm10k_info *fm10k_info_tbl[] = {
100e7b3644SAlexander Duyck [fm10k_device_pf] = &fm10k_pf_info,
115cb8db4aSAlexander Duyck [fm10k_device_vf] = &fm10k_vf_info,
120e7b3644SAlexander Duyck };
130e7b3644SAlexander Duyck
147d6707a9SJacob Keller /*
15b3890e30SAlexander Duyck * fm10k_pci_tbl - PCI Device ID Table
16b3890e30SAlexander Duyck *
17b3890e30SAlexander Duyck * Wildcard entries (PCI_ANY_ID) should come last
18b3890e30SAlexander Duyck * Last entry must be all 0s
19b3890e30SAlexander Duyck *
20b3890e30SAlexander Duyck * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
21b3890e30SAlexander Duyck * Class, Class Mask, private data (not used) }
22b3890e30SAlexander Duyck */
23b3890e30SAlexander Duyck static const struct pci_device_id fm10k_pci_tbl[] = {
240e7b3644SAlexander Duyck { PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
259a1fe1e2SJacob Keller { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2), fm10k_device_pf },
269a1fe1e2SJacob Keller { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_DA2), fm10k_device_pf },
275cb8db4aSAlexander Duyck { PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
28b3890e30SAlexander Duyck /* required last entry */
29b3890e30SAlexander Duyck { 0, }
30b3890e30SAlexander Duyck };
31b3890e30SAlexander Duyck MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);
32b3890e30SAlexander Duyck
fm10k_read_pci_cfg_word(struct fm10k_hw * hw,u32 reg)3304a5aefbSAlexander Duyck u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
3404a5aefbSAlexander Duyck {
3504a5aefbSAlexander Duyck struct fm10k_intfc *interface = hw->back;
3604a5aefbSAlexander Duyck u16 value = 0;
3704a5aefbSAlexander Duyck
3804a5aefbSAlexander Duyck if (FM10K_REMOVED(hw->hw_addr))
3904a5aefbSAlexander Duyck return ~value;
4004a5aefbSAlexander Duyck
4104a5aefbSAlexander Duyck pci_read_config_word(interface->pdev, reg, &value);
4204a5aefbSAlexander Duyck if (value == 0xFFFF)
4304a5aefbSAlexander Duyck fm10k_write_flush(hw);
4404a5aefbSAlexander Duyck
4504a5aefbSAlexander Duyck return value;
4604a5aefbSAlexander Duyck }
4704a5aefbSAlexander Duyck
fm10k_read_reg(struct fm10k_hw * hw,int reg)4804a5aefbSAlexander Duyck u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
4904a5aefbSAlexander Duyck {
50ce4dad2cSJacob Keller u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
5104a5aefbSAlexander Duyck u32 value = 0;
5204a5aefbSAlexander Duyck
5304a5aefbSAlexander Duyck if (FM10K_REMOVED(hw_addr))
5404a5aefbSAlexander Duyck return ~value;
5504a5aefbSAlexander Duyck
5604a5aefbSAlexander Duyck value = readl(&hw_addr[reg]);
570e7b3644SAlexander Duyck if (!(~value) && (!reg || !(~readl(hw_addr)))) {
580e7b3644SAlexander Duyck struct fm10k_intfc *interface = hw->back;
590e7b3644SAlexander Duyck struct net_device *netdev = interface->netdev;
600e7b3644SAlexander Duyck
6104a5aefbSAlexander Duyck hw->hw_addr = NULL;
620e7b3644SAlexander Duyck netif_device_detach(netdev);
630e7b3644SAlexander Duyck netdev_err(netdev, "PCIe link lost, device now detached\n");
640e7b3644SAlexander Duyck }
6504a5aefbSAlexander Duyck
6604a5aefbSAlexander Duyck return value;
6704a5aefbSAlexander Duyck }
6804a5aefbSAlexander Duyck
fm10k_hw_ready(struct fm10k_intfc * interface)690e7b3644SAlexander Duyck static int fm10k_hw_ready(struct fm10k_intfc *interface)
700e7b3644SAlexander Duyck {
710e7b3644SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
720e7b3644SAlexander Duyck
730e7b3644SAlexander Duyck fm10k_write_flush(hw);
740e7b3644SAlexander Duyck
750e7b3644SAlexander Duyck return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
760e7b3644SAlexander Duyck }
770e7b3644SAlexander Duyck
78fc917368SJacob Keller /**
79fc917368SJacob Keller * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
80fc917368SJacob Keller * @interface: fm10k private interface structure
81fc917368SJacob Keller *
82fc917368SJacob Keller * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
83fc917368SJacob Keller * started immediately, request that it be restarted when possible.
84fc917368SJacob Keller */
fm10k_macvlan_schedule(struct fm10k_intfc * interface)85fc917368SJacob Keller void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
86fc917368SJacob Keller {
87fc917368SJacob Keller /* Avoid processing the MAC/VLAN queue when the service task is
88fc917368SJacob Keller * disabled, or when we're resetting the device.
89fc917368SJacob Keller */
90fc917368SJacob Keller if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
91fc917368SJacob Keller !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
92fc917368SJacob Keller clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);
93fc917368SJacob Keller /* We delay the actual start of execution in order to allow
94fc917368SJacob Keller * multiple MAC/VLAN updates to accumulate before handling
95fc917368SJacob Keller * them, and to allow some time to let the mailbox drain
96fc917368SJacob Keller * between runs.
97fc917368SJacob Keller */
98fc917368SJacob Keller queue_delayed_work(fm10k_workqueue,
99fc917368SJacob Keller &interface->macvlan_task, 10);
100fc917368SJacob Keller } else {
101fc917368SJacob Keller set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
102fc917368SJacob Keller }
103fc917368SJacob Keller }
104fc917368SJacob Keller
105fc917368SJacob Keller /**
106fc917368SJacob Keller * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
107fc917368SJacob Keller * @interface: fm10k private interface structure
108fc917368SJacob Keller *
109fc917368SJacob Keller * Wait until the MAC/VLAN queue task has stopped, and cancel any future
110fc917368SJacob Keller * requests.
111fc917368SJacob Keller */
fm10k_stop_macvlan_task(struct fm10k_intfc * interface)112fc917368SJacob Keller static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
113fc917368SJacob Keller {
114fc917368SJacob Keller /* Disable the MAC/VLAN work item */
115fc917368SJacob Keller set_bit(__FM10K_MACVLAN_DISABLE, interface->state);
116fc917368SJacob Keller
117fc917368SJacob Keller /* Make sure we waited until any current invocations have stopped */
118fc917368SJacob Keller cancel_delayed_work_sync(&interface->macvlan_task);
119fc917368SJacob Keller
120fc917368SJacob Keller /* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
121fc917368SJacob Keller * However, it may not be unset of the MAC/VLAN task never actually
122fc917368SJacob Keller * got a chance to run. Since we've canceled the task here, and it
123fc917368SJacob Keller * cannot be rescheuled right now, we need to ensure the scheduled bit
124fc917368SJacob Keller * gets unset.
125fc917368SJacob Keller */
126fc917368SJacob Keller clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
127fc917368SJacob Keller }
128fc917368SJacob Keller
129fc917368SJacob Keller /**
130fc917368SJacob Keller * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
131fc917368SJacob Keller * @interface: fm10k private interface structure
132fc917368SJacob Keller *
133fc917368SJacob Keller * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
134fc917368SJacob Keller * the MAC/VLAN work monitor.
135fc917368SJacob Keller */
fm10k_resume_macvlan_task(struct fm10k_intfc * interface)136fc917368SJacob Keller static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
137fc917368SJacob Keller {
138fc917368SJacob Keller /* Re-enable the MAC/VLAN work item */
139fc917368SJacob Keller clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);
140fc917368SJacob Keller
141fc917368SJacob Keller /* We might have received a MAC/VLAN request while disabled. If so,
142fc917368SJacob Keller * kick off the queue now.
143fc917368SJacob Keller */
144fc917368SJacob Keller if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
145fc917368SJacob Keller fm10k_macvlan_schedule(interface);
146fc917368SJacob Keller }
147fc917368SJacob Keller
fm10k_service_event_schedule(struct fm10k_intfc * interface)148b7d8514cSAlexander Duyck void fm10k_service_event_schedule(struct fm10k_intfc *interface)
149b7d8514cSAlexander Duyck {
15046929557SJacob Keller if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
151b4fd8ffcSJacob Keller !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
152b4fd8ffcSJacob Keller clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
153b382bb1bSJeff Kirsher queue_work(fm10k_workqueue, &interface->service_task);
154b4fd8ffcSJacob Keller } else {
155b4fd8ffcSJacob Keller set_bit(__FM10K_SERVICE_REQUEST, interface->state);
156b4fd8ffcSJacob Keller }
157b7d8514cSAlexander Duyck }
158b7d8514cSAlexander Duyck
fm10k_service_event_complete(struct fm10k_intfc * interface)159b7d8514cSAlexander Duyck static void fm10k_service_event_complete(struct fm10k_intfc *interface)
160b7d8514cSAlexander Duyck {
16146929557SJacob Keller WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));
162b7d8514cSAlexander Duyck
163b7d8514cSAlexander Duyck /* flush memory to make sure state is correct before next watchog */
164b7d8514cSAlexander Duyck smp_mb__before_atomic();
16546929557SJacob Keller clear_bit(__FM10K_SERVICE_SCHED, interface->state);
166b4fd8ffcSJacob Keller
167b4fd8ffcSJacob Keller /* If a service event was requested since we started, immediately
168b4fd8ffcSJacob Keller * re-schedule now. This ensures we don't drop a request until the
169b4fd8ffcSJacob Keller * next timer event.
170b4fd8ffcSJacob Keller */
171b4fd8ffcSJacob Keller if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
172b4fd8ffcSJacob Keller fm10k_service_event_schedule(interface);
173b7d8514cSAlexander Duyck }
174b7d8514cSAlexander Duyck
fm10k_stop_service_event(struct fm10k_intfc * interface)17504914390SJacob Keller static void fm10k_stop_service_event(struct fm10k_intfc *interface)
17604914390SJacob Keller {
17704914390SJacob Keller set_bit(__FM10K_SERVICE_DISABLE, interface->state);
17804914390SJacob Keller cancel_work_sync(&interface->service_task);
17904914390SJacob Keller
18004914390SJacob Keller /* It's possible that cancel_work_sync stopped the service task from
18104914390SJacob Keller * running before it could actually start. In this case the
18204914390SJacob Keller * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
18304914390SJacob Keller * the service task cannot be running at this point, we need to clear
18404914390SJacob Keller * the scheduled bit, as otherwise the service task may never be
18504914390SJacob Keller * restarted.
18604914390SJacob Keller */
18704914390SJacob Keller clear_bit(__FM10K_SERVICE_SCHED, interface->state);
18804914390SJacob Keller }
18904914390SJacob Keller
fm10k_start_service_event(struct fm10k_intfc * interface)19004914390SJacob Keller static void fm10k_start_service_event(struct fm10k_intfc *interface)
19104914390SJacob Keller {
19204914390SJacob Keller clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
19304914390SJacob Keller fm10k_service_event_schedule(interface);
19404914390SJacob Keller }
19504914390SJacob Keller
196b7d8514cSAlexander Duyck /**
197b7d8514cSAlexander Duyck * fm10k_service_timer - Timer Call-back
198363656ebSJacob Keller * @t: pointer to timer data
199b7d8514cSAlexander Duyck **/
fm10k_service_timer(struct timer_list * t)20026566eaeSKees Cook static void fm10k_service_timer(struct timer_list *t)
201b7d8514cSAlexander Duyck {
20226566eaeSKees Cook struct fm10k_intfc *interface = from_timer(interface, t,
20326566eaeSKees Cook service_timer);
204b7d8514cSAlexander Duyck
205b7d8514cSAlexander Duyck /* Reset the timer */
206b7d8514cSAlexander Duyck mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
207b7d8514cSAlexander Duyck
208b7d8514cSAlexander Duyck fm10k_service_event_schedule(interface);
209b7d8514cSAlexander Duyck }
210b7d8514cSAlexander Duyck
2110b40f457SJacob Keller /**
2120b40f457SJacob Keller * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
2130b40f457SJacob Keller * @interface: fm10k private data structure
2140b40f457SJacob Keller *
2150b40f457SJacob Keller * This function prepares for a device reset by shutting as much down as we
2160b40f457SJacob Keller * can. It does nothing and returns false if __FM10K_RESETTING was already set
2170b40f457SJacob Keller * prior to calling this function. It returns true if it actually did work.
2180b40f457SJacob Keller */
fm10k_prepare_for_reset(struct fm10k_intfc * interface)2190b40f457SJacob Keller static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
220b7d8514cSAlexander Duyck {
221b7d8514cSAlexander Duyck struct net_device *netdev = interface->netdev;
222b7d8514cSAlexander Duyck
223b7d8514cSAlexander Duyck /* put off any impending NetWatchDogTimeout */
224860e9538SFlorian Westphal netif_trans_update(netdev);
225b7d8514cSAlexander Duyck
2260b40f457SJacob Keller /* Nothing to do if a reset is already in progress */
2270b40f457SJacob Keller if (test_and_set_bit(__FM10K_RESETTING, interface->state))
2280b40f457SJacob Keller return false;
229b7d8514cSAlexander Duyck
230fc917368SJacob Keller /* As the MAC/VLAN task will be accessing registers it must not be
231fc917368SJacob Keller * running while we reset. Although the task will not be scheduled
232fc917368SJacob Keller * once we start resetting it may already be running
233fc917368SJacob Keller */
234fc917368SJacob Keller fm10k_stop_macvlan_task(interface);
235fc917368SJacob Keller
236b7d8514cSAlexander Duyck rtnl_lock();
237b7d8514cSAlexander Duyck
238883a9ccbSAlexander Duyck fm10k_iov_suspend(interface->pdev);
239883a9ccbSAlexander Duyck
240b7d8514cSAlexander Duyck if (netif_running(netdev))
241b7d8514cSAlexander Duyck fm10k_close(netdev);
242b7d8514cSAlexander Duyck
243b7d8514cSAlexander Duyck fm10k_mbx_free_irq(interface);
244b7d8514cSAlexander Duyck
245875328e4SJacob Keller /* free interrupts */
246875328e4SJacob Keller fm10k_clear_queueing_scheme(interface);
247875328e4SJacob Keller
248b7d8514cSAlexander Duyck /* delay any future reset requests */
249b7d8514cSAlexander Duyck interface->last_reset = jiffies + (10 * HZ);
250b7d8514cSAlexander Duyck
25140de1fadSJacob Keller rtnl_unlock();
2520b40f457SJacob Keller
2530b40f457SJacob Keller return true;
25440de1fadSJacob Keller }
25540de1fadSJacob Keller
fm10k_handle_reset(struct fm10k_intfc * interface)25640de1fadSJacob Keller static int fm10k_handle_reset(struct fm10k_intfc *interface)
25740de1fadSJacob Keller {
25840de1fadSJacob Keller struct net_device *netdev = interface->netdev;
25940de1fadSJacob Keller struct fm10k_hw *hw = &interface->hw;
26040de1fadSJacob Keller int err;
26140de1fadSJacob Keller
2620b40f457SJacob Keller WARN_ON(!test_bit(__FM10K_RESETTING, interface->state));
2630b40f457SJacob Keller
26440de1fadSJacob Keller rtnl_lock();
26540de1fadSJacob Keller
2660d63a8f5SJacob Keller pci_set_master(interface->pdev);
2670d63a8f5SJacob Keller
268b7d8514cSAlexander Duyck /* reset and initialize the hardware so it is in a known state */
2691343c65fSJacob Keller err = hw->mac.ops.reset_hw(hw);
2701343c65fSJacob Keller if (err) {
2711343c65fSJacob Keller dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
2721343c65fSJacob Keller goto reinit_err;
2731343c65fSJacob Keller }
2741343c65fSJacob Keller
2751343c65fSJacob Keller err = hw->mac.ops.init_hw(hw);
2761343c65fSJacob Keller if (err) {
277b7d8514cSAlexander Duyck dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
2781343c65fSJacob Keller goto reinit_err;
2791343c65fSJacob Keller }
280b7d8514cSAlexander Duyck
281875328e4SJacob Keller err = fm10k_init_queueing_scheme(interface);
282875328e4SJacob Keller if (err) {
2833d02b3dfSBruce Allan dev_err(&interface->pdev->dev,
2843d02b3dfSBruce Allan "init_queueing_scheme failed: %d\n", err);
285875328e4SJacob Keller goto reinit_err;
286875328e4SJacob Keller }
287875328e4SJacob Keller
28840de1fadSJacob Keller /* re-associate interrupts */
28909f8a82bSAlexander Duyck err = fm10k_mbx_request_irq(interface);
29009f8a82bSAlexander Duyck if (err)
29109f8a82bSAlexander Duyck goto err_mbx_irq;
29209f8a82bSAlexander Duyck
29309f8a82bSAlexander Duyck err = fm10k_hw_ready(interface);
29409f8a82bSAlexander Duyck if (err)
29509f8a82bSAlexander Duyck goto err_open;
296b7d8514cSAlexander Duyck
297bdc7f590SJacob Keller /* update hardware address for VFs if perm_addr has changed */
298bdc7f590SJacob Keller if (hw->mac.type == fm10k_mac_vf) {
299bdc7f590SJacob Keller if (is_valid_ether_addr(hw->mac.perm_addr)) {
300bdc7f590SJacob Keller ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
301bdc7f590SJacob Keller ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
302*f3956ebbSJakub Kicinski eth_hw_addr_set(netdev, hw->mac.perm_addr);
303bdc7f590SJacob Keller netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
304bdc7f590SJacob Keller }
305bdc7f590SJacob Keller
306bdc7f590SJacob Keller if (hw->mac.vlan_override)
307bdc7f590SJacob Keller netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
308bdc7f590SJacob Keller else
309bdc7f590SJacob Keller netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
310bdc7f590SJacob Keller }
311bdc7f590SJacob Keller
31209f8a82bSAlexander Duyck err = netif_running(netdev) ? fm10k_open(netdev) : 0;
31309f8a82bSAlexander Duyck if (err)
31409f8a82bSAlexander Duyck goto err_open;
315b7d8514cSAlexander Duyck
316883a9ccbSAlexander Duyck fm10k_iov_resume(interface->pdev);
317883a9ccbSAlexander Duyck
31809f8a82bSAlexander Duyck rtnl_unlock();
31909f8a82bSAlexander Duyck
320fc917368SJacob Keller fm10k_resume_macvlan_task(interface);
321fc917368SJacob Keller
32246929557SJacob Keller clear_bit(__FM10K_RESETTING, interface->state);
32309f8a82bSAlexander Duyck
32440de1fadSJacob Keller return err;
32509f8a82bSAlexander Duyck err_open:
32609f8a82bSAlexander Duyck fm10k_mbx_free_irq(interface);
32709f8a82bSAlexander Duyck err_mbx_irq:
32809f8a82bSAlexander Duyck fm10k_clear_queueing_scheme(interface);
3291343c65fSJacob Keller reinit_err:
3301343c65fSJacob Keller netif_device_detach(netdev);
3311343c65fSJacob Keller
332b7d8514cSAlexander Duyck rtnl_unlock();
333b7d8514cSAlexander Duyck
33446929557SJacob Keller clear_bit(__FM10K_RESETTING, interface->state);
33540de1fadSJacob Keller
33640de1fadSJacob Keller return err;
33740de1fadSJacob Keller }
33840de1fadSJacob Keller
fm10k_detach_subtask(struct fm10k_intfc * interface)33965b0a469SJacob Keller static void fm10k_detach_subtask(struct fm10k_intfc *interface)
34065b0a469SJacob Keller {
34165b0a469SJacob Keller struct net_device *netdev = interface->netdev;
34265b0a469SJacob Keller u32 __iomem *hw_addr;
34365b0a469SJacob Keller u32 value;
34465b0a469SJacob Keller
3450b40f457SJacob Keller /* do nothing if netdev is still present or hw_addr is set */
34665b0a469SJacob Keller if (netif_device_present(netdev) || interface->hw.hw_addr)
34765b0a469SJacob Keller return;
34865b0a469SJacob Keller
3490b40f457SJacob Keller /* We've lost the PCIe register space, and can no longer access the
3500b40f457SJacob Keller * device. Shut everything except the detach subtask down and prepare
3510b40f457SJacob Keller * to reset the device in case we recover. If we actually prepare for
3520b40f457SJacob Keller * reset, indicate that we're detached.
3530b40f457SJacob Keller */
3540b40f457SJacob Keller if (fm10k_prepare_for_reset(interface))
3550b40f457SJacob Keller set_bit(__FM10K_RESET_DETACHED, interface->state);
3560b40f457SJacob Keller
35765b0a469SJacob Keller /* check the real address space to see if we've recovered */
35865b0a469SJacob Keller hw_addr = READ_ONCE(interface->uc_addr);
35965b0a469SJacob Keller value = readl(hw_addr);
36065b0a469SJacob Keller if (~value) {
361b731d079SJacob Keller int err;
362b731d079SJacob Keller
3630b40f457SJacob Keller /* Make sure the reset was initiated because we detached,
3640b40f457SJacob Keller * otherwise we might race with a different reset flow.
3650b40f457SJacob Keller */
3660b40f457SJacob Keller if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
3670b40f457SJacob Keller interface->state))
3680b40f457SJacob Keller return;
3690b40f457SJacob Keller
3700b40f457SJacob Keller /* Restore the hardware address */
37165b0a469SJacob Keller interface->hw.hw_addr = interface->uc_addr;
3720b40f457SJacob Keller
3730b40f457SJacob Keller /* PCIe link has been restored, and the device is active
3740b40f457SJacob Keller * again. Restore everything and reset the device.
3750b40f457SJacob Keller */
3760b40f457SJacob Keller err = fm10k_handle_reset(interface);
3770b40f457SJacob Keller if (err) {
3780b40f457SJacob Keller netdev_err(netdev, "Unable to reset device: %d\n", err);
3790b40f457SJacob Keller interface->hw.hw_addr = NULL;
38065b0a469SJacob Keller return;
38165b0a469SJacob Keller }
38265b0a469SJacob Keller
3830b40f457SJacob Keller /* Re-attach the netdev */
3840b40f457SJacob Keller netif_device_attach(netdev);
3850b40f457SJacob Keller netdev_warn(netdev, "PCIe link restored, device now attached\n");
3860b40f457SJacob Keller return;
3870b40f457SJacob Keller }
38865b0a469SJacob Keller }
38965b0a469SJacob Keller
fm10k_reset_subtask(struct fm10k_intfc * interface)3900b40f457SJacob Keller static void fm10k_reset_subtask(struct fm10k_intfc *interface)
39140de1fadSJacob Keller {
39240de1fadSJacob Keller int err;
39340de1fadSJacob Keller
3940b40f457SJacob Keller if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
3950b40f457SJacob Keller interface->flags))
3960b40f457SJacob Keller return;
3970b40f457SJacob Keller
3980b40f457SJacob Keller /* If another thread has already prepared to reset the device, we
3990b40f457SJacob Keller * should not attempt to handle a reset here, since we'd race with
4000b40f457SJacob Keller * that thread. This may happen if we suspend the device or if the
4010b40f457SJacob Keller * PCIe link is lost. In this case, we'll just ignore the RESET
4020b40f457SJacob Keller * request, as it will (eventually) be taken care of when the thread
4030b40f457SJacob Keller * which actually started the reset is finished.
4040b40f457SJacob Keller */
4050b40f457SJacob Keller if (!fm10k_prepare_for_reset(interface))
4060b40f457SJacob Keller return;
4070b40f457SJacob Keller
4080b40f457SJacob Keller netdev_err(interface->netdev, "Reset interface\n");
40940de1fadSJacob Keller
41040de1fadSJacob Keller err = fm10k_handle_reset(interface);
41140de1fadSJacob Keller if (err)
41240de1fadSJacob Keller dev_err(&interface->pdev->dev,
41340de1fadSJacob Keller "fm10k_handle_reset failed: %d\n", err);
414b7d8514cSAlexander Duyck }
415b7d8514cSAlexander Duyck
416b7d8514cSAlexander Duyck /**
417b7d8514cSAlexander Duyck * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
418b7d8514cSAlexander Duyck * @interface: board private structure
419b7d8514cSAlexander Duyck *
420b7d8514cSAlexander Duyck * Configure the SWPRI to PC mapping for the port.
421b7d8514cSAlexander Duyck **/
fm10k_configure_swpri_map(struct fm10k_intfc * interface)422b7d8514cSAlexander Duyck static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
423b7d8514cSAlexander Duyck {
424b7d8514cSAlexander Duyck struct net_device *netdev = interface->netdev;
425b7d8514cSAlexander Duyck struct fm10k_hw *hw = &interface->hw;
426b7d8514cSAlexander Duyck int i;
427b7d8514cSAlexander Duyck
428b7d8514cSAlexander Duyck /* clear flag indicating update is needed */
4293ee7b3a3SJacob Keller clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
430b7d8514cSAlexander Duyck
431b7d8514cSAlexander Duyck /* these registers are only available on the PF */
432b7d8514cSAlexander Duyck if (hw->mac.type != fm10k_mac_pf)
433b7d8514cSAlexander Duyck return;
434b7d8514cSAlexander Duyck
435b7d8514cSAlexander Duyck /* configure SWPRI to PC map */
436b7d8514cSAlexander Duyck for (i = 0; i < FM10K_SWPRI_MAX; i++)
437b7d8514cSAlexander Duyck fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
438b7d8514cSAlexander Duyck netdev_get_prio_tc_map(netdev, i));
439b7d8514cSAlexander Duyck }
440b7d8514cSAlexander Duyck
441b7d8514cSAlexander Duyck /**
442b7d8514cSAlexander Duyck * fm10k_watchdog_update_host_state - Update the link status based on host.
443b7d8514cSAlexander Duyck * @interface: board private structure
444b7d8514cSAlexander Duyck **/
fm10k_watchdog_update_host_state(struct fm10k_intfc * interface)445b7d8514cSAlexander Duyck static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
446b7d8514cSAlexander Duyck {
447b7d8514cSAlexander Duyck struct fm10k_hw *hw = &interface->hw;
448b7d8514cSAlexander Duyck s32 err;
449b7d8514cSAlexander Duyck
45046929557SJacob Keller if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
451b7d8514cSAlexander Duyck interface->host_ready = false;
452b7d8514cSAlexander Duyck if (time_is_after_jiffies(interface->link_down_event))
453b7d8514cSAlexander Duyck return;
45446929557SJacob Keller clear_bit(__FM10K_LINK_DOWN, interface->state);
455b7d8514cSAlexander Duyck }
456b7d8514cSAlexander Duyck
4573ee7b3a3SJacob Keller if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
458b7d8514cSAlexander Duyck if (rtnl_trylock()) {
459b7d8514cSAlexander Duyck fm10k_configure_swpri_map(interface);
460b7d8514cSAlexander Duyck rtnl_unlock();
461b7d8514cSAlexander Duyck }
462b7d8514cSAlexander Duyck }
463b7d8514cSAlexander Duyck
464b7d8514cSAlexander Duyck /* lock the mailbox for transmit and receive */
465b7d8514cSAlexander Duyck fm10k_mbx_lock(interface);
466b7d8514cSAlexander Duyck
467b7d8514cSAlexander Duyck err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
468b7d8514cSAlexander Duyck if (err && time_is_before_jiffies(interface->last_reset))
4693ee7b3a3SJacob Keller set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
470b7d8514cSAlexander Duyck
471b7d8514cSAlexander Duyck /* free the lock */
472b7d8514cSAlexander Duyck fm10k_mbx_unlock(interface);
473b7d8514cSAlexander Duyck }
474b7d8514cSAlexander Duyck
475b7d8514cSAlexander Duyck /**
476b7d8514cSAlexander Duyck * fm10k_mbx_subtask - Process upstream and downstream mailboxes
477b7d8514cSAlexander Duyck * @interface: board private structure
478b7d8514cSAlexander Duyck *
479b7d8514cSAlexander Duyck * This function will process both the upstream and downstream mailboxes.
480b7d8514cSAlexander Duyck **/
fm10k_mbx_subtask(struct fm10k_intfc * interface)481b7d8514cSAlexander Duyck static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
482b7d8514cSAlexander Duyck {
4830b40f457SJacob Keller /* If we're resetting, bail out */
4840b40f457SJacob Keller if (test_bit(__FM10K_RESETTING, interface->state))
4850b40f457SJacob Keller return;
4860b40f457SJacob Keller
487b7d8514cSAlexander Duyck /* process upstream mailbox and update device state */
488b7d8514cSAlexander Duyck fm10k_watchdog_update_host_state(interface);
489883a9ccbSAlexander Duyck
490883a9ccbSAlexander Duyck /* process downstream mailboxes */
491883a9ccbSAlexander Duyck fm10k_iov_mbx(interface);
492b7d8514cSAlexander Duyck }
493b7d8514cSAlexander Duyck
494b7d8514cSAlexander Duyck /**
495b7d8514cSAlexander Duyck * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
496b7d8514cSAlexander Duyck * @interface: board private structure
497b7d8514cSAlexander Duyck **/
fm10k_watchdog_host_is_ready(struct fm10k_intfc * interface)498b7d8514cSAlexander Duyck static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
499b7d8514cSAlexander Duyck {
500b7d8514cSAlexander Duyck struct net_device *netdev = interface->netdev;
501b7d8514cSAlexander Duyck
502b7d8514cSAlexander Duyck /* only continue if link state is currently down */
503b7d8514cSAlexander Duyck if (netif_carrier_ok(netdev))
504b7d8514cSAlexander Duyck return;
505b7d8514cSAlexander Duyck
506b7d8514cSAlexander Duyck netif_info(interface, drv, netdev, "NIC Link is up\n");
507b7d8514cSAlexander Duyck
508b7d8514cSAlexander Duyck netif_carrier_on(netdev);
509b7d8514cSAlexander Duyck netif_tx_wake_all_queues(netdev);
510b7d8514cSAlexander Duyck }
511b7d8514cSAlexander Duyck
512b7d8514cSAlexander Duyck /**
513b7d8514cSAlexander Duyck * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
514b7d8514cSAlexander Duyck * @interface: board private structure
515b7d8514cSAlexander Duyck **/
fm10k_watchdog_host_not_ready(struct fm10k_intfc * interface)516b7d8514cSAlexander Duyck static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
517b7d8514cSAlexander Duyck {
518b7d8514cSAlexander Duyck struct net_device *netdev = interface->netdev;
519b7d8514cSAlexander Duyck
520b7d8514cSAlexander Duyck /* only continue if link state is currently up */
521b7d8514cSAlexander Duyck if (!netif_carrier_ok(netdev))
522b7d8514cSAlexander Duyck return;
523b7d8514cSAlexander Duyck
524b7d8514cSAlexander Duyck netif_info(interface, drv, netdev, "NIC Link is down\n");
525b7d8514cSAlexander Duyck
526b7d8514cSAlexander Duyck netif_carrier_off(netdev);
527b7d8514cSAlexander Duyck netif_tx_stop_all_queues(netdev);
528b7d8514cSAlexander Duyck }
529b7d8514cSAlexander Duyck
530b7d8514cSAlexander Duyck /**
531b7d8514cSAlexander Duyck * fm10k_update_stats - Update the board statistics counters.
532b7d8514cSAlexander Duyck * @interface: board private structure
533b7d8514cSAlexander Duyck **/
fm10k_update_stats(struct fm10k_intfc * interface)534b7d8514cSAlexander Duyck void fm10k_update_stats(struct fm10k_intfc *interface)
535b7d8514cSAlexander Duyck {
536b7d8514cSAlexander Duyck struct net_device_stats *net_stats = &interface->netdev->stats;
537b7d8514cSAlexander Duyck struct fm10k_hw *hw = &interface->hw;
53880043f3bSJacob Keller u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
53980043f3bSJacob Keller u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
54080043f3bSJacob Keller u64 rx_link_errors = 0;
541b7d8514cSAlexander Duyck u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
542b7d8514cSAlexander Duyck u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
543b7d8514cSAlexander Duyck u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
544b7d8514cSAlexander Duyck u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
545b7d8514cSAlexander Duyck u64 bytes, pkts;
546b7d8514cSAlexander Duyck int i;
547b7d8514cSAlexander Duyck
5489d73edeeSJacob Keller /* ensure only one thread updates stats at a time */
54946929557SJacob Keller if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
5509d73edeeSJacob Keller return;
5519d73edeeSJacob Keller
552b7d8514cSAlexander Duyck /* do not allow stats update via service task for next second */
553b7d8514cSAlexander Duyck interface->next_stats_update = jiffies + HZ;
554b7d8514cSAlexander Duyck
555b7d8514cSAlexander Duyck /* gather some stats to the interface struct that are per queue */
556b7d8514cSAlexander Duyck for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
557b624714bSJacob Keller struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);
558b624714bSJacob Keller
559b624714bSJacob Keller if (!tx_ring)
560b624714bSJacob Keller continue;
561b7d8514cSAlexander Duyck
562b7d8514cSAlexander Duyck restart_queue += tx_ring->tx_stats.restart_queue;
563b7d8514cSAlexander Duyck tx_busy += tx_ring->tx_stats.tx_busy;
564b7d8514cSAlexander Duyck tx_csum_errors += tx_ring->tx_stats.csum_err;
565b7d8514cSAlexander Duyck bytes += tx_ring->stats.bytes;
566b7d8514cSAlexander Duyck pkts += tx_ring->stats.packets;
56780043f3bSJacob Keller hw_csum_tx_good += tx_ring->tx_stats.csum_good;
568b7d8514cSAlexander Duyck }
569b7d8514cSAlexander Duyck
570b7d8514cSAlexander Duyck interface->restart_queue = restart_queue;
571b7d8514cSAlexander Duyck interface->tx_busy = tx_busy;
572b7d8514cSAlexander Duyck net_stats->tx_bytes = bytes;
573b7d8514cSAlexander Duyck net_stats->tx_packets = pkts;
574b7d8514cSAlexander Duyck interface->tx_csum_errors = tx_csum_errors;
57580043f3bSJacob Keller interface->hw_csum_tx_good = hw_csum_tx_good;
57680043f3bSJacob Keller
577b7d8514cSAlexander Duyck /* gather some stats to the interface struct that are per queue */
578b7d8514cSAlexander Duyck for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
579b624714bSJacob Keller struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);
580b624714bSJacob Keller
581b624714bSJacob Keller if (!rx_ring)
582b624714bSJacob Keller continue;
583b7d8514cSAlexander Duyck
584b7d8514cSAlexander Duyck bytes += rx_ring->stats.bytes;
585b7d8514cSAlexander Duyck pkts += rx_ring->stats.packets;
586b7d8514cSAlexander Duyck alloc_failed += rx_ring->rx_stats.alloc_failed;
587b7d8514cSAlexander Duyck rx_csum_errors += rx_ring->rx_stats.csum_err;
588b7d8514cSAlexander Duyck rx_errors += rx_ring->rx_stats.errors;
58980043f3bSJacob Keller hw_csum_rx_good += rx_ring->rx_stats.csum_good;
59080043f3bSJacob Keller rx_switch_errors += rx_ring->rx_stats.switch_errors;
59180043f3bSJacob Keller rx_drops += rx_ring->rx_stats.drops;
59280043f3bSJacob Keller rx_pp_errors += rx_ring->rx_stats.pp_errors;
59380043f3bSJacob Keller rx_link_errors += rx_ring->rx_stats.link_errors;
59480043f3bSJacob Keller rx_length_errors += rx_ring->rx_stats.length_errors;
595b7d8514cSAlexander Duyck }
596b7d8514cSAlexander Duyck
597b7d8514cSAlexander Duyck net_stats->rx_bytes = bytes;
598b7d8514cSAlexander Duyck net_stats->rx_packets = pkts;
599b7d8514cSAlexander Duyck interface->alloc_failed = alloc_failed;
600b7d8514cSAlexander Duyck interface->rx_csum_errors = rx_csum_errors;
60180043f3bSJacob Keller interface->hw_csum_rx_good = hw_csum_rx_good;
60280043f3bSJacob Keller interface->rx_switch_errors = rx_switch_errors;
60380043f3bSJacob Keller interface->rx_drops = rx_drops;
60480043f3bSJacob Keller interface->rx_pp_errors = rx_pp_errors;
60580043f3bSJacob Keller interface->rx_link_errors = rx_link_errors;
60680043f3bSJacob Keller interface->rx_length_errors = rx_length_errors;
607b7d8514cSAlexander Duyck
608b7d8514cSAlexander Duyck hw->mac.ops.update_hw_stats(hw, &interface->stats);
609b7d8514cSAlexander Duyck
610c0e61781SJeff Kirsher for (i = 0; i < hw->mac.max_queues; i++) {
611b7d8514cSAlexander Duyck struct fm10k_hw_stats_q *q = &interface->stats.q[i];
612b7d8514cSAlexander Duyck
613b7d8514cSAlexander Duyck tx_bytes_nic += q->tx_bytes.count;
614b7d8514cSAlexander Duyck tx_pkts_nic += q->tx_packets.count;
615b7d8514cSAlexander Duyck rx_bytes_nic += q->rx_bytes.count;
616b7d8514cSAlexander Duyck rx_pkts_nic += q->rx_packets.count;
617b7d8514cSAlexander Duyck rx_drops_nic += q->rx_drops.count;
618b7d8514cSAlexander Duyck }
619b7d8514cSAlexander Duyck
620b7d8514cSAlexander Duyck interface->tx_bytes_nic = tx_bytes_nic;
621b7d8514cSAlexander Duyck interface->tx_packets_nic = tx_pkts_nic;
622b7d8514cSAlexander Duyck interface->rx_bytes_nic = rx_bytes_nic;
623b7d8514cSAlexander Duyck interface->rx_packets_nic = rx_pkts_nic;
624b7d8514cSAlexander Duyck interface->rx_drops_nic = rx_drops_nic;
625b7d8514cSAlexander Duyck
626b7d8514cSAlexander Duyck /* Fill out the OS statistics structure */
62797c71e3cSJeff Kirsher net_stats->rx_errors = rx_errors;
628b7d8514cSAlexander Duyck net_stats->rx_dropped = interface->stats.nodesc_drop.count;
6299d73edeeSJacob Keller
6300e100440SJacob Keller /* Update VF statistics */
6310e100440SJacob Keller fm10k_iov_update_stats(interface);
6320e100440SJacob Keller
63346929557SJacob Keller clear_bit(__FM10K_UPDATING_STATS, interface->state);
634b7d8514cSAlexander Duyck }
635b7d8514cSAlexander Duyck
636b7d8514cSAlexander Duyck /**
637b7d8514cSAlexander Duyck * fm10k_watchdog_flush_tx - flush queues on host not ready
638363656ebSJacob Keller * @interface: pointer to the device interface structure
639b7d8514cSAlexander Duyck **/
fm10k_watchdog_flush_tx(struct fm10k_intfc * interface)640b7d8514cSAlexander Duyck static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
641b7d8514cSAlexander Duyck {
642b7d8514cSAlexander Duyck int some_tx_pending = 0;
643b7d8514cSAlexander Duyck int i;
644b7d8514cSAlexander Duyck
645b7d8514cSAlexander Duyck /* nothing to do if carrier is up */
646b7d8514cSAlexander Duyck if (netif_carrier_ok(interface->netdev))
647b7d8514cSAlexander Duyck return;
648b7d8514cSAlexander Duyck
649b7d8514cSAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++) {
650b7d8514cSAlexander Duyck struct fm10k_ring *tx_ring = interface->tx_ring[i];
651b7d8514cSAlexander Duyck
652b7d8514cSAlexander Duyck if (tx_ring->next_to_use != tx_ring->next_to_clean) {
653b7d8514cSAlexander Duyck some_tx_pending = 1;
654b7d8514cSAlexander Duyck break;
655b7d8514cSAlexander Duyck }
656b7d8514cSAlexander Duyck }
657b7d8514cSAlexander Duyck
658b7d8514cSAlexander Duyck /* We've lost link, so the controller stops DMA, but we've got
659b7d8514cSAlexander Duyck * queued Tx work that's never going to get done, so reset
660b7d8514cSAlexander Duyck * controller to flush Tx.
661b7d8514cSAlexander Duyck */
662b7d8514cSAlexander Duyck if (some_tx_pending)
6633ee7b3a3SJacob Keller set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
664b7d8514cSAlexander Duyck }
665b7d8514cSAlexander Duyck
666b7d8514cSAlexander Duyck /**
667b7d8514cSAlexander Duyck * fm10k_watchdog_subtask - check and bring link up
668363656ebSJacob Keller * @interface: pointer to the device interface structure
669b7d8514cSAlexander Duyck **/
fm10k_watchdog_subtask(struct fm10k_intfc * interface)670b7d8514cSAlexander Duyck static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
671b7d8514cSAlexander Duyck {
672b7d8514cSAlexander Duyck /* if interface is down do nothing */
67346929557SJacob Keller if (test_bit(__FM10K_DOWN, interface->state) ||
67446929557SJacob Keller test_bit(__FM10K_RESETTING, interface->state))
675b7d8514cSAlexander Duyck return;
676b7d8514cSAlexander Duyck
677b7d8514cSAlexander Duyck if (interface->host_ready)
678b7d8514cSAlexander Duyck fm10k_watchdog_host_is_ready(interface);
679b7d8514cSAlexander Duyck else
680b7d8514cSAlexander Duyck fm10k_watchdog_host_not_ready(interface);
681b7d8514cSAlexander Duyck
682b7d8514cSAlexander Duyck /* update stats only once every second */
683b7d8514cSAlexander Duyck if (time_is_before_jiffies(interface->next_stats_update))
684b7d8514cSAlexander Duyck fm10k_update_stats(interface);
685b7d8514cSAlexander Duyck
686b7d8514cSAlexander Duyck /* flush any uncompleted work */
687b7d8514cSAlexander Duyck fm10k_watchdog_flush_tx(interface);
688b7d8514cSAlexander Duyck }
689b7d8514cSAlexander Duyck
690b7d8514cSAlexander Duyck /**
691b7d8514cSAlexander Duyck * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
692363656ebSJacob Keller * @interface: pointer to the device interface structure
693b7d8514cSAlexander Duyck *
694b7d8514cSAlexander Duyck * This function serves two purposes. First it strobes the interrupt lines
695b7d8514cSAlexander Duyck * in order to make certain interrupts are occurring. Secondly it sets the
696b7d8514cSAlexander Duyck * bits needed to check for TX hangs. As a result we should immediately
697b7d8514cSAlexander Duyck * determine if a hang has occurred.
698b7d8514cSAlexander Duyck */
fm10k_check_hang_subtask(struct fm10k_intfc * interface)699b7d8514cSAlexander Duyck static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
700b7d8514cSAlexander Duyck {
701b7d8514cSAlexander Duyck /* If we're down or resetting, just bail */
70246929557SJacob Keller if (test_bit(__FM10K_DOWN, interface->state) ||
70346929557SJacob Keller test_bit(__FM10K_RESETTING, interface->state))
704b7d8514cSAlexander Duyck return;
705b7d8514cSAlexander Duyck
706b7d8514cSAlexander Duyck /* rate limit tx hang checks to only once every 2 seconds */
707b7d8514cSAlexander Duyck if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
708b7d8514cSAlexander Duyck return;
709b7d8514cSAlexander Duyck interface->next_tx_hang_check = jiffies + (2 * HZ);
710b7d8514cSAlexander Duyck
711b7d8514cSAlexander Duyck if (netif_carrier_ok(interface->netdev)) {
712d56b4779SJacob Keller int i;
713d56b4779SJacob Keller
714b7d8514cSAlexander Duyck /* Force detection of hung controller */
715b7d8514cSAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++)
716b7d8514cSAlexander Duyck set_check_for_tx_hang(interface->tx_ring[i]);
717b7d8514cSAlexander Duyck
718b7d8514cSAlexander Duyck /* Rearm all in-use q_vectors for immediate firing */
719b7d8514cSAlexander Duyck for (i = 0; i < interface->num_q_vectors; i++) {
720b7d8514cSAlexander Duyck struct fm10k_q_vector *qv = interface->q_vector[i];
721b7d8514cSAlexander Duyck
722b7d8514cSAlexander Duyck if (!qv->tx.count && !qv->rx.count)
723b7d8514cSAlexander Duyck continue;
724b7d8514cSAlexander Duyck writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
725b7d8514cSAlexander Duyck }
726b7d8514cSAlexander Duyck }
727b7d8514cSAlexander Duyck }
728b7d8514cSAlexander Duyck
729b7d8514cSAlexander Duyck /**
730b7d8514cSAlexander Duyck * fm10k_service_task - manages and runs subtasks
731b7d8514cSAlexander Duyck * @work: pointer to work_struct containing our data
732b7d8514cSAlexander Duyck **/
fm10k_service_task(struct work_struct * work)733b7d8514cSAlexander Duyck static void fm10k_service_task(struct work_struct *work)
734b7d8514cSAlexander Duyck {
735b7d8514cSAlexander Duyck struct fm10k_intfc *interface;
736b7d8514cSAlexander Duyck
737b7d8514cSAlexander Duyck interface = container_of(work, struct fm10k_intfc, service_task);
738b7d8514cSAlexander Duyck
7390b40f457SJacob Keller /* Check whether we're detached first */
7400b40f457SJacob Keller fm10k_detach_subtask(interface);
7410b40f457SJacob Keller
7428427672aSJacob Keller /* tasks run even when interface is down */
743b7d8514cSAlexander Duyck fm10k_mbx_subtask(interface);
744b7d8514cSAlexander Duyck fm10k_reset_subtask(interface);
745b7d8514cSAlexander Duyck
746b7d8514cSAlexander Duyck /* tasks only run when interface is up */
747b7d8514cSAlexander Duyck fm10k_watchdog_subtask(interface);
748b7d8514cSAlexander Duyck fm10k_check_hang_subtask(interface);
749b7d8514cSAlexander Duyck
750b7d8514cSAlexander Duyck /* release lock on service events to allow scheduling next event */
751b7d8514cSAlexander Duyck fm10k_service_event_complete(interface);
752b7d8514cSAlexander Duyck }
753b7d8514cSAlexander Duyck
/**
 * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
 * @work: pointer to work_struct containing our data
 *
 * This work item handles sending MAC/VLAN updates to the switch manager. When
 * the interface is up, it will attempt to queue mailbox messages to the
 * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the
 * mailbox is full, it will reschedule itself to try again in a short while.
 * This ensures that the driver does not overload the switch mailbox with too
 * many simultaneous requests, causing an unnecessary reset.
 **/
static void fm10k_macvlan_task(struct work_struct *work)
{
	struct fm10k_macvlan_request *item;
	struct fm10k_intfc *interface;
	struct delayed_work *dwork;
	struct list_head *requests;
	struct fm10k_hw *hw;
	unsigned long flags;

	dwork = to_delayed_work(work);
	interface = container_of(dwork, struct fm10k_intfc, macvlan_task);
	hw = &interface->hw;
	requests = &interface->macvlan_requests;

	do {
		/* Pop the first item off the list under the spinlock; the
		 * list may be appended to concurrently by other contexts.
		 */
		spin_lock_irqsave(&interface->macvlan_lock, flags);
		item = list_first_entry_or_null(requests,
						struct fm10k_macvlan_request,
						list);
		if (item)
			list_del_init(&item->list);

		spin_unlock_irqrestore(&interface->macvlan_lock, flags);

		/* We have no more items to process */
		if (!item)
			goto done;

		fm10k_mbx_lock(interface);

		/* Check that we have plenty of space to send the message. We
		 * want to ensure that the mailbox stays low enough to avoid a
		 * change in the host state, otherwise we may see spurious
		 * link up / link down notifications.
		 */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
			/* drain the mailbox and remember there is still
			 * outstanding work so the task gets rescheduled
			 */
			hw->mbx.ops.process(hw, &hw->mbx);
			set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
			fm10k_mbx_unlock(interface);

			/* Put the request back on the list */
			spin_lock_irqsave(&interface->macvlan_lock, flags);
			list_add(&item->list, requests);
			spin_unlock_irqrestore(&interface->macvlan_lock, flags);
			break;
		}

		/* dispatch the request to the matching MAC op; unknown
		 * types are silently dropped
		 */
		switch (item->type) {
		case FM10K_MC_MAC_REQUEST:
			hw->mac.ops.update_mc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set);
			break;
		case FM10K_UC_MAC_REQUEST:
			hw->mac.ops.update_uc_addr(hw,
						   item->mac.glort,
						   item->mac.addr,
						   item->mac.vid,
						   item->set,
						   0);
			break;
		case FM10K_VLAN_REQUEST:
			hw->mac.ops.update_vlan(hw,
						item->vlan.vid,
						item->vlan.vsi,
						item->set);
			break;
		default:
			break;
		}

		fm10k_mbx_unlock(interface);

		/* Free the item now that we've sent the update */
		kfree(item);
	} while (true);

done:
	WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state));

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);

	/* If a MAC/VLAN request was scheduled since we started, we should
	 * re-schedule. However, there is no reason to re-schedule if there is
	 * no work to do.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}
859fc917368SJacob Keller
/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer so the hot path can bump it directly */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt; queues without a q_vector leave it disabled */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS;
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS once per ring lifetime */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}
9213abaae42SAlexander Duyck
9223abaae42SAlexander Duyck /**
9233abaae42SAlexander Duyck * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
9243abaae42SAlexander Duyck * @interface: board private structure
9253abaae42SAlexander Duyck * @ring: structure containing ring specific data
9263abaae42SAlexander Duyck *
9273abaae42SAlexander Duyck * Verify the Tx descriptor ring is ready for transmit.
9283abaae42SAlexander Duyck **/
fm10k_enable_tx_ring(struct fm10k_intfc * interface,struct fm10k_ring * ring)9293abaae42SAlexander Duyck static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
9303abaae42SAlexander Duyck struct fm10k_ring *ring)
9313abaae42SAlexander Duyck {
9323abaae42SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
9333abaae42SAlexander Duyck int wait_loop = 10;
9343abaae42SAlexander Duyck u32 txdctl;
9353abaae42SAlexander Duyck u8 reg_idx = ring->reg_idx;
9363abaae42SAlexander Duyck
9373abaae42SAlexander Duyck /* if we are already enabled just exit */
9383abaae42SAlexander Duyck if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
9393abaae42SAlexander Duyck return;
9403abaae42SAlexander Duyck
9413abaae42SAlexander Duyck /* poll to verify queue is enabled */
9423abaae42SAlexander Duyck do {
9433abaae42SAlexander Duyck usleep_range(1000, 2000);
9443abaae42SAlexander Duyck txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
9453abaae42SAlexander Duyck } while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
9463abaae42SAlexander Duyck if (!wait_loop)
9473abaae42SAlexander Duyck netif_err(interface, drv, interface->netdev,
9483abaae42SAlexander Duyck "Could not enable Tx Queue %d\n", reg_idx);
9493abaae42SAlexander Duyck }
9503abaae42SAlexander Duyck
9513abaae42SAlexander Duyck /**
9523abaae42SAlexander Duyck * fm10k_configure_tx - Configure Transmit Unit after Reset
9533abaae42SAlexander Duyck * @interface: board private structure
9543abaae42SAlexander Duyck *
9553abaae42SAlexander Duyck * Configure the Tx unit of the MAC after a reset.
9563abaae42SAlexander Duyck **/
fm10k_configure_tx(struct fm10k_intfc * interface)9573abaae42SAlexander Duyck static void fm10k_configure_tx(struct fm10k_intfc *interface)
9583abaae42SAlexander Duyck {
9593abaae42SAlexander Duyck int i;
9603abaae42SAlexander Duyck
9613abaae42SAlexander Duyck /* Setup the HW Tx Head and Tail descriptor pointers */
9623abaae42SAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++)
9633abaae42SAlexander Duyck fm10k_configure_tx_ring(interface, interface->tx_ring[i]);
9643abaae42SAlexander Duyck
9653abaae42SAlexander Duyck /* poll here to verify that Tx rings are now enabled */
9663abaae42SAlexander Duyck for (i = 0; i < interface->num_tx_queues; i++)
9673abaae42SAlexander Duyck fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
9683abaae42SAlexander Duyck }
9693abaae42SAlexander Duyck
/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state; preserve the
	 * other RXQCTL bits via read-modify-write
	 */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl &= ~FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer so the refill path can bump it directly */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	/* PFC configuration overrides the plain pause setting */
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & BIT(ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt; queues without a q_vector leave it disabled */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS;
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
	rxqctl |= FM10K_RXQCTL_ENABLE;
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}
10543abaae42SAlexander Duyck
10553abaae42SAlexander Duyck /**
10563abaae42SAlexander Duyck * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
10573abaae42SAlexander Duyck * @interface: board private structure
10583abaae42SAlexander Duyck *
10593abaae42SAlexander Duyck * Configure the drop enable bits for the Rx rings.
10603abaae42SAlexander Duyck **/
fm10k_update_rx_drop_en(struct fm10k_intfc * interface)10613abaae42SAlexander Duyck void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
10623abaae42SAlexander Duyck {
10633abaae42SAlexander Duyck struct fm10k_hw *hw = &interface->hw;
10643abaae42SAlexander Duyck u8 rx_pause = interface->rx_pause;
10653abaae42SAlexander Duyck int i;
10663abaae42SAlexander Duyck
10679f801abcSAlexander Duyck #ifdef CONFIG_DCB
10683abaae42SAlexander Duyck if (interface->pfc_en)
10693abaae42SAlexander Duyck rx_pause = interface->pfc_en;
10703abaae42SAlexander Duyck
10713abaae42SAlexander Duyck #endif
10723abaae42SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++) {
10733abaae42SAlexander Duyck struct fm10k_ring *ring = interface->rx_ring[i];
10743abaae42SAlexander Duyck u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
10753abaae42SAlexander Duyck u8 reg_idx = ring->reg_idx;
10763abaae42SAlexander Duyck
1077fcdb0a99SBruce Allan if (!(rx_pause & BIT(ring->qos_pc)))
10783abaae42SAlexander Duyck rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
10793abaae42SAlexander Duyck
10803abaae42SAlexander Duyck fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
10813abaae42SAlexander Duyck }
10823abaae42SAlexander Duyck }
10833abaae42SAlexander Duyck
/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	/* UDP hashing is opt-in via driver flags */
	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	/* share the glort space with any active macvlan offloads */
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}
11483abaae42SAlexander Duyck
11493abaae42SAlexander Duyck /**
11503abaae42SAlexander Duyck * fm10k_configure_rx - Configure Receive Unit after Reset
11513abaae42SAlexander Duyck * @interface: board private structure
11523abaae42SAlexander Duyck *
11533abaae42SAlexander Duyck * Configure the Rx unit of the MAC after a reset.
11543abaae42SAlexander Duyck **/
fm10k_configure_rx(struct fm10k_intfc * interface)11553abaae42SAlexander Duyck static void fm10k_configure_rx(struct fm10k_intfc *interface)
11563abaae42SAlexander Duyck {
11573abaae42SAlexander Duyck int i;
11583abaae42SAlexander Duyck
11593abaae42SAlexander Duyck /* Configure SWPRI to PC map */
11603abaae42SAlexander Duyck fm10k_configure_swpri_map(interface);
11613abaae42SAlexander Duyck
11623abaae42SAlexander Duyck /* Configure RSS and DGLORT map */
11633abaae42SAlexander Duyck fm10k_configure_dglort(interface);
11643abaae42SAlexander Duyck
11653abaae42SAlexander Duyck /* Setup the HW Rx Head and Tail descriptor pointers */
11663abaae42SAlexander Duyck for (i = 0; i < interface->num_rx_queues; i++)
11673abaae42SAlexander Duyck fm10k_configure_rx_ring(interface, interface->rx_ring[i]);
11683abaae42SAlexander Duyck
11693abaae42SAlexander Duyck /* possible poll here to verify that Rx rings are now enabled */
11703abaae42SAlexander Duyck }
11713abaae42SAlexander Duyck
fm10k_napi_enable_all(struct fm10k_intfc * interface)117218283cadSAlexander Duyck static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
117318283cadSAlexander Duyck {
117418283cadSAlexander Duyck struct fm10k_q_vector *q_vector;
117518283cadSAlexander Duyck int q_idx;
117618283cadSAlexander Duyck
117718283cadSAlexander Duyck for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
117818283cadSAlexander Duyck q_vector = interface->q_vector[q_idx];
117918283cadSAlexander Duyck napi_enable(&q_vector->napi);
118018283cadSAlexander Duyck }
118118283cadSAlexander Duyck }
118218283cadSAlexander Duyck
fm10k_msix_clean_rings(int __always_unused irq,void * data)1183de445199SJeff Kirsher static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
118418283cadSAlexander Duyck {
118518283cadSAlexander Duyck struct fm10k_q_vector *q_vector = data;
118618283cadSAlexander Duyck
118718283cadSAlexander Duyck if (q_vector->rx.count || q_vector->tx.count)
1188de125aaeSAlexander Duyck napi_schedule_irqoff(&q_vector->napi);
118918283cadSAlexander Duyck
119018283cadSAlexander Duyck return IRQ_HANDLED;
119118283cadSAlexander Duyck }
119218283cadSAlexander Duyck
/* VF mailbox MSI-X handler: re-arm the interrupt, process any pending
 * mailbox messages, then hand the rest of the work to the service task.
 */
static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	/* service upstream mailbox; trylock avoids blocking in IRQ context */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	/* ask the service task to re-validate host state */
	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}
12155cb8db4aSAlexander Duyck
/* expand a fault constant into a "case FOO: error = "FOO"; break" arm */
#define FM10K_ERR_MSG(type) case (type): error = #type; break
/* log a decoded hardware fault and, when it originated from a VF,
 * reset that VF's logical port and queue resources so it can recover
 */
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	/* translate the (fault unit, fault->type) pair to a printable name */
	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		/* fault->func of 0 presumably identifies the PF itself;
		 * VF indices are offset by one (NOTE(review): confirm)
		 */
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}
130118283cadSAlexander Duyck
/* scan the fault bits of EICR and report each latched fault */
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

	/* keep only the fault-cause bits, then walk them LSB first; each
	 * bit position corresponds to the next fault register block
	 */
	eicr &= FM10K_EICR_FAULT_MASK;

	for (type = FM10K_PCA_FAULT; eicr;
	     type += FM10K_FAULT_SIZE, eicr >>= 1) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}
132618283cadSAlexander Duyck
/* Recover Rx queues that the hardware disabled after a max-hold-time
 * (receiver overrun) event.  The MAXHOLDQ registers carry one bit per
 * queue, 32 queues per register, covering queues 0-255 (registers 0-7).
 */
static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	/* nothing to do unless the MAXHOLDTIME cause bit is set */
	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	/* read the topmost MAXHOLDQ word and write the value back
	 * (presumably write-1-to-clear — confirm against datasheet)
	 */
	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);

	/* walk queues 255 down to 0; "maxholdq *= 2" shifts the bit for
	 * the current queue into bit 31 on each pass
	 */
	for (q = 255;;) {
		if (maxholdq & BIT(31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				/* PF-owned queue: count and rewrite RXDCTL
				 * to restore it
				 */
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				/* queue belongs to a VF: just count it */
				interface->rx_overrun_vf++;
			}
		}

		maxholdq *= 2;
		/* no bits left in this word: jump q down to the next
		 * multiple of 32 so the next word is loaded immediately
		 */
		if (!maxholdq)
			q &= ~(32 - 1);

		if (!q)
			break;

		/* reload a MAXHOLDQ word every 32 queues */
		if (q-- % 32)
			continue;

		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}
136518283cadSAlexander Duyck
/* PF mailbox/"other cause" MSI-X handler: acknowledge causes, log any
 * hardware faults, recover overrun queues, service the mailbox, then
 * defer remaining work to the service task and re-arm the interrupt.
 */
static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes; trylock avoids blocking in IRQ context */
	if (fm10k_mbx_trylock(interface)) {
		s32 err = mbx->ops.process(hw, mbx);

		/* mailbox processing may demand a full interface reset */
		if (err == FM10K_ERR_RESET_REQUESTED)
			set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

		/* handle VFLRE events */
		fm10k_iov_event(interface);
		fm10k_mbx_unlock(interface);
	}

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = true;

	/* validate host state, and handle VF mailboxes in the service task */
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	return IRQ_HANDLED;
}
142018283cadSAlexander Duyck
/* tear down the mailbox interrupt: disconnect the mailbox, disable the
 * relevant interrupt causes (PF only), mask the ITR register for the
 * mailbox vector, and free the IRQ itself
 */
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int itr_reg;

	/* no mailbox IRQ to free if MSI-X is not enabled */
	if (!interface->msix_entries)
		return;

	entry = &interface->msix_entries[FM10K_MBX_VECTOR];

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		/* VFs only mask their own per-vector ITR register */
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}
145618283cadSAlexander Duyck
fm10k_mbx_mac_addr(struct fm10k_hw * hw,u32 ** results,struct fm10k_mbx_info * mbx)14575cb8db4aSAlexander Duyck static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
14585cb8db4aSAlexander Duyck struct fm10k_mbx_info *mbx)
14595cb8db4aSAlexander Duyck {
14605cb8db4aSAlexander Duyck bool vlan_override = hw->mac.vlan_override;
14615cb8db4aSAlexander Duyck u16 default_vid = hw->mac.default_vid;
14625cb8db4aSAlexander Duyck struct fm10k_intfc *interface;
14635cb8db4aSAlexander Duyck s32 err;
14645cb8db4aSAlexander Duyck
14655cb8db4aSAlexander Duyck err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
14665cb8db4aSAlexander Duyck if (err)
14675cb8db4aSAlexander Duyck return err;
14685cb8db4aSAlexander Duyck
14695cb8db4aSAlexander Duyck interface = container_of(hw, struct fm10k_intfc, hw);
14705cb8db4aSAlexander Duyck
14715cb8db4aSAlexander Duyck /* MAC was changed so we need reset */
14725cb8db4aSAlexander Duyck if (is_valid_ether_addr(hw->mac.perm_addr) &&
14736186ddf0SJacob Keller !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
14743ee7b3a3SJacob Keller set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
14755cb8db4aSAlexander Duyck
14765cb8db4aSAlexander Duyck /* VLAN override was changed, or default VLAN changed */
14775cb8db4aSAlexander Duyck if ((vlan_override != hw->mac.vlan_override) ||
14785cb8db4aSAlexander Duyck (default_vid != hw->mac.default_vid))
14793ee7b3a3SJacob Keller set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
14805cb8db4aSAlexander Duyck
14815cb8db4aSAlexander Duyck return 0;
14825cb8db4aSAlexander Duyck }
14835cb8db4aSAlexander Duyck
148418283cadSAlexander Duyck /* generic error handler for mailbox issues */
fm10k_mbx_error(struct fm10k_hw * hw,u32 ** results,struct fm10k_mbx_info __always_unused * mbx)148518283cadSAlexander Duyck static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
1486de445199SJeff Kirsher struct fm10k_mbx_info __always_unused *mbx)
148718283cadSAlexander Duyck {
148818283cadSAlexander Duyck struct fm10k_intfc *interface;
148918283cadSAlexander Duyck struct pci_dev *pdev;
149018283cadSAlexander Duyck
149118283cadSAlexander Duyck interface = container_of(hw, struct fm10k_intfc, hw);
149218283cadSAlexander Duyck pdev = interface->pdev;
149318283cadSAlexander Duyck
149418283cadSAlexander Duyck dev_err(&pdev->dev, "Unknown message ID %u\n",
149518283cadSAlexander Duyck **results & FM10K_TLV_ID_MASK);
149618283cadSAlexander Duyck
149718283cadSAlexander Duyck return 0;
149818283cadSAlexander Duyck }
149918283cadSAlexander Duyck
/* dispatch table mapping VF mailbox message IDs to their handlers;
 * unmatched messages fall through to fm10k_mbx_error
 */
static const struct fm10k_msg_data vf_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
15065cb8db4aSAlexander Duyck
/* register VF mailbox handlers, request the mailbox MSI-X vector, and
 * program the interrupt mapping/enable registers
 */
static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* map all of the interrupt sources */
	fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}
15395cb8db4aSAlexander Duyck
/* PF handler for LPORT_MAP mailbox responses: a switch-manager error
 * forces link down and clears the DGLORT map; a successful reply updates
 * the map and requests a reset when the port count changed
 */
static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	struct fm10k_intfc *interface;
	u32 dglort_map = hw->mac.dglort_map;
	s32 err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* did the switch manager return an error status for our request? */
	err = fm10k_msg_err_pf(hw, results, mbx);
	if (!err && hw->swapi.status) {
		/* force link down for a reasonable delay */
		interface->link_down_event = jiffies + (2 * HZ);
		set_bit(__FM10K_LINK_DOWN, interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

		fm10k_service_event_schedule(interface);

		/* prevent overloading kernel message buffer */
		if (interface->lport_map_failed)
			return 0;

		interface->lport_map_failed = true;

		/* both warnings print for PEP_NOT_SCHEDULED: the specific
		 * cause first, then the generic failure with status code
		 */
		if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
			dev_warn(&interface->pdev->dev,
				 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
		dev_warn(&interface->pdev->dev,
			 "request logical port map failed: %d\n",
			 hw->swapi.status);

		return 0;
	}

	err = fm10k_msg_lport_map_pf(hw, results, mbx);
	if (err)
		return err;

	interface->lport_map_failed = false;

	/* we need to reset if port count was just updated */
	if (dglort_map != hw->mac.dglort_map)
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	return 0;
}
158818283cadSAlexander Duyck
fm10k_update_pvid(struct fm10k_hw * hw,u32 ** results,struct fm10k_mbx_info __always_unused * mbx)158918283cadSAlexander Duyck static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
1590de445199SJeff Kirsher struct fm10k_mbx_info __always_unused *mbx)
159118283cadSAlexander Duyck {
159218283cadSAlexander Duyck struct fm10k_intfc *interface;
159318283cadSAlexander Duyck u16 glort, pvid;
159418283cadSAlexander Duyck u32 pvid_update;
159518283cadSAlexander Duyck s32 err;
159618283cadSAlexander Duyck
159718283cadSAlexander Duyck err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
159818283cadSAlexander Duyck &pvid_update);
159918283cadSAlexander Duyck if (err)
160018283cadSAlexander Duyck return err;
160118283cadSAlexander Duyck
160218283cadSAlexander Duyck /* extract values from the pvid update */
160318283cadSAlexander Duyck glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
160418283cadSAlexander Duyck pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
160518283cadSAlexander Duyck
160618283cadSAlexander Duyck /* if glort is not valid return error */
160718283cadSAlexander Duyck if (!fm10k_glort_valid_pf(hw, glort))
160818283cadSAlexander Duyck return FM10K_ERR_PARAM;
160918283cadSAlexander Duyck
1610aa502b4aSJacob Keller /* verify VLAN ID is valid */
161118283cadSAlexander Duyck if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
161218283cadSAlexander Duyck return FM10K_ERR_PARAM;
161318283cadSAlexander Duyck
161418283cadSAlexander Duyck interface = container_of(hw, struct fm10k_intfc, hw);
161518283cadSAlexander Duyck
1616883a9ccbSAlexander Duyck /* check to see if this belongs to one of the VFs */
1617883a9ccbSAlexander Duyck err = fm10k_iov_update_pvid(interface, glort, pvid);
1618883a9ccbSAlexander Duyck if (!err)
1619883a9ccbSAlexander Duyck return 0;
1620883a9ccbSAlexander Duyck
162118283cadSAlexander Duyck /* we need to reset if default VLAN was just updated */
162218283cadSAlexander Duyck if (pvid != hw->mac.default_vid)
16233ee7b3a3SJacob Keller set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
162418283cadSAlexander Duyck
162518283cadSAlexander Duyck hw->mac.default_vid = pvid;
162618283cadSAlexander Duyck
162718283cadSAlexander Duyck return 0;
162818283cadSAlexander Duyck }
162918283cadSAlexander Duyck
/* dispatch table mapping PF mailbox message IDs to their handlers;
 * unmatched messages fall through to fm10k_mbx_error
 */
static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
163918283cadSAlexander Duyck
/* register PF mailbox handlers, request the mailbox MSI-X vector, map all
 * PF interrupt causes onto that vector, and enable them
 */
static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
	u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}
169018283cadSAlexander Duyck
fm10k_mbx_request_irq(struct fm10k_intfc * interface)169118283cadSAlexander Duyck int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
169218283cadSAlexander Duyck {
169318283cadSAlexander Duyck struct fm10k_hw *hw = &interface->hw;
169418283cadSAlexander Duyck int err;
169518283cadSAlexander Duyck
169618283cadSAlexander Duyck /* enable Mailbox cause */
16975cb8db4aSAlexander Duyck if (hw->mac.type == fm10k_mac_pf)
169818283cadSAlexander Duyck err = fm10k_mbx_request_irq_pf(interface);
16995cb8db4aSAlexander Duyck else
17005cb8db4aSAlexander Duyck err = fm10k_mbx_request_irq_vf(interface);
1701e00e23bcSAlexander Duyck if (err)
1702e00e23bcSAlexander Duyck return err;
170318283cadSAlexander Duyck
170418283cadSAlexander Duyck /* connect mailbox */
170518283cadSAlexander Duyck err = hw->mbx.ops.connect(hw, &hw->mbx);
170618283cadSAlexander Duyck
1707e00e23bcSAlexander Duyck /* if the mailbox failed to connect, then free IRQ */
1708e00e23bcSAlexander Duyck if (err)
1709e00e23bcSAlexander Duyck fm10k_mbx_free_irq(interface);
1710e00e23bcSAlexander Duyck
171118283cadSAlexander Duyck return err;
171218283cadSAlexander Duyck }
171318283cadSAlexander Duyck
/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct msix_entry *entry;

	/* start one past the last queue-vector MSI-X entry; the loop
	 * below pre-decrements before each use
	 */
	entry = &interface->msix_entries[NON_Q_VECTORS + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		/* vectors with no rings never had an IRQ requested in
		 * fm10k_qv_request_irq, so there is nothing to free
		 */
		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts by masking the ITR register before
		 * releasing the IRQ itself
		 */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}
174618283cadSAlexander Duyck
/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 *
 * Returns 0 on success; on failure every previously requested IRQ is
 * released again and the request_irq error code is returned.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	/* queue-vector MSI-X entries follow the non-queue ("other" and
	 * mailbox) entries
	 */
	entry = &interface->msix_entries[NON_Q_VECTORS];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector after the rings it services */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}

		/* Assign ITR register to q_vector; the PF and VF expose
		 * their interrupt throttle registers at different offsets
		 */
		q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
				&interface->uc_addr[FM10K_ITR(entry->entry)] :
				&interface->uc_addr[FM10K_VFITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSIX interrupt Error: %d\n",
				  err);
			goto err_out;
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		/* unused vectors never got an IRQ above; nothing to undo */
		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		/* disable interrupts */
		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}
183218283cadSAlexander Duyck
/**
 * fm10k_up - bring the interface up and start traffic
 * @interface: board private structure
 *
 * Enables the Tx/Rx DMA engines, configures the descriptor rings and
 * interrupt moderation, re-enables statistics capture and NAPI polling,
 * restores the Rx filters, and finally starts the transmit queues and
 * kicks the service timer.  The ordering mirrors (in reverse) the
 * teardown performed by fm10k_down.
 **/
void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* enable statistics capture again */
	clear_bit(__FM10K_UPDATING_STATS, interface->state);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer now */
	hw->mac.get_host_state = true;
	mod_timer(&interface->service_timer, jiffies);
}
1868504c5eacSAlexander Duyck
fm10k_napi_disable_all(struct fm10k_intfc * interface)186918283cadSAlexander Duyck static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
187018283cadSAlexander Duyck {
187118283cadSAlexander Duyck struct fm10k_q_vector *q_vector;
187218283cadSAlexander Duyck int q_idx;
187318283cadSAlexander Duyck
187418283cadSAlexander Duyck for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
187518283cadSAlexander Duyck q_vector = interface->q_vector[q_idx];
187618283cadSAlexander Duyck napi_disable(&q_vector->napi);
187718283cadSAlexander Duyck }
187818283cadSAlexander Duyck }
187918283cadSAlexander Duyck
/**
 * fm10k_down - quiesce the interface and stop the hardware
 * @interface: board private structure
 *
 * Stops transmit activity, disables NAPI and statistics updates, waits
 * for pending Tx DMA to drain, and shuts down the Tx/Rx DMA engines.
 * Safe to call more than once; only the caller that actually sets
 * __FM10K_DOWN performs the teardown, later callers return immediately.
 **/
void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err, i = 0, count = 0;

	/* signal that we are down to the interrupt handler and service task */
	if (test_and_set_bit(__FM10K_DOWN, interface->state))
		return;

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* prevent updating statistics while we're down; wait for any
	 * in-flight update to finish before claiming the bit
	 */
	while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
		usleep_range(1000, 2000);

	/* skip waiting for TX DMA if we lost PCIe link */
	if (FM10K_REMOVED(hw->hw_addr))
		goto skip_tx_dma_drain;

	/* In some rare circumstances it can take a while for Tx queues to
	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
	 * until the Tx queues have emptied, or until a number of retries. If
	 * we fail to clear within the retry loop, we will issue a warning
	 * indicating that Tx DMA is probably hung. Note this means we call
	 * .stop_hw() twice but this shouldn't cause any problems.
	 */
	err = hw->mac.ops.stop_hw(hw);
	if (err != FM10K_ERR_REQUESTS_PENDING)
		goto skip_tx_dma_drain;

#define TX_DMA_DRAIN_RETRIES 25
	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
		usleep_range(10000, 20000);

		/* start checking at the last ring to have pending Tx */
		for (; i < interface->num_tx_queues; i++)
			if (fm10k_get_tx_pending(interface->tx_ring[i], false))
				break;

		/* if all the queues are drained, we can break now */
		if (i == interface->num_tx_queues)
			break;
	}

	if (count >= TX_DMA_DRAIN_RETRIES)
		dev_err(&interface->pdev->dev,
			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
			count);
skip_tx_dma_drain:
	/* Disable DMA engine for Tx/Rx */
	err = hw->mac.ops.stop_hw(hw);
	if (err == FM10K_ERR_REQUESTS_PENDING)
		dev_err(&interface->pdev->dev,
			"due to pending requests hw was not shut down gracefully\n");
	else if (err)
		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
	fm10k_clean_all_rx_rings(interface);
}
1957504c5eacSAlexander Duyck
/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI device ID entry
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, negative error code on hardware reset/init
 * failure or if no valid MAC address could be established.
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	u32 rss_key[FM10K_RSSRK_SIZE];
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api - PF vs VF ops were selected via the device table */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Setup IOV handlers */
	if (fi->iov_ops)
		memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
		return err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Set upper limit on IOV VFs that can be allocated */
	pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);

	/* Start with random Ethernet address as a fallback in case the
	 * hardware read below fails
	 */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	eth_hw_addr_set(netdev, hw->mac.addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.addr);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* initialize DCBNL interface */
	fm10k_dcbnl_set_ops(netdev);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_TX_ITR_DEFAULT;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* Initialize the MAC/VLAN queue */
	INIT_LIST_HEAD(&interface->macvlan_requests);

	/* seed the RSS redirection key */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	memcpy(interface->rssrk, rss_key, sizeof(rss_key));

	/* Initialize the mailbox lock */
	spin_lock_init(&interface->mbx_lock);
	spin_lock_init(&interface->macvlan_lock);

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, interface->state);
	set_bit(__FM10K_UPDATING_STATS, interface->state);

	return 0;
}
20820e7b3644SAlexander Duyck
/**
 * fm10k_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in fm10k_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * fm10k_probe initializes an interface identified by a pci_dev structure.
 * The OS initialization, configuring of the interface private structure,
 * and a hardware reset occur.
 **/
static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct fm10k_intfc *interface;
	int err;

	/* refuse to load onto a device that is already in an error state */
	if (pdev->error_state != pci_channel_io_normal) {
		dev_err(&pdev->dev,
			"PCI device still in an error state. Unable to load...\n");
		return -EIO;
	}

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"PCI enable device failed: %d\n", err);
		return err;
	}

	/* prefer a 48-bit DMA mask, fall back to 32-bit */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: %d\n", err);
		goto err_dma;
	}

	err = pci_request_mem_regions(pdev, fm10k_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed: %d\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_netdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	interface = netdev_priv(netdev);
	pci_set_drvdata(pdev, interface);

	interface->netdev = netdev;
	interface->pdev = pdev;

	/* map BAR 0 for register access */
	interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
				     FM10K_UC_ADDR_SIZE);
	if (!interface->uc_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	err = fm10k_sw_init(interface, ent);
	if (err)
		goto err_sw_init;

	/* enable debugfs support */
	fm10k_dbg_intfc_init(interface);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_sw_init;

	/* the mbx interrupt might attempt to schedule the service task, so we
	 * must ensure it is disabled since we haven't yet requested the timer
	 * or work item.
	 */
	set_bit(__FM10K_SERVICE_DISABLE, interface->state);

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_interrupt;

	/* final check of hardware state before registering the interface */
	err = fm10k_hw_ready(interface);
	if (err)
		goto err_register;

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* stop all the transmit queues from transmitting until link is up */
	netif_tx_stop_all_queues(netdev);

	/* Initialize service timer and service task late in order to avoid
	 * cleanup issues.
	 */
	timer_setup(&interface->service_timer, fm10k_service_timer, 0);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* Setup the MAC/VLAN queue */
	INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);

	/* kick off service timer now, even when interface is down */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	/* print warning for non-optimal configurations */
	pcie_print_link_status(interface->pdev);

	/* report MAC address for logging */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);

	/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
	fm10k_iov_configure(pdev, 0);

	/* clear the service task disable bit and kick off service task */
	clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
	fm10k_service_event_schedule(interface);

	return 0;

err_register:
	fm10k_mbx_free_irq(interface);
err_mbx_interrupt:
	fm10k_clear_queueing_scheme(interface);
err_sw_init:
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
2233b3890e30SAlexander Duyck
/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	/* stop deferred work before tearing anything down */
	del_timer_sync(&interface->service_timer);

	fm10k_stop_service_event(interface);
	fm10k_stop_macvlan_task(interface);

	/* Remove all pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* release VFs */
	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_mem_regions(pdev);

	pci_disable_device(pdev);
}
2282b3890e30SAlexander Duyck
/**
 * fm10k_prepare_suspend - Stop background activity ahead of a suspend/reset
 * @interface: board private structure
 *
 * Quiesces the service task and prepares the software state for a reset.
 * If the reset preparation actually ran, record that in the interface
 * state so the matching resume path knows recovery is required.
 */
static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
{
	/* the watchdog task reads from registers, which might appear like
	 * a surprise remove if the PCIe device is disabled while we're
	 * stopped. We stop the watchdog task until after we resume software
	 * activity.
	 *
	 * Note that the MAC/VLAN task will be stopped as part of preparing
	 * for reset so we don't need to handle it here.
	 */
	fm10k_stop_service_event(interface);

	if (fm10k_prepare_for_reset(interface))
		set_bit(__FM10K_RESET_SUSPENDED, interface->state);
}
2298dc4b76c0SJacob Keller
/**
 * fm10k_handle_resume - Restore software state after a suspend/reset
 * @interface: board private structure
 *
 * Counterpart to fm10k_prepare_suspend(): re-runs the reset handler,
 * re-binds hardware statistics, and restarts the background tasks.
 *
 * Return: 0 on success, or the negative error code from
 * fm10k_handle_reset() on failure.
 */
static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Even if we didn't properly prepare for reset in
	 * fm10k_prepare_suspend, we'll attempt to resume anyways.
	 */
	if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
		dev_warn(&interface->pdev->dev,
			 "Device was shut down as part of suspend... Attempting to recover\n");

	/* reset statistics starting values */
	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

	err = fm10k_handle_reset(interface);
	if (err)
		return err;

	/* assume host is not ready, to prevent race with watchdog in case we
	 * actually don't have connection to the switch
	 */
	interface->host_ready = false;
	fm10k_watchdog_host_not_ready(interface);

	/* force link to stay down for a second to prevent link flutter */
	interface->link_down_event = jiffies + (HZ);
	set_bit(__FM10K_LINK_DOWN, interface->state);

	/* restart the service task */
	fm10k_start_service_event(interface);

	/* Restart the MAC/VLAN request queue in-case of outstanding events */
	fm10k_macvlan_schedule(interface);

	return 0;
}
2336dc4b76c0SJacob Keller
233719ae1b3fSAlexander Duyck /**
23388249c47cSJacob Keller * fm10k_resume - Generic PM resume hook
23398249c47cSJacob Keller * @dev: generic device structure
234019ae1b3fSAlexander Duyck *
23418249c47cSJacob Keller * Generic PM hook used when waking the device from a low power state after
23428249c47cSJacob Keller * suspend or hibernation. This function does not need to handle lower PCIe
23438249c47cSJacob Keller * device state as the stack takes care of that for us.
234419ae1b3fSAlexander Duyck **/
fm10k_resume(struct device * dev)2345b200bfd6SArnd Bergmann static int __maybe_unused fm10k_resume(struct device *dev)
234619ae1b3fSAlexander Duyck {
23477f53be6fSChuhong Yuan struct fm10k_intfc *interface = dev_get_drvdata(dev);
234819ae1b3fSAlexander Duyck struct net_device *netdev = interface->netdev;
234919ae1b3fSAlexander Duyck struct fm10k_hw *hw = &interface->hw;
23508249c47cSJacob Keller int err;
235119ae1b3fSAlexander Duyck
235219ae1b3fSAlexander Duyck /* refresh hw_addr in case it was dropped */
235319ae1b3fSAlexander Duyck hw->hw_addr = interface->uc_addr;
235419ae1b3fSAlexander Duyck
23557756c08bSJacob Keller err = fm10k_handle_resume(interface);
23567756c08bSJacob Keller if (err)
235719ae1b3fSAlexander Duyck return err;
2358883a9ccbSAlexander Duyck
235919ae1b3fSAlexander Duyck netif_device_attach(netdev);
236019ae1b3fSAlexander Duyck
236119ae1b3fSAlexander Duyck return 0;
236219ae1b3fSAlexander Duyck }
236319ae1b3fSAlexander Duyck
236419ae1b3fSAlexander Duyck /**
23658249c47cSJacob Keller * fm10k_suspend - Generic PM suspend hook
23668249c47cSJacob Keller * @dev: generic device structure
236719ae1b3fSAlexander Duyck *
23688249c47cSJacob Keller * Generic PM hook used when setting the device into a low power state for
23698249c47cSJacob Keller * system suspend or hibernation. This function does not need to handle lower
23708249c47cSJacob Keller * PCIe device state as the stack takes care of that for us.
237119ae1b3fSAlexander Duyck **/
fm10k_suspend(struct device * dev)2372b200bfd6SArnd Bergmann static int __maybe_unused fm10k_suspend(struct device *dev)
237319ae1b3fSAlexander Duyck {
23747f53be6fSChuhong Yuan struct fm10k_intfc *interface = dev_get_drvdata(dev);
237519ae1b3fSAlexander Duyck struct net_device *netdev = interface->netdev;
237619ae1b3fSAlexander Duyck
237719ae1b3fSAlexander Duyck netif_device_detach(netdev);
237819ae1b3fSAlexander Duyck
23797756c08bSJacob Keller fm10k_prepare_suspend(interface);
238019ae1b3fSAlexander Duyck
238119ae1b3fSAlexander Duyck return 0;
238219ae1b3fSAlexander Duyck }
238319ae1b3fSAlexander Duyck
238419ae1b3fSAlexander Duyck /**
238519ae1b3fSAlexander Duyck * fm10k_io_error_detected - called when PCI error is detected
238619ae1b3fSAlexander Duyck * @pdev: Pointer to PCI device
238719ae1b3fSAlexander Duyck * @state: The current pci connection state
238819ae1b3fSAlexander Duyck *
238919ae1b3fSAlexander Duyck * This function is called after a PCI bus error affecting
239019ae1b3fSAlexander Duyck * this device has been detected.
239119ae1b3fSAlexander Duyck */
fm10k_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)239219ae1b3fSAlexander Duyck static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
239319ae1b3fSAlexander Duyck pci_channel_state_t state)
239419ae1b3fSAlexander Duyck {
239519ae1b3fSAlexander Duyck struct fm10k_intfc *interface = pci_get_drvdata(pdev);
239619ae1b3fSAlexander Duyck struct net_device *netdev = interface->netdev;
239719ae1b3fSAlexander Duyck
239819ae1b3fSAlexander Duyck netif_device_detach(netdev);
239919ae1b3fSAlexander Duyck
240019ae1b3fSAlexander Duyck if (state == pci_channel_io_perm_failure)
240119ae1b3fSAlexander Duyck return PCI_ERS_RESULT_DISCONNECT;
240219ae1b3fSAlexander Duyck
2403820c91aaSJacob Keller fm10k_prepare_suspend(interface);
24041e4c32f3SJacob Keller
240519ae1b3fSAlexander Duyck /* Request a slot reset. */
240619ae1b3fSAlexander Duyck return PCI_ERS_RESULT_NEED_RESET;
240719ae1b3fSAlexander Duyck }
240819ae1b3fSAlexander Duyck
240919ae1b3fSAlexander Duyck /**
241019ae1b3fSAlexander Duyck * fm10k_io_slot_reset - called after the pci bus has been reset.
241119ae1b3fSAlexander Duyck * @pdev: Pointer to PCI device
241219ae1b3fSAlexander Duyck *
241319ae1b3fSAlexander Duyck * Restart the card from scratch, as if from a cold-boot.
241419ae1b3fSAlexander Duyck */
fm10k_io_slot_reset(struct pci_dev * pdev)241519ae1b3fSAlexander Duyck static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
241619ae1b3fSAlexander Duyck {
241719ae1b3fSAlexander Duyck pci_ers_result_t result;
241819ae1b3fSAlexander Duyck
2419e59a393dSJacob Keller if (pci_reenable_device(pdev)) {
242019ae1b3fSAlexander Duyck dev_err(&pdev->dev,
242119ae1b3fSAlexander Duyck "Cannot re-enable PCI device after reset.\n");
242219ae1b3fSAlexander Duyck result = PCI_ERS_RESULT_DISCONNECT;
242319ae1b3fSAlexander Duyck } else {
242419ae1b3fSAlexander Duyck pci_set_master(pdev);
242519ae1b3fSAlexander Duyck pci_restore_state(pdev);
242619ae1b3fSAlexander Duyck
242719ae1b3fSAlexander Duyck /* After second error pci->state_saved is false, this
242819ae1b3fSAlexander Duyck * resets it so EEH doesn't break.
242919ae1b3fSAlexander Duyck */
243019ae1b3fSAlexander Duyck pci_save_state(pdev);
243119ae1b3fSAlexander Duyck
243219ae1b3fSAlexander Duyck pci_wake_from_d3(pdev, false);
243319ae1b3fSAlexander Duyck
243419ae1b3fSAlexander Duyck result = PCI_ERS_RESULT_RECOVERED;
243519ae1b3fSAlexander Duyck }
243619ae1b3fSAlexander Duyck
243719ae1b3fSAlexander Duyck return result;
243819ae1b3fSAlexander Duyck }
243919ae1b3fSAlexander Duyck
244019ae1b3fSAlexander Duyck /**
244119ae1b3fSAlexander Duyck * fm10k_io_resume - called when traffic can start flowing again.
244219ae1b3fSAlexander Duyck * @pdev: Pointer to PCI device
244319ae1b3fSAlexander Duyck *
244419ae1b3fSAlexander Duyck * This callback is called when the error recovery driver tells us that
244519ae1b3fSAlexander Duyck * its OK to resume normal operation.
244619ae1b3fSAlexander Duyck */
fm10k_io_resume(struct pci_dev * pdev)244719ae1b3fSAlexander Duyck static void fm10k_io_resume(struct pci_dev *pdev)
244819ae1b3fSAlexander Duyck {
244919ae1b3fSAlexander Duyck struct fm10k_intfc *interface = pci_get_drvdata(pdev);
245019ae1b3fSAlexander Duyck struct net_device *netdev = interface->netdev;
2451820c91aaSJacob Keller int err;
245219ae1b3fSAlexander Duyck
2453820c91aaSJacob Keller err = fm10k_handle_resume(interface);
245419ae1b3fSAlexander Duyck
2455820c91aaSJacob Keller if (err)
2456820c91aaSJacob Keller dev_warn(&pdev->dev,
245787be9892SJacob Keller "%s failed: %d\n", __func__, err);
2458820c91aaSJacob Keller else
245919ae1b3fSAlexander Duyck netif_device_attach(netdev);
246019ae1b3fSAlexander Duyck }
246119ae1b3fSAlexander Duyck
246287be9892SJacob Keller /**
246387be9892SJacob Keller * fm10k_io_reset_prepare - called when PCI function is about to be reset
246487be9892SJacob Keller * @pdev: Pointer to PCI device
246587be9892SJacob Keller *
246687be9892SJacob Keller * This callback is called when the PCI function is about to be reset,
246787be9892SJacob Keller * allowing the device driver to prepare for it.
246887be9892SJacob Keller */
fm10k_io_reset_prepare(struct pci_dev * pdev)2469775755edSChristoph Hellwig static void fm10k_io_reset_prepare(struct pci_dev *pdev)
24700593186aSJacob Keller {
24710593186aSJacob Keller /* warn incase we have any active VF devices */
24720593186aSJacob Keller if (pci_num_vf(pdev))
24730593186aSJacob Keller dev_warn(&pdev->dev,
24740593186aSJacob Keller "PCIe FLR may cause issues for any active VF devices\n");
2475775755edSChristoph Hellwig fm10k_prepare_suspend(pci_get_drvdata(pdev));
24760593186aSJacob Keller }
24770593186aSJacob Keller
247887be9892SJacob Keller /**
247987be9892SJacob Keller * fm10k_io_reset_done - called when PCI function has finished resetting
248087be9892SJacob Keller * @pdev: Pointer to PCI device
248187be9892SJacob Keller *
248287be9892SJacob Keller * This callback is called just after the PCI function is reset, such as via
248387be9892SJacob Keller * /sys/class/net/<enpX>/device/reset or similar.
248487be9892SJacob Keller */
fm10k_io_reset_done(struct pci_dev * pdev)2485775755edSChristoph Hellwig static void fm10k_io_reset_done(struct pci_dev *pdev)
2486775755edSChristoph Hellwig {
2487775755edSChristoph Hellwig struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2488775755edSChristoph Hellwig int err = fm10k_handle_resume(interface);
2489775755edSChristoph Hellwig
24900593186aSJacob Keller if (err) {
24910593186aSJacob Keller dev_warn(&pdev->dev,
249287be9892SJacob Keller "%s failed: %d\n", __func__, err);
24930593186aSJacob Keller netif_device_detach(interface->netdev);
24940593186aSJacob Keller }
24950593186aSJacob Keller }
24960593186aSJacob Keller
/* PCI error recovery and FLR callbacks registered with the PCI core */
static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
	.reset_prepare = fm10k_io_reset_prepare,
	.reset_done = fm10k_io_reset_done,
};
250419ae1b3fSAlexander Duyck
/* generic PM ops: same suspend/resume pair for suspend and hibernation */
static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);

static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
	.driver = {
		.pm		= &fm10k_pm_ops,
	},
	.sriov_configure	= fm10k_iov_configure,
	.err_handler		= &fm10k_err_handler
};
2518b3890e30SAlexander Duyck
/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 *
 * Return: result of pci_register_driver() — 0 on success, negative
 * errno on failure.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}
2528b3890e30SAlexander Duyck
/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}
2538