/* drivers/net/ethernet/sfc/efx.c (revision 09bae3b6) */
1 /****************************************************************************
2  * Driver for Solarflare network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2005-2013 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/notifier.h>
17 #include <linux/ip.h>
18 #include <linux/tcp.h>
19 #include <linux/in.h>
20 #include <linux/ethtool.h>
21 #include <linux/topology.h>
22 #include <linux/gfp.h>
23 #include <linux/aer.h>
24 #include <linux/interrupt.h>
25 #include "net_driver.h"
26 #include <net/gre.h>
27 #include <net/udp_tunnel.h>
28 #include "efx.h"
29 #include "nic.h"
30 #include "io.h"
31 #include "selftest.h"
32 #include "sriov.h"
33 
34 #include "mcdi.h"
35 #include "mcdi_pcol.h"
36 #include "workarounds.h"
37 
38 /**************************************************************************
39  *
40  * Type name strings
41  *
42  **************************************************************************
43  */
44 
45 /* Loopback mode names (see LOOPBACK_MODE()) */
46 const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
47 const char *const efx_loopback_mode_names[] = {
48 	[LOOPBACK_NONE]		= "NONE",
49 	[LOOPBACK_DATA]		= "DATAPATH",
50 	[LOOPBACK_GMAC]		= "GMAC",
51 	[LOOPBACK_XGMII]	= "XGMII",
52 	[LOOPBACK_XGXS]		= "XGXS",
53 	[LOOPBACK_XAUI]		= "XAUI",
54 	[LOOPBACK_GMII]		= "GMII",
55 	[LOOPBACK_SGMII]	= "SGMII",
56 	[LOOPBACK_XGBR]		= "XGBR",
57 	[LOOPBACK_XFI]		= "XFI",
58 	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
59 	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
60 	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
61 	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
62 	[LOOPBACK_GPHY]		= "GPHY",
63 	[LOOPBACK_PHYXS]	= "PHYXS",
64 	[LOOPBACK_PCS]		= "PCS",
65 	[LOOPBACK_PMAPMD]	= "PMA/PMD",
66 	[LOOPBACK_XPORT]	= "XPORT",
67 	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
68 	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
69 	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
70 	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
71 	[LOOPBACK_GMII_WS]	= "GMII_WS",
72 	[LOOPBACK_XFI_WS]	= "XFI_WS",
73 	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
74 	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
75 };
76 
77 const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
78 const char *const efx_reset_type_names[] = {
79 	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
80 	[RESET_TYPE_ALL]                = "ALL",
81 	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
82 	[RESET_TYPE_WORLD]              = "WORLD",
83 	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
84 	[RESET_TYPE_DATAPATH]           = "DATAPATH",
85 	[RESET_TYPE_MC_BIST]		= "MC_BIST",
86 	[RESET_TYPE_DISABLE]            = "DISABLE",
87 	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
88 	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
89 	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
90 	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
91 	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
92 	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
93 };
94 
95 /* UDP tunnel type names */
96 static const char *const efx_udp_tunnel_type_names[] = {
97 	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
98 	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
99 };
100 
101 void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
102 {
103 	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
104 	    efx_udp_tunnel_type_names[type] != NULL)
105 		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
106 	else
107 		snprintf(buf, buflen, "type %d", type);
108 }
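
/* A minimal usage sketch (hypothetical caller, not part of this file):
 * format a tunnel type for a log message.  The buffer must allow for
 * the fallback "type %d" with a 16-bit value, i.e. at least 11 bytes.
 *
 *	char typebuf[16];
 *
 *	efx_get_udp_tunnel_type_name(TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN,
 *				     typebuf, sizeof(typebuf));
 *	netif_dbg(efx, drv, efx->net_dev, "%s tunnel offload\n", typebuf);
 */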
109 
110 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
111  * queued onto this work queue. This is not a per-nic work queue, because
112  * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
113  */
114 static struct workqueue_struct *reset_workqueue;
115 
116 /* How often and how many times to poll for a reset while waiting for a
117  * BIST that another function started to complete (up to 10 seconds in all).
118  */
119 #define BIST_WAIT_DELAY_MS	100
120 #define BIST_WAIT_DELAY_COUNT	100
121 
122 /**************************************************************************
123  *
124  * Configurable values
125  *
126  *************************************************************************/
127 
128 /*
129  * Use separate channels for TX and RX events
130  *
131  * Set this to 1 to use separate channels for TX and RX. It allows us
132  * to control interrupt affinity separately for TX and RX.
133  *
134  * This is only used in MSI-X interrupt mode.
135  */
136 bool efx_separate_tx_channels;
137 module_param(efx_separate_tx_channels, bool, 0444);
138 MODULE_PARM_DESC(efx_separate_tx_channels,
139 		 "Use separate channels for TX and RX");
140 
141 /* This is the weight assigned to each of the (per-channel) virtual
142  * NAPI devices.
143  */
144 static int napi_weight = 64;
145 
146 /* This is the time (in jiffies) between invocations of the hardware
147  * monitor.
148  * On Falcon-based NICs, this will:
149  * - Check the on-board hardware monitor;
150  * - Poll the link state and reconfigure the hardware as necessary.
151  * On Siena-based NICs for POWER systems with EEH support, this will give EEH a
152  * chance to start.
153  */
154 static unsigned int efx_monitor_interval = 1 * HZ;
155 
156 /* Initial interrupt moderation settings.  They can be modified after
157  * module load with ethtool.
158  *
159  * The default for RX should strike a balance between increasing the
160  * round-trip latency and reducing overhead.
161  */
162 static unsigned int rx_irq_mod_usec = 60;
163 
164 /* Initial interrupt moderation settings.  They can be modified after
165  * module load with ethtool.
166  *
167  * This default is chosen to ensure that a 10G link does not go idle
168  * while a TX queue is stopped after it has become full.  A queue is
169  * restarted when it drops below half full.  The time this takes (assuming
170  * worst case 3 descriptors per packet, 1024 descriptors and ~1.2 usec
171  * per full-size frame at 10Gb/s) is   512 / 3 * 1.2 = 205 usec.
172  */
173 static unsigned int tx_irq_mod_usec = 150;
174 
175 /* This is the first interrupt mode to try out of:
176  * 0 => MSI-X
177  * 1 => MSI
178  * 2 => legacy
179  */
180 static unsigned int interrupt_mode;
181 
182 /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
183  * i.e. the number of CPUs among which we may distribute simultaneous
184  * interrupt handling.
185  *
186  * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
187  * The default (0) means to assign an interrupt to each core.
188  */
189 static unsigned int rss_cpus;
190 module_param(rss_cpus, uint, 0444);
191 MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
192 
193 static bool phy_flash_cfg;
194 module_param(phy_flash_cfg, bool, 0644);
195 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
196 
197 static unsigned int irq_adapt_low_thresh = 8000;
198 module_param(irq_adapt_low_thresh, uint, 0644);
199 MODULE_PARM_DESC(irq_adapt_low_thresh,
200 		 "Threshold score for reducing IRQ moderation");
201 
202 static unsigned int irq_adapt_high_thresh = 16000;
203 module_param(irq_adapt_high_thresh, uint, 0644);
204 MODULE_PARM_DESC(irq_adapt_high_thresh,
205 		 "Threshold score for increasing IRQ moderation");
206 
207 static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
208 			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
209 			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
210 			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
211 module_param(debug, uint, 0);
212 MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
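
/* For illustration (assuming the driver is built as the usual "sfc"
 * module), the read-only parameters above are given at load time:
 *
 *	modprobe sfc rss_cpus=4 efx_separate_tx_channels=1
 *
 * debug is a bitmask of the NETIF_MSG_* values from
 * <linux/netdevice.h>; writable parameters such as phy_flash_cfg can
 * also be changed later through /sys/module/sfc/parameters/.
 */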
213 
214 /**************************************************************************
215  *
216  * Utility functions and prototypes
217  *
218  *************************************************************************/
219 
220 static int efx_soft_enable_interrupts(struct efx_nic *efx);
221 static void efx_soft_disable_interrupts(struct efx_nic *efx);
222 static void efx_remove_channel(struct efx_channel *channel);
223 static void efx_remove_channels(struct efx_nic *efx);
224 static const struct efx_channel_type efx_default_channel_type;
225 static void efx_remove_port(struct efx_nic *efx);
226 static void efx_init_napi_channel(struct efx_channel *channel);
227 static void efx_fini_napi(struct efx_nic *efx);
228 static void efx_fini_napi_channel(struct efx_channel *channel);
229 static void efx_fini_struct(struct efx_nic *efx);
230 static void efx_start_all(struct efx_nic *efx);
231 static void efx_stop_all(struct efx_nic *efx);
232 
233 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
234 	do {						\
235 		if ((efx->state == STATE_READY) ||	\
236 		    (efx->state == STATE_RECOVERY) ||	\
237 		    (efx->state == STATE_DISABLED))	\
238 			ASSERT_RTNL();			\
239 	} while (0)
240 
241 static int efx_check_disabled(struct efx_nic *efx)
242 {
243 	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
244 		netif_err(efx, drv, efx->net_dev,
245 			  "device is disabled due to earlier errors\n");
246 		return -EIO;
247 	}
248 	return 0;
249 }
250 
251 /**************************************************************************
252  *
253  * Event queue processing
254  *
255  *************************************************************************/
256 
257 /* Process channel's event queue
258  *
259  * This function is responsible for processing the event queue of a
260  * single channel.  The caller must guarantee that this function will
261  * never be concurrently called more than once on the same channel,
262  * though different channels may be being processed concurrently.
263  */
264 static int efx_process_channel(struct efx_channel *channel, int budget)
265 {
266 	struct efx_tx_queue *tx_queue;
267 	struct list_head rx_list;
268 	int spent;
269 
270 	if (unlikely(!channel->enabled))
271 		return 0;
272 
273 	/* Prepare the batch receive list */
274 	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
275 	INIT_LIST_HEAD(&rx_list);
276 	channel->rx_list = &rx_list;
277 
278 	efx_for_each_channel_tx_queue(tx_queue, channel) {
279 		tx_queue->pkts_compl = 0;
280 		tx_queue->bytes_compl = 0;
281 	}
282 
283 	spent = efx_nic_process_eventq(channel, budget);
284 	if (spent && efx_channel_has_rx_queue(channel)) {
285 		struct efx_rx_queue *rx_queue =
286 			efx_channel_get_rx_queue(channel);
287 
288 		efx_rx_flush_packet(channel);
289 		efx_fast_push_rx_descriptors(rx_queue, true);
290 	}
291 
292 	/* Update BQL */
293 	efx_for_each_channel_tx_queue(tx_queue, channel) {
294 		if (tx_queue->bytes_compl) {
295 			netdev_tx_completed_queue(tx_queue->core_txq,
296 				tx_queue->pkts_compl, tx_queue->bytes_compl);
297 		}
298 	}
299 
300 	/* Receive any packets we queued up */
301 	netif_receive_skb_list(channel->rx_list);
302 	channel->rx_list = NULL;
303 
304 	return spent;
305 }
306 
307 /* NAPI poll handler
308  *
309  * NAPI guarantees serialisation of polls of the same device, which
310  * provides the guarantee required by efx_process_channel().
311  */
312 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
313 {
314 	int step = efx->irq_mod_step_us;
315 
316 	if (channel->irq_mod_score < irq_adapt_low_thresh) {
317 		if (channel->irq_moderation_us > step) {
318 			channel->irq_moderation_us -= step;
319 			efx->type->push_irq_moderation(channel);
320 		}
321 	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
322 		if (channel->irq_moderation_us <
323 		    efx->irq_rx_moderation_us) {
324 			channel->irq_moderation_us += step;
325 			efx->type->push_irq_moderation(channel);
326 		}
327 	}
328 
329 	channel->irq_count = 0;
330 	channel->irq_mod_score = 0;
331 }
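
/* Worked example with the defaults above: efx_poll() invokes this once
 * per 1000 interrupts on a channel.  channel->irq_mod_score, which
 * roughly tracks the completions handled in that window, is compared
 * against the thresholds: below irq_adapt_low_thresh (8000) the
 * moderation delay is stepped down by efx->irq_mod_step_us to favour
 * latency; above irq_adapt_high_thresh (16000) it is stepped up, while
 * staying below the ethtool-configured efx->irq_rx_moderation_us.
 */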
332 
333 static int efx_poll(struct napi_struct *napi, int budget)
334 {
335 	struct efx_channel *channel =
336 		container_of(napi, struct efx_channel, napi_str);
337 	struct efx_nic *efx = channel->efx;
338 	int spent;
339 
340 	netif_vdbg(efx, intr, efx->net_dev,
341 		   "channel %d NAPI poll executing on CPU %d\n",
342 		   channel->channel, raw_smp_processor_id());
343 
344 	spent = efx_process_channel(channel, budget);
345 
346 	if (spent < budget) {
347 		if (efx_channel_has_rx_queue(channel) &&
348 		    efx->irq_rx_adaptive &&
349 		    unlikely(++channel->irq_count == 1000)) {
350 			efx_update_irq_mod(efx, channel);
351 		}
352 
353 #ifdef CONFIG_RFS_ACCEL
354 		/* Perhaps expire some ARFS filters */
355 		schedule_work(&channel->filter_work);
356 #endif
357 
358 		/* There is no race here; although napi_disable() will
359 		 * only wait for napi_complete(), this isn't a problem
360 		 * since efx_nic_eventq_read_ack() will have no effect if
361 		 * interrupts have already been disabled.
362 		 */
363 		if (napi_complete_done(napi, spent))
364 			efx_nic_eventq_read_ack(channel);
365 	}
366 
367 	return spent;
368 }
369 
370 /* Create event queue
371  * Event queue memory allocations are done only once.  If the channel
372  * is reset, the memory buffer will be reused; this guards against
373  * errors during channel reset and also simplifies interrupt handling.
374  */
375 static int efx_probe_eventq(struct efx_channel *channel)
376 {
377 	struct efx_nic *efx = channel->efx;
378 	unsigned long entries;
379 
380 	netif_dbg(efx, probe, efx->net_dev,
381 		  "chan %d create event queue\n", channel->channel);
382 
383 	/* Build an event queue with room for one event per tx and rx buffer,
384 	 * plus some extra for link state events and MCDI completions. */
385 	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
386 	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
387 	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
388 
389 	return efx_nic_probe_eventq(channel);
390 }
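
/* Worked example: assuming the default EFX_DEFAULT_DMAQ_SIZE of 1024
 * for both rings (set in efx_probe_all()), this gives
 * roundup_pow_of_two(1024 + 1024 + 128) = 4096 entries and an
 * eventq_mask of 0xfff.
 */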
391 
392 /* Prepare channel's event queue */
393 static int efx_init_eventq(struct efx_channel *channel)
394 {
395 	struct efx_nic *efx = channel->efx;
396 	int rc;
397 
398 	EFX_WARN_ON_PARANOID(channel->eventq_init);
399 
400 	netif_dbg(efx, drv, efx->net_dev,
401 		  "chan %d init event queue\n", channel->channel);
402 
403 	rc = efx_nic_init_eventq(channel);
404 	if (rc == 0) {
405 		efx->type->push_irq_moderation(channel);
406 		channel->eventq_read_ptr = 0;
407 		channel->eventq_init = true;
408 	}
409 	return rc;
410 }
411 
412 /* Enable event queue processing and NAPI */
413 void efx_start_eventq(struct efx_channel *channel)
414 {
415 	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
416 		  "chan %d start event queue\n", channel->channel);
417 
418 	/* Make sure the NAPI handler sees the enabled flag set */
419 	channel->enabled = true;
420 	smp_wmb();
421 
422 	napi_enable(&channel->napi_str);
423 	efx_nic_eventq_read_ack(channel);
424 }
425 
426 /* Disable event queue processing and NAPI */
427 void efx_stop_eventq(struct efx_channel *channel)
428 {
429 	if (!channel->enabled)
430 		return;
431 
432 	napi_disable(&channel->napi_str);
433 	channel->enabled = false;
434 }
435 
436 static void efx_fini_eventq(struct efx_channel *channel)
437 {
438 	if (!channel->eventq_init)
439 		return;
440 
441 	netif_dbg(channel->efx, drv, channel->efx->net_dev,
442 		  "chan %d fini event queue\n", channel->channel);
443 
444 	efx_nic_fini_eventq(channel);
445 	channel->eventq_init = false;
446 }
447 
448 static void efx_remove_eventq(struct efx_channel *channel)
449 {
450 	netif_dbg(channel->efx, drv, channel->efx->net_dev,
451 		  "chan %d remove event queue\n", channel->channel);
452 
453 	efx_nic_remove_eventq(channel);
454 }
455 
456 /**************************************************************************
457  *
458  * Channel handling
459  *
460  *************************************************************************/
461 
462 /* Allocate and initialise a channel structure. */
463 static struct efx_channel *
464 efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
465 {
466 	struct efx_channel *channel;
467 	struct efx_rx_queue *rx_queue;
468 	struct efx_tx_queue *tx_queue;
469 	int j;
470 
471 	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
472 	if (!channel)
473 		return NULL;
474 
475 	channel->efx = efx;
476 	channel->channel = i;
477 	channel->type = &efx_default_channel_type;
478 
479 	for (j = 0; j < EFX_TXQ_TYPES; j++) {
480 		tx_queue = &channel->tx_queue[j];
481 		tx_queue->efx = efx;
482 		tx_queue->queue = i * EFX_TXQ_TYPES + j;
483 		tx_queue->channel = channel;
484 	}
485 
486 #ifdef CONFIG_RFS_ACCEL
487 	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
488 #endif
489 
490 	rx_queue = &channel->rx_queue;
491 	rx_queue->efx = efx;
492 	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
493 
494 	return channel;
495 }
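
/* For illustration: hardware TX queue numbers are allocated in blocks
 * of EFX_TXQ_TYPES per channel, so (assuming EFX_TXQ_TYPES == 4)
 * channel 2 owns tx_queue->queue values 8..11, one per queue type.
 */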
496 
497 /* Allocate and initialise a channel structure, copying parameters
498  * (but not resources) from an old channel structure.
499  */
500 static struct efx_channel *
501 efx_copy_channel(const struct efx_channel *old_channel)
502 {
503 	struct efx_channel *channel;
504 	struct efx_rx_queue *rx_queue;
505 	struct efx_tx_queue *tx_queue;
506 	int j;
507 
508 	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
509 	if (!channel)
510 		return NULL;
511 
512 	*channel = *old_channel;
513 
514 	channel->napi_dev = NULL;
515 	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
516 	channel->napi_str.napi_id = 0;
517 	channel->napi_str.state = 0;
518 	memset(&channel->eventq, 0, sizeof(channel->eventq));
519 
520 	for (j = 0; j < EFX_TXQ_TYPES; j++) {
521 		tx_queue = &channel->tx_queue[j];
522 		if (tx_queue->channel)
523 			tx_queue->channel = channel;
524 		tx_queue->buffer = NULL;
525 		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
526 	}
527 
528 	rx_queue = &channel->rx_queue;
529 	rx_queue->buffer = NULL;
530 	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
531 	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
532 #ifdef CONFIG_RFS_ACCEL
533 	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
534 #endif
535 
536 	return channel;
537 }
538 
539 static int efx_probe_channel(struct efx_channel *channel)
540 {
541 	struct efx_tx_queue *tx_queue;
542 	struct efx_rx_queue *rx_queue;
543 	int rc;
544 
545 	netif_dbg(channel->efx, probe, channel->efx->net_dev,
546 		  "creating channel %d\n", channel->channel);
547 
548 	rc = channel->type->pre_probe(channel);
549 	if (rc)
550 		goto fail;
551 
552 	rc = efx_probe_eventq(channel);
553 	if (rc)
554 		goto fail;
555 
556 	efx_for_each_channel_tx_queue(tx_queue, channel) {
557 		rc = efx_probe_tx_queue(tx_queue);
558 		if (rc)
559 			goto fail;
560 	}
561 
562 	efx_for_each_channel_rx_queue(rx_queue, channel) {
563 		rc = efx_probe_rx_queue(rx_queue);
564 		if (rc)
565 			goto fail;
566 	}
567 
568 	channel->rx_list = NULL;
569 
570 	return 0;
571 
572 fail:
573 	efx_remove_channel(channel);
574 	return rc;
575 }
576 
577 static void
578 efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
579 {
580 	struct efx_nic *efx = channel->efx;
581 	const char *type;
582 	int number;
583 
584 	number = channel->channel;
585 	if (efx->tx_channel_offset == 0) {
586 		type = "";
587 	} else if (channel->channel < efx->tx_channel_offset) {
588 		type = "-rx";
589 	} else {
590 		type = "-tx";
591 		number -= efx->tx_channel_offset;
592 	}
593 	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
594 }
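
/* Examples of the resulting names for a device called "eth0": with
 * combined channels (tx_channel_offset == 0), channel 3 is "eth0-3";
 * with efx_separate_tx_channels and tx_channel_offset == 4, channel 1
 * becomes "eth0-rx-1" and channel 5 becomes "eth0-tx-1".
 */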
595 
596 static void efx_set_channel_names(struct efx_nic *efx)
597 {
598 	struct efx_channel *channel;
599 
600 	efx_for_each_channel(channel, efx)
601 		channel->type->get_name(channel,
602 					efx->msi_context[channel->channel].name,
603 					sizeof(efx->msi_context[0].name));
604 }
605 
606 static int efx_probe_channels(struct efx_nic *efx)
607 {
608 	struct efx_channel *channel;
609 	int rc;
610 
611 	/* Restart special buffer allocation */
612 	efx->next_buffer_table = 0;
613 
614 	/* Probe channels in reverse, so that any 'extra' channels
615 	 * use the start of the buffer table. This allows the traffic
616 	 * channels to be resized without moving them or wasting the
617 	 * entries before them.
618 	 */
619 	efx_for_each_channel_rev(channel, efx) {
620 		rc = efx_probe_channel(channel);
621 		if (rc) {
622 			netif_err(efx, probe, efx->net_dev,
623 				  "failed to create channel %d\n",
624 				  channel->channel);
625 			goto fail;
626 		}
627 	}
628 	efx_set_channel_names(efx);
629 
630 	return 0;
631 
632 fail:
633 	efx_remove_channels(efx);
634 	return rc;
635 }
636 
637 /* Channels are shutdown and reinitialised whilst the NIC is running
638  * to propagate configuration changes (mtu, checksum offload), or
639  * to clear hardware error conditions
640  */
641 static void efx_start_datapath(struct efx_nic *efx)
642 {
643 	netdev_features_t old_features = efx->net_dev->features;
644 	bool old_rx_scatter = efx->rx_scatter;
645 	struct efx_tx_queue *tx_queue;
646 	struct efx_rx_queue *rx_queue;
647 	struct efx_channel *channel;
648 	size_t rx_buf_len;
649 
650 	/* Calculate the rx buffer allocation parameters required to
651 	 * support the current MTU, including padding for header
652 	 * alignment and overruns.
653 	 */
654 	efx->rx_dma_len = (efx->rx_prefix_size +
655 			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
656 			   efx->type->rx_buffer_padding);
657 	rx_buf_len = (sizeof(struct efx_rx_page_state) +
658 		      efx->rx_ip_align + efx->rx_dma_len);
659 	if (rx_buf_len <= PAGE_SIZE) {
660 		efx->rx_scatter = efx->type->always_rx_scatter;
661 		efx->rx_buffer_order = 0;
662 	} else if (efx->type->can_rx_scatter) {
663 		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
664 		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
665 			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
666 				       EFX_RX_BUF_ALIGNMENT) >
667 			     PAGE_SIZE);
668 		efx->rx_scatter = true;
669 		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
670 		efx->rx_buffer_order = 0;
671 	} else {
672 		efx->rx_scatter = false;
673 		efx->rx_buffer_order = get_order(rx_buf_len);
674 	}
675 
676 	efx_rx_config_page_split(efx);
677 	if (efx->rx_buffer_order)
678 		netif_dbg(efx, drv, efx->net_dev,
679 			  "RX buf len=%u; page order=%u batch=%u\n",
680 			  efx->rx_dma_len, efx->rx_buffer_order,
681 			  efx->rx_pages_per_batch);
682 	else
683 		netif_dbg(efx, drv, efx->net_dev,
684 			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
685 			  efx->rx_dma_len, efx->rx_page_buf_step,
686 			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
687 
688 	/* Restore previously fixed features in hw_features and remove
689 	 * features which are fixed now
690 	 */
691 	efx->net_dev->hw_features |= efx->net_dev->features;
692 	efx->net_dev->hw_features &= ~efx->fixed_features;
693 	efx->net_dev->features |= efx->fixed_features;
694 	if (efx->net_dev->features != old_features)
695 		netdev_features_change(efx->net_dev);
696 
697 	/* RX filters may also have scatter-enabled flags */
698 	if (efx->rx_scatter != old_rx_scatter)
699 		efx->type->filter_update_rx_scatter(efx);
700 
701 	/* We must keep at least one descriptor in a TX ring empty.
702 	 * We could avoid this when the queue size does not exactly
703 	 * match the hardware ring size, but it's not that important.
704 	 * Therefore we stop the queue when one more skb might fill
705 	 * the ring completely.  We wake it when half way back to
706 	 * empty.
707 	 */
708 	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
709 	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
710 
711 	/* Initialise the channels */
712 	efx_for_each_channel(channel, efx) {
713 		efx_for_each_channel_tx_queue(tx_queue, channel) {
714 			efx_init_tx_queue(tx_queue);
715 			atomic_inc(&efx->active_queues);
716 		}
717 
718 		efx_for_each_channel_rx_queue(rx_queue, channel) {
719 			efx_init_rx_queue(rx_queue);
720 			atomic_inc(&efx->active_queues);
721 			efx_stop_eventq(channel);
722 			efx_fast_push_rx_descriptors(rx_queue, false);
723 			efx_start_eventq(channel);
724 		}
725 
726 		WARN_ON(channel->rx_pkt_n_frags);
727 	}
728 
729 	efx_ptp_start_datapath(efx);
730 
731 	if (netif_device_present(efx->net_dev))
732 		netif_tx_wake_all_queues(efx->net_dev);
733 }
734 
735 static void efx_stop_datapath(struct efx_nic *efx)
736 {
737 	struct efx_channel *channel;
738 	struct efx_tx_queue *tx_queue;
739 	struct efx_rx_queue *rx_queue;
740 	int rc;
741 
742 	EFX_ASSERT_RESET_SERIALISED(efx);
743 	BUG_ON(efx->port_enabled);
744 
745 	efx_ptp_stop_datapath(efx);
746 
747 	/* Stop RX refill */
748 	efx_for_each_channel(channel, efx) {
749 		efx_for_each_channel_rx_queue(rx_queue, channel)
750 			rx_queue->refill_enabled = false;
751 	}
752 
753 	efx_for_each_channel(channel, efx) {
754 		/* RX packet processing is pipelined, so wait for the
755 		 * NAPI handler to complete.  At least event queue 0
756 		 * might be kept active by non-data events, so don't
757 		 * use napi_synchronize() but actually disable NAPI
758 		 * temporarily.
759 		 */
760 		if (efx_channel_has_rx_queue(channel)) {
761 			efx_stop_eventq(channel);
762 			efx_start_eventq(channel);
763 		}
764 	}
765 
766 	rc = efx->type->fini_dmaq(efx);
767 	if (rc) {
768 		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
769 	} else {
770 		netif_dbg(efx, drv, efx->net_dev,
771 			  "successfully flushed all queues\n");
772 	}
773 
774 	efx_for_each_channel(channel, efx) {
775 		efx_for_each_channel_rx_queue(rx_queue, channel)
776 			efx_fini_rx_queue(rx_queue);
777 		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
778 			efx_fini_tx_queue(tx_queue);
779 	}
780 }
781 
782 static void efx_remove_channel(struct efx_channel *channel)
783 {
784 	struct efx_tx_queue *tx_queue;
785 	struct efx_rx_queue *rx_queue;
786 
787 	netif_dbg(channel->efx, drv, channel->efx->net_dev,
788 		  "destroy chan %d\n", channel->channel);
789 
790 	efx_for_each_channel_rx_queue(rx_queue, channel)
791 		efx_remove_rx_queue(rx_queue);
792 	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
793 		efx_remove_tx_queue(tx_queue);
794 	efx_remove_eventq(channel);
795 	channel->type->post_remove(channel);
796 }
797 
798 static void efx_remove_channels(struct efx_nic *efx)
799 {
800 	struct efx_channel *channel;
801 
802 	efx_for_each_channel(channel, efx)
803 		efx_remove_channel(channel);
804 }
805 
806 int
807 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
808 {
809 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
810 	u32 old_rxq_entries, old_txq_entries;
811 	unsigned i, next_buffer_table = 0;
812 	int rc, rc2;
813 
814 	rc = efx_check_disabled(efx);
815 	if (rc)
816 		return rc;
817 
818 	/* Channels with no ->copy() method are not reallocated, so we
819 	 * must avoid reusing their buffer table entries.
820 	 */
821 	efx_for_each_channel(channel, efx) {
822 		struct efx_rx_queue *rx_queue;
823 		struct efx_tx_queue *tx_queue;
824 
825 		if (channel->type->copy)
826 			continue;
827 		next_buffer_table = max(next_buffer_table,
828 					channel->eventq.index +
829 					channel->eventq.entries);
830 		efx_for_each_channel_rx_queue(rx_queue, channel)
831 			next_buffer_table = max(next_buffer_table,
832 						rx_queue->rxd.index +
833 						rx_queue->rxd.entries);
834 		efx_for_each_channel_tx_queue(tx_queue, channel)
835 			next_buffer_table = max(next_buffer_table,
836 						tx_queue->txd.index +
837 						tx_queue->txd.entries);
838 	}
839 
840 	efx_device_detach_sync(efx);
841 	efx_stop_all(efx);
842 	efx_soft_disable_interrupts(efx);
843 
844 	/* Clone channels (where possible) */
845 	memset(other_channel, 0, sizeof(other_channel));
846 	for (i = 0; i < efx->n_channels; i++) {
847 		channel = efx->channel[i];
848 		if (channel->type->copy)
849 			channel = channel->type->copy(channel);
850 		if (!channel) {
851 			rc = -ENOMEM;
852 			goto out;
853 		}
854 		other_channel[i] = channel;
855 	}
856 
857 	/* Swap entry counts and channel pointers */
858 	old_rxq_entries = efx->rxq_entries;
859 	old_txq_entries = efx->txq_entries;
860 	efx->rxq_entries = rxq_entries;
861 	efx->txq_entries = txq_entries;
862 	for (i = 0; i < efx->n_channels; i++) {
863 		channel = efx->channel[i];
864 		efx->channel[i] = other_channel[i];
865 		other_channel[i] = channel;
866 	}
867 
868 	/* Restart buffer table allocation */
869 	efx->next_buffer_table = next_buffer_table;
870 
871 	for (i = 0; i < efx->n_channels; i++) {
872 		channel = efx->channel[i];
873 		if (!channel->type->copy)
874 			continue;
875 		rc = efx_probe_channel(channel);
876 		if (rc)
877 			goto rollback;
878 		efx_init_napi_channel(efx->channel[i]);
879 	}
880 
881 out:
882 	/* Destroy unused channel structures */
883 	for (i = 0; i < efx->n_channels; i++) {
884 		channel = other_channel[i];
885 		if (channel && channel->type->copy) {
886 			efx_fini_napi_channel(channel);
887 			efx_remove_channel(channel);
888 			kfree(channel);
889 		}
890 	}
891 
892 	rc2 = efx_soft_enable_interrupts(efx);
893 	if (rc2) {
894 		rc = rc ? rc : rc2;
895 		netif_err(efx, drv, efx->net_dev,
896 			  "unable to restart interrupts on channel reallocation\n");
897 		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
898 	} else {
899 		efx_start_all(efx);
900 		efx_device_attach_if_not_resetting(efx);
901 	}
902 	return rc;
903 
904 rollback:
905 	/* Swap back */
906 	efx->rxq_entries = old_rxq_entries;
907 	efx->txq_entries = old_txq_entries;
908 	for (i = 0; i < efx->n_channels; i++) {
909 		channel = efx->channel[i];
910 		efx->channel[i] = other_channel[i];
911 		other_channel[i] = channel;
912 	}
913 	goto out;
914 }
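
/* A minimal usage sketch (hypothetical caller): a ring-resize path such
 * as an ethtool set_ringparam handler would, holding the RTNL lock,
 * do:
 *
 *	rc = efx_realloc_channels(efx, new_rx_entries, new_tx_entries);
 *
 * with both entry counts rounded to powers of two within the hardware
 * limits.
 */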
915 
916 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
917 {
918 	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
919 }
920 
921 static bool efx_default_channel_want_txqs(struct efx_channel *channel)
922 {
923 	return channel->channel - channel->efx->tx_channel_offset <
924 		channel->efx->n_tx_channels;
925 }
926 
927 static const struct efx_channel_type efx_default_channel_type = {
928 	.pre_probe		= efx_channel_dummy_op_int,
929 	.post_remove		= efx_channel_dummy_op_void,
930 	.get_name		= efx_get_channel_name,
931 	.copy			= efx_copy_channel,
932 	.want_txqs		= efx_default_channel_want_txqs,
933 	.keep_eventq		= false,
934 	.want_pio		= true,
935 };
936 
937 int efx_channel_dummy_op_int(struct efx_channel *channel)
938 {
939 	return 0;
940 }
941 
942 void efx_channel_dummy_op_void(struct efx_channel *channel)
943 {
944 }
945 
946 /**************************************************************************
947  *
948  * Port handling
949  *
950  **************************************************************************/
951 
952 /* This ensures that the kernel is kept informed (via
953  * netif_carrier_on/off) of the link status; the networking core uses
954  * the carrier state to stop or wake the port's TX queues.
955  */
956 void efx_link_status_changed(struct efx_nic *efx)
957 {
958 	struct efx_link_state *link_state = &efx->link_state;
959 
960 	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
961 	 * that no events are triggered between unregister_netdev() and the
962 	 * driver unloading. A more general condition is that NETDEV_CHANGE
963 	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
964 	if (!netif_running(efx->net_dev))
965 		return;
966 
967 	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
968 		efx->n_link_state_changes++;
969 
970 		if (link_state->up)
971 			netif_carrier_on(efx->net_dev);
972 		else
973 			netif_carrier_off(efx->net_dev);
974 	}
975 
976 	/* Status message for kernel log */
977 	if (link_state->up)
978 		netif_info(efx, link, efx->net_dev,
979 			   "link up at %uMbps %s-duplex (MTU %d)\n",
980 			   link_state->speed, link_state->fd ? "full" : "half",
981 			   efx->net_dev->mtu);
982 	else
983 		netif_info(efx, link, efx->net_dev, "link down\n");
984 }
985 
986 void efx_link_set_advertising(struct efx_nic *efx,
987 			      const unsigned long *advertising)
988 {
989 	memcpy(efx->link_advertising, advertising,
990 	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
991 
992 	efx->link_advertising[0] |= ADVERTISED_Autoneg;
993 	if (advertising[0] & ADVERTISED_Pause)
994 		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
995 	else
996 		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
997 	if (advertising[0] & ADVERTISED_Asym_Pause)
998 		efx->wanted_fc ^= EFX_FC_TX;
999 }
1000 
1001 /* Equivalent to efx_link_set_advertising with all-zeroes, except does not
1002  * force the Autoneg bit on.
1003  */
1004 void efx_link_clear_advertising(struct efx_nic *efx)
1005 {
1006 	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
1007 	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
1008 }
1009 
1010 void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
1011 {
1012 	efx->wanted_fc = wanted_fc;
1013 	if (efx->link_advertising[0]) {
1014 		if (wanted_fc & EFX_FC_RX)
1015 			efx->link_advertising[0] |= (ADVERTISED_Pause |
1016 						     ADVERTISED_Asym_Pause);
1017 		else
1018 			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
1019 						      ADVERTISED_Asym_Pause);
1020 		if (wanted_fc & EFX_FC_TX)
1021 			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
1022 	}
1023 }
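
/* The resulting IEEE 802.3 pause advertisement, for illustration:
 *
 *	wanted_fc		advertised bits
 *	(none)			-
 *	EFX_FC_TX		Asym_Pause
 *	EFX_FC_RX		Pause | Asym_Pause
 *	EFX_FC_RX | EFX_FC_TX	Pause
 */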
1024 
1025 static void efx_fini_port(struct efx_nic *efx);
1026 
1027 /* We assume that efx->type->reconfigure_mac will always try to sync RX
1028  * filters and therefore needs to read-lock the filter table against freeing.
1029  */
1030 void efx_mac_reconfigure(struct efx_nic *efx)
1031 {
1032 	down_read(&efx->filter_sem);
1033 	efx->type->reconfigure_mac(efx);
1034 	up_read(&efx->filter_sem);
1035 }
1036 
1037 /* Push loopback/power/transmit disable settings to the PHY, and reconfigure
1038  * the MAC appropriately. All other PHY configuration changes are pushed
1039  * through phy_op->set_settings(), and pushed asynchronously to the MAC
1040  * through efx_monitor().
1041  *
1042  * Callers must hold the mac_lock
1043  */
1044 int __efx_reconfigure_port(struct efx_nic *efx)
1045 {
1046 	enum efx_phy_mode phy_mode;
1047 	int rc;
1048 
1049 	WARN_ON(!mutex_is_locked(&efx->mac_lock));
1050 
1051 	/* Disable PHY transmit in MAC-level loopbacks */
1052 	phy_mode = efx->phy_mode;
1053 	if (LOOPBACK_INTERNAL(efx))
1054 		efx->phy_mode |= PHY_MODE_TX_DISABLED;
1055 	else
1056 		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
1057 
1058 	rc = efx->type->reconfigure_port(efx);
1059 
1060 	if (rc)
1061 		efx->phy_mode = phy_mode;
1062 
1063 	return rc;
1064 }
1065 
1066 /* Reinitialise the MAC to pick up new PHY settings, even if the port is
1067  * disabled. */
1068 int efx_reconfigure_port(struct efx_nic *efx)
1069 {
1070 	int rc;
1071 
1072 	EFX_ASSERT_RESET_SERIALISED(efx);
1073 
1074 	mutex_lock(&efx->mac_lock);
1075 	rc = __efx_reconfigure_port(efx);
1076 	mutex_unlock(&efx->mac_lock);
1077 
1078 	return rc;
1079 }
1080 
1081 /* Asynchronous work item for changing MAC promiscuity and multicast
1082  * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
1083  * MAC directly. */
1084 static void efx_mac_work(struct work_struct *data)
1085 {
1086 	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
1087 
1088 	mutex_lock(&efx->mac_lock);
1089 	if (efx->port_enabled)
1090 		efx_mac_reconfigure(efx);
1091 	mutex_unlock(&efx->mac_lock);
1092 }
1093 
1094 static int efx_probe_port(struct efx_nic *efx)
1095 {
1096 	int rc;
1097 
1098 	netif_dbg(efx, probe, efx->net_dev, "create port\n");
1099 
1100 	if (phy_flash_cfg)
1101 		efx->phy_mode = PHY_MODE_SPECIAL;
1102 
1103 	/* Connect up MAC/PHY operations table */
1104 	rc = efx->type->probe_port(efx);
1105 	if (rc)
1106 		return rc;
1107 
1108 	/* Initialise MAC address to permanent address */
1109 	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
1110 
1111 	return 0;
1112 }
1113 
1114 static int efx_init_port(struct efx_nic *efx)
1115 {
1116 	int rc;
1117 
1118 	netif_dbg(efx, drv, efx->net_dev, "init port\n");
1119 
1120 	mutex_lock(&efx->mac_lock);
1121 
1122 	rc = efx->phy_op->init(efx);
1123 	if (rc)
1124 		goto fail1;
1125 
1126 	efx->port_initialized = true;
1127 
1128 	/* Reconfigure the MAC before creating dma queues (required for
1129 	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1130 	efx_mac_reconfigure(efx);
1131 
1132 	/* Ensure the PHY advertises the correct flow control settings */
1133 	rc = efx->phy_op->reconfigure(efx);
1134 	if (rc && rc != -EPERM)
1135 		goto fail2;
1136 
1137 	mutex_unlock(&efx->mac_lock);
1138 	return 0;
1139 
1140 fail2:
1141 	efx->phy_op->fini(efx);
1142 fail1:
1143 	mutex_unlock(&efx->mac_lock);
1144 	return rc;
1145 }
1146 
1147 static void efx_start_port(struct efx_nic *efx)
1148 {
1149 	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1150 	BUG_ON(efx->port_enabled);
1151 
1152 	mutex_lock(&efx->mac_lock);
1153 	efx->port_enabled = true;
1154 
1155 	/* Ensure MAC ingress/egress is enabled */
1156 	efx_mac_reconfigure(efx);
1157 
1158 	mutex_unlock(&efx->mac_lock);
1159 }
1160 
1161 /* Cancel work for MAC reconfiguration, periodic hardware monitoring
1162  * and the async self-test, wait for them to finish and prevent them
1163  * being scheduled again.  This doesn't cover online resets, which
1164  * should only be cancelled when removing the device.
1165  */
1166 static void efx_stop_port(struct efx_nic *efx)
1167 {
1168 	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1169 
1170 	EFX_ASSERT_RESET_SERIALISED(efx);
1171 
1172 	mutex_lock(&efx->mac_lock);
1173 	efx->port_enabled = false;
1174 	mutex_unlock(&efx->mac_lock);
1175 
1176 	/* Serialise against efx_set_multicast_list() */
1177 	netif_addr_lock_bh(efx->net_dev);
1178 	netif_addr_unlock_bh(efx->net_dev);
1179 
1180 	cancel_delayed_work_sync(&efx->monitor_work);
1181 	efx_selftest_async_cancel(efx);
1182 	cancel_work_sync(&efx->mac_work);
1183 }
1184 
1185 static void efx_fini_port(struct efx_nic *efx)
1186 {
1187 	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
1188 
1189 	if (!efx->port_initialized)
1190 		return;
1191 
1192 	efx->phy_op->fini(efx);
1193 	efx->port_initialized = false;
1194 
1195 	efx->link_state.up = false;
1196 	efx_link_status_changed(efx);
1197 }
1198 
1199 static void efx_remove_port(struct efx_nic *efx)
1200 {
1201 	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
1202 
1203 	efx->type->remove_port(efx);
1204 }
1205 
1206 /**************************************************************************
1207  *
1208  * NIC handling
1209  *
1210  **************************************************************************/
1211 
1212 static LIST_HEAD(efx_primary_list);
1213 static LIST_HEAD(efx_unassociated_list);
1214 
1215 static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
1216 {
1217 	return left->type == right->type &&
1218 		left->vpd_sn && right->vpd_sn &&
1219 		!strcmp(left->vpd_sn, right->vpd_sn);
1220 }
1221 
1222 static void efx_associate(struct efx_nic *efx)
1223 {
1224 	struct efx_nic *other, *next;
1225 
1226 	if (efx->primary == efx) {
1227 		/* Adding primary function; look for secondaries */
1228 
1229 		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
1230 		list_add_tail(&efx->node, &efx_primary_list);
1231 
1232 		list_for_each_entry_safe(other, next, &efx_unassociated_list,
1233 					 node) {
1234 			if (efx_same_controller(efx, other)) {
1235 				list_del(&other->node);
1236 				netif_dbg(other, probe, other->net_dev,
1237 					  "moving to secondary list of %s %s\n",
1238 					  pci_name(efx->pci_dev),
1239 					  efx->net_dev->name);
1240 				list_add_tail(&other->node,
1241 					      &efx->secondary_list);
1242 				other->primary = efx;
1243 			}
1244 		}
1245 	} else {
1246 		/* Adding secondary function; look for primary */
1247 
1248 		list_for_each_entry(other, &efx_primary_list, node) {
1249 			if (efx_same_controller(efx, other)) {
1250 				netif_dbg(efx, probe, efx->net_dev,
1251 					  "adding to secondary list of %s %s\n",
1252 					  pci_name(other->pci_dev),
1253 					  other->net_dev->name);
1254 				list_add_tail(&efx->node,
1255 					      &other->secondary_list);
1256 				efx->primary = other;
1257 				return;
1258 			}
1259 		}
1260 
1261 		netif_dbg(efx, probe, efx->net_dev,
1262 			  "adding to unassociated list\n");
1263 		list_add_tail(&efx->node, &efx_unassociated_list);
1264 	}
1265 }
1266 
1267 static void efx_dissociate(struct efx_nic *efx)
1268 {
1269 	struct efx_nic *other, *next;
1270 
1271 	list_del(&efx->node);
1272 	efx->primary = NULL;
1273 
1274 	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
1275 		list_del(&other->node);
1276 		netif_dbg(other, probe, other->net_dev,
1277 			  "moving to unassociated list\n");
1278 		list_add_tail(&other->node, &efx_unassociated_list);
1279 		other->primary = NULL;
1280 	}
1281 }
1282 
1283 /* This configures the PCI device to enable I/O and DMA. */
1284 static int efx_init_io(struct efx_nic *efx)
1285 {
1286 	struct pci_dev *pci_dev = efx->pci_dev;
1287 	dma_addr_t dma_mask = efx->type->max_dma_mask;
1288 	unsigned int mem_map_size = efx->type->mem_map_size(efx);
1289 	int rc, bar;
1290 
1291 	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1292 
1293 	bar = efx->type->mem_bar(efx);
1294 
1295 	rc = pci_enable_device(pci_dev);
1296 	if (rc) {
1297 		netif_err(efx, probe, efx->net_dev,
1298 			  "failed to enable PCI device\n");
1299 		goto fail1;
1300 	}
1301 
1302 	pci_set_master(pci_dev);
1303 
1304 	/* Set the PCI DMA mask.  Try all possibilities from our genuine mask
1305 	 * down to 32 bits, because some architectures will allow 40 bit
1306 	 * masks even though they reject 46 bit masks.
1307 	 */
1308 	while (dma_mask > 0x7fffffffUL) {
1309 		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1310 		if (rc == 0)
1311 			break;
1312 		dma_mask >>= 1;
1313 	}
1314 	if (rc) {
1315 		netif_err(efx, probe, efx->net_dev,
1316 			  "could not find a suitable DMA mask\n");
1317 		goto fail2;
1318 	}
1319 	netif_dbg(efx, probe, efx->net_dev,
1320 		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
1321 
1322 	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1323 	rc = pci_request_region(pci_dev, bar, "sfc");
1324 	if (rc) {
1325 		netif_err(efx, probe, efx->net_dev,
1326 			  "request for memory BAR failed\n");
1327 		rc = -EIO;
1328 		goto fail3;
1329 	}
1330 	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
1331 	if (!efx->membase) {
1332 		netif_err(efx, probe, efx->net_dev,
1333 			  "could not map memory BAR at %llx+%x\n",
1334 			  (unsigned long long)efx->membase_phys, mem_map_size);
1335 		rc = -ENOMEM;
1336 		goto fail4;
1337 	}
1338 	netif_dbg(efx, probe, efx->net_dev,
1339 		  "memory BAR at %llx+%x (virtual %p)\n",
1340 		  (unsigned long long)efx->membase_phys, mem_map_size,
1341 		  efx->membase);
1342 
1343 	return 0;
1344 
1345  fail4:
1346 	pci_release_region(efx->pci_dev, bar);
1347  fail3:
1348 	efx->membase_phys = 0;
1349  fail2:
1350 	pci_disable_device(efx->pci_dev);
1351  fail1:
1352 	return rc;
1353 }
1354 
1355 static void efx_fini_io(struct efx_nic *efx)
1356 {
1357 	int bar;
1358 
1359 	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1360 
1361 	if (efx->membase) {
1362 		iounmap(efx->membase);
1363 		efx->membase = NULL;
1364 	}
1365 
1366 	if (efx->membase_phys) {
1367 		bar = efx->type->mem_bar(efx);
1368 		pci_release_region(efx->pci_dev, bar);
1369 		efx->membase_phys = 0;
1370 	}
1371 
1372 	/* Don't disable bus-mastering if VFs are assigned */
1373 	if (!pci_vfs_assigned(efx->pci_dev))
1374 		pci_disable_device(efx->pci_dev);
1375 }
1376 
1377 void efx_set_default_rx_indir_table(struct efx_nic *efx,
1378 				    struct efx_rss_context *ctx)
1379 {
1380 	size_t i;
1381 
1382 	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
1383 		ctx->rx_indir_table[i] =
1384 			ethtool_rxfh_indir_default(i, efx->rss_spread);
1385 }
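
/* Worked example: ethtool_rxfh_indir_default(i, n) is simply i % n, so
 * with rss_spread == 4 the table cycles 0, 1, 2, 3, 0, 1, ...,
 * spreading flows evenly over the first four RX queues.
 */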
1386 
1387 static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1388 {
1389 	cpumask_var_t thread_mask;
1390 	unsigned int count;
1391 	int cpu;
1392 
1393 	if (rss_cpus) {
1394 		count = rss_cpus;
1395 	} else {
1396 		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1397 			netif_warn(efx, probe, efx->net_dev,
1398 				   "RSS disabled due to allocation failure\n");
1399 			return 1;
1400 		}
1401 
1402 		count = 0;
1403 		for_each_online_cpu(cpu) {
1404 			if (!cpumask_test_cpu(cpu, thread_mask)) {
1405 				++count;
1406 				cpumask_or(thread_mask, thread_mask,
1407 					   topology_sibling_cpumask(cpu));
1408 			}
1409 		}
1410 
1411 		free_cpumask_var(thread_mask);
1412 	}
1413 
1414 	if (count > EFX_MAX_RX_QUEUES) {
1415 		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1416 			       "Reducing number of rx queues from %u to %u.\n",
1417 			       count, EFX_MAX_RX_QUEUES);
1418 		count = EFX_MAX_RX_QUEUES;
1419 	}
1420 
1421 	/* If RSS is requested for the PF *and* VFs then we can't write RSS
1422 	 * table entries that are inaccessible to VFs
1423 	 */
1424 #ifdef CONFIG_SFC_SRIOV
1425 	if (efx->type->sriov_wanted) {
1426 		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
1427 		    count > efx_vf_size(efx)) {
1428 			netif_warn(efx, probe, efx->net_dev,
1429 				   "Reducing number of RSS channels from %u to %u for "
1430 				   "VF support. Increase vf-msix-limit to use more "
1431 				   "channels on the PF.\n",
1432 				   count, efx_vf_size(efx));
1433 			count = efx_vf_size(efx);
1434 		}
1435 	}
1436 #endif
1437 
1438 	return count;
1439 }
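
/* For illustration: on a host with 8 physical cores and SMT enabled
 * (16 online CPUs), the loop above counts each set of hyperthread
 * siblings once and returns 8, so SMT does not inflate the RX queue
 * count.
 */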
1440 
1441 /* Probe the number and type of interrupts we are able to obtain, and
1442  * the resulting numbers of channels and RX queues.
1443  */
1444 static int efx_probe_interrupts(struct efx_nic *efx)
1445 {
1446 	unsigned int extra_channels = 0;
1447 	unsigned int i, j;
1448 	int rc;
1449 
1450 	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
1451 		if (efx->extra_channel_type[i])
1452 			++extra_channels;
1453 
1454 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
1455 		struct msix_entry xentries[EFX_MAX_CHANNELS];
1456 		unsigned int n_channels;
1457 
1458 		n_channels = efx_wanted_parallelism(efx);
1459 		if (efx_separate_tx_channels)
1460 			n_channels *= 2;
1461 		n_channels += extra_channels;
1462 		n_channels = min(n_channels, efx->max_channels);
1463 
1464 		for (i = 0; i < n_channels; i++)
1465 			xentries[i].entry = i;
1466 		rc = pci_enable_msix_range(efx->pci_dev,
1467 					   xentries, 1, n_channels);
1468 		if (rc < 0) {
1469 			/* Fall back to single channel MSI */
1470 			netif_err(efx, drv, efx->net_dev,
1471 				  "could not enable MSI-X\n");
1472 			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
1473 				efx->interrupt_mode = EFX_INT_MODE_MSI;
1474 			else
1475 				return rc;
1476 		} else if (rc < n_channels) {
1477 			netif_err(efx, drv, efx->net_dev,
1478 				  "WARNING: Insufficient MSI-X vectors"
1479 				  " available (%d < %u).\n", rc, n_channels);
1480 			netif_err(efx, drv, efx->net_dev,
1481 				  "WARNING: Performance may be reduced.\n");
1482 			n_channels = rc;
1483 		}
1484 
1485 		if (rc > 0) {
1486 			efx->n_channels = n_channels;
1487 			if (n_channels > extra_channels)
1488 				n_channels -= extra_channels;
1489 			if (efx_separate_tx_channels) {
1490 				efx->n_tx_channels = min(max(n_channels / 2,
1491 							     1U),
1492 							 efx->max_tx_channels);
1493 				efx->n_rx_channels = max(n_channels -
1494 							 efx->n_tx_channels,
1495 							 1U);
1496 			} else {
1497 				efx->n_tx_channels = min(n_channels,
1498 							 efx->max_tx_channels);
1499 				efx->n_rx_channels = n_channels;
1500 			}
1501 			for (i = 0; i < efx->n_channels; i++)
1502 				efx_get_channel(efx, i)->irq =
1503 					xentries[i].vector;
1504 		}
1505 	}
1506 
1507 	/* Try single interrupt MSI */
1508 	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
1509 		efx->n_channels = 1;
1510 		efx->n_rx_channels = 1;
1511 		efx->n_tx_channels = 1;
1512 		rc = pci_enable_msi(efx->pci_dev);
1513 		if (rc == 0) {
1514 			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1515 		} else {
1516 			netif_err(efx, drv, efx->net_dev,
1517 				  "could not enable MSI\n");
1518 			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
1519 				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
1520 			else
1521 				return rc;
1522 		}
1523 	}
1524 
1525 	/* Assume legacy interrupts */
1526 	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
1527 		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
1528 		efx->n_rx_channels = 1;
1529 		efx->n_tx_channels = 1;
1530 		efx->legacy_irq = efx->pci_dev->irq;
1531 	}
1532 
1533 	/* Assign extra channels if possible */
1534 	efx->n_extra_tx_channels = 0;
1535 	j = efx->n_channels;
1536 	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
1537 		if (!efx->extra_channel_type[i])
1538 			continue;
1539 		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
1540 		    efx->n_channels <= extra_channels) {
1541 			efx->extra_channel_type[i]->handle_no_channel(efx);
1542 		} else {
1543 			--j;
1544 			efx_get_channel(efx, j)->type =
1545 				efx->extra_channel_type[i];
1546 			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
1547 				efx->n_extra_tx_channels++;
1548 		}
1549 	}
1550 
1551 	/* RSS might be usable on VFs even if it is disabled on the PF */
1552 #ifdef CONFIG_SFC_SRIOV
1553 	if (efx->type->sriov_wanted) {
1554 		efx->rss_spread = ((efx->n_rx_channels > 1 ||
1555 				    !efx->type->sriov_wanted(efx)) ?
1556 				   efx->n_rx_channels : efx_vf_size(efx));
1557 		return 0;
1558 	}
1559 #endif
1560 	efx->rss_spread = efx->n_rx_channels;
1561 
1562 	return 0;
1563 }
1564 
1565 #if defined(CONFIG_SMP)
1566 static void efx_set_interrupt_affinity(struct efx_nic *efx)
1567 {
1568 	struct efx_channel *channel;
1569 	unsigned int cpu;
1570 
1571 	efx_for_each_channel(channel, efx) {
1572 		cpu = cpumask_local_spread(channel->channel,
1573 					   pcibus_to_node(efx->pci_dev->bus));
1574 		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
1575 	}
1576 }
1577 
1578 static void efx_clear_interrupt_affinity(struct efx_nic *efx)
1579 {
1580 	struct efx_channel *channel;
1581 
1582 	efx_for_each_channel(channel, efx)
1583 		irq_set_affinity_hint(channel->irq, NULL);
1584 }
1585 #else
1586 static void
1587 efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
1588 {
1589 }
1590 
1591 static void
1592 efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
1593 {
1594 }
1595 #endif /* CONFIG_SMP */
1596 
1597 static int efx_soft_enable_interrupts(struct efx_nic *efx)
1598 {
1599 	struct efx_channel *channel, *end_channel;
1600 	int rc;
1601 
1602 	BUG_ON(efx->state == STATE_DISABLED);
1603 
1604 	efx->irq_soft_enabled = true;
1605 	smp_wmb();
1606 
1607 	efx_for_each_channel(channel, efx) {
1608 		if (!channel->type->keep_eventq) {
1609 			rc = efx_init_eventq(channel);
1610 			if (rc)
1611 				goto fail;
1612 		}
1613 		efx_start_eventq(channel);
1614 	}
1615 
1616 	efx_mcdi_mode_event(efx);
1617 
1618 	return 0;
1619 fail:
1620 	end_channel = channel;
1621 	efx_for_each_channel(channel, efx) {
1622 		if (channel == end_channel)
1623 			break;
1624 		efx_stop_eventq(channel);
1625 		if (!channel->type->keep_eventq)
1626 			efx_fini_eventq(channel);
1627 	}
1628 
1629 	return rc;
1630 }
1631 
1632 static void efx_soft_disable_interrupts(struct efx_nic *efx)
1633 {
1634 	struct efx_channel *channel;
1635 
1636 	if (efx->state == STATE_DISABLED)
1637 		return;
1638 
1639 	efx_mcdi_mode_poll(efx);
1640 
1641 	efx->irq_soft_enabled = false;
1642 	smp_wmb();
1643 
1644 	if (efx->legacy_irq)
1645 		synchronize_irq(efx->legacy_irq);
1646 
1647 	efx_for_each_channel(channel, efx) {
1648 		if (channel->irq)
1649 			synchronize_irq(channel->irq);
1650 
1651 		efx_stop_eventq(channel);
1652 		if (!channel->type->keep_eventq)
1653 			efx_fini_eventq(channel);
1654 	}
1655 
1656 	/* Flush the asynchronous MCDI request queue */
1657 	efx_mcdi_flush_async(efx);
1658 }
1659 
1660 static int efx_enable_interrupts(struct efx_nic *efx)
1661 {
1662 	struct efx_channel *channel, *end_channel;
1663 	int rc;
1664 
1665 	BUG_ON(efx->state == STATE_DISABLED);
1666 
1667 	if (efx->eeh_disabled_legacy_irq) {
1668 		enable_irq(efx->legacy_irq);
1669 		efx->eeh_disabled_legacy_irq = false;
1670 	}
1671 
1672 	efx->type->irq_enable_master(efx);
1673 
1674 	efx_for_each_channel(channel, efx) {
1675 		if (channel->type->keep_eventq) {
1676 			rc = efx_init_eventq(channel);
1677 			if (rc)
1678 				goto fail;
1679 		}
1680 	}
1681 
1682 	rc = efx_soft_enable_interrupts(efx);
1683 	if (rc)
1684 		goto fail;
1685 
1686 	return 0;
1687 
1688 fail:
1689 	end_channel = channel;
1690 	efx_for_each_channel(channel, efx) {
1691 		if (channel == end_channel)
1692 			break;
1693 		if (channel->type->keep_eventq)
1694 			efx_fini_eventq(channel);
1695 	}
1696 
1697 	efx->type->irq_disable_non_ev(efx);
1698 
1699 	return rc;
1700 }
1701 
1702 static void efx_disable_interrupts(struct efx_nic *efx)
1703 {
1704 	struct efx_channel *channel;
1705 
1706 	efx_soft_disable_interrupts(efx);
1707 
1708 	efx_for_each_channel(channel, efx) {
1709 		if (channel->type->keep_eventq)
1710 			efx_fini_eventq(channel);
1711 	}
1712 
1713 	efx->type->irq_disable_non_ev(efx);
1714 }
1715 
1716 static void efx_remove_interrupts(struct efx_nic *efx)
1717 {
1718 	struct efx_channel *channel;
1719 
1720 	/* Remove MSI/MSI-X interrupts */
1721 	efx_for_each_channel(channel, efx)
1722 		channel->irq = 0;
1723 	pci_disable_msi(efx->pci_dev);
1724 	pci_disable_msix(efx->pci_dev);
1725 
1726 	/* Remove legacy interrupt */
1727 	efx->legacy_irq = 0;
1728 }
1729 
1730 static void efx_set_channels(struct efx_nic *efx)
1731 {
1732 	struct efx_channel *channel;
1733 	struct efx_tx_queue *tx_queue;
1734 
1735 	efx->tx_channel_offset =
1736 		efx_separate_tx_channels ?
1737 		efx->n_channels - efx->n_tx_channels : 0;
1738 
1739 	/* We need to mark which channels really have RX and TX
1740 	 * queues, and adjust the TX queue numbers if we have separate
1741 	 * RX-only and TX-only channels.
1742 	 */
1743 	efx_for_each_channel(channel, efx) {
1744 		if (channel->channel < efx->n_rx_channels)
1745 			channel->rx_queue.core_index = channel->channel;
1746 		else
1747 			channel->rx_queue.core_index = -1;
1748 
1749 		efx_for_each_channel_tx_queue(tx_queue, channel)
1750 			tx_queue->queue -= (efx->tx_channel_offset *
1751 					    EFX_TXQ_TYPES);
1752 	}
1753 }
1754 
1755 static int efx_probe_nic(struct efx_nic *efx)
1756 {
1757 	int rc;
1758 
1759 	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1760 
1761 	/* Carry out hardware-type specific initialisation */
1762 	rc = efx->type->probe(efx);
1763 	if (rc)
1764 		return rc;
1765 
1766 	do {
1767 		if (!efx->max_channels || !efx->max_tx_channels) {
1768 			netif_err(efx, drv, efx->net_dev,
1769 				  "Insufficient resources to allocate"
1770 				  " any channels\n");
1771 			rc = -ENOSPC;
1772 			goto fail1;
1773 		}
1774 
1775 		/* Determine the number of channels and queues by trying
1776 		 * to hook in MSI-X interrupts.
1777 		 */
1778 		rc = efx_probe_interrupts(efx);
1779 		if (rc)
1780 			goto fail1;
1781 
1782 		efx_set_channels(efx);
1783 
1784 		/* dimension_resources can fail with EAGAIN */
1785 		rc = efx->type->dimension_resources(efx);
1786 		if (rc != 0 && rc != -EAGAIN)
1787 			goto fail2;
1788 
1789 		if (rc == -EAGAIN)
1790 			/* try again with new max_channels */
1791 			efx_remove_interrupts(efx);
1792 
1793 	} while (rc == -EAGAIN);
1794 
1795 	if (efx->n_channels > 1)
1796 		netdev_rss_key_fill(efx->rss_context.rx_hash_key,
1797 				    sizeof(efx->rss_context.rx_hash_key));
1798 	efx_set_default_rx_indir_table(efx, &efx->rss_context);
1799 
1800 	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1801 	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1802 
1803 	/* Initialise the interrupt moderation settings */
1804 	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1805 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1806 				true);
1807 
1808 	return 0;
1809 
1810 fail2:
1811 	efx_remove_interrupts(efx);
1812 fail1:
1813 	efx->type->remove(efx);
1814 	return rc;
1815 }
1816 
1817 static void efx_remove_nic(struct efx_nic *efx)
1818 {
1819 	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1820 
1821 	efx_remove_interrupts(efx);
1822 	efx->type->remove(efx);
1823 }
1824 
1825 static int efx_probe_filters(struct efx_nic *efx)
1826 {
1827 	int rc;
1828 
1829 	init_rwsem(&efx->filter_sem);
1830 	mutex_lock(&efx->mac_lock);
1831 	down_write(&efx->filter_sem);
1832 	rc = efx->type->filter_table_probe(efx);
1833 	if (rc)
1834 		goto out_unlock;
1835 
1836 #ifdef CONFIG_RFS_ACCEL
1837 	if (efx->type->offload_features & NETIF_F_NTUPLE) {
1838 		struct efx_channel *channel;
1839 		int i, success = 1;
1840 
1841 		efx_for_each_channel(channel, efx) {
1842 			channel->rps_flow_id =
1843 				kcalloc(efx->type->max_rx_ip_filters,
1844 					sizeof(*channel->rps_flow_id),
1845 					GFP_KERNEL);
1846 			if (!channel->rps_flow_id)
1847 				success = 0;
1848 			else
1849 				for (i = 0;
1850 				     i < efx->type->max_rx_ip_filters;
1851 				     ++i)
1852 					channel->rps_flow_id[i] =
1853 						RPS_FLOW_ID_INVALID;
1854 		}
1855 
1856 		if (!success) {
1857 			efx_for_each_channel(channel, efx)
1858 				kfree(channel->rps_flow_id);
1859 			efx->type->filter_table_remove(efx);
1860 			rc = -ENOMEM;
1861 			goto out_unlock;
1862 		}
1863 
1864 		efx->rps_expire_index = efx->rps_expire_channel = 0;
1865 	}
1866 #endif
1867 out_unlock:
1868 	up_write(&efx->filter_sem);
1869 	mutex_unlock(&efx->mac_lock);
1870 	return rc;
1871 }
1872 
1873 static void efx_remove_filters(struct efx_nic *efx)
1874 {
1875 #ifdef CONFIG_RFS_ACCEL
1876 	struct efx_channel *channel;
1877 
1878 	efx_for_each_channel(channel, efx)
1879 		kfree(channel->rps_flow_id);
1880 #endif
1881 	down_write(&efx->filter_sem);
1882 	efx->type->filter_table_remove(efx);
1883 	up_write(&efx->filter_sem);
1884 }
1885 
1886 
1887 /**************************************************************************
1888  *
1889  * NIC startup/shutdown
1890  *
1891  *************************************************************************/
1892 
1893 static int efx_probe_all(struct efx_nic *efx)
1894 {
1895 	int rc;
1896 
1897 	rc = efx_probe_nic(efx);
1898 	if (rc) {
1899 		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1900 		goto fail1;
1901 	}
1902 
1903 	rc = efx_probe_port(efx);
1904 	if (rc) {
1905 		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1906 		goto fail2;
1907 	}
1908 
1909 	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
1910 	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
1911 		rc = -EINVAL;
1912 		goto fail3;
1913 	}
1914 	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1915 
1916 #ifdef CONFIG_SFC_SRIOV
1917 	rc = efx->type->vswitching_probe(efx);
1918 	if (rc) /* not fatal; the PF will still work fine */
1919 		netif_warn(efx, probe, efx->net_dev,
1920 			   "failed to setup vswitching rc=%d; VFs may not function\n", rc);
1922 #endif
1923 
1924 	rc = efx_probe_filters(efx);
1925 	if (rc) {
1926 		netif_err(efx, probe, efx->net_dev,
1927 			  "failed to create filter tables\n");
1928 		goto fail4;
1929 	}
1930 
1931 	rc = efx_probe_channels(efx);
1932 	if (rc)
1933 		goto fail5;
1934 
1935 	return 0;
1936 
1937  fail5:
1938 	efx_remove_filters(efx);
1939  fail4:
1940 #ifdef CONFIG_SFC_SRIOV
1941 	efx->type->vswitching_remove(efx);
1942 #endif
1943  fail3:
1944 	efx_remove_port(efx);
1945  fail2:
1946 	efx_remove_nic(efx);
1947  fail1:
1948 	return rc;
1949 }
1950 
1951 /* If the interface is supposed to be running but is not, start
1952  * the hardware and software data path, regular activity for the port
1953  * (MAC statistics, link polling, etc.) and schedule the port to be
1954  * reconfigured.  Interrupts must already be enabled.  This function
1955  * is safe to call multiple times, so long as the NIC is not disabled.
1956  * Requires the RTNL lock.
1957  */
1958 static void efx_start_all(struct efx_nic *efx)
1959 {
1960 	EFX_ASSERT_RESET_SERIALISED(efx);
1961 	BUG_ON(efx->state == STATE_DISABLED);
1962 
1963 	/* Check that it is appropriate to restart the interface. All
1964 	 * of these flags are safe to read under just the rtnl lock */
1965 	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1966 	    efx->reset_pending)
1967 		return;
1968 
1969 	efx_start_port(efx);
1970 	efx_start_datapath(efx);
1971 
1972 	/* Start the hardware monitor if there is one */
1973 	if (efx->type->monitor != NULL)
1974 		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1975 				   efx_monitor_interval);
1976 
1977 	/* Link state detection is normally event-driven; we have
1978 	 * to poll now because we could have missed a change
1979 	 */
1980 	mutex_lock(&efx->mac_lock);
1981 	if (efx->phy_op->poll(efx))
1982 		efx_link_status_changed(efx);
1983 	mutex_unlock(&efx->mac_lock);
1984 
1985 	efx->type->start_stats(efx);
1986 	efx->type->pull_stats(efx);
1987 	spin_lock_bh(&efx->stats_lock);
1988 	efx->type->update_stats(efx, NULL, NULL);
1989 	spin_unlock_bh(&efx->stats_lock);
1990 }
1991 
1992 /* Quiesce the hardware and software data path, and regular activity
1993  * for the port without bringing the link down.  Safe to call multiple
1994  * times with the NIC in almost any state, but interrupts should be
1995  * enabled.  Requires the RTNL lock.
1996  */
1997 static void efx_stop_all(struct efx_nic *efx)
1998 {
1999 	EFX_ASSERT_RESET_SERIALISED(efx);
2000 
2001 	/* port_enabled can be read safely under the rtnl lock */
2002 	if (!efx->port_enabled)
2003 		return;
2004 
2005 	/* update stats before we go down so we can accurately count
2006 	 * rx_nodesc_drops
2007 	 */
2008 	efx->type->pull_stats(efx);
2009 	spin_lock_bh(&efx->stats_lock);
2010 	efx->type->update_stats(efx, NULL, NULL);
2011 	spin_unlock_bh(&efx->stats_lock);
2012 	efx->type->stop_stats(efx);
2013 	efx_stop_port(efx);
2014 
2015 	/* Stop the kernel transmit interface.  This is only valid if
2016 	 * the device is stopped or detached; otherwise the watchdog
2017 	 * may fire immediately.
2018 	 */
2019 	WARN_ON(netif_running(efx->net_dev) &&
2020 		netif_device_present(efx->net_dev));
2021 	netif_tx_disable(efx->net_dev);
2022 
2023 	efx_stop_datapath(efx);
2024 }
2025 
2026 static void efx_remove_all(struct efx_nic *efx)
2027 {
2028 	efx_remove_channels(efx);
2029 	efx_remove_filters(efx);
2030 #ifdef CONFIG_SFC_SRIOV
2031 	efx->type->vswitching_remove(efx);
2032 #endif
2033 	efx_remove_port(efx);
2034 	efx_remove_nic(efx);
2035 }
2036 
2037 /**************************************************************************
2038  *
2039  * Interrupt moderation
2040  *
2041  **************************************************************************/
2042 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
2043 {
2044 	if (usecs == 0)
2045 		return 0;
2046 	if (usecs * 1000 < efx->timer_quantum_ns)
2047 		return 1; /* never round down to 0 */
2048 	return usecs * 1000 / efx->timer_quantum_ns;
2049 }
2050 
2051 unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
2052 {
2053 	/* We must round up when converting ticks to microseconds
2054 	 * because we round down when converting the other way.
2055 	 */
2056 	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
2057 }
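
/* A worked example of the two conversions above, assuming a hypothetical
 * timer_quantum_ns of 6144 (the real value is NIC-specific):
 *
 *	efx_usecs_to_ticks(efx, 10) = 10000 / 6144             = 1 tick
 *	efx_ticks_to_usecs(efx, 1)  = DIV_ROUND_UP(6144, 1000) = 7 us
 *
 * Rounding up here means a tick count converted to usecs and back can
 * never come out smaller than the original tick count.
 */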
2058 
2059 /* Set interrupt moderation parameters */
2060 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
2061 			    unsigned int rx_usecs, bool rx_adaptive,
2062 			    bool rx_may_override_tx)
2063 {
2064 	struct efx_channel *channel;
2065 	unsigned int timer_max_us;
2066 
2067 	EFX_ASSERT_RESET_SERIALISED(efx);
2068 
2069 	timer_max_us = efx->timer_max_ns / 1000;
2070 
2071 	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
2072 		return -EINVAL;
2073 
2074 	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
2075 	    !rx_may_override_tx) {
2076 		netif_err(efx, drv, efx->net_dev,
2077 			  "Channels are shared. RX and TX IRQ moderation must be equal\n");
2078 		return -EINVAL;
2079 	}
2080 
2081 	efx->irq_rx_adaptive = rx_adaptive;
2082 	efx->irq_rx_moderation_us = rx_usecs;
2083 	efx_for_each_channel(channel, efx) {
2084 		if (efx_channel_has_rx_queue(channel))
2085 			channel->irq_moderation_us = rx_usecs;
2086 		else if (efx_channel_has_tx_queues(channel))
2087 			channel->irq_moderation_us = tx_usecs;
2088 	}
2089 
2090 	return 0;
2091 }
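
/* Minimal usage sketch (values are illustrative only):
 *
 *	rc = efx_init_irq_moderation(efx, 150, 60, true, true);
 *
 * requests 150us TX and 60us RX moderation with adaptive RX.  It fails
 * with -EINVAL if either value exceeds timer_max_ns / 1000, and on
 * combined RX+TX channels the RX value wins because rx_may_override_tx
 * is true; with rx_may_override_tx false, unequal values on shared
 * channels are rejected instead.
 */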
2092 
2093 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
2094 			    unsigned int *rx_usecs, bool *rx_adaptive)
2095 {
2096 	*rx_adaptive = efx->irq_rx_adaptive;
2097 	*rx_usecs = efx->irq_rx_moderation_us;
2098 
2099 	/* If channels are shared between RX and TX, so is IRQ
2100 	 * moderation.  Otherwise, IRQ moderation is the same for all
2101 	 * TX channels and is not adaptive.
2102 	 */
2103 	if (efx->tx_channel_offset == 0) {
2104 		*tx_usecs = *rx_usecs;
2105 	} else {
2106 		struct efx_channel *tx_channel;
2107 
2108 		tx_channel = efx->channel[efx->tx_channel_offset];
2109 		*tx_usecs = tx_channel->irq_moderation_us;
2110 	}
2111 }
2112 
2113 /**************************************************************************
2114  *
2115  * Hardware monitor
2116  *
2117  **************************************************************************/
2118 
2119 /* Run periodically off the general workqueue */
2120 static void efx_monitor(struct work_struct *data)
2121 {
2122 	struct efx_nic *efx = container_of(data, struct efx_nic,
2123 					   monitor_work.work);
2124 
2125 	netif_vdbg(efx, timer, efx->net_dev,
2126 		   "hardware monitor executing on CPU %d\n",
2127 		   raw_smp_processor_id());
2128 	BUG_ON(efx->type->monitor == NULL);
2129 
2130 	/* If the mac_lock is already held then a port reconfiguration
2131 	 * is probably already in progress, which will do most of the
2132 	 * work of monitor() anyway. */
2133 	if (mutex_trylock(&efx->mac_lock)) {
2134 		if (efx->port_enabled)
2135 			efx->type->monitor(efx);
2136 		mutex_unlock(&efx->mac_lock);
2137 	}
2138 
2139 	queue_delayed_work(efx->workqueue, &efx->monitor_work,
2140 			   efx_monitor_interval);
2141 }
2142 
2143 /**************************************************************************
2144  *
2145  * ioctls
2146  *
2147  *************************************************************************/
2148 
2149 /* Net device ioctl
2150  * Context: process, rtnl_lock() held.
2151  */
2152 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
2153 {
2154 	struct efx_nic *efx = netdev_priv(net_dev);
2155 	struct mii_ioctl_data *data = if_mii(ifr);
2156 
2157 	if (cmd == SIOCSHWTSTAMP)
2158 		return efx_ptp_set_ts_config(efx, ifr);
2159 	if (cmd == SIOCGHWTSTAMP)
2160 		return efx_ptp_get_ts_config(efx, ifr);
2161 
2162 	/* Convert phy_id from older PRTAD/DEVAD format */
2163 	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
2164 	    (data->phy_id & 0xfc00) == 0x0400)
2165 		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
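	/* For example, an old-format address 0x0400 | addr (bit 10 set,
	 * bits 15-11 clear, as the mask check above requires) becomes
	 * MDIO_PHY_ID_C45 | addr: the XOR clears the old flag bit and
	 * sets the C45 flag in one step, leaving the PRTAD/DEVAD address
	 * bits untouched.
	 */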
2166 
2167 	return mdio_mii_ioctl(&efx->mdio, data, cmd);
2168 }
2169 
2170 /**************************************************************************
2171  *
2172  * NAPI interface
2173  *
2174  **************************************************************************/
2175 
2176 static void efx_init_napi_channel(struct efx_channel *channel)
2177 {
2178 	struct efx_nic *efx = channel->efx;
2179 
2180 	channel->napi_dev = efx->net_dev;
2181 	netif_napi_add(channel->napi_dev, &channel->napi_str,
2182 		       efx_poll, napi_weight);
2183 }
2184 
2185 static void efx_init_napi(struct efx_nic *efx)
2186 {
2187 	struct efx_channel *channel;
2188 
2189 	efx_for_each_channel(channel, efx)
2190 		efx_init_napi_channel(channel);
2191 }
2192 
2193 static void efx_fini_napi_channel(struct efx_channel *channel)
2194 {
2195 	if (channel->napi_dev)
2196 		netif_napi_del(&channel->napi_str);
2197 
2198 	channel->napi_dev = NULL;
2199 }
2200 
2201 static void efx_fini_napi(struct efx_nic *efx)
2202 {
2203 	struct efx_channel *channel;
2204 
2205 	efx_for_each_channel(channel, efx)
2206 		efx_fini_napi_channel(channel);
2207 }
2208 
2209 /**************************************************************************
2210  *
2211  * Kernel netpoll interface
2212  *
2213  *************************************************************************/
2214 
2215 #ifdef CONFIG_NET_POLL_CONTROLLER
2216 
2217 /* Although in the common case interrupts will be disabled, this is not
2218  * guaranteed. However, all our work happens inside the NAPI callback,
2219  * so no locking is required.
2220  */
2221 static void efx_netpoll(struct net_device *net_dev)
2222 {
2223 	struct efx_nic *efx = netdev_priv(net_dev);
2224 	struct efx_channel *channel;
2225 
2226 	efx_for_each_channel(channel, efx)
2227 		efx_schedule_channel(channel);
2228 }
2229 
2230 #endif
2231 
2232 /**************************************************************************
2233  *
2234  * Kernel net device interface
2235  *
2236  *************************************************************************/
2237 
2238 /* Context: process, rtnl_lock() held. */
2239 int efx_net_open(struct net_device *net_dev)
2240 {
2241 	struct efx_nic *efx = netdev_priv(net_dev);
2242 	int rc;
2243 
2244 	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2245 		  raw_smp_processor_id());
2246 
2247 	rc = efx_check_disabled(efx);
2248 	if (rc)
2249 		return rc;
2250 	if (efx->phy_mode & PHY_MODE_SPECIAL)
2251 		return -EBUSY;
2252 	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
2253 		return -EIO;
2254 
2255 	/* Notify the kernel of the link state polled during driver load,
2256 	 * before the monitor starts running */
2257 	efx_link_status_changed(efx);
2258 
2259 	efx_start_all(efx);
2260 	if (efx->state == STATE_DISABLED || efx->reset_pending)
2261 		netif_device_detach(efx->net_dev);
2262 	efx_selftest_async_start(efx);
2263 	return 0;
2264 }
2265 
2266 /* Context: process, rtnl_lock() held.
2267  * Note that the kernel will ignore our return code; this method
2268  * should really return void.
2269  */
2270 int efx_net_stop(struct net_device *net_dev)
2271 {
2272 	struct efx_nic *efx = netdev_priv(net_dev);
2273 
2274 	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2275 		  raw_smp_processor_id());
2276 
2277 	/* Stop the device and flush all the channels */
2278 	efx_stop_all(efx);
2279 
2280 	return 0;
2281 }
2282 
2283 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
2284 static void efx_net_stats(struct net_device *net_dev,
2285 			  struct rtnl_link_stats64 *stats)
2286 {
2287 	struct efx_nic *efx = netdev_priv(net_dev);
2288 
2289 	spin_lock_bh(&efx->stats_lock);
2290 	efx->type->update_stats(efx, NULL, stats);
2291 	spin_unlock_bh(&efx->stats_lock);
2292 }
2293 
2294 /* Context: netif_tx_lock held, BHs disabled. */
2295 static void efx_watchdog(struct net_device *net_dev)
2296 {
2297 	struct efx_nic *efx = netdev_priv(net_dev);
2298 
2299 	netif_err(efx, tx_err, efx->net_dev,
2300 		  "TX stuck with port_enabled=%d: resetting channels\n",
2301 		  efx->port_enabled);
2302 
2303 	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2304 }
2305 
2306 
2307 /* Context: process, rtnl_lock() held. */
2308 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
2309 {
2310 	struct efx_nic *efx = netdev_priv(net_dev);
2311 	int rc;
2312 
2313 	rc = efx_check_disabled(efx);
2314 	if (rc)
2315 		return rc;
2316 
2317 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2318 
2319 	efx_device_detach_sync(efx);
2320 	efx_stop_all(efx);
2321 
2322 	mutex_lock(&efx->mac_lock);
2323 	net_dev->mtu = new_mtu;
2324 	efx_mac_reconfigure(efx);
2325 	mutex_unlock(&efx->mac_lock);
2326 
2327 	efx_start_all(efx);
2328 	efx_device_attach_if_not_resetting(efx);
2329 	return 0;
2330 }
2331 
2332 static int efx_set_mac_address(struct net_device *net_dev, void *data)
2333 {
2334 	struct efx_nic *efx = netdev_priv(net_dev);
2335 	struct sockaddr *addr = data;
2336 	u8 *new_addr = addr->sa_data;
2337 	u8 old_addr[6];
2338 	int rc;
2339 
2340 	if (!is_valid_ether_addr(new_addr)) {
2341 		netif_err(efx, drv, efx->net_dev,
2342 			  "invalid ethernet MAC address requested: %pM\n",
2343 			  new_addr);
2344 		return -EADDRNOTAVAIL;
2345 	}
2346 
2347 	/* save old address */
2348 	ether_addr_copy(old_addr, net_dev->dev_addr);
2349 	ether_addr_copy(net_dev->dev_addr, new_addr);
2350 	if (efx->type->set_mac_address) {
2351 		rc = efx->type->set_mac_address(efx);
2352 		if (rc) {
2353 			ether_addr_copy(net_dev->dev_addr, old_addr);
2354 			return rc;
2355 		}
2356 	}
2357 
2358 	/* Reconfigure the MAC */
2359 	mutex_lock(&efx->mac_lock);
2360 	efx_mac_reconfigure(efx);
2361 	mutex_unlock(&efx->mac_lock);
2362 
2363 	return 0;
2364 }
2365 
2366 /* Context: netif_addr_lock held, BHs disabled. */
2367 static void efx_set_rx_mode(struct net_device *net_dev)
2368 {
2369 	struct efx_nic *efx = netdev_priv(net_dev);
2370 
2371 	if (efx->port_enabled)
2372 		queue_work(efx->workqueue, &efx->mac_work);
2373 	/* Otherwise efx_start_port() will do this */
2374 }
2375 
2376 static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2377 {
2378 	struct efx_nic *efx = netdev_priv(net_dev);
2379 	int rc;
2380 
2381 	/* If disabling RX n-tuple filtering, clear existing filters */
2382 	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2383 		rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2384 		if (rc)
2385 			return rc;
2386 	}
2387 
2388 	/* If the RX VLAN filter is changed, update filters via mac_reconfigure.
2389 	 * If rx-fcs is changed, mac_reconfigure updates that too.
2390 	 */
2391 	if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
2392 					  NETIF_F_RXFCS)) {
2393 		/* efx_set_rx_mode() will schedule MAC work to update filters
2394 		 * when the new features are finally set in net_dev.
2395 		 */
2396 		efx_set_rx_mode(net_dev);
2397 	}
2398 
2399 	return 0;
2400 }
2401 
2402 static int efx_get_phys_port_id(struct net_device *net_dev,
2403 				struct netdev_phys_item_id *ppid)
2404 {
2405 	struct efx_nic *efx = netdev_priv(net_dev);
2406 
2407 	if (efx->type->get_phys_port_id)
2408 		return efx->type->get_phys_port_id(efx, ppid);
2409 	else
2410 		return -EOPNOTSUPP;
2411 }
2412 
2413 static int efx_get_phys_port_name(struct net_device *net_dev,
2414 				  char *name, size_t len)
2415 {
2416 	struct efx_nic *efx = netdev_priv(net_dev);
2417 
2418 	if (snprintf(name, len, "p%u", efx->port_num) >= len)
2419 		return -EINVAL;
2420 	return 0;
2421 }
2422 
2423 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2424 {
2425 	struct efx_nic *efx = netdev_priv(net_dev);
2426 
2427 	if (efx->type->vlan_rx_add_vid)
2428 		return efx->type->vlan_rx_add_vid(efx, proto, vid);
2429 	else
2430 		return -EOPNOTSUPP;
2431 }
2432 
2433 static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2434 {
2435 	struct efx_nic *efx = netdev_priv(net_dev);
2436 
2437 	if (efx->type->vlan_rx_kill_vid)
2438 		return efx->type->vlan_rx_kill_vid(efx, proto, vid);
2439 	else
2440 		return -EOPNOTSUPP;
2441 }
2442 
2443 static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
2444 {
2445 	switch (in) {
2446 	case UDP_TUNNEL_TYPE_VXLAN:
2447 		return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
2448 	case UDP_TUNNEL_TYPE_GENEVE:
2449 		return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
2450 	default:
2451 		return -1;
2452 	}
2453 }
2454 
2455 static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
2456 {
2457 	struct efx_nic *efx = netdev_priv(dev);
2458 	struct efx_udp_tunnel tnl;
2459 	int efx_tunnel_type;
2460 
2461 	efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2462 	if (efx_tunnel_type < 0)
2463 		return;
2464 
2465 	tnl.type = (u16)efx_tunnel_type;
2466 	tnl.port = ti->port;
2467 
2468 	if (efx->type->udp_tnl_add_port)
2469 		(void)efx->type->udp_tnl_add_port(efx, tnl);
2470 }
2471 
2472 static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
2473 {
2474 	struct efx_nic *efx = netdev_priv(dev);
2475 	struct efx_udp_tunnel tnl;
2476 	int efx_tunnel_type;
2477 
2478 	efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2479 	if (efx_tunnel_type < 0)
2480 		return;
2481 
2482 	tnl.type = (u16)efx_tunnel_type;
2483 	tnl.port = ti->port;
2484 
2485 	if (efx->type->udp_tnl_del_port)
2486 		(void)efx->type->udp_tnl_del_port(efx, tnl);
2487 }
2488 
2489 static const struct net_device_ops efx_netdev_ops = {
2490 	.ndo_open		= efx_net_open,
2491 	.ndo_stop		= efx_net_stop,
2492 	.ndo_get_stats64	= efx_net_stats,
2493 	.ndo_tx_timeout		= efx_watchdog,
2494 	.ndo_start_xmit		= efx_hard_start_xmit,
2495 	.ndo_validate_addr	= eth_validate_addr,
2496 	.ndo_do_ioctl		= efx_ioctl,
2497 	.ndo_change_mtu		= efx_change_mtu,
2498 	.ndo_set_mac_address	= efx_set_mac_address,
2499 	.ndo_set_rx_mode	= efx_set_rx_mode,
2500 	.ndo_set_features	= efx_set_features,
2501 	.ndo_vlan_rx_add_vid	= efx_vlan_rx_add_vid,
2502 	.ndo_vlan_rx_kill_vid	= efx_vlan_rx_kill_vid,
2503 #ifdef CONFIG_SFC_SRIOV
2504 	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
2505 	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
2506 	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
2507 	.ndo_get_vf_config	= efx_sriov_get_vf_config,
2508 	.ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
2509 #endif
2510 	.ndo_get_phys_port_id   = efx_get_phys_port_id,
2511 	.ndo_get_phys_port_name	= efx_get_phys_port_name,
2512 #ifdef CONFIG_NET_POLL_CONTROLLER
2513 	.ndo_poll_controller = efx_netpoll,
2514 #endif
2515 	.ndo_setup_tc		= efx_setup_tc,
2516 #ifdef CONFIG_RFS_ACCEL
2517 	.ndo_rx_flow_steer	= efx_filter_rfs,
2518 #endif
2519 	.ndo_udp_tunnel_add	= efx_udp_tunnel_add,
2520 	.ndo_udp_tunnel_del	= efx_udp_tunnel_del,
2521 };
2522 
2523 static void efx_update_name(struct efx_nic *efx)
2524 {
2525 	strcpy(efx->name, efx->net_dev->name);
2526 	efx_mtd_rename(efx);
2527 	efx_set_channel_names(efx);
2528 }
2529 
2530 static int efx_netdev_event(struct notifier_block *this,
2531 			    unsigned long event, void *ptr)
2532 {
2533 	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2534 
2535 	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
2536 	    event == NETDEV_CHANGENAME)
2537 		efx_update_name(netdev_priv(net_dev));
2538 
2539 	return NOTIFY_DONE;
2540 }
2541 
2542 static struct notifier_block efx_netdev_notifier = {
2543 	.notifier_call = efx_netdev_event,
2544 };
2545 
2546 static ssize_t
2547 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2548 {
2549 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2550 	return sprintf(buf, "%d\n", efx->phy_type);
2551 }
2552 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2553 
2554 #ifdef CONFIG_SFC_MCDI_LOGGING
2555 static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
2556 			     char *buf)
2557 {
2558 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2559 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2560 
2561 	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
2562 }
2563 static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
2564 			    const char *buf, size_t count)
2565 {
2566 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2567 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2568 	bool enable = count > 0 && *buf != '0';
2569 
2570 	mcdi->logging_enabled = enable;
2571 	return count;
2572 }
2573 static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
2574 #endif
2575 
2576 static int efx_register_netdev(struct efx_nic *efx)
2577 {
2578 	struct net_device *net_dev = efx->net_dev;
2579 	struct efx_channel *channel;
2580 	int rc;
2581 
2582 	net_dev->watchdog_timeo = 5 * HZ;
2583 	net_dev->irq = efx->pci_dev->irq;
2584 	net_dev->netdev_ops = &efx_netdev_ops;
2585 	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
2586 		net_dev->priv_flags |= IFF_UNICAST_FLT;
2587 	net_dev->ethtool_ops = &efx_ethtool_ops;
2588 	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2589 	net_dev->min_mtu = EFX_MIN_MTU;
2590 	net_dev->max_mtu = EFX_MAX_MTU;
2591 
2592 	rtnl_lock();
2593 
2594 	/* Enable resets to be scheduled and check whether any were
2595 	 * already requested.  If so, the NIC is probably hosed so we
2596 	 * abort.
2597 	 */
2598 	efx->state = STATE_READY;
2599 	smp_mb(); /* ensure we change state before checking reset_pending */
2600 	if (efx->reset_pending) {
2601 		netif_err(efx, probe, efx->net_dev,
2602 			  "aborting probe due to scheduled reset\n");
2603 		rc = -EIO;
2604 		goto fail_locked;
2605 	}
2606 
2607 	rc = dev_alloc_name(net_dev, net_dev->name);
2608 	if (rc < 0)
2609 		goto fail_locked;
2610 	efx_update_name(efx);
2611 
2612 	/* Always start with carrier off; PHY events will detect the link */
2613 	netif_carrier_off(net_dev);
2614 
2615 	rc = register_netdevice(net_dev);
2616 	if (rc)
2617 		goto fail_locked;
2618 
2619 	efx_for_each_channel(channel, efx) {
2620 		struct efx_tx_queue *tx_queue;
2621 		efx_for_each_channel_tx_queue(tx_queue, channel)
2622 			efx_init_tx_queue_core_txq(tx_queue);
2623 	}
2624 
2625 	efx_associate(efx);
2626 
2627 	rtnl_unlock();
2628 
2629 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2630 	if (rc) {
2631 		netif_err(efx, drv, efx->net_dev,
2632 			  "failed to init net dev attributes\n");
2633 		goto fail_registered;
2634 	}
2635 #ifdef CONFIG_SFC_MCDI_LOGGING
2636 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2637 	if (rc) {
2638 		netif_err(efx, drv, efx->net_dev,
2639 			  "failed to init net dev attributes\n");
2640 		goto fail_attr_mcdi_logging;
2641 	}
2642 #endif
2643 
2644 	return 0;
2645 
2646 #ifdef CONFIG_SFC_MCDI_LOGGING
2647 fail_attr_mcdi_logging:
2648 	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2649 #endif
2650 fail_registered:
2651 	rtnl_lock();
2652 	efx_dissociate(efx);
2653 	unregister_netdevice(net_dev);
2654 fail_locked:
2655 	efx->state = STATE_UNINIT;
2656 	rtnl_unlock();
2657 	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2658 	return rc;
2659 }
2660 
2661 static void efx_unregister_netdev(struct efx_nic *efx)
2662 {
2663 	if (!efx->net_dev)
2664 		return;
2665 
2666 	BUG_ON(netdev_priv(efx->net_dev) != efx);
2667 
2668 	if (efx_dev_registered(efx)) {
2669 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2670 #ifdef CONFIG_SFC_MCDI_LOGGING
2671 		device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2672 #endif
2673 		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2674 		unregister_netdev(efx->net_dev);
2675 	}
2676 }
2677 
2678 /**************************************************************************
2679  *
2680  * Device reset and suspend
2681  *
2682  **************************************************************************/
2683 
2684 /* Tears down the entire software state and most of the hardware state
2685  * before reset.  */
2686 void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2687 {
2688 	EFX_ASSERT_RESET_SERIALISED(efx);
2689 
2690 	if (method == RESET_TYPE_MCDI_TIMEOUT)
2691 		efx->type->prepare_flr(efx);
2692 
2693 	efx_stop_all(efx);
2694 	efx_disable_interrupts(efx);
2695 
2696 	mutex_lock(&efx->mac_lock);
2697 	down_write(&efx->filter_sem);
2698 	mutex_lock(&efx->rss_lock);
2699 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2700 	    method != RESET_TYPE_DATAPATH)
2701 		efx->phy_op->fini(efx);
2702 	efx->type->fini(efx);
2703 }
2704 
2705 /* This function will always ensure that the locks acquired in
2706  * efx_reset_down() are released. A failure return code indicates
2707  * that we were unable to reinitialise the hardware, and the
2708  * driver should be disabled. If ok is false, then the RX and TX
2709  * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2710 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2711 {
2712 	int rc;
2713 
2714 	EFX_ASSERT_RESET_SERIALISED(efx);
2715 
2716 	if (method == RESET_TYPE_MCDI_TIMEOUT)
2717 		efx->type->finish_flr(efx);
2718 
2719 	/* Ensure that SRAM is initialised even if we're disabling the device */
2720 	rc = efx->type->init(efx);
2721 	if (rc) {
2722 		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2723 		goto fail;
2724 	}
2725 
2726 	if (!ok)
2727 		goto fail;
2728 
2729 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2730 	    method != RESET_TYPE_DATAPATH) {
2731 		rc = efx->phy_op->init(efx);
2732 		if (rc)
2733 			goto fail;
2734 		rc = efx->phy_op->reconfigure(efx);
2735 		if (rc && rc != -EPERM)
2736 			netif_err(efx, drv, efx->net_dev,
2737 				  "could not restore PHY settings\n");
2738 	}
2739 
2740 	rc = efx_enable_interrupts(efx);
2741 	if (rc)
2742 		goto fail;
2743 
2744 #ifdef CONFIG_SFC_SRIOV
2745 	rc = efx->type->vswitching_restore(efx);
2746 	if (rc) /* not fatal; the PF will still work fine */
2747 		netif_warn(efx, probe, efx->net_dev,
2748 			   "failed to restore vswitching rc=%d; VFs may not function\n", rc);
2750 #endif
2751 
2752 	if (efx->type->rx_restore_rss_contexts)
2753 		efx->type->rx_restore_rss_contexts(efx);
2754 	mutex_unlock(&efx->rss_lock);
2755 	efx->type->filter_table_restore(efx);
2756 	up_write(&efx->filter_sem);
2757 	if (efx->type->sriov_reset)
2758 		efx->type->sriov_reset(efx);
2759 
2760 	mutex_unlock(&efx->mac_lock);
2761 
2762 	efx_start_all(efx);
2763 
2764 	if (efx->type->udp_tnl_push_ports)
2765 		efx->type->udp_tnl_push_ports(efx);
2766 
2767 	return 0;
2768 
2769 fail:
2770 	efx->port_initialized = false;
2771 
2772 	mutex_unlock(&efx->rss_lock);
2773 	up_write(&efx->filter_sem);
2774 	mutex_unlock(&efx->mac_lock);
2775 
2776 	return rc;
2777 }
2778 
2779 /* Reset the NIC using the specified method.  Note that the reset may
2780  * fail, in which case the card will be left in an unusable state.
2781  *
2782  * Caller must hold the rtnl_lock.
2783  */
2784 int efx_reset(struct efx_nic *efx, enum reset_type method)
2785 {
2786 	int rc, rc2;
2787 	bool disabled;
2788 
2789 	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2790 		   RESET_TYPE(method));
2791 
2792 	efx_device_detach_sync(efx);
2793 	efx_reset_down(efx, method);
2794 
2795 	rc = efx->type->reset(efx, method);
2796 	if (rc) {
2797 		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2798 		goto out;
2799 	}
2800 
2801 	/* Clear flags for the scopes we covered.  We assume the NIC and
2802 	 * driver are now quiescent so that there is no race here.
2803 	 */
2804 	if (method < RESET_TYPE_MAX_METHOD)
2805 		efx->reset_pending &= -(1 << (method + 1));
2806 	else /* it doesn't fit into the well-ordered scope hierarchy */
2807 		__clear_bit(method, &efx->reset_pending);
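
	/* The mask arithmetic above: -(1 << (method + 1)) equals
	 * ~((1 << (method + 1)) - 1), i.e. all bits above 'method' set.
	 * For method = 2 that is ...11111000, which clears the pending
	 * bits for scopes 0-2 that this reset subsumed while leaving any
	 * wider requests queued.
	 */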
2808 
2809 	/* Reinitialise bus-mastering, which may have been turned off before
2810 	 * the reset was scheduled. This is still appropriate, even in the
2811 	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
2812 	 * hardware can respond to requests. */
2813 	pci_set_master(efx->pci_dev);
2814 
2815 out:
2816 	/* Leave device stopped if necessary */
2817 	disabled = rc ||
2818 		method == RESET_TYPE_DISABLE ||
2819 		method == RESET_TYPE_RECOVER_OR_DISABLE;
2820 	rc2 = efx_reset_up(efx, method, !disabled);
2821 	if (rc2) {
2822 		disabled = true;
2823 		if (!rc)
2824 			rc = rc2;
2825 	}
2826 
2827 	if (disabled) {
2828 		dev_close(efx->net_dev);
2829 		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2830 		efx->state = STATE_DISABLED;
2831 	} else {
2832 		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2833 		efx_device_attach_if_not_resetting(efx);
2834 	}
2835 	return rc;
2836 }
2837 
2838 /* Try recovery mechanisms.
2839  * For now only EEH is supported.
2840  * Returns 0 if the recovery mechanisms are unsuccessful.
2841  * Returns a non-zero value otherwise.
2842  */
2843 int efx_try_recovery(struct efx_nic *efx)
2844 {
2845 #ifdef CONFIG_EEH
2846 	/* A PCI error can occur and not be seen by EEH because nothing
2847 	 * happens on the PCI bus. In this case the driver may fail and
2848 	 * schedule a 'recover or reset', leading to this recovery handler.
2849 	 * Manually call the eeh failure check function.
2850 	 */
2851 	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2852 	if (eeh_dev_check_failure(eehdev)) {
2853 		/* The EEH mechanisms will handle the error and reset the
2854 		 * device if necessary.
2855 		 */
2856 		return 1;
2857 	}
2858 #endif
2859 	return 0;
2860 }
2861 
2862 static void efx_wait_for_bist_end(struct efx_nic *efx)
2863 {
2864 	int i;
2865 
2866 	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
2867 		if (efx_mcdi_poll_reboot(efx))
2868 			goto out;
2869 		msleep(BIST_WAIT_DELAY_MS);
2870 	}
2871 
2872 	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
2873 out:
2874 	/* Either way unset the BIST flag. If we found no reboot we probably
2875 	 * won't recover, but we should try.
2876 	 */
2877 	efx->mc_bist_for_other_fn = false;
2878 }
2879 
2880 /* The worker thread exists so that code that cannot sleep can
2881  * schedule a reset for later.
2882  */
2883 static void efx_reset_work(struct work_struct *data)
2884 {
2885 	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2886 	unsigned long pending;
2887 	enum reset_type method;
2888 
2889 	pending = READ_ONCE(efx->reset_pending);
2890 	method = fls(pending) - 1;
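	/* fls() gives the 1-based position of the highest set bit, so
	 * e.g. pending = 0b0101 yields method = 2: the widest scope
	 * currently requested.  If pending is 0, method is -1; it then
	 * matches none of the checks below and the !pending test
	 * returns early.
	 */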
2891 
2892 	if (method == RESET_TYPE_MC_BIST)
2893 		efx_wait_for_bist_end(efx);
2894 
2895 	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2896 	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2897 	    efx_try_recovery(efx))
2898 		return;
2899 
2900 	if (!pending)
2901 		return;
2902 
2903 	rtnl_lock();
2904 
2905 	/* We checked the state in efx_schedule_reset() but it may
2906 	 * have changed by now.  Now that we have the RTNL lock,
2907 	 * it cannot change again.
2908 	 */
2909 	if (efx->state == STATE_READY)
2910 		(void)efx_reset(efx, method);
2911 
2912 	rtnl_unlock();
2913 }
2914 
2915 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2916 {
2917 	enum reset_type method;
2918 
2919 	if (efx->state == STATE_RECOVERY) {
2920 		netif_dbg(efx, drv, efx->net_dev,
2921 			  "recovering: skip scheduling %s reset\n",
2922 			  RESET_TYPE(type));
2923 		return;
2924 	}
2925 
2926 	switch (type) {
2927 	case RESET_TYPE_INVISIBLE:
2928 	case RESET_TYPE_ALL:
2929 	case RESET_TYPE_RECOVER_OR_ALL:
2930 	case RESET_TYPE_WORLD:
2931 	case RESET_TYPE_DISABLE:
2932 	case RESET_TYPE_RECOVER_OR_DISABLE:
2933 	case RESET_TYPE_DATAPATH:
2934 	case RESET_TYPE_MC_BIST:
2935 	case RESET_TYPE_MCDI_TIMEOUT:
2936 		method = type;
2937 		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2938 			  RESET_TYPE(method));
2939 		break;
2940 	default:
2941 		method = efx->type->map_reset_reason(type);
2942 		netif_dbg(efx, drv, efx->net_dev,
2943 			  "scheduling %s reset for %s\n",
2944 			  RESET_TYPE(method), RESET_TYPE(type));
2945 		break;
2946 	}
2947 
2948 	set_bit(method, &efx->reset_pending);
2949 	smp_mb(); /* ensure we change reset_pending before checking state */
2950 
2951 	/* If we're not READY then just leave the flags set as the cue
2952 	 * to abort probing or reschedule the reset later.
2953 	 */
2954 	if (READ_ONCE(efx->state) != STATE_READY)
2955 		return;
2956 
2957 	/* efx_process_channel() will no longer read events once a
2958 	 * reset is scheduled, so switch back to polled MCDI completions. */
2959 	efx_mcdi_mode_poll(efx);
2960 
2961 	queue_work(reset_workqueue, &efx->reset_work);
2962 }
2963 
2964 /**************************************************************************
2965  *
2966  * List of NICs we support
2967  *
2968  **************************************************************************/
2969 
2970 /* PCI device ID table */
2971 static const struct pci_device_id efx_pci_table[] = {
2972 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
2973 	 .driver_data = (unsigned long) &siena_a0_nic_type},
2974 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
2975 	 .driver_data = (unsigned long) &siena_a0_nic_type},
2976 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
2977 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2978 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
2979 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2980 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
2981 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2982 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
2983 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2984 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
2985 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2986 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
2987 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2988 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
2989 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2990 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
2991 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2992 	{0}			/* end of list */
2993 };
2994 
2995 /**************************************************************************
2996  *
2997  * Dummy PHY/MAC operations
2998  *
2999  * Can be used for some unimplemented operations.  Needed so that
3000  * all function pointers are valid and do not have to be tested
3001  * before use.
3002  *
3003  **************************************************************************/
3004 int efx_port_dummy_op_int(struct efx_nic *efx)
3005 {
3006 	return 0;
3007 }
3008 void efx_port_dummy_op_void(struct efx_nic *efx) {}
3009 
3010 static bool efx_port_dummy_op_poll(struct efx_nic *efx)
3011 {
3012 	return false;
3013 }
3014 
3015 static const struct efx_phy_operations efx_dummy_phy_operations = {
3016 	.init		 = efx_port_dummy_op_int,
3017 	.reconfigure	 = efx_port_dummy_op_int,
3018 	.poll		 = efx_port_dummy_op_poll,
3019 	.fini		 = efx_port_dummy_op_void,
3020 };
3021 
3022 /**************************************************************************
3023  *
3024  * Data housekeeping
3025  *
3026  **************************************************************************/
3027 
3028 /* This zeroes out and then fills in the invariants in a struct
3029  * efx_nic (including all sub-structures).
3030  */
3031 static int efx_init_struct(struct efx_nic *efx,
3032 			   struct pci_dev *pci_dev, struct net_device *net_dev)
3033 {
3034 	int rc = -ENOMEM, i;
3035 
3036 	/* Initialise common structures */
3037 	INIT_LIST_HEAD(&efx->node);
3038 	INIT_LIST_HEAD(&efx->secondary_list);
3039 	spin_lock_init(&efx->biu_lock);
3040 #ifdef CONFIG_SFC_MTD
3041 	INIT_LIST_HEAD(&efx->mtd_list);
3042 #endif
3043 	INIT_WORK(&efx->reset_work, efx_reset_work);
3044 	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
3045 	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
3046 	efx->pci_dev = pci_dev;
3047 	efx->msg_enable = debug;
3048 	efx->state = STATE_UNINIT;
3049 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
3050 
3051 	efx->net_dev = net_dev;
3052 	efx->rx_prefix_size = efx->type->rx_prefix_size;
3053 	efx->rx_ip_align =
3054 		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
3055 	efx->rx_packet_hash_offset =
3056 		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
3057 	efx->rx_packet_ts_offset =
3058 		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
3059 	INIT_LIST_HEAD(&efx->rss_context.list);
3060 	mutex_init(&efx->rss_lock);
3061 	spin_lock_init(&efx->stats_lock);
3062 	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
3063 	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
3064 	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
3065 	mutex_init(&efx->mac_lock);
3066 #ifdef CONFIG_RFS_ACCEL
3067 	mutex_init(&efx->rps_mutex);
3068 	spin_lock_init(&efx->rps_hash_lock);
3069 	/* Failure to allocate is not fatal, but may degrade ARFS performance */
3070 	efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3071 				      sizeof(*efx->rps_hash_table), GFP_KERNEL);
3072 #endif
3073 	efx->phy_op = &efx_dummy_phy_operations;
3074 	efx->mdio.dev = net_dev;
3075 	INIT_WORK(&efx->mac_work, efx_mac_work);
3076 	init_waitqueue_head(&efx->flush_wq);
3077 
3078 	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
3079 		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
3080 		if (!efx->channel[i])
3081 			goto fail;
3082 		efx->msi_context[i].efx = efx;
3083 		efx->msi_context[i].index = i;
3084 	}
3085 
3086 	/* Higher numbered interrupt modes are less capable! */
3087 	if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
3088 			 efx->type->min_interrupt_mode)) {
3089 		rc = -EIO;
3090 		goto fail;
3091 	}
3092 	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
3093 				  interrupt_mode);
3094 	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
3095 				  efx->interrupt_mode);
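
	/* Clamp example (assuming MSI-X is the lowest-numbered, most
	 * capable mode, as the comment above implies): for a NIC with
	 * max_interrupt_mode = MSI and min_interrupt_mode = legacy, a
	 * module-parameter request for MSI-X is raised to MSI by max(),
	 * while a request numerically above legacy would be lowered to
	 * legacy by min().
	 */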
3096 
3097 	/* Would be good to use the net_dev name, but we're too early */
3098 	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
3099 		 pci_name(pci_dev));
3100 	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
3101 	if (!efx->workqueue)
3102 		goto fail;
3103 
3104 	return 0;
3105 
3106 fail:
3107 	efx_fini_struct(efx);
3108 	return rc;
3109 }
3110 
3111 static void efx_fini_struct(struct efx_nic *efx)
3112 {
3113 	int i;
3114 
3115 #ifdef CONFIG_RFS_ACCEL
3116 	kfree(efx->rps_hash_table);
3117 #endif
3118 
3119 	for (i = 0; i < EFX_MAX_CHANNELS; i++)
3120 		kfree(efx->channel[i]);
3121 
3122 	kfree(efx->vpd_sn);
3123 
3124 	if (efx->workqueue) {
3125 		destroy_workqueue(efx->workqueue);
3126 		efx->workqueue = NULL;
3127 	}
3128 }
3129 
3130 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3131 {
3132 	u64 n_rx_nodesc_trunc = 0;
3133 	struct efx_channel *channel;
3134 
3135 	efx_for_each_channel(channel, efx)
3136 		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
3137 	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
3138 	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3139 }
3140 
3141 bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3142 			   const struct efx_filter_spec *right)
3143 {
3144 	if ((left->match_flags ^ right->match_flags) |
3145 	    ((left->flags ^ right->flags) &
3146 	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3147 		return false;
3148 
3149 	return memcmp(&left->outer_vid, &right->outer_vid,
3150 		      sizeof(struct efx_filter_spec) -
3151 		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
3152 }
3153 
3154 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3155 {
3156 	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3157 	return jhash2((const u32 *)&spec->outer_vid,
3158 		      (sizeof(struct efx_filter_spec) -
3159 		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
3160 		      0);
3161 }
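
/* Note that efx_filter_spec_equal() and efx_filter_spec_hash() both
 * deliberately cover only the tail of the spec, from outer_vid onwards;
 * driver-internal fields laid out before it do not contribute to a
 * filter's identity (equal() checks match_flags and the RX/TX flags
 * separately).  The BUILD_BUG_ON above enforces the 4-byte alignment
 * that jhash2() relies on when it consumes the tail as u32 words.
 */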
3162 
3163 #ifdef CONFIG_RFS_ACCEL
3164 bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3165 			bool *force)
3166 {
3167 	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3168 		/* ARFS is currently updating this entry, leave it */
3169 		return false;
3170 	}
3171 	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3172 		/* ARFS tried and failed to update this, so it's probably out
3173 		 * of date.  Remove the filter and the ARFS rule entry.
3174 		 */
3175 		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3176 		*force = true;
3177 		return true;
3178 	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3179 		/* ARFS has moved on, so old filter is not needed.  Since we did
3180 		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3181 		 * not be removed by efx_rps_hash_del() subsequently.
3182 		 */
3183 		*force = true;
3184 		return true;
3185 	}
3186 	/* Remove it iff ARFS wants to. */
3187 	return true;
3188 }
3189 
3190 static
3191 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3192 				       const struct efx_filter_spec *spec)
3193 {
3194 	u32 hash = efx_filter_spec_hash(spec);
3195 
3196 	WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
3197 	if (!efx->rps_hash_table)
3198 		return NULL;
3199 	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3200 }
3201 
3202 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3203 					const struct efx_filter_spec *spec)
3204 {
3205 	struct efx_arfs_rule *rule;
3206 	struct hlist_head *head;
3207 	struct hlist_node *node;
3208 
3209 	head = efx_rps_hash_bucket(efx, spec);
3210 	if (!head)
3211 		return NULL;
3212 	hlist_for_each(node, head) {
3213 		rule = container_of(node, struct efx_arfs_rule, node);
3214 		if (efx_filter_spec_equal(spec, &rule->spec))
3215 			return rule;
3216 	}
3217 	return NULL;
3218 }
3219 
3220 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3221 				       const struct efx_filter_spec *spec,
3222 				       bool *new)
3223 {
3224 	struct efx_arfs_rule *rule;
3225 	struct hlist_head *head;
3226 	struct hlist_node *node;
3227 
3228 	head = efx_rps_hash_bucket(efx, spec);
3229 	if (!head)
3230 		return NULL;
3231 	hlist_for_each(node, head) {
3232 		rule = container_of(node, struct efx_arfs_rule, node);
3233 		if (efx_filter_spec_equal(spec, &rule->spec)) {
3234 			*new = false;
3235 			return rule;
3236 		}
3237 	}
3238 	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3239 	*new = true;
3240 	if (rule) {
3241 		memcpy(&rule->spec, spec, sizeof(rule->spec));
3242 		hlist_add_head(&rule->node, head);
3243 	}
3244 	return rule;
3245 }
3246 
3247 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3248 {
3249 	struct efx_arfs_rule *rule;
3250 	struct hlist_head *head;
3251 	struct hlist_node *node;
3252 
3253 	head = efx_rps_hash_bucket(efx, spec);
3254 	if (WARN_ON(!head))
3255 		return;
3256 	hlist_for_each(node, head) {
3257 		rule = container_of(node, struct efx_arfs_rule, node);
3258 		if (efx_filter_spec_equal(spec, &rule->spec)) {
3259 			/* Someone already reused the entry.  We know that if
3260 			 * this check doesn't fire (i.e. filter_id == REMOVING)
3261 			 * then the REMOVING mark was put there by our caller,
3262 			 * because the caller holds a lock on the filter table
3263 			 * and only holders of that lock set REMOVING.
3264 			 */
3265 			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3266 				return;
3267 			hlist_del(node);
3268 			kfree(rule);
3269 			return;
3270 		}
3271 	}
3272 	/* We didn't find it. */
3273 	WARN_ON(1);
3274 }
3275 #endif
3276 
3277 /* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
3278  * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
3279  */
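/* Allocation example: if the list currently holds user_ids 1, 2 and 4,
 * the scan below stops at the entry with user_id 4 (the first gap), the
 * new context gets user_id 3, and list_add_tail(&new->list, &ctx->list)
 * inserts it just before that entry, keeping the list sorted by id.
 */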
3280 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
3281 {
3282 	struct list_head *head = &efx->rss_context.list;
3283 	struct efx_rss_context *ctx, *new;
3284 	u32 id = 1; /* Don't use zero, that refers to the master RSS context */
3285 
3286 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
3287 
3288 	/* Search for first gap in the numbering */
3289 	list_for_each_entry(ctx, head, list) {
3290 		if (ctx->user_id != id)
3291 			break;
3292 		id++;
3293 		/* Check for wrap.  If this happens, we have nearly 2^32
3294 		 * allocated RSS contexts, which seems unlikely.
3295 		 */
3296 		if (WARN_ON_ONCE(!id))
3297 			return NULL;
3298 	}
3299 
3300 	/* Create the new entry */
3301 	new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
3302 	if (!new)
3303 		return NULL;
3304 	new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3305 	new->rx_hash_udp_4tuple = false;
3306 
3307 	/* Insert the new entry into the gap */
3308 	new->user_id = id;
3309 	list_add_tail(&new->list, &ctx->list);
3310 	return new;
3311 }
3312 
3313 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
3314 {
3315 	struct list_head *head = &efx->rss_context.list;
3316 	struct efx_rss_context *ctx;
3317 
3318 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
3319 
3320 	list_for_each_entry(ctx, head, list)
3321 		if (ctx->user_id == id)
3322 			return ctx;
3323 	return NULL;
3324 }
3325 
3326 void efx_free_rss_context_entry(struct efx_rss_context *ctx)
3327 {
3328 	list_del(&ctx->list);
3329 	kfree(ctx);
3330 }
3331 
3332 /**************************************************************************
3333  *
3334  * PCI interface
3335  *
3336  **************************************************************************/
3337 
3338 /* Main body of final NIC shutdown code
3339  * This is called only at module unload (or hotplug removal).
3340  */
3341 static void efx_pci_remove_main(struct efx_nic *efx)
3342 {
3343 	/* Flush reset_work. It can no longer be scheduled since we
3344 	 * are not READY.
3345 	 */
3346 	BUG_ON(efx->state == STATE_READY);
3347 	cancel_work_sync(&efx->reset_work);
3348 
3349 	efx_disable_interrupts(efx);
3350 	efx_clear_interrupt_affinity(efx);
3351 	efx_nic_fini_interrupt(efx);
3352 	efx_fini_port(efx);
3353 	efx->type->fini(efx);
3354 	efx_fini_napi(efx);
3355 	efx_remove_all(efx);
3356 }
3357 
3358 /* Final NIC shutdown
3359  * This is called only at module unload (or hotplug removal).  A PF can call
3360  * this on its VFs to ensure they are unbound first.
3361  */
3362 static void efx_pci_remove(struct pci_dev *pci_dev)
3363 {
3364 	struct efx_nic *efx;
3365 
3366 	efx = pci_get_drvdata(pci_dev);
3367 	if (!efx)
3368 		return;
3369 
3370 	/* Mark the NIC as uninitialised, then stop the interface */
3371 	rtnl_lock();
3372 	efx_dissociate(efx);
3373 	dev_close(efx->net_dev);
3374 	efx_disable_interrupts(efx);
3375 	efx->state = STATE_UNINIT;
3376 	rtnl_unlock();
3377 
3378 	if (efx->type->sriov_fini)
3379 		efx->type->sriov_fini(efx);
3380 
3381 	efx_unregister_netdev(efx);
3382 
3383 	efx_mtd_remove(efx);
3384 
3385 	efx_pci_remove_main(efx);
3386 
3387 	efx_fini_io(efx);
3388 	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
3389 
3390 	efx_fini_struct(efx);
3391 	free_netdev(efx->net_dev);
3392 
3393 	pci_disable_pcie_error_reporting(pci_dev);
3394 };
3395 
3396 /* NIC VPD information
3397  * Called during probe to display the part number of the
3398  * installed NIC.  VPD is potentially very large but this should
3399  * always appear within the first 512 bytes.
3400  */
3401 #define SFC_VPD_LEN 512
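/* A sketch of the VPD layout this function walks (contents are
 * illustrative, not from real hardware):
 *
 *	[0x90 VPD-R tag][2-byte len]
 *	  "PN" [len] "SFN7122F"      - part number
 *	  "SN" [len] "123456789012"  - serial number
 *	  ...
 *
 * pci_vpd_find_tag() locates the read-only section and
 * pci_vpd_find_info_keyword() finds the "PN"/"SN" fields within it.
 */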
3402 static void efx_probe_vpd_strings(struct efx_nic *efx)
3403 {
3404 	struct pci_dev *dev = efx->pci_dev;
3405 	char vpd_data[SFC_VPD_LEN];
3406 	ssize_t vpd_size;
3407 	int ro_start, ro_size, i, j;
3408 
3409 	/* Get the vpd data from the device */
3410 	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
3411 	if (vpd_size <= 0) {
3412 		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
3413 		return;
3414 	}
3415 
3416 	/* Get the Read only section */
3417 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
3418 	if (ro_start < 0) {
3419 		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
3420 		return;
3421 	}
3422 
3423 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
3424 	j = ro_size;
3425 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3426 	if (i + j > vpd_size)
3427 		j = vpd_size - i;
3428 
3429 	/* Get the Part number */
3430 	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
3431 	if (i < 0) {
3432 		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
3433 		return;
3434 	}
3435 
3436 	j = pci_vpd_info_field_size(&vpd_data[i]);
3437 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
3438 	if (i + j > vpd_size) {
3439 		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
3440 		return;
3441 	}
3442 
3443 	netif_info(efx, drv, efx->net_dev,
3444 		   "Part Number : %.*s\n", j, &vpd_data[i]);
3445 
3446 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3447 	j = ro_size;
3448 	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
3449 	if (i < 0) {
3450 		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
3451 		return;
3452 	}
3453 
3454 	j = pci_vpd_info_field_size(&vpd_data[i]);
3455 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
3456 	if (i + j > vpd_size) {
3457 		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
3458 		return;
3459 	}
3460 
3461 	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
3462 	if (!efx->vpd_sn)
3463 		return;
3464 
3465 	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
3466 }
3467 
3468 
3469 /* Main body of NIC initialisation
3470  * This is called at module load (or hotplug insertion, theoretically).
3471  */
3472 static int efx_pci_probe_main(struct efx_nic *efx)
3473 {
3474 	int rc;
3475 
3476 	/* Do start-of-day initialisation */
3477 	rc = efx_probe_all(efx);
3478 	if (rc)
3479 		goto fail1;
3480 
3481 	efx_init_napi(efx);
3482 
3483 	down_write(&efx->filter_sem);
3484 	rc = efx->type->init(efx);
3485 	up_write(&efx->filter_sem);
3486 	if (rc) {
3487 		netif_err(efx, probe, efx->net_dev,
3488 			  "failed to initialise NIC\n");
3489 		goto fail3;
3490 	}
3491 
3492 	rc = efx_init_port(efx);
3493 	if (rc) {
3494 		netif_err(efx, probe, efx->net_dev,
3495 			  "failed to initialise port\n");
3496 		goto fail4;
3497 	}
3498 
3499 	rc = efx_nic_init_interrupt(efx);
3500 	if (rc)
3501 		goto fail5;
3502 
3503 	efx_set_interrupt_affinity(efx);
3504 	rc = efx_enable_interrupts(efx);
3505 	if (rc)
3506 		goto fail6;
3507 
3508 	return 0;
3509 
3510  fail6:
3511 	efx_clear_interrupt_affinity(efx);
3512 	efx_nic_fini_interrupt(efx);
3513  fail5:
3514 	efx_fini_port(efx);
3515  fail4:
3516 	efx->type->fini(efx);
3517  fail3:
3518 	efx_fini_napi(efx);
3519 	efx_remove_all(efx);
3520  fail1:
3521 	return rc;
3522 }
3523 
3524 static int efx_pci_probe_post_io(struct efx_nic *efx)
3525 {
3526 	struct net_device *net_dev = efx->net_dev;
3527 	int rc = efx_pci_probe_main(efx);
3528 
3529 	if (rc)
3530 		return rc;
3531 
3532 	if (efx->type->sriov_init) {
3533 		rc = efx->type->sriov_init(efx);
3534 		if (rc)
3535 			netif_err(efx, probe, efx->net_dev,
3536 				  "SR-IOV can't be enabled rc %d\n", rc);
3537 	}
3538 
3539 	/* Determine netdevice features */
3540 	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
3541 			      NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
3542 	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
3543 		net_dev->features |= NETIF_F_TSO6;
3544 	/* Check whether device supports TSO */
3545 	if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
3546 		net_dev->features &= ~NETIF_F_ALL_TSO;
3547 	/* Mask for features that also apply to VLAN devices */
3548 	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
3549 				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
3550 				   NETIF_F_RXCSUM);
3551 
3552 	net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
3553 
3554 	/* Disable receiving frames with bad FCS, by default. */
3555 	net_dev->features &= ~NETIF_F_RXALL;
3556 
3557 	/* Disable VLAN filtering by default.  It may be enforced if
3558 	 * the feature is fixed (i.e. VLAN filters are required to
3559 	 * receive VLAN tagged packets due to vPort restrictions).
3560 	 */
3561 	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3562 	net_dev->features |= efx->fixed_features;
3563 
3564 	rc = efx_register_netdev(efx);
3565 	if (!rc)
3566 		return 0;
3567 
3568 	efx_pci_remove_main(efx);
3569 	return rc;
3570 }
3571 
3572 /* NIC initialisation
3573  *
3574  * This is called at module load (or hotplug insertion,
3575  * theoretically).  It sets up PCI mappings, resets the NIC,
3576  * sets up and registers the network devices with the kernel and hooks
3577  * the interrupt service routine.  It does not prepare the device for
3578  * transmission; this is left to the first time one of the network
3579  * interfaces is brought up (i.e. efx_net_open).
3580  */
3581 static int efx_pci_probe(struct pci_dev *pci_dev,
3582 			 const struct pci_device_id *entry)
3583 {
3584 	struct net_device *net_dev;
3585 	struct efx_nic *efx;
3586 	int rc;
3587 
3588 	/* Allocate and initialise a struct net_device and struct efx_nic */
3589 	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
3590 				     EFX_MAX_RX_QUEUES);
3591 	if (!net_dev)
3592 		return -ENOMEM;
3593 	efx = netdev_priv(net_dev);
3594 	efx->type = (const struct efx_nic_type *) entry->driver_data;
3595 	efx->fixed_features |= NETIF_F_HIGHDMA;
3596 
3597 	pci_set_drvdata(pci_dev, efx);
3598 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
3599 	rc = efx_init_struct(efx, pci_dev, net_dev);
3600 	if (rc)
3601 		goto fail1;
3602 
3603 	netif_info(efx, probe, efx->net_dev,
3604 		   "Solarflare NIC detected\n");
3605 
3606 	if (!efx->type->is_vf)
3607 		efx_probe_vpd_strings(efx);
3608 
3609 	/* Set up basic I/O (BAR mappings etc) */
3610 	rc = efx_init_io(efx);
3611 	if (rc)
3612 		goto fail2;
3613 
3614 	rc = efx_pci_probe_post_io(efx);
3615 	if (rc) {
3616 		/* On failure, retry once immediately.
3617 		 * If we aborted probe due to a scheduled reset, dismiss it.
3618 		 */
3619 		efx->reset_pending = 0;
3620 		rc = efx_pci_probe_post_io(efx);
3621 		if (rc) {
3622 			/* On another failure, retry once more
3623 			 * after a 50-305ms delay.
3624 			 */
3625 			unsigned char r;
3626 
3627 			get_random_bytes(&r, 1);
3628 			msleep((unsigned int)r + 50);
3629 			efx->reset_pending = 0;
3630 			rc = efx_pci_probe_post_io(efx);
3631 		}
3632 	}
3633 	if (rc)
3634 		goto fail3;
3635 
3636 	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
3637 
3638 	/* Try to create MTDs, but allow this to fail */
3639 	rtnl_lock();
3640 	rc = efx_mtd_probe(efx);
3641 	rtnl_unlock();
3642 	if (rc && rc != -EPERM)
3643 		netif_warn(efx, probe, efx->net_dev,
3644 			   "failed to create MTDs (%d)\n", rc);
3645 
3646 	rc = pci_enable_pcie_error_reporting(pci_dev);
3647 	if (rc && rc != -EINVAL)
3648 		netif_notice(efx, probe, efx->net_dev,
3649 			     "PCIe error reporting unavailable (%d).\n",
3650 			     rc);
3651 
3652 	if (efx->type->udp_tnl_push_ports)
3653 		efx->type->udp_tnl_push_ports(efx);
3654 
3655 	return 0;
3656 
3657  fail3:
3658 	efx_fini_io(efx);
3659  fail2:
3660 	efx_fini_struct(efx);
3661  fail1:
3662 	WARN_ON(rc > 0);
3663 	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
3664 	free_netdev(net_dev);
3665 	return rc;
3666 }
3667 
3668 /* efx_pci_sriov_configure returns the actual number of Virtual Functions
3669  * enabled on success
3670  */
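/* A usage sketch (via the standard PCI sysfs interface; <BDF> is a
 * placeholder for the device's bus address):
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 * The PCI core routes such writes to this callback with num_vfs == 4.
 */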
3671 #ifdef CONFIG_SFC_SRIOV
3672 static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
3673 {
3674 	int rc;
3675 	struct efx_nic *efx = pci_get_drvdata(dev);
3676 
3677 	if (!efx->type->sriov_configure)
3678 		return -EOPNOTSUPP;
3679 
3680 	rc = efx->type->sriov_configure(efx, num_vfs);
3681 	if (rc)
3682 		return rc;
3683 
3684 	return num_vfs;
3685 }
3686 #endif
3687 
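/* PM freeze callback: detach the device and quiesce the datapath so no
 * DMA is in flight while a hibernation image is written.
 */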
3688 static int efx_pm_freeze(struct device *dev)
3689 {
3690 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3691 
3692 	rtnl_lock();
3693 
3694 	if (efx->state != STATE_DISABLED) {
3695 		efx->state = STATE_UNINIT;
3696 
3697 		efx_device_detach_sync(efx);
3698 
3699 		efx_stop_all(efx);
3700 		efx_disable_interrupts(efx);
3701 	}
3702 
3703 	rtnl_unlock();
3704 
3705 	return 0;
3706 }
3707 
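/* PM thaw callback: the inverse of efx_pm_freeze(). Re-enable interrupts,
 * reconfigure the PHY and restart the datapath.
 */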
3708 static int efx_pm_thaw(struct device *dev)
3709 {
3710 	int rc;
3711 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3712 
3713 	rtnl_lock();
3714 
3715 	if (efx->state != STATE_DISABLED) {
3716 		rc = efx_enable_interrupts(efx);
3717 		if (rc)
3718 			goto fail;
3719 
3720 		mutex_lock(&efx->mac_lock);
3721 		efx->phy_op->reconfigure(efx);
3722 		mutex_unlock(&efx->mac_lock);
3723 
3724 		efx_start_all(efx);
3725 
3726 		efx_device_attach_if_not_resetting(efx);
3727 
3728 		efx->state = STATE_READY;
3729 
3730 		efx->type->resume_wol(efx);
3731 	}
3732 
3733 	rtnl_unlock();
3734 
3735 	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
3736 	queue_work(reset_workqueue, &efx->reset_work);
3737 
3738 	return 0;
3739 
3740 fail:
3741 	rtnl_unlock();
3742 
3743 	return rc;
3744 }
3745 
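/* PM poweroff callback: shut the NIC down and put the function into
 * D3hot before power is removed.
 */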
3746 static int efx_pm_poweroff(struct device *dev)
3747 {
3748 	struct pci_dev *pci_dev = to_pci_dev(dev);
3749 	struct efx_nic *efx = pci_get_drvdata(pci_dev);
3750 
3751 	efx->type->fini(efx);
3752 
3753 	efx->reset_pending = 0;
3754 
3755 	pci_save_state(pci_dev);
3756 	return pci_set_power_state(pci_dev, PCI_D3hot);
3757 }
3758 
3759 /* Used for both resume and restore */
3760 static int efx_pm_resume(struct device *dev)
3761 {
3762 	struct pci_dev *pci_dev = to_pci_dev(dev);
3763 	struct efx_nic *efx = pci_get_drvdata(pci_dev);
3764 	int rc;
3765 
3766 	rc = pci_set_power_state(pci_dev, PCI_D0);
3767 	if (rc)
3768 		return rc;
3769 	pci_restore_state(pci_dev);
3770 	rc = pci_enable_device(pci_dev);
3771 	if (rc)
3772 		return rc;
3773 	pci_set_master(efx->pci_dev);
3774 	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3775 	if (rc)
3776 		return rc;
3777 	down_write(&efx->filter_sem);
3778 	rc = efx->type->init(efx);
3779 	up_write(&efx->filter_sem);
3780 	if (rc)
3781 		return rc;
3782 	rc = efx_pm_thaw(dev);
3783 	return rc;
3784 }
3785 
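/* PM suspend callback (S3 entry): freeze the datapath, then power the
 * device off; on failure, resume so the device is left usable.
 */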
3786 static int efx_pm_suspend(struct device *dev)
3787 {
3788 	int rc;
3789 
3790 	efx_pm_freeze(dev);
3791 	rc = efx_pm_poweroff(dev);
3792 	if (rc)
3793 		efx_pm_resume(dev);
3794 	return rc;
3795 }
3796 
3797 static const struct dev_pm_ops efx_pm_ops = {
3798 	.suspend	= efx_pm_suspend,
3799 	.resume		= efx_pm_resume,
3800 	.freeze		= efx_pm_freeze,
3801 	.thaw		= efx_pm_thaw,
3802 	.poweroff	= efx_pm_poweroff,
3803 	.restore	= efx_pm_resume,
3804 };
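/* suspend/resume cover S3; freeze/thaw cover hibernation's snapshot
 * phase; restore reuses efx_pm_resume() since returning from an image
 * requires the same full reinitialisation as resume.
 */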
3805 
3806 /* A PCI error affecting this device was detected.
3807  * At this point MMIO and DMA may be disabled.
3808  * Stop the software path and request a slot reset.
3809  */
3810 static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
3811 					      enum pci_channel_state state)
3812 {
3813 	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3814 	struct efx_nic *efx = pci_get_drvdata(pdev);
3815 
3816 	if (state == pci_channel_io_perm_failure)
3817 		return PCI_ERS_RESULT_DISCONNECT;
3818 
3819 	rtnl_lock();
3820 
3821 	if (efx->state != STATE_DISABLED) {
3822 		efx->state = STATE_RECOVERY;
3823 		efx->reset_pending = 0;
3824 
3825 		efx_device_detach_sync(efx);
3826 
3827 		efx_stop_all(efx);
3828 		efx_disable_interrupts(efx);
3829 
3830 		status = PCI_ERS_RESULT_NEED_RESET;
3831 	} else {
3832 		/* If the interface is disabled we don't want to do anything
3833 		 * with it.
3834 		 */
3835 		status = PCI_ERS_RESULT_RECOVERED;
3836 	}
3837 
3838 	rtnl_unlock();
3839 
3840 	pci_disable_device(pdev);
3841 
3842 	return status;
3843 }
3844 
3845 /* Fake a successful reset, which will be performed later in efx_io_resume. */
3846 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
3847 {
3848 	struct efx_nic *efx = pci_get_drvdata(pdev);
3849 	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3850 	int rc;
3851 
3852 	if (pci_enable_device(pdev)) {
3853 		netif_err(efx, hw, efx->net_dev,
3854 			  "Cannot re-enable PCI device after reset.\n");
3855 		status = PCI_ERS_RESULT_DISCONNECT;
3856 	}
3857 
3858 	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3859 	if (rc) {
3860 		netif_err(efx, hw, efx->net_dev,
3861 			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3862 		/* Non-fatal error. Continue. */
3863 	}
3864 
3865 	return status;
3866 }
3867 
3868 /* Perform the actual reset and resume I/O operations. */
3869 static void efx_io_resume(struct pci_dev *pdev)
3870 {
3871 	struct efx_nic *efx = pci_get_drvdata(pdev);
3872 	int rc;
3873 
3874 	rtnl_lock();
3875 
3876 	if (efx->state == STATE_DISABLED)
3877 		goto out;
3878 
3879 	rc = efx_reset(efx, RESET_TYPE_ALL);
3880 	if (rc) {
3881 		netif_err(efx, hw, efx->net_dev,
3882 			  "efx_reset failed after PCI error (%d)\n", rc);
3883 	} else {
3884 		efx->state = STATE_READY;
3885 		netif_dbg(efx, hw, efx->net_dev,
3886 			  "Done resetting and resuming IO after PCI error.\n");
3887 	}
3888 
3889 out:
3890 	rtnl_unlock();
3891 }
3892 
3893 /* For simplicity and reliability, we always require a slot reset and try to
3894  * reset the hardware when a pci error affecting the device is detected.
3895  * We leave both the link_reset and mmio_enabled callbacks unimplemented:
3896  * with our request for slot reset the mmio_enabled callback will never be
3897  * called, and the link_reset callback is not used by AER or EEH mechanisms.
3898  */
3899 static const struct pci_error_handlers efx_err_handlers = {
3900 	.error_detected = efx_io_error_detected,
3901 	.slot_reset	= efx_io_slot_reset,
3902 	.resume		= efx_io_resume,
3903 };
3904 
3905 static struct pci_driver efx_pci_driver = {
3906 	.name		= KBUILD_MODNAME,
3907 	.id_table	= efx_pci_table,
3908 	.probe		= efx_pci_probe,
3909 	.remove		= efx_pci_remove,
3910 	.driver.pm	= &efx_pm_ops,
3911 	.err_handler	= &efx_err_handlers,
3912 #ifdef CONFIG_SFC_SRIOV
3913 	.sriov_configure = efx_pci_sriov_configure,
3914 #endif
3915 };
3916 
3917 /**************************************************************************
3918  *
3919  * Kernel module interface
3920  *
3921  *************************************************************************/
3922 
3923 module_param(interrupt_mode, uint, 0444);
3924 MODULE_PARM_DESC(interrupt_mode,
3925 		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
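/* Usage sketch (assuming the module loads under its usual "sfc" name):
 *   modprobe sfc interrupt_mode=2
 * would request legacy line interrupts instead of MSI-X or MSI.
 */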
3926 
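/* Module entry point: register the netdev notifier, SR-IOV state and the
 * reset workqueue before the PCI driver, so that probe can rely on them.
 */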
3927 static int __init efx_init_module(void)
3928 {
3929 	int rc;
3930 
3931 	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
3932 
3933 	rc = register_netdevice_notifier(&efx_netdev_notifier);
3934 	if (rc)
3935 		goto err_notifier;
3936 
3937 #ifdef CONFIG_SFC_SRIOV
3938 	rc = efx_init_sriov();
3939 	if (rc)
3940 		goto err_sriov;
3941 #endif
3942 
3943 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3944 	if (!reset_workqueue) {
3945 		rc = -ENOMEM;
3946 		goto err_reset;
3947 	}
3948 
3949 	rc = pci_register_driver(&efx_pci_driver);
3950 	if (rc < 0)
3951 		goto err_pci;
3952 
3953 	return 0;
3954 
3955  err_pci:
3956 	destroy_workqueue(reset_workqueue);
3957  err_reset:
3958 #ifdef CONFIG_SFC_SRIOV
3959 	efx_fini_sriov();
3960  err_sriov:
3961 #endif
3962 	unregister_netdevice_notifier(&efx_netdev_notifier);
3963  err_notifier:
3964 	return rc;
3965 }
3966 
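/* Module exit point: tear everything down in the reverse order of
 * efx_init_module().
 */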
3967 static void __exit efx_exit_module(void)
3968 {
3969 	printk(KERN_INFO "Solarflare NET driver unloading\n");
3970 
3971 	pci_unregister_driver(&efx_pci_driver);
3972 	destroy_workqueue(reset_workqueue);
3973 #ifdef CONFIG_SFC_SRIOV
3974 	efx_fini_sriov();
3975 #endif
3976 	unregister_netdevice_notifier(&efx_netdev_notifier);
3978 }
3979 
3980 module_init(efx_init_module);
3981 module_exit(efx_exit_module);
3982 
3983 MODULE_AUTHOR("Solarflare Communications and "
3984 	      "Michael Brown <mbrown@fensystems.co.uk>");
3985 MODULE_DESCRIPTION("Solarflare network driver");
3986 MODULE_LICENSE("GPL");
3987 MODULE_DEVICE_TABLE(pci, efx_pci_table);
3988 MODULE_VERSION(EFX_DRIVER_VERSION);
3989