xref: /openbmc/linux/drivers/net/ethernet/sfc/falcon/efx.c (revision 4e1a33b1)
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"

#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
const char *const ef4_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
const char *const ef4_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
	[RESET_TYPE_ALL]                = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]              = "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]           = "DATAPATH",
	[RESET_TYPE_DISABLE]            = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
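
/* i.e. give up if the BIST has not completed within
 * BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 10 seconds.
 */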

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool ef4_separate_tx_channels;
module_param(ef4_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(ef4_separate_tx_channels,
		 "Use separate channels for TX and RX");
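
/* For example (assuming this file is built into the sfc-falcon module):
 *   modprobe sfc-falcon ef4_separate_tx_channels=1
 * The 0444 permission exposes the value read-only under
 * /sys/module/<module>/parameters/ after load.
 */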

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int ef4_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors, with 1.2 usec
 * being roughly the wire time of one full-sized frame at 10Gb/s) is
 *   512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
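
/* Assuming the standard NETIF_MSG_* bit values from <linux/netdevice.h>,
 * the default above works out as 0x1|0x2|0x4|0x10|0x20|0x40|0x80|0x2000
 * = 0x20f7, so "debug=0x20f7" on the module command line reproduces it.
 */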

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
static void ef4_remove_channel(struct ef4_channel *channel);
static void ef4_remove_channels(struct ef4_nic *efx);
static const struct ef4_channel_type ef4_default_channel_type;
static void ef4_remove_port(struct ef4_nic *efx);
static void ef4_init_napi_channel(struct ef4_channel *channel);
static void ef4_fini_napi(struct ef4_nic *efx);
static void ef4_fini_napi_channel(struct ef4_channel *channel);
static void ef4_fini_struct(struct ef4_nic *efx);
static void ef4_start_all(struct ef4_nic *efx);
static void ef4_stop_all(struct ef4_nic *efx);

#define EF4_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int ef4_check_disabled(struct ef4_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int ef4_process_channel(struct ef4_channel *channel, int budget)
{
	struct ef4_tx_queue *tx_queue;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = ef4_nic_process_eventq(channel, budget);
	if (spent && ef4_channel_has_rx_queue(channel)) {
		struct ef4_rx_queue *rx_queue =
			ef4_channel_get_rx_queue(channel);

		ef4_rx_flush_packet(channel);
		ef4_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	return spent;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by ef4_process_channel().
 */
static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

static int ef4_poll(struct napi_struct *napi, int budget)
{
	struct ef4_channel *channel =
		container_of(napi, struct ef4_channel, napi_str);
	struct ef4_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = ef4_process_channel(channel, budget);

	if (spent < budget) {
		if (ef4_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			ef4_update_irq_mod(efx, channel);
		}

		ef4_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since ef4_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete_done(napi, spent);
		ef4_nic_eventq_read_ack(channel);
	}

	return spent;
}
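
/* Per the usual NAPI contract, returning the full budget from ef4_poll()
 * tells the core to poll this channel again, while returning less (after
 * napi_complete_done()) lets the channel's interrupt be re-armed by
 * ef4_nic_eventq_read_ack().
 */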

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int ef4_probe_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;

	return ef4_nic_probe_eventq(channel);
}
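
/* Worked example, assuming the default 1024-entry RX and TX rings:
 * 1024 + 1024 + 128 = 2176 rounds up to 4096 event queue entries,
 * giving eventq_mask = 4095.
 */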

/* Prepare channel's event queue */
static int ef4_init_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	int rc;

	EF4_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = ef4_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void ef4_start_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	ef4_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void ef4_stop_eventq(struct ef4_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void ef4_fini_eventq(struct ef4_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	ef4_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void ef4_remove_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	ef4_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct ef4_channel *
ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &ef4_default_channel_type;

	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EF4_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}
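
/* The numbering above gives each channel a contiguous block of hardware TX
 * queues: e.g. assuming EF4_TXQ_TYPES is 4, channel 2 owns TX queues 8-11.
 */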

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct ef4_channel *
ef4_copy_channel(const struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int ef4_probe_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = ef4_probe_eventq(channel);
	if (rc)
		goto fail;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		rc = ef4_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	ef4_for_each_channel_rx_queue(rx_queue, channel) {
		rc = ef4_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	ef4_remove_channel(channel);
	return rc;
}

static void
ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
{
	struct ef4_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
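
/* With combined channels this yields names like "eth0-0", "eth0-1", ...;
 * with ef4_separate_tx_channels set it yields "eth0-rx-0", "eth0-tx-0",
 * etc., each series numbered from zero.
 */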

static void ef4_set_channel_names(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int ef4_probe_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	ef4_for_each_channel_rev(channel, efx) {
		rc = ef4_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	ef4_set_channel_names(efx);

	return 0;

fail:
	ef4_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void ef4_start_datapath(struct ef4_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	struct ef4_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct ef4_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
				       EF4_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	ef4_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely.  We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			ef4_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			ef4_stop_eventq(channel);
			ef4_fast_push_rx_descriptors(rx_queue, false);
			ef4_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void ef4_stop_datapath(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Stop RX refill */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	ef4_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (ef4_channel_has_rx_queue(channel)) {
			ef4_stop_eventq(channel);
			ef4_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc && EF4_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		ef4_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			ef4_fini_rx_queue(rx_queue);
		ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
			ef4_fini_tx_queue(tx_queue);
	}
}

static void ef4_remove_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	ef4_for_each_channel_rx_queue(rx_queue, channel)
		ef4_remove_rx_queue(rx_queue);
	ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
		ef4_remove_tx_queue(tx_queue);
	ef4_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void ef4_remove_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_remove_channel(channel);
}

int
ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	ef4_for_each_channel(channel, efx) {
		struct ef4_rx_queue *rx_queue;
		struct ef4_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		ef4_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	ef4_device_detach_sync(efx);
	ef4_stop_all(efx);
	ef4_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = ef4_probe_channel(channel);
		if (rc)
			goto rollback;
		ef4_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			ef4_fini_napi_channel(channel);
			ef4_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = ef4_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		ef4_start_all(efx);
		netif_device_attach(efx->net_dev);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct ef4_channel_type ef4_default_channel_type = {
	.pre_probe		= ef4_channel_dummy_op_int,
	.post_remove		= ef4_channel_dummy_op_void,
	.get_name		= ef4_get_channel_name,
	.copy			= ef4_copy_channel,
	.keep_eventq		= false,
};

int ef4_channel_dummy_op_int(struct ef4_channel *channel)
{
	return 0;
}

void ef4_channel_dummy_op_void(struct ef4_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, which in turn keeps the
 * port's TX queue stopped while the link is down.
 */
void ef4_link_status_changed(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
		else
			efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EF4_FC_TX;
	}
}

void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EF4_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EF4_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}
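
/* The set/xor logic in the two functions above implements the standard
 * 802.3 pause advertisement encoding in both directions:
 *
 *   wanted_fc               advertised bits
 *   EF4_FC_RX | EF4_FC_TX   Pause
 *   EF4_FC_RX only          Pause | Asym_Pause
 *   EF4_FC_TX only          Asym_Pause
 *   neither                 (none)
 */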

static void ef4_fini_port(struct ef4_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void ef4_mac_reconfigure(struct ef4_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
 * through ef4_monitor().
 *
 * Callers must hold the mac_lock
 */
int __ef4_reconfigure_port(struct ef4_nic *efx)
{
	enum ef4_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int ef4_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void ef4_mac_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int ef4_probe_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int ef4_init_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	ef4_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void ef4_start_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	ef4_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again.  This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void ef4_stop_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against ef4_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	ef4_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void ef4_fini_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	ef4_link_status_changed(efx);
}

static void ef4_remove_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(ef4_primary_list);
static LIST_HEAD(ef4_unassociated_list);

static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
{
	return left->type == right->type &&
		left->vpd_sn && right->vpd_sn &&
		!strcmp(left->vpd_sn, right->vpd_sn);
}

static void ef4_associate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &ef4_primary_list);

		list_for_each_entry_safe(other, next, &ef4_unassociated_list,
					 node) {
			if (ef4_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &ef4_primary_list, node) {
			if (ef4_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &ef4_unassociated_list);
	}
}

static void ef4_dissociate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &ef4_unassociated_list);
		other->primary = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
static int ef4_init_io(struct ef4_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar;

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
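
/* With, say, a 46-bit max_dma_mask, the loop above attempts 46-, 45-, ...,
 * 32-bit masks in turn; once the mask would drop to 31 bits the loop exits
 * and the last return code decides success or failure.
 */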

static void ef4_fini_io(struct ef4_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar;
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	return count;
}
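
/* Counting one CPU per sibling mask means hyperthreads do not inflate the
 * result: on a 4-core/8-thread package, for example, this returns 4 unless
 * overridden with the rss_cpus module parameter.
 */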

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int ef4_probe_interrupts(struct ef4_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
		struct msix_entry xentries[EF4_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = ef4_wanted_parallelism(efx);
		if (ef4_separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EF4_INT_MODE_MSI;
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (ef4_separate_tx_channels) {
				efx->n_tx_channels = min(max(n_channels / 2,
							     1U),
							 efx->max_tx_channels);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = min(n_channels,
							 efx->max_tx_channels);
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				ef4_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			efx->interrupt_mode = EF4_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			ef4_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	efx->rss_spread = efx->n_rx_channels;

	return 0;
}

static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	ef4_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = ef4_init_eventq(channel);
			if (rc)
				goto fail;
		}
		ef4_start_eventq(channel);
	}

	return 0;
fail:
	end_channel = channel;
	ef4_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		ef4_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	return rc;
}

static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	ef4_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		ef4_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}
}

static int ef4_enable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	ef4_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = ef4_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = ef4_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	ef4_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

static void ef4_disable_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_soft_disable_interrupts(efx);

	ef4_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			ef4_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void ef4_remove_interrupts(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	ef4_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void ef4_set_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	efx->tx_channel_offset =
		ef4_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	ef4_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		ef4_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EF4_TXQ_TYPES);
	}
}
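
/* Example of the renumbering, assuming EF4_TXQ_TYPES is 4: with 8 channels
 * of which the last 4 are TX-only, tx_channel_offset is 4, so channel 4's
 * hardware TX queues 16-19 are renumbered 0-3 from the stack's viewpoint.
 */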
1616 
1617 static int ef4_probe_nic(struct ef4_nic *efx)
1618 {
1619 	int rc;
1620 
1621 	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1622 
1623 	/* Carry out hardware-type specific initialisation */
1624 	rc = efx->type->probe(efx);
1625 	if (rc)
1626 		return rc;
1627 
1628 	do {
1629 		if (!efx->max_channels || !efx->max_tx_channels) {
1630 			netif_err(efx, drv, efx->net_dev,
1631 				  "Insufficient resources to allocate"
1632 				  " any channels\n");
1633 			rc = -ENOSPC;
1634 			goto fail1;
1635 		}
1636 
1637 		/* Determine the number of channels and queues by trying
1638 		 * to hook in MSI-X interrupts.
1639 		 */
1640 		rc = ef4_probe_interrupts(efx);
1641 		if (rc)
1642 			goto fail1;
1643 
1644 		ef4_set_channels(efx);
1645 
1646 		/* dimension_resources can fail with EAGAIN */
1647 		rc = efx->type->dimension_resources(efx);
1648 		if (rc != 0 && rc != -EAGAIN)
1649 			goto fail2;
1650 
1651 		if (rc == -EAGAIN)
1652 			/* try again with new max_channels */
1653 			ef4_remove_interrupts(efx);
1654 
1655 	} while (rc == -EAGAIN);
1656 
1657 	if (efx->n_channels > 1)
1658 		netdev_rss_key_fill(&efx->rx_hash_key,
1659 				    sizeof(efx->rx_hash_key));
1660 	ef4_set_default_rx_indir_table(efx);
1661 
1662 	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1663 	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1664 
1665 	/* Initialise the interrupt moderation settings */
1666 	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1667 	ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1668 				true);
1669 
1670 	return 0;
1671 
1672 fail2:
1673 	ef4_remove_interrupts(efx);
1674 fail1:
1675 	efx->type->remove(efx);
1676 	return rc;
1677 }
1678 
1679 static void ef4_remove_nic(struct ef4_nic *efx)
1680 {
1681 	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1682 
1683 	ef4_remove_interrupts(efx);
1684 	efx->type->remove(efx);
1685 }
1686 
1687 static int ef4_probe_filters(struct ef4_nic *efx)
1688 {
1689 	int rc;
1690 
1691 	spin_lock_init(&efx->filter_lock);
1692 	init_rwsem(&efx->filter_sem);
1693 	mutex_lock(&efx->mac_lock);
1694 	down_write(&efx->filter_sem);
1695 	rc = efx->type->filter_table_probe(efx);
1696 	if (rc)
1697 		goto out_unlock;
1698 
1699 #ifdef CONFIG_RFS_ACCEL
1700 	if (efx->type->offload_features & NETIF_F_NTUPLE) {
1701 		struct ef4_channel *channel;
1702 		int i, success = 1;
1703 
1704 		ef4_for_each_channel(channel, efx) {
1705 			channel->rps_flow_id =
1706 				kcalloc(efx->type->max_rx_ip_filters,
1707 					sizeof(*channel->rps_flow_id),
1708 					GFP_KERNEL);
1709 			if (!channel->rps_flow_id)
1710 				success = 0;
1711 			else
1712 				for (i = 0;
1713 				     i < efx->type->max_rx_ip_filters;
1714 				     ++i)
1715 					channel->rps_flow_id[i] =
1716 						RPS_FLOW_ID_INVALID;
1717 		}
1718 
1719 		if (!success) {
1720 			ef4_for_each_channel(channel, efx)
1721 				kfree(channel->rps_flow_id);
1722 			efx->type->filter_table_remove(efx);
1723 			rc = -ENOMEM;
1724 			goto out_unlock;
1725 		}
1726 
1727 		efx->rps_expire_index = efx->rps_expire_channel = 0;
1728 	}
1729 #endif
1730 out_unlock:
1731 	up_write(&efx->filter_sem);
1732 	mutex_unlock(&efx->mac_lock);
1733 	return rc;
1734 }
1735 
1736 static void ef4_remove_filters(struct ef4_nic *efx)
1737 {
1738 #ifdef CONFIG_RFS_ACCEL
1739 	struct ef4_channel *channel;
1740 
1741 	ef4_for_each_channel(channel, efx)
1742 		kfree(channel->rps_flow_id);
1743 #endif
1744 	down_write(&efx->filter_sem);
1745 	efx->type->filter_table_remove(efx);
1746 	up_write(&efx->filter_sem);
1747 }
1748 
1749 static void ef4_restore_filters(struct ef4_nic *efx)
1750 {
1751 	down_read(&efx->filter_sem);
1752 	efx->type->filter_table_restore(efx);
1753 	up_read(&efx->filter_sem);
1754 }
1755 
1756 /**************************************************************************
1757  *
1758  * NIC startup/shutdown
1759  *
1760  *************************************************************************/
1761 
1762 static int ef4_probe_all(struct ef4_nic *efx)
1763 {
1764 	int rc;
1765 
1766 	rc = ef4_probe_nic(efx);
1767 	if (rc) {
1768 		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1769 		goto fail1;
1770 	}
1771 
1772 	rc = ef4_probe_port(efx);
1773 	if (rc) {
1774 		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1775 		goto fail2;
1776 	}
1777 
1778 	BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
1779 	if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
1780 		rc = -EINVAL;
1781 		goto fail3;
1782 	}
1783 	efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;
1784 
1785 	rc = ef4_probe_filters(efx);
1786 	if (rc) {
1787 		netif_err(efx, probe, efx->net_dev,
1788 			  "failed to create filter tables\n");
1789 		goto fail4;
1790 	}
1791 
1792 	rc = ef4_probe_channels(efx);
1793 	if (rc)
1794 		goto fail5;
1795 
1796 	return 0;
1797 
1798  fail5:
1799 	ef4_remove_filters(efx);
1800  fail4:
1801  fail3:
1802 	ef4_remove_port(efx);
1803  fail2:
1804 	ef4_remove_nic(efx);
1805  fail1:
1806 	return rc;
1807 }
1808 
1809 /* If the interface is supposed to be running but is not, start
1810  * the hardware and software data path, regular activity for the port
1811  * (MAC statistics, link polling, etc.) and schedule the port to be
1812  * reconfigured.  Interrupts must already be enabled.  This function
1813  * is safe to call multiple times, so long as the NIC is not disabled.
1814  * Requires the RTNL lock.
1815  */
1816 static void ef4_start_all(struct ef4_nic *efx)
1817 {
1818 	EF4_ASSERT_RESET_SERIALISED(efx);
1819 	BUG_ON(efx->state == STATE_DISABLED);
1820 
1821 	/* Check that it is appropriate to restart the interface. All
1822 	 * of these flags are safe to read under just the rtnl lock */
1823 	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1824 	    efx->reset_pending)
1825 		return;
1826 
1827 	ef4_start_port(efx);
1828 	ef4_start_datapath(efx);
1829 
1830 	/* Start the hardware monitor if there is one */
1831 	if (efx->type->monitor != NULL)
1832 		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1833 				   ef4_monitor_interval);
1834 
1835 	efx->type->start_stats(efx);
1836 	efx->type->pull_stats(efx);
1837 	spin_lock_bh(&efx->stats_lock);
1838 	efx->type->update_stats(efx, NULL, NULL);
1839 	spin_unlock_bh(&efx->stats_lock);
1840 }
1841 
1842 /* Quiesce the hardware and software data path, and regular activity
1843  * for the port without bringing the link down.  Safe to call multiple
1844  * times with the NIC in almost any state, but interrupts should be
1845  * enabled.  Requires the RTNL lock.
1846  */
1847 static void ef4_stop_all(struct ef4_nic *efx)
1848 {
1849 	EF4_ASSERT_RESET_SERIALISED(efx);
1850 
1851 	/* port_enabled can be read safely under the rtnl lock */
1852 	if (!efx->port_enabled)
1853 		return;
1854 
1855 	/* update stats before we go down so we can accurately count
1856 	 * rx_nodesc_drops
1857 	 */
1858 	efx->type->pull_stats(efx);
1859 	spin_lock_bh(&efx->stats_lock);
1860 	efx->type->update_stats(efx, NULL, NULL);
1861 	spin_unlock_bh(&efx->stats_lock);
1862 	efx->type->stop_stats(efx);
1863 	ef4_stop_port(efx);
1864 
1865 	/* Stop the kernel transmit interface.  This is only valid if
1866 	 * the device is stopped or detached; otherwise the watchdog
1867 	 * may fire immediately.
1868 	 */
1869 	WARN_ON(netif_running(efx->net_dev) &&
1870 		netif_device_present(efx->net_dev));
1871 	netif_tx_disable(efx->net_dev);
1872 
1873 	ef4_stop_datapath(efx);
1874 }
1875 
1876 static void ef4_remove_all(struct ef4_nic *efx)
1877 {
1878 	ef4_remove_channels(efx);
1879 	ef4_remove_filters(efx);
1880 	ef4_remove_port(efx);
1881 	ef4_remove_nic(efx);
1882 }
1883 
1884 /**************************************************************************
1885  *
1886  * Interrupt moderation
1887  *
1888  **************************************************************************/
1889 unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
1890 {
1891 	if (usecs == 0)
1892 		return 0;
1893 	if (usecs * 1000 < efx->timer_quantum_ns)
1894 		return 1; /* never round down to 0 */
1895 	return usecs * 1000 / efx->timer_quantum_ns;
1896 }
1897 
1898 unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
1899 {
1900 	/* We must round up when converting ticks to microseconds
1901 	 * because we round down when converting the other way.
1902 	 */
1903 	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
1904 }
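/* Worked example of the two helpers above, with an illustrative quantum
 * (timer_quantum_ns is set per-NIC; 5000 ns here is only for arithmetic,
 * not a Falcon constant):
 *   ef4_usecs_to_ticks(efx, 10) = 10000 / 5000 = 2 ticks
 *   ef4_ticks_to_usecs(efx, 2)  = DIV_ROUND_UP(2 * 5000, 1000) = 10 us
 * Rounding down one way and up the other keeps a get/set round trip
 * (e.g. repeated ethtool -c/-C cycles) from drifting the value.
 */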
1905 
1906 /* Set interrupt moderation parameters */
1907 int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
1908 			    unsigned int rx_usecs, bool rx_adaptive,
1909 			    bool rx_may_override_tx)
1910 {
1911 	struct ef4_channel *channel;
1912 	unsigned int timer_max_us;
1913 
1914 	EF4_ASSERT_RESET_SERIALISED(efx);
1915 
1916 	timer_max_us = efx->timer_max_ns / 1000;
1917 
1918 	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
1919 		return -EINVAL;
1920 
1921 	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
1922 	    !rx_may_override_tx) {
1923 		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
1924 			  "RX and TX IRQ moderation must be equal\n");
1925 		return -EINVAL;
1926 	}
1927 
1928 	efx->irq_rx_adaptive = rx_adaptive;
1929 	efx->irq_rx_moderation_us = rx_usecs;
1930 	ef4_for_each_channel(channel, efx) {
1931 		if (ef4_channel_has_rx_queue(channel))
1932 			channel->irq_moderation_us = rx_usecs;
1933 		else if (ef4_channel_has_tx_queues(channel))
1934 			channel->irq_moderation_us = tx_usecs;
1935 	}
1936 
1937 	return 0;
1938 }
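/* Illustrative use (assumed caller; the ethtool coalesce hooks end up
 * here): ef4_init_irq_moderation(efx, 150, 60, true, false) requests
 * 150 us TX / 60 us adaptive RX moderation, and is rejected with
 * -EINVAL on a NIC whose channels are shared (tx_channel_offset == 0),
 * since rx_may_override_tx is false and the two values differ.
 */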
1939 
1940 void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
1941 			    unsigned int *rx_usecs, bool *rx_adaptive)
1942 {
1943 	*rx_adaptive = efx->irq_rx_adaptive;
1944 	*rx_usecs = efx->irq_rx_moderation_us;
1945 
1946 	/* If channels are shared between RX and TX, so is IRQ
1947 	 * moderation.  Otherwise, IRQ moderation is the same for all
1948 	 * TX channels and is not adaptive.
1949 	 */
1950 	if (efx->tx_channel_offset == 0) {
1951 		*tx_usecs = *rx_usecs;
1952 	} else {
1953 		struct ef4_channel *tx_channel;
1954 
1955 		tx_channel = efx->channel[efx->tx_channel_offset];
1956 		*tx_usecs = tx_channel->irq_moderation_us;
1957 	}
1958 }
1959 
1960 /**************************************************************************
1961  *
1962  * Hardware monitor
1963  *
1964  **************************************************************************/
1965 
1966 /* Run periodically off the general workqueue */
1967 static void ef4_monitor(struct work_struct *data)
1968 {
1969 	struct ef4_nic *efx = container_of(data, struct ef4_nic,
1970 					   monitor_work.work);
1971 
1972 	netif_vdbg(efx, timer, efx->net_dev,
1973 		   "hardware monitor executing on CPU %d\n",
1974 		   raw_smp_processor_id());
1975 	BUG_ON(efx->type->monitor == NULL);
1976 
1977 	/* If the mac_lock is already held then it is likely a port
1978 	 * reconfiguration is already in place, which will likely do
1979 	 * most of the work of monitor() anyway. */
1980 	if (mutex_trylock(&efx->mac_lock)) {
1981 		if (efx->port_enabled)
1982 			efx->type->monitor(efx);
1983 		mutex_unlock(&efx->mac_lock);
1984 	}
1985 
1986 	queue_delayed_work(efx->workqueue, &efx->monitor_work,
1987 			   ef4_monitor_interval);
1988 }
1989 
1990 /**************************************************************************
1991  *
1992  * ioctls
1993  *
1994  *************************************************************************/
1995 
1996 /* Net device ioctl
1997  * Context: process, rtnl_lock() held.
1998  */
1999 static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
2000 {
2001 	struct ef4_nic *efx = netdev_priv(net_dev);
2002 	struct mii_ioctl_data *data = if_mii(ifr);
2003 
2004 	/* Convert phy_id from older PRTAD/DEVAD format */
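	/* In the old format bit 10 (0x0400) is set and bit 15 is clear, so
	 * the XOR with (MDIO_PHY_ID_C45 | 0x0400) clears the old marker bit
	 * and sets MDIO_PHY_ID_C45, leaving the packed PRTAD/DEVAD bits
	 * unchanged for mdio_mii_ioctl().
	 */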
2005 	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
2006 	    (data->phy_id & 0xfc00) == 0x0400)
2007 		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
2008 
2009 	return mdio_mii_ioctl(&efx->mdio, data, cmd);
2010 }
2011 
2012 /**************************************************************************
2013  *
2014  * NAPI interface
2015  *
2016  **************************************************************************/
2017 
2018 static void ef4_init_napi_channel(struct ef4_channel *channel)
2019 {
2020 	struct ef4_nic *efx = channel->efx;
2021 
2022 	channel->napi_dev = efx->net_dev;
2023 	netif_napi_add(channel->napi_dev, &channel->napi_str,
2024 		       ef4_poll, napi_weight);
2025 }
2026 
2027 static void ef4_init_napi(struct ef4_nic *efx)
2028 {
2029 	struct ef4_channel *channel;
2030 
2031 	ef4_for_each_channel(channel, efx)
2032 		ef4_init_napi_channel(channel);
2033 }
2034 
2035 static void ef4_fini_napi_channel(struct ef4_channel *channel)
2036 {
2037 	if (channel->napi_dev)
2038 		netif_napi_del(&channel->napi_str);
2039 
2040 	channel->napi_dev = NULL;
2041 }
2042 
2043 static void ef4_fini_napi(struct ef4_nic *efx)
2044 {
2045 	struct ef4_channel *channel;
2046 
2047 	ef4_for_each_channel(channel, efx)
2048 		ef4_fini_napi_channel(channel);
2049 }
2050 
2051 /**************************************************************************
2052  *
2053  * Kernel netpoll interface
2054  *
2055  *************************************************************************/
2056 
2057 #ifdef CONFIG_NET_POLL_CONTROLLER
2058 
2059 /* Although in the common case interrupts will be disabled, this is not
2060  * guaranteed. However, all our work happens inside the NAPI callback,
2061  * so no locking is required.
2062  */
2063 static void ef4_netpoll(struct net_device *net_dev)
2064 {
2065 	struct ef4_nic *efx = netdev_priv(net_dev);
2066 	struct ef4_channel *channel;
2067 
2068 	ef4_for_each_channel(channel, efx)
2069 		ef4_schedule_channel(channel);
2070 }
2071 
2072 #endif
2073 
2074 /**************************************************************************
2075  *
2076  * Kernel net device interface
2077  *
2078  *************************************************************************/
2079 
2080 /* Context: process, rtnl_lock() held. */
2081 int ef4_net_open(struct net_device *net_dev)
2082 {
2083 	struct ef4_nic *efx = netdev_priv(net_dev);
2084 	int rc;
2085 
2086 	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2087 		  raw_smp_processor_id());
2088 
2089 	rc = ef4_check_disabled(efx);
2090 	if (rc)
2091 		return rc;
2092 	if (efx->phy_mode & PHY_MODE_SPECIAL)
2093 		return -EBUSY;
2094 
2095 	/* Notify the kernel of the link state polled during driver load,
2096 	 * before the monitor starts running */
2097 	ef4_link_status_changed(efx);
2098 
2099 	ef4_start_all(efx);
2100 	ef4_selftest_async_start(efx);
2101 	return 0;
2102 }
2103 
2104 /* Context: process, rtnl_lock() held.
2105  * Note that the kernel will ignore our return code; this method
2106  * should really return void.
2107  */
2108 int ef4_net_stop(struct net_device *net_dev)
2109 {
2110 	struct ef4_nic *efx = netdev_priv(net_dev);
2111 
2112 	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2113 		  raw_smp_processor_id());
2114 
2115 	/* Stop the device and flush all the channels */
2116 	ef4_stop_all(efx);
2117 
2118 	return 0;
2119 }
2120 
2121 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
2122 static void ef4_net_stats(struct net_device *net_dev,
2123 			  struct rtnl_link_stats64 *stats)
2124 {
2125 	struct ef4_nic *efx = netdev_priv(net_dev);
2126 
2127 	spin_lock_bh(&efx->stats_lock);
2128 	efx->type->update_stats(efx, NULL, stats);
2129 	spin_unlock_bh(&efx->stats_lock);
2130 }
2131 
2132 /* Context: netif_tx_lock held, BHs disabled. */
2133 static void ef4_watchdog(struct net_device *net_dev)
2134 {
2135 	struct ef4_nic *efx = netdev_priv(net_dev);
2136 
2137 	netif_err(efx, tx_err, efx->net_dev,
2138 		  "TX stuck with port_enabled=%d: resetting channels\n",
2139 		  efx->port_enabled);
2140 
2141 	ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2142 }
2143 
2144 
2145 /* Context: process, rtnl_lock() held. */
2146 static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
2147 {
2148 	struct ef4_nic *efx = netdev_priv(net_dev);
2149 	int rc;
2150 
2151 	rc = ef4_check_disabled(efx);
2152 	if (rc)
2153 		return rc;
2154 
2155 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2156 
2157 	ef4_device_detach_sync(efx);
2158 	ef4_stop_all(efx);
2159 
2160 	mutex_lock(&efx->mac_lock);
2161 	net_dev->mtu = new_mtu;
2162 	ef4_mac_reconfigure(efx);
2163 	mutex_unlock(&efx->mac_lock);
2164 
2165 	ef4_start_all(efx);
2166 	netif_device_attach(efx->net_dev);
2167 	return 0;
2168 }
2169 
2170 static int ef4_set_mac_address(struct net_device *net_dev, void *data)
2171 {
2172 	struct ef4_nic *efx = netdev_priv(net_dev);
2173 	struct sockaddr *addr = data;
2174 	u8 *new_addr = addr->sa_data;
2175 	u8 old_addr[ETH_ALEN];
2176 	int rc;
2177 
2178 	if (!is_valid_ether_addr(new_addr)) {
2179 		netif_err(efx, drv, efx->net_dev,
2180 			  "invalid ethernet MAC address requested: %pM\n",
2181 			  new_addr);
2182 		return -EADDRNOTAVAIL;
2183 	}
2184 
2185 	/* save old address */
2186 	ether_addr_copy(old_addr, net_dev->dev_addr);
2187 	ether_addr_copy(net_dev->dev_addr, new_addr);
2188 	if (efx->type->set_mac_address) {
2189 		rc = efx->type->set_mac_address(efx);
2190 		if (rc) {
2191 			ether_addr_copy(net_dev->dev_addr, old_addr);
2192 			return rc;
2193 		}
2194 	}
2195 
2196 	/* Reconfigure the MAC */
2197 	mutex_lock(&efx->mac_lock);
2198 	ef4_mac_reconfigure(efx);
2199 	mutex_unlock(&efx->mac_lock);
2200 
2201 	return 0;
2202 }
2203 
2204 /* Context: netif_addr_lock held, BHs disabled. */
2205 static void ef4_set_rx_mode(struct net_device *net_dev)
2206 {
2207 	struct ef4_nic *efx = netdev_priv(net_dev);
2208 
2209 	if (efx->port_enabled)
2210 		queue_work(efx->workqueue, &efx->mac_work);
2211 	/* Otherwise ef4_start_port() will do this */
2212 }
2213 
2214 static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
2215 {
2216 	struct ef4_nic *efx = netdev_priv(net_dev);
2217 	int rc;
2218 
2219 	/* If disabling RX n-tuple filtering, clear existing filters */
2220 	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2221 		rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
2222 		if (rc)
2223 			return rc;
2224 	}
2225 
2226 	/* If Rx VLAN filter is changed, update filters via mac_reconfigure */
2227 	if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
2228 		/* ef4_set_rx_mode() will schedule MAC work to update filters
2229 		 * when the new features are finally set in net_dev.
2230 		 */
2231 		ef4_set_rx_mode(net_dev);
2232 	}
2233 
2234 	return 0;
2235 }
2236 
2237 static const struct net_device_ops ef4_netdev_ops = {
2238 	.ndo_open		= ef4_net_open,
2239 	.ndo_stop		= ef4_net_stop,
2240 	.ndo_get_stats64	= ef4_net_stats,
2241 	.ndo_tx_timeout		= ef4_watchdog,
2242 	.ndo_start_xmit		= ef4_hard_start_xmit,
2243 	.ndo_validate_addr	= eth_validate_addr,
2244 	.ndo_do_ioctl		= ef4_ioctl,
2245 	.ndo_change_mtu		= ef4_change_mtu,
2246 	.ndo_set_mac_address	= ef4_set_mac_address,
2247 	.ndo_set_rx_mode	= ef4_set_rx_mode,
2248 	.ndo_set_features	= ef4_set_features,
2249 #ifdef CONFIG_NET_POLL_CONTROLLER
2250 	.ndo_poll_controller	= ef4_netpoll,
2251 #endif
2252 	.ndo_setup_tc		= ef4_setup_tc,
2253 #ifdef CONFIG_RFS_ACCEL
2254 	.ndo_rx_flow_steer	= ef4_filter_rfs,
2255 #endif
2256 };
2257 
2258 static void ef4_update_name(struct ef4_nic *efx)
2259 {
2260 	strcpy(efx->name, efx->net_dev->name);
2261 	ef4_mtd_rename(efx);
2262 	ef4_set_channel_names(efx);
2263 }
2264 
2265 static int ef4_netdev_event(struct notifier_block *this,
2266 			    unsigned long event, void *ptr)
2267 {
2268 	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2269 
2270 	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
2271 	    event == NETDEV_CHANGENAME)
2272 		ef4_update_name(netdev_priv(net_dev));
2273 
2274 	return NOTIFY_DONE;
2275 }
2276 
2277 static struct notifier_block ef4_netdev_notifier = {
2278 	.notifier_call = ef4_netdev_event,
2279 };
2280 
2281 static ssize_t
2282 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2283 {
2284 	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2285 	return sprintf(buf, "%d\n", efx->phy_type);
2286 }
2287 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2288 
2289 static int ef4_register_netdev(struct ef4_nic *efx)
2290 {
2291 	struct net_device *net_dev = efx->net_dev;
2292 	struct ef4_channel *channel;
2293 	int rc;
2294 
2295 	net_dev->watchdog_timeo = 5 * HZ;
2296 	net_dev->irq = efx->pci_dev->irq;
2297 	net_dev->netdev_ops = &ef4_netdev_ops;
2298 	net_dev->ethtool_ops = &ef4_ethtool_ops;
2299 	net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
2300 	net_dev->min_mtu = EF4_MIN_MTU;
2301 	net_dev->max_mtu = EF4_MAX_MTU;
2302 
2303 	rtnl_lock();
2304 
2305 	/* Enable resets to be scheduled and check whether any were
2306 	 * already requested.  If so, the NIC is probably hosed so we
2307 	 * abort.
2308 	 */
2309 	efx->state = STATE_READY;
2310 	smp_mb(); /* ensure we change state before checking reset_pending */
2311 	if (efx->reset_pending) {
2312 		netif_err(efx, probe, efx->net_dev,
2313 			  "aborting probe due to scheduled reset\n");
2314 		rc = -EIO;
2315 		goto fail_locked;
2316 	}
2317 
2318 	rc = dev_alloc_name(net_dev, net_dev->name);
2319 	if (rc < 0)
2320 		goto fail_locked;
2321 	ef4_update_name(efx);
2322 
2323 	/* Always start with carrier off; PHY events will detect the link */
2324 	netif_carrier_off(net_dev);
2325 
2326 	rc = register_netdevice(net_dev);
2327 	if (rc)
2328 		goto fail_locked;
2329 
2330 	ef4_for_each_channel(channel, efx) {
2331 		struct ef4_tx_queue *tx_queue;
2332 		ef4_for_each_channel_tx_queue(tx_queue, channel)
2333 			ef4_init_tx_queue_core_txq(tx_queue);
2334 	}
2335 
2336 	ef4_associate(efx);
2337 
2338 	rtnl_unlock();
2339 
2340 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2341 	if (rc) {
2342 		netif_err(efx, drv, efx->net_dev,
2343 			  "failed to init net dev attributes\n");
2344 		goto fail_registered;
2345 	}
2346 	return 0;
2347 
2348 fail_registered:
2349 	rtnl_lock();
2350 	ef4_dissociate(efx);
2351 	unregister_netdevice(net_dev);
2352 fail_locked:
2353 	efx->state = STATE_UNINIT;
2354 	rtnl_unlock();
2355 	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2356 	return rc;
2357 }
2358 
2359 static void ef4_unregister_netdev(struct ef4_nic *efx)
2360 {
2361 	if (!efx->net_dev)
2362 		return;
2363 
2364 	BUG_ON(netdev_priv(efx->net_dev) != efx);
2365 
2366 	if (ef4_dev_registered(efx)) {
2367 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2368 		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2369 		unregister_netdev(efx->net_dev);
2370 	}
2371 }
2372 
2373 /**************************************************************************
2374  *
2375  * Device reset and suspend
2376  *
2377  **************************************************************************/
2378 
2379 /* Tears down the entire software state and most of the hardware state
2380  * before reset.  */
2381 void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
2382 {
2383 	EF4_ASSERT_RESET_SERIALISED(efx);
2384 
2385 	ef4_stop_all(efx);
2386 	ef4_disable_interrupts(efx);
2387 
2388 	mutex_lock(&efx->mac_lock);
2389 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2390 	    method != RESET_TYPE_DATAPATH)
2391 		efx->phy_op->fini(efx);
2392 	efx->type->fini(efx);
2393 }
2394 
2395 /* This function will always ensure that the locks acquired in
2396  * ef4_reset_down() are released. A failure return code indicates
2397  * that we were unable to reinitialise the hardware, and the
2398  * driver should be disabled. If ok is false, then the rx and tx
2399  * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2400 int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
2401 {
2402 	int rc;
2403 
2404 	EF4_ASSERT_RESET_SERIALISED(efx);
2405 
2406 	/* Ensure that SRAM is initialised even if we're disabling the device */
2407 	rc = efx->type->init(efx);
2408 	if (rc) {
2409 		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2410 		goto fail;
2411 	}
2412 
2413 	if (!ok)
2414 		goto fail;
2415 
2416 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2417 	    method != RESET_TYPE_DATAPATH) {
2418 		rc = efx->phy_op->init(efx);
2419 		if (rc)
2420 			goto fail;
2421 		rc = efx->phy_op->reconfigure(efx);
2422 		if (rc && rc != -EPERM)
2423 			netif_err(efx, drv, efx->net_dev,
2424 				  "could not restore PHY settings\n");
2425 	}
2426 
2427 	rc = ef4_enable_interrupts(efx);
2428 	if (rc)
2429 		goto fail;
2430 
	/* ef4_restore_filters() takes filter_sem itself; taking it here as
	 * well would read-lock the rwsem recursively, which can deadlock if
	 * a writer queues between the two acquisitions.
	 */
2432 	ef4_restore_filters(efx);
2434 
2435 	mutex_unlock(&efx->mac_lock);
2436 
2437 	ef4_start_all(efx);
2438 
2439 	return 0;
2440 
2441 fail:
2442 	efx->port_initialized = false;
2443 
2444 	mutex_unlock(&efx->mac_lock);
2445 
2446 	return rc;
2447 }
2448 
2449 /* Reset the NIC using the specified method.  Note that the reset may
2450  * fail, in which case the card will be left in an unusable state.
2451  *
2452  * Caller must hold the rtnl_lock.
2453  */
2454 int ef4_reset(struct ef4_nic *efx, enum reset_type method)
2455 {
2456 	int rc, rc2;
2457 	bool disabled;
2458 
2459 	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2460 		   RESET_TYPE(method));
2461 
2462 	ef4_device_detach_sync(efx);
2463 	ef4_reset_down(efx, method);
2464 
2465 	rc = efx->type->reset(efx, method);
2466 	if (rc) {
2467 		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2468 		goto out;
2469 	}
2470 
2471 	/* Clear flags for the scopes we covered.  We assume the NIC and
2472 	 * driver are now quiescent so that there is no race here.
2473 	 */
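	/* -(1 << (method + 1)) == ~((1 << (method + 1)) - 1): a mask whose
	 * bits 0..method are clear, so every scope this reset covers is
	 * acknowledged while wider pending scopes remain set.
	 */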
2474 	if (method < RESET_TYPE_MAX_METHOD)
2475 		efx->reset_pending &= -(1 << (method + 1));
2476 	else /* it doesn't fit into the well-ordered scope hierarchy */
2477 		__clear_bit(method, &efx->reset_pending);
2478 
2479 	/* Reinitialise bus-mastering, which may have been turned off before
2480 	 * the reset was scheduled. This is still appropriate, even in the
2481 	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
2482 	 * can respond to requests. */
2483 	pci_set_master(efx->pci_dev);
2484 
2485 out:
2486 	/* Leave device stopped if necessary */
2487 	disabled = rc ||
2488 		method == RESET_TYPE_DISABLE ||
2489 		method == RESET_TYPE_RECOVER_OR_DISABLE;
2490 	rc2 = ef4_reset_up(efx, method, !disabled);
2491 	if (rc2) {
2492 		disabled = true;
2493 		if (!rc)
2494 			rc = rc2;
2495 	}
2496 
2497 	if (disabled) {
2498 		dev_close(efx->net_dev);
2499 		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2500 		efx->state = STATE_DISABLED;
2501 	} else {
2502 		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2503 		netif_device_attach(efx->net_dev);
2504 	}
2505 	return rc;
2506 }
2507 
2508 /* Try recovery mechanisms.
2509  * For now only EEH is supported.
2510  * Returns 0 if the recovery mechanisms are unsuccessful.
2511  * Returns a non-zero value otherwise.
2512  */
2513 int ef4_try_recovery(struct ef4_nic *efx)
2514 {
2515 #ifdef CONFIG_EEH
2516 	/* A PCI error can occur and not be seen by EEH because nothing
2517 	 * happens on the PCI bus. In this case the driver may fail and
2518 	 * schedule a 'recover or reset', leading to this recovery handler.
2519 	 * Manually call the eeh failure check function.
2520 	 */
2521 	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2522 	if (eeh_dev_check_failure(eehdev)) {
2523 		/* The EEH mechanisms will handle the error and reset the
2524 		 * device if necessary.
2525 		 */
2526 		return 1;
2527 	}
2528 #endif
2529 	return 0;
2530 }
2531 
2532 /* The worker thread exists so that code that cannot sleep can
2533  * schedule a reset for later.
2534  */
2535 static void ef4_reset_work(struct work_struct *data)
2536 {
2537 	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
2538 	unsigned long pending;
2539 	enum reset_type method;
2540 
2541 	pending = ACCESS_ONCE(efx->reset_pending);
2542 	method = fls(pending) - 1;
2543 
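	/* fls() picks the highest-numbered, i.e. widest-scope, pending
	 * reset: e.g. pending == 0x5 (bits 0 and 2 set) gives method == 2,
	 * and ef4_reset() will then clear bits 0..2 in one go.
	 */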
2544 	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2545 	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2546 	    ef4_try_recovery(efx))
2547 		return;
2548 
2549 	if (!pending)
2550 		return;
2551 
2552 	rtnl_lock();
2553 
2554 	/* We checked the state in ef4_schedule_reset() but it may
2555 	 * have changed by now.  Now that we have the RTNL lock,
2556 	 * it cannot change again.
2557 	 */
2558 	if (efx->state == STATE_READY)
2559 		(void)ef4_reset(efx, method);
2560 
2561 	rtnl_unlock();
2562 }
2563 
2564 void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
2565 {
2566 	enum reset_type method;
2567 
2568 	if (efx->state == STATE_RECOVERY) {
2569 		netif_dbg(efx, drv, efx->net_dev,
2570 			  "recovering: skip scheduling %s reset\n",
2571 			  RESET_TYPE(type));
2572 		return;
2573 	}
2574 
2575 	switch (type) {
2576 	case RESET_TYPE_INVISIBLE:
2577 	case RESET_TYPE_ALL:
2578 	case RESET_TYPE_RECOVER_OR_ALL:
2579 	case RESET_TYPE_WORLD:
2580 	case RESET_TYPE_DISABLE:
2581 	case RESET_TYPE_RECOVER_OR_DISABLE:
2582 	case RESET_TYPE_DATAPATH:
2583 		method = type;
2584 		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2585 			  RESET_TYPE(method));
2586 		break;
2587 	default:
2588 		method = efx->type->map_reset_reason(type);
2589 		netif_dbg(efx, drv, efx->net_dev,
2590 			  "scheduling %s reset for %s\n",
2591 			  RESET_TYPE(method), RESET_TYPE(type));
2592 		break;
2593 	}
2594 
2595 	set_bit(method, &efx->reset_pending);
2596 	smp_mb(); /* ensure we change reset_pending before checking state */
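	/* This barrier pairs with the one in ef4_register_netdev(): one
	 * side writes reset_pending then reads state, the other writes
	 * state then reads reset_pending, so at least one side must see
	 * the other's write and a scheduled reset cannot be lost.
	 */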
2597 
2598 	/* If we're not READY then just leave the flags set as the cue
2599 	 * to abort probing or reschedule the reset later.
2600 	 */
2601 	if (ACCESS_ONCE(efx->state) != STATE_READY)
2602 		return;
2603 
2604 	queue_work(reset_workqueue, &efx->reset_work);
2605 }
2606 
2607 /**************************************************************************
2608  *
2609  * List of NICs we support
2610  *
2611  **************************************************************************/
2612 
2613 /* PCI device ID table */
2614 static const struct pci_device_id ef4_pci_table[] = {
2615 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2616 		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2617 	 .driver_data = (unsigned long) &falcon_a1_nic_type},
2618 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2619 		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2620 	 .driver_data = (unsigned long) &falcon_b0_nic_type},
2621 	{0}			/* end of list */
2622 };
2623 
2624 /**************************************************************************
2625  *
2626  * Dummy PHY/MAC operations
2627  *
2628  * Can be used for some unimplemented operations
2629  * Needed so all function pointers are valid and do not have to be tested
2630  * before use
2631  *
2632  **************************************************************************/
2633 int ef4_port_dummy_op_int(struct ef4_nic *efx)
2634 {
2635 	return 0;
2636 }
2637 void ef4_port_dummy_op_void(struct ef4_nic *efx) {}
2638 
2639 static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
2640 {
2641 	return false;
2642 }
2643 
2644 static const struct ef4_phy_operations ef4_dummy_phy_operations = {
2645 	.init		 = ef4_port_dummy_op_int,
2646 	.reconfigure	 = ef4_port_dummy_op_int,
2647 	.poll		 = ef4_port_dummy_op_poll,
2648 	.fini		 = ef4_port_dummy_op_void,
2649 };
2650 
2651 /**************************************************************************
2652  *
2653  * Data housekeeping
2654  *
2655  **************************************************************************/
2656 
2657 /* This zeroes out and then fills in the invariants in a struct
2658  * ef4_nic (including all sub-structures).
2659  */
2660 static int ef4_init_struct(struct ef4_nic *efx,
2661 			   struct pci_dev *pci_dev, struct net_device *net_dev)
2662 {
2663 	int i;
2664 
2665 	/* Initialise common structures */
2666 	INIT_LIST_HEAD(&efx->node);
2667 	INIT_LIST_HEAD(&efx->secondary_list);
2668 	spin_lock_init(&efx->biu_lock);
2669 #ifdef CONFIG_SFC_FALCON_MTD
2670 	INIT_LIST_HEAD(&efx->mtd_list);
2671 #endif
2672 	INIT_WORK(&efx->reset_work, ef4_reset_work);
2673 	INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
2674 	INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
2675 	efx->pci_dev = pci_dev;
2676 	efx->msg_enable = debug;
2677 	efx->state = STATE_UNINIT;
2678 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2679 
2680 	efx->net_dev = net_dev;
2681 	efx->rx_prefix_size = efx->type->rx_prefix_size;
2682 	efx->rx_ip_align =
2683 		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
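	/* e.g. a 16-byte RX prefix (illustrative, not a Falcon constant)
	 * with NET_IP_ALIGN == 2 gives rx_ip_align = (16 + 2) % 4 = 2,
	 * keeping the IP header on the alignment the platform asked for
	 * once the prefix is accounted for.
	 */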
2684 	efx->rx_packet_hash_offset =
2685 		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2686 	efx->rx_packet_ts_offset =
2687 		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
2688 	spin_lock_init(&efx->stats_lock);
2689 	mutex_init(&efx->mac_lock);
2690 	efx->phy_op = &ef4_dummy_phy_operations;
2691 	efx->mdio.dev = net_dev;
2692 	INIT_WORK(&efx->mac_work, ef4_mac_work);
2693 	init_waitqueue_head(&efx->flush_wq);
2694 
2695 	for (i = 0; i < EF4_MAX_CHANNELS; i++) {
2696 		efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
2697 		if (!efx->channel[i])
2698 			goto fail;
2699 		efx->msi_context[i].efx = efx;
2700 		efx->msi_context[i].index = i;
2701 	}
2702 
2703 	/* Higher numbered interrupt modes are less capable! */
2704 	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2705 				  interrupt_mode);
2706 
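	/* With the encoding used by the module parameter below (0=>MSIX,
	 * 1=>MSI, 2=>legacy), e.g. a type limited to MSI (max mode 1) and
	 * a user request for MSI-X (0) resolve to max(1, 0) == 1: MSI.
	 */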
2707 	/* Would be good to use the net_dev name, but we're too early */
2708 	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2709 		 pci_name(pci_dev));
2710 	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2711 	if (!efx->workqueue)
2712 		goto fail;
2713 
2714 	return 0;
2715 
2716 fail:
2717 	ef4_fini_struct(efx);
2718 	return -ENOMEM;
2719 }
2720 
2721 static void ef4_fini_struct(struct ef4_nic *efx)
2722 {
2723 	int i;
2724 
2725 	for (i = 0; i < EF4_MAX_CHANNELS; i++)
2726 		kfree(efx->channel[i]);
2727 
2728 	kfree(efx->vpd_sn);
2729 
2730 	if (efx->workqueue) {
2731 		destroy_workqueue(efx->workqueue);
2732 		efx->workqueue = NULL;
2733 	}
2734 }
2735 
2736 void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
2737 {
2738 	u64 n_rx_nodesc_trunc = 0;
2739 	struct ef4_channel *channel;
2740 
2741 	ef4_for_each_channel(channel, efx)
2742 		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
2743 	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
2744 	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
2745 }
2746 
2747 /**************************************************************************
2748  *
2749  * PCI interface
2750  *
2751  **************************************************************************/
2752 
2753 /* Main body of final NIC shutdown code
2754  * This is called only at module unload (or hotplug removal).
2755  */
2756 static void ef4_pci_remove_main(struct ef4_nic *efx)
2757 {
2758 	/* Flush reset_work. It can no longer be scheduled since we
2759 	 * are not READY.
2760 	 */
2761 	BUG_ON(efx->state == STATE_READY);
2762 	cancel_work_sync(&efx->reset_work);
2763 
2764 	ef4_disable_interrupts(efx);
2765 	ef4_nic_fini_interrupt(efx);
2766 	ef4_fini_port(efx);
2767 	efx->type->fini(efx);
2768 	ef4_fini_napi(efx);
2769 	ef4_remove_all(efx);
2770 }
2771 
2772 /* Final NIC shutdown
2773  * This is called only at module unload (or hotplug removal).
2775  */
2776 static void ef4_pci_remove(struct pci_dev *pci_dev)
2777 {
2778 	struct ef4_nic *efx;
2779 
2780 	efx = pci_get_drvdata(pci_dev);
2781 	if (!efx)
2782 		return;
2783 
2784 	/* Mark the NIC as fini, then stop the interface */
2785 	rtnl_lock();
2786 	ef4_dissociate(efx);
2787 	dev_close(efx->net_dev);
2788 	ef4_disable_interrupts(efx);
2789 	efx->state = STATE_UNINIT;
2790 	rtnl_unlock();
2791 
2792 	ef4_unregister_netdev(efx);
2793 
2794 	ef4_mtd_remove(efx);
2795 
2796 	ef4_pci_remove_main(efx);
2797 
2798 	ef4_fini_io(efx);
2799 	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2800 
2801 	ef4_fini_struct(efx);
2802 	free_netdev(efx->net_dev);
2803 
2804 	pci_disable_pcie_error_reporting(pci_dev);
2805 }
2806 
2807 /* NIC VPD information
2808  * Called during probe to display the part number of the
2809  * installed NIC.  VPD is potentially very large but this should
2810  * always appear within the first 512 bytes.
2811  */
2812 #define SFC_VPD_LEN 512
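/* Standard PCI VPD layout walked below (abridged sketch):
 *   [PCI_VPD_LRDT_RO_DATA tag][16-bit length]
 *       "PN" [len] <part number>   "SN" [len] <serial number>   ...
 * pci_vpd_find_tag() locates the read-only section and
 * pci_vpd_find_info_keyword() finds the two-character keywords in it.
 */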
2813 static void ef4_probe_vpd_strings(struct ef4_nic *efx)
2814 {
2815 	struct pci_dev *dev = efx->pci_dev;
2816 	char vpd_data[SFC_VPD_LEN];
2817 	ssize_t vpd_size;
2818 	int ro_start, ro_size, i, j;
2819 
2820 	/* Get the vpd data from the device */
2821 	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
2822 	if (vpd_size <= 0) {
2823 		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
2824 		return;
2825 	}
2826 
2827 	/* Get the Read only section */
2828 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
2829 	if (ro_start < 0) {
2830 		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
2831 		return;
2832 	}
2833 
2834 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
2835 	j = ro_size;
2836 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
2837 	if (i + j > vpd_size)
2838 		j = vpd_size - i;
2839 
2840 	/* Get the Part number */
2841 	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
2842 	if (i < 0) {
2843 		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
2844 		return;
2845 	}
2846 
2847 	j = pci_vpd_info_field_size(&vpd_data[i]);
2848 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
2849 	if (i + j > vpd_size) {
2850 		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
2851 		return;
2852 	}
2853 
2854 	netif_info(efx, drv, efx->net_dev,
2855 		   "Part Number : %.*s\n", j, &vpd_data[i]);
2856 
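	/* Get the Serial number */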
2857 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
2858 	j = ro_size;
2859 	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
2860 	if (i < 0) {
2861 		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
2862 		return;
2863 	}
2864 
2865 	j = pci_vpd_info_field_size(&vpd_data[i]);
2866 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
2867 	if (i + j > vpd_size) {
2868 		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
2869 		return;
2870 	}
2871 
2872 	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
2873 	if (!efx->vpd_sn)
2874 		return;
2875 
2876 	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
2877 }
2878 
2879 
2880 /* Main body of NIC initialisation
2881  * This is called at module load (or hotplug insertion, theoretically).
2882  */
2883 static int ef4_pci_probe_main(struct ef4_nic *efx)
2884 {
2885 	int rc;
2886 
2887 	/* Do start-of-day initialisation */
2888 	rc = ef4_probe_all(efx);
2889 	if (rc)
2890 		goto fail1;
2891 
2892 	ef4_init_napi(efx);
2893 
2894 	rc = efx->type->init(efx);
2895 	if (rc) {
2896 		netif_err(efx, probe, efx->net_dev,
2897 			  "failed to initialise NIC\n");
2898 		goto fail3;
2899 	}
2900 
2901 	rc = ef4_init_port(efx);
2902 	if (rc) {
2903 		netif_err(efx, probe, efx->net_dev,
2904 			  "failed to initialise port\n");
2905 		goto fail4;
2906 	}
2907 
2908 	rc = ef4_nic_init_interrupt(efx);
2909 	if (rc)
2910 		goto fail5;
2911 	rc = ef4_enable_interrupts(efx);
2912 	if (rc)
2913 		goto fail6;
2914 
2915 	return 0;
2916 
2917  fail6:
2918 	ef4_nic_fini_interrupt(efx);
2919  fail5:
2920 	ef4_fini_port(efx);
2921  fail4:
2922 	efx->type->fini(efx);
2923  fail3:
2924 	ef4_fini_napi(efx);
2925 	ef4_remove_all(efx);
2926  fail1:
2927 	return rc;
2928 }
2929 
2930 /* NIC initialisation
2931  *
2932  * This is called at module load (or hotplug insertion,
2933  * theoretically).  It sets up PCI mappings, resets the NIC,
2934  * sets up and registers the network devices with the kernel and hooks
2935  * the interrupt service routine.  It does not prepare the device for
2936  * transmission; this is left to the first time one of the network
2937  * interfaces is brought up (i.e. ef4_net_open).
2938  */
2939 static int ef4_pci_probe(struct pci_dev *pci_dev,
2940 			 const struct pci_device_id *entry)
2941 {
2942 	struct net_device *net_dev;
2943 	struct ef4_nic *efx;
2944 	int rc;
2945 
2946 	/* Allocate and initialise a struct net_device and struct ef4_nic */
2947 	net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
2948 				     EF4_MAX_RX_QUEUES);
2949 	if (!net_dev)
2950 		return -ENOMEM;
2951 	efx = netdev_priv(net_dev);
2952 	efx->type = (const struct ef4_nic_type *) entry->driver_data;
2953 	efx->fixed_features |= NETIF_F_HIGHDMA;
2954 
2955 	pci_set_drvdata(pci_dev, efx);
2956 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2957 	rc = ef4_init_struct(efx, pci_dev, net_dev);
2958 	if (rc)
2959 		goto fail1;
2960 
2961 	netif_info(efx, probe, efx->net_dev,
2962 		   "Solarflare NIC detected\n");
2963 
2964 	ef4_probe_vpd_strings(efx);
2965 
2966 	/* Set up basic I/O (BAR mappings etc) */
2967 	rc = ef4_init_io(efx);
2968 	if (rc)
2969 		goto fail2;
2970 
2971 	rc = ef4_pci_probe_main(efx);
2972 	if (rc)
2973 		goto fail3;
2974 
2975 	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2976 			      NETIF_F_RXCSUM);
2977 	/* Mask for features that also apply to VLAN devices */
2978 	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
2979 				   NETIF_F_HIGHDMA | NETIF_F_RXCSUM);
2980 
2981 	net_dev->hw_features = net_dev->features & ~efx->fixed_features;
2982 
2983 	/* Disable VLAN filtering by default.  It may be enforced if
2984 	 * the feature is fixed (i.e. VLAN filters are required to
2985 	 * receive VLAN tagged packets due to vPort restrictions).
2986 	 */
2987 	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2988 	net_dev->features |= efx->fixed_features;
2989 
2990 	rc = ef4_register_netdev(efx);
2991 	if (rc)
2992 		goto fail4;
2993 
2994 	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2995 
2996 	/* Try to create MTDs, but allow this to fail */
2997 	rtnl_lock();
2998 	rc = ef4_mtd_probe(efx);
2999 	rtnl_unlock();
3000 	if (rc && rc != -EPERM)
3001 		netif_warn(efx, probe, efx->net_dev,
3002 			   "failed to create MTDs (%d)\n", rc);
3003 
3004 	rc = pci_enable_pcie_error_reporting(pci_dev);
3005 	if (rc && rc != -EINVAL)
3006 		netif_notice(efx, probe, efx->net_dev,
3007 			     "PCIE error reporting unavailable (%d).\n",
3008 			     rc);
3009 
3010 	return 0;
3011 
3012  fail4:
3013 	ef4_pci_remove_main(efx);
3014  fail3:
3015 	ef4_fini_io(efx);
3016  fail2:
3017 	ef4_fini_struct(efx);
3018  fail1:
3019 	WARN_ON(rc > 0);
3020 	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
3021 	free_netdev(net_dev);
3022 	return rc;
3023 }
3024 
3025 static int ef4_pm_freeze(struct device *dev)
3026 {
3027 	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3028 
3029 	rtnl_lock();
3030 
3031 	if (efx->state != STATE_DISABLED) {
3032 		efx->state = STATE_UNINIT;
3033 
3034 		ef4_device_detach_sync(efx);
3035 
3036 		ef4_stop_all(efx);
3037 		ef4_disable_interrupts(efx);
3038 	}
3039 
3040 	rtnl_unlock();
3041 
3042 	return 0;
3043 }
3044 
3045 static int ef4_pm_thaw(struct device *dev)
3046 {
3047 	int rc;
3048 	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3049 
3050 	rtnl_lock();
3051 
3052 	if (efx->state != STATE_DISABLED) {
3053 		rc = ef4_enable_interrupts(efx);
3054 		if (rc)
3055 			goto fail;
3056 
3057 		mutex_lock(&efx->mac_lock);
3058 		efx->phy_op->reconfigure(efx);
3059 		mutex_unlock(&efx->mac_lock);
3060 
3061 		ef4_start_all(efx);
3062 
3063 		netif_device_attach(efx->net_dev);
3064 
3065 		efx->state = STATE_READY;
3066 
3067 		efx->type->resume_wol(efx);
3068 	}
3069 
3070 	rtnl_unlock();
3071 
3072 	/* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
3073 	queue_work(reset_workqueue, &efx->reset_work);
3074 
3075 	return 0;
3076 
3077 fail:
3078 	rtnl_unlock();
3079 
3080 	return rc;
3081 }
3082 
3083 static int ef4_pm_poweroff(struct device *dev)
3084 {
3085 	struct pci_dev *pci_dev = to_pci_dev(dev);
3086 	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3087 
3088 	efx->type->fini(efx);
3089 
3090 	efx->reset_pending = 0;
3091 
3092 	pci_save_state(pci_dev);
3093 	return pci_set_power_state(pci_dev, PCI_D3hot);
3094 }
3095 
3096 /* Used for both resume and restore */
3097 static int ef4_pm_resume(struct device *dev)
3098 {
3099 	struct pci_dev *pci_dev = to_pci_dev(dev);
3100 	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3101 	int rc;
3102 
3103 	rc = pci_set_power_state(pci_dev, PCI_D0);
3104 	if (rc)
3105 		return rc;
3106 	pci_restore_state(pci_dev);
3107 	rc = pci_enable_device(pci_dev);
3108 	if (rc)
3109 		return rc;
3110 	pci_set_master(efx->pci_dev);
3111 	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3112 	if (rc)
3113 		return rc;
3114 	rc = efx->type->init(efx);
3115 	if (rc)
3116 		return rc;
3117 	rc = ef4_pm_thaw(dev);
3118 	return rc;
3119 }
3120 
3121 static int ef4_pm_suspend(struct device *dev)
3122 {
3123 	int rc;
3124 
3125 	ef4_pm_freeze(dev);
3126 	rc = ef4_pm_poweroff(dev);
3127 	if (rc)
3128 		ef4_pm_resume(dev);
3129 	return rc;
3130 }
3131 
3132 static const struct dev_pm_ops ef4_pm_ops = {
3133 	.suspend	= ef4_pm_suspend,
3134 	.resume		= ef4_pm_resume,
3135 	.freeze		= ef4_pm_freeze,
3136 	.thaw		= ef4_pm_thaw,
3137 	.poweroff	= ef4_pm_poweroff,
3138 	.restore	= ef4_pm_resume,
3139 };
3140 
3141 /* A PCI error affecting this device was detected.
3142  * At this point MMIO and DMA may be disabled.
3143  * Stop the software path and request a slot reset.
3144  */
3145 static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
3146 					      enum pci_channel_state state)
3147 {
3148 	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3149 	struct ef4_nic *efx = pci_get_drvdata(pdev);
3150 
3151 	if (state == pci_channel_io_perm_failure)
3152 		return PCI_ERS_RESULT_DISCONNECT;
3153 
3154 	rtnl_lock();
3155 
3156 	if (efx->state != STATE_DISABLED) {
3157 		efx->state = STATE_RECOVERY;
3158 		efx->reset_pending = 0;
3159 
3160 		ef4_device_detach_sync(efx);
3161 
3162 		ef4_stop_all(efx);
3163 		ef4_disable_interrupts(efx);
3164 
3165 		status = PCI_ERS_RESULT_NEED_RESET;
3166 	} else {
3167 		/* If the interface is disabled we don't want to do anything
3168 		 * with it.
3169 		 */
3170 		status = PCI_ERS_RESULT_RECOVERED;
3171 	}
3172 
3173 	rtnl_unlock();
3174 
3175 	pci_disable_device(pdev);
3176 
3177 	return status;
3178 }
3179 
3180 /* Fake a successful reset, which will be performed later in ef4_io_resume. */
3181 static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
3182 {
3183 	struct ef4_nic *efx = pci_get_drvdata(pdev);
3184 	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3185 	int rc;
3186 
3187 	if (pci_enable_device(pdev)) {
3188 		netif_err(efx, hw, efx->net_dev,
3189 			  "Cannot re-enable PCI device after reset.\n");
3190 		status = PCI_ERS_RESULT_DISCONNECT;
3191 	}
3192 
3193 	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3194 	if (rc) {
3195 		netif_err(efx, hw, efx->net_dev,
3196 			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3197 		/* Non-fatal error. Continue. */
3198 	}
3199 
3200 	return status;
3201 }
3202 
3203 /* Perform the actual reset and resume I/O operations. */
3204 static void ef4_io_resume(struct pci_dev *pdev)
3205 {
3206 	struct ef4_nic *efx = pci_get_drvdata(pdev);
3207 	int rc;
3208 
3209 	rtnl_lock();
3210 
3211 	if (efx->state == STATE_DISABLED)
3212 		goto out;
3213 
3214 	rc = ef4_reset(efx, RESET_TYPE_ALL);
3215 	if (rc) {
3216 		netif_err(efx, hw, efx->net_dev,
3217 			  "ef4_reset failed after PCI error (%d)\n", rc);
3218 	} else {
3219 		efx->state = STATE_READY;
3220 		netif_dbg(efx, hw, efx->net_dev,
3221 			  "Done resetting and resuming IO after PCI error.\n");
3222 	}
3223 
3224 out:
3225 	rtnl_unlock();
3226 }
3227 
3228 /* For simplicity and reliability, we always require a slot reset and try to
3229  * reset the hardware when a pci error affecting the device is detected.
3230  * We leave both the link_reset and mmio_enabled callback unimplemented:
3231  * with our request for slot reset the mmio_enabled callback will never be
3232  * called, and the link_reset callback is not used by AER or EEH mechanisms.
3233  */
3234 static const struct pci_error_handlers ef4_err_handlers = {
3235 	.error_detected = ef4_io_error_detected,
3236 	.slot_reset	= ef4_io_slot_reset,
3237 	.resume		= ef4_io_resume,
3238 };
3239 
3240 static struct pci_driver ef4_pci_driver = {
3241 	.name		= KBUILD_MODNAME,
3242 	.id_table	= ef4_pci_table,
3243 	.probe		= ef4_pci_probe,
3244 	.remove		= ef4_pci_remove,
3245 	.driver.pm	= &ef4_pm_ops,
3246 	.err_handler	= &ef4_err_handlers,
3247 };
3248 
3249 /**************************************************************************
3250  *
3251  * Kernel module interface
3252  *
3253  *************************************************************************/
3254 
3255 module_param(interrupt_mode, uint, 0444);
3256 MODULE_PARM_DESC(interrupt_mode,
3257 		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3258 
3259 static int __init ef4_init_module(void)
3260 {
3261 	int rc;
3262 
3263 	printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");
3264 
3265 	rc = register_netdevice_notifier(&ef4_netdev_notifier);
3266 	if (rc)
3267 		goto err_notifier;
3268 
3269 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3270 	if (!reset_workqueue) {
3271 		rc = -ENOMEM;
3272 		goto err_reset;
3273 	}
3274 
3275 	rc = pci_register_driver(&ef4_pci_driver);
3276 	if (rc < 0)
3277 		goto err_pci;
3278 
3279 	return 0;
3280 
3281  err_pci:
3282 	destroy_workqueue(reset_workqueue);
3283  err_reset:
3284 	unregister_netdevice_notifier(&ef4_netdev_notifier);
3285  err_notifier:
3286 	return rc;
3287 }
3288 
3289 static void __exit ef4_exit_module(void)
3290 {
3291 	printk(KERN_INFO "Solarflare Falcon driver unloading\n");
3292 
3293 	pci_unregister_driver(&ef4_pci_driver);
3294 	destroy_workqueue(reset_workqueue);
3295 	unregister_netdevice_notifier(&ef4_netdev_notifier);
3297 }
3298 
3299 module_init(ef4_init_module);
3300 module_exit(ef4_exit_module);
3301 
3302 MODULE_AUTHOR("Solarflare Communications and "
3303 	      "Michael Brown <mbrown@fensystems.co.uk>");
3304 MODULE_DESCRIPTION("Solarflare Falcon network driver");
3305 MODULE_LICENSE("GPL");
3306 MODULE_DEVICE_TABLE(pci, ef4_pci_table);
3307 MODULE_VERSION(EF4_DRIVER_VERSION);
3308