/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"

#include "mcdi.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
	[RESET_TYPE_ALL]                = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]              = "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]           = "DATAPATH",
	[RESET_TYPE_MC_BIST]		= "MC_BIST",
	[RESET_TYPE_DISABLE]            = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
};

/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
};

void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
{
	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
	    efx_udp_tunnel_type_names[type] != NULL)
		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
	else
		snprintf(buf, buflen, "type %d", type);
}

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 *   512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
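
/* Example (illustrative only, not part of the driver): the parameters
 * declared above can be given at module load time, e.g.
 *   modprobe sfc rss_cpus=4 irq_adapt_low_thresh=4000
 * Parameters declared with mode 0644 can also be changed at runtime via
 * /sys/module/sfc/parameters/.
 */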

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

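/* Assert that the RTNL lock is held when the device is in a state
 * (READY, RECOVERY or DISABLED) where resets and reconfiguration must
 * be serialised against each other.
 */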
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

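/* Adapt this channel's IRQ moderation interval: a low interrupt score
 * means few events per interrupt, so reduce moderation for lower latency;
 * a high score means moderation can be increased (up to the configured RX
 * maximum) to cut CPU overhead.  The per-channel counters are reset.
 */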
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		schedule_work(&channel->filter_work);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

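/* Refresh the per-channel interrupt names stored in the MSI context
 * (e.g. "eth0-rx-1" when TX and RX channels are separate), after
 * channels have been added, removed or renamed.
 */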
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely.  We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

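/* Stop the hardware and software datapath: disable RX refill, wait for
 * in-flight NAPI processing to finish, then flush and tear down all TX
 * and RX queues.  The port must already be stopped.
 */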
static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

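/* Resize the RX and TX descriptor rings by reallocating each channel
 * that supports it.  The datapath and interrupts are stopped around the
 * swap; on failure the old channels and ring sizes are restored.
 */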
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

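/* Arm the slow-fill timer, so that RX descriptor refill is retried from
 * timer context in 100 ms (e.g. after a buffer allocation failure).
 */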
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also stops or restarts
 * traffic on the port's TX queue to match the link state.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx,
			      const unsigned long *advertising)
{
	memcpy(efx->link_advertising, advertising,
	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));

	efx->link_advertising[0] |= ADVERTISED_Autoneg;
	if (advertising[0] & ADVERTISED_Pause)
		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
	else
		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
	if (advertising[0] & ADVERTISED_Asym_Pause)
		efx->wanted_fc ^= EFX_FC_TX;
}

/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
 * force the Autoneg bit on.
 */
void efx_link_clear_advertising(struct efx_nic *efx)
{
	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising[0]) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising[0] |= (ADVERTISED_Pause |
						     ADVERTISED_Asym_Pause);
		else
			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
						      ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
	}
}

static void efx_fini_port(struct efx_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void efx_mac_reconfigure(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	efx_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again.  This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
	return left->type == right->type &&
		left->vpd_sn && right->vpd_sn &&
		!strcmp(left->vpd_sn, right->vpd_sn);
}

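/* Associate this interface with the other functions of the same
 * controller, matched by VPD serial number: a primary function collects
 * its secondaries, and functions whose primary has not been probed yet
 * wait on the unassociated list.
 */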
static void efx_associate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &efx_primary_list);

		list_for_each_entry_safe(other, next, &efx_unassociated_list,
					 node) {
			if (efx_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &efx_primary_list, node) {
			if (efx_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &efx_unassociated_list);
	}
}

static void efx_dissociate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &efx_unassociated_list);
		other->primary = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar(efx);

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our genuine mask
	 * down to 32 bits, because some architectures will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

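/* Undo efx_init_io(): unmap the memory BAR, release the PCI region and
 * disable the device (unless VFs are still assigned to guests).
 */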
static void efx_fini_io(struct efx_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar(efx);
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

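/* Fill an RSS indirection table with the default even spread over the
 * available RX channels.
 */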
void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

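/* Count the RX queues we want: unless overridden by the rss_cpus module
 * parameter, this is one per online CPU core (hyperthread siblings are
 * counted once), subject to the limits applied below.
 */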
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (efx_separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (efx_separate_tx_channels) {
				efx->n_tx_channels = min(max(n_channels / 2,
							     1U),
							 efx->max_tx_channels);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = min(n_channels,
							 efx->max_tx_channels);
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	efx->n_extra_tx_channels = 0;
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((efx->n_rx_channels > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   efx->n_rx_channels : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = efx->n_rx_channels;

	return 0;
}

#if defined(CONFIG_SMP)
static void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = cpumask_local_spread(channel->channel,
					   pcibus_to_node(efx->pci_dev->bus));
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

static void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
static void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

static void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */

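/* Enable event processing: initialise (where not kept across restarts)
 * and start each channel's event queue and NAPI instance, then switch
 * MCDI completions to event mode.  On failure, event queues that were
 * already started are stopped again.
 */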
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

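/* Reverse of efx_soft_enable_interrupts(): switch MCDI back to polled
 * mode, wait for any running interrupt handlers, then stop NAPI and
 * tear down event queues that are not kept across restarts.
 */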
static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}

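/* Fully enable interrupts: re-enable a legacy IRQ that EEH disabled if
 * necessary, enable the master interrupt at NIC level, initialise the
 * event queues that persist across restarts, then soft-enable the rest.
 */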
static int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

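/* Reverse of efx_enable_interrupts(): soft-disable first, then tear
 * down the persistent event queues and disable interrupts at NIC level.
 */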
static void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

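/* Record which channels have RX queues and renumber the TX queues once
 * the offset of the first TX channel is known.
 */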
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	do {
		if (!efx->max_channels || !efx->max_tx_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "Insufficient resources to allocate"
				  " any channels\n");
			rc = -ENOSPC;
			goto fail1;
		}

		/* Determine the number of channels and queues by trying
		 * to hook in MSI-X interrupts.
		 */
		rc = efx_probe_interrupts(efx);
		if (rc)
			goto fail1;

		efx_set_channels(efx);

		/* dimension_resources can fail with EAGAIN */
		rc = efx->type->dimension_resources(efx);
		if (rc != 0 && rc != -EAGAIN)
			goto fail2;

		if (rc == -EAGAIN)
			/* try again with new max_channels */
			efx_remove_interrupts(efx);

	} while (rc == -EAGAIN);

	if (efx->n_channels > 1)
		netdev_rss_key_fill(efx->rss_context.rx_hash_key,
				    sizeof(efx->rss_context.rx_hash_key));
	efx_set_default_rx_indir_table(efx, &efx->rss_context);

	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	efx_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

static int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	init_rwsem(&efx->filter_sem);
	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}

		efx->rps_expire_index = efx->rps_expire_channel = 0;
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		kfree(channel->rps_flow_id);
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

static void efx_restore_filters(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->filter_table_restore(efx);
	up_read(&efx->filter_sem);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_probe(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to setup vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail5;

	return 0;

 fail5:
	efx_remove_filters(efx);
 fail4:
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
 fail3:
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured.  Interrupts must already be enabled.  This function
1961  * is safe to call multiple times, so long as the NIC is not disabled.
1962  * Requires the RTNL lock.
1963  */
1964 static void efx_start_all(struct efx_nic *efx)
1965 {
1966 	EFX_ASSERT_RESET_SERIALISED(efx);
1967 	BUG_ON(efx->state == STATE_DISABLED);
1968 
1969 	/* Check that it is appropriate to restart the interface. All
1970 	 * of these flags are safe to read under just the rtnl lock */
1971 	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1972 	    efx->reset_pending)
1973 		return;
1974 
1975 	efx_start_port(efx);
1976 	efx_start_datapath(efx);
1977 
1978 	/* Start the hardware monitor if there is one */
1979 	if (efx->type->monitor != NULL)
1980 		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1981 				   efx_monitor_interval);
1982 
1983 	/* Link state detection is normally event-driven; we have
1984 	 * to poll now because we could have missed a change
1985 	 */
1986 	mutex_lock(&efx->mac_lock);
1987 	if (efx->phy_op->poll(efx))
1988 		efx_link_status_changed(efx);
1989 	mutex_unlock(&efx->mac_lock);
1990 
1991 	efx->type->start_stats(efx);
1992 	efx->type->pull_stats(efx);
1993 	spin_lock_bh(&efx->stats_lock);
1994 	efx->type->update_stats(efx, NULL, NULL);
1995 	spin_unlock_bh(&efx->stats_lock);
1996 }
1997 
1998 /* Quiesce the hardware and software data path, and regular activity
1999  * for the port without bringing the link down.  Safe to call multiple
2000  * times with the NIC in almost any state, but interrupts should be
2001  * enabled.  Requires the RTNL lock.
2002  */
2003 static void efx_stop_all(struct efx_nic *efx)
2004 {
2005 	EFX_ASSERT_RESET_SERIALISED(efx);
2006 
2007 	/* port_enabled can be read safely under the rtnl lock */
2008 	if (!efx->port_enabled)
2009 		return;
2010 
2011 	/* update stats before we go down so we can accurately count
2012 	 * rx_nodesc_drops
2013 	 */
2014 	efx->type->pull_stats(efx);
2015 	spin_lock_bh(&efx->stats_lock);
2016 	efx->type->update_stats(efx, NULL, NULL);
2017 	spin_unlock_bh(&efx->stats_lock);
2018 	efx->type->stop_stats(efx);
2019 	efx_stop_port(efx);
2020 
2021 	/* Stop the kernel transmit interface.  This is only valid if
2022 	 * the device is stopped or detached; otherwise the watchdog
2023 	 * may fire immediately.
2024 	 */
2025 	WARN_ON(netif_running(efx->net_dev) &&
2026 		netif_device_present(efx->net_dev));
2027 	netif_tx_disable(efx->net_dev);
2028 
2029 	efx_stop_datapath(efx);
2030 }
2031 
2032 static void efx_remove_all(struct efx_nic *efx)
2033 {
2034 	efx_remove_channels(efx);
2035 	efx_remove_filters(efx);
2036 #ifdef CONFIG_SFC_SRIOV
2037 	efx->type->vswitching_remove(efx);
2038 #endif
2039 	efx_remove_port(efx);
2040 	efx_remove_nic(efx);
2041 }
2042 
2043 /**************************************************************************
2044  *
2045  * Interrupt moderation
2046  *
2047  **************************************************************************/
2048 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
2049 {
2050 	if (usecs == 0)
2051 		return 0;
2052 	if (usecs * 1000 < efx->timer_quantum_ns)
2053 		return 1; /* never round down to 0 */
2054 	return usecs * 1000 / efx->timer_quantum_ns;
2055 }
2056 
2057 unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
2058 {
2059 	/* We must round up when converting ticks to microseconds
2060 	 * because we round down when converting the other way.
2061 	 */
2062 	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
2063 }
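
/* Worked example of the asymmetric rounding above, assuming a
 * hypothetical timer_quantum_ns of 6144 (a 6.144us quantum):
 * efx_usecs_to_ticks(efx, 10) = 10000 / 6144 = 1 tick (rounded down),
 * while efx_ticks_to_usecs(efx, 1) = DIV_ROUND_UP(6144, 1000) = 7us
 * (rounded up), so a moderation value that makes a round trip through
 * both conversions can never collapse to zero.
 */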
2064 
2065 /* Set interrupt moderation parameters */
2066 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
2067 			    unsigned int rx_usecs, bool rx_adaptive,
2068 			    bool rx_may_override_tx)
2069 {
2070 	struct efx_channel *channel;
2071 	unsigned int timer_max_us;
2072 
2073 	EFX_ASSERT_RESET_SERIALISED(efx);
2074 
2075 	timer_max_us = efx->timer_max_ns / 1000;
2076 
2077 	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
2078 		return -EINVAL;
2079 
2080 	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
2081 	    !rx_may_override_tx) {
2082 		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
2083 			  "RX and TX IRQ moderation must be equal\n");
2084 		return -EINVAL;
2085 	}
2086 
2087 	efx->irq_rx_adaptive = rx_adaptive;
2088 	efx->irq_rx_moderation_us = rx_usecs;
2089 	efx_for_each_channel(channel, efx) {
2090 		if (efx_channel_has_rx_queue(channel))
2091 			channel->irq_moderation_us = rx_usecs;
2092 		else if (efx_channel_has_tx_queues(channel))
2093 			channel->irq_moderation_us = tx_usecs;
2094 	}
2095 
2096 	return 0;
2097 }
2098 
2099 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
2100 			    unsigned int *rx_usecs, bool *rx_adaptive)
2101 {
2102 	*rx_adaptive = efx->irq_rx_adaptive;
2103 	*rx_usecs = efx->irq_rx_moderation_us;
2104 
2105 	/* If channels are shared between RX and TX, so is IRQ
2106 	 * moderation.  Otherwise, IRQ moderation is the same for all
2107 	 * TX channels and is not adaptive.
2108 	 */
2109 	if (efx->tx_channel_offset == 0) {
2110 		*tx_usecs = *rx_usecs;
2111 	} else {
2112 		struct efx_channel *tx_channel;
2113 
2114 		tx_channel = efx->channel[efx->tx_channel_offset];
2115 		*tx_usecs = tx_channel->irq_moderation_us;
2116 	}
2117 }
2118 
2119 /**************************************************************************
2120  *
2121  * Hardware monitor
2122  *
2123  **************************************************************************/
2124 
2125 /* Run periodically off the general workqueue */
2126 static void efx_monitor(struct work_struct *data)
2127 {
2128 	struct efx_nic *efx = container_of(data, struct efx_nic,
2129 					   monitor_work.work);
2130 
2131 	netif_vdbg(efx, timer, efx->net_dev,
2132 		   "hardware monitor executing on CPU %d\n",
2133 		   raw_smp_processor_id());
2134 	BUG_ON(efx->type->monitor == NULL);
2135 
2136 	/* If the mac_lock is already held then a port reconfiguration
2137 	 * is probably already in progress, and it will do most of the
2138 	 * work of monitor() anyway. */
2139 	if (mutex_trylock(&efx->mac_lock)) {
2140 		if (efx->port_enabled)
2141 			efx->type->monitor(efx);
2142 		mutex_unlock(&efx->mac_lock);
2143 	}
2144 
2145 	queue_delayed_work(efx->workqueue, &efx->monitor_work,
2146 			   efx_monitor_interval);
2147 }
2148 
2149 /**************************************************************************
2150  *
2151  * ioctls
2152  *
2153  *************************************************************************/
2154 
2155 /* Net device ioctl
2156  * Context: process, rtnl_lock() held.
2157  */
2158 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
2159 {
2160 	struct efx_nic *efx = netdev_priv(net_dev);
2161 	struct mii_ioctl_data *data = if_mii(ifr);
2162 
2163 	if (cmd == SIOCSHWTSTAMP)
2164 		return efx_ptp_set_ts_config(efx, ifr);
2165 	if (cmd == SIOCGHWTSTAMP)
2166 		return efx_ptp_get_ts_config(efx, ifr);
2167 
2168 	/* Convert phy_id from older PRTAD/DEVAD format */
2169 	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
2170 	    (data->phy_id & 0xfc00) == 0x0400)
2171 		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
2172 
2173 	return mdio_mii_ioctl(&efx->mdio, data, cmd);
2174 }
2175 
2176 /**************************************************************************
2177  *
2178  * NAPI interface
2179  *
2180  **************************************************************************/
2181 
2182 static void efx_init_napi_channel(struct efx_channel *channel)
2183 {
2184 	struct efx_nic *efx = channel->efx;
2185 
2186 	channel->napi_dev = efx->net_dev;
2187 	netif_napi_add(channel->napi_dev, &channel->napi_str,
2188 		       efx_poll, napi_weight);
2189 }
2190 
2191 static void efx_init_napi(struct efx_nic *efx)
2192 {
2193 	struct efx_channel *channel;
2194 
2195 	efx_for_each_channel(channel, efx)
2196 		efx_init_napi_channel(channel);
2197 }
2198 
2199 static void efx_fini_napi_channel(struct efx_channel *channel)
2200 {
2201 	if (channel->napi_dev)
2202 		netif_napi_del(&channel->napi_str);
2203 
2204 	channel->napi_dev = NULL;
2205 }
2206 
2207 static void efx_fini_napi(struct efx_nic *efx)
2208 {
2209 	struct efx_channel *channel;
2210 
2211 	efx_for_each_channel(channel, efx)
2212 		efx_fini_napi_channel(channel);
2213 }
2214 
2215 /**************************************************************************
2216  *
2217  * Kernel netpoll interface
2218  *
2219  *************************************************************************/
2220 
2221 #ifdef CONFIG_NET_POLL_CONTROLLER
2222 
2223 /* Although in the common case interrupts will be disabled, this is not
2224  * guaranteed. However, all our work happens inside the NAPI callback,
2225  * so no locking is required.
2226  */
2227 static void efx_netpoll(struct net_device *net_dev)
2228 {
2229 	struct efx_nic *efx = netdev_priv(net_dev);
2230 	struct efx_channel *channel;
2231 
2232 	efx_for_each_channel(channel, efx)
2233 		efx_schedule_channel(channel);
2234 }
2235 
2236 #endif
2237 
2238 /**************************************************************************
2239  *
2240  * Kernel net device interface
2241  *
2242  *************************************************************************/
2243 
2244 /* Context: process, rtnl_lock() held. */
2245 int efx_net_open(struct net_device *net_dev)
2246 {
2247 	struct efx_nic *efx = netdev_priv(net_dev);
2248 	int rc;
2249 
2250 	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2251 		  raw_smp_processor_id());
2252 
2253 	rc = efx_check_disabled(efx);
2254 	if (rc)
2255 		return rc;
2256 	if (efx->phy_mode & PHY_MODE_SPECIAL)
2257 		return -EBUSY;
2258 	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
2259 		return -EIO;
2260 
2261 	/* Notify the kernel of the link state polled during driver load,
2262 	 * before the monitor starts running */
2263 	efx_link_status_changed(efx);
2264 
2265 	efx_start_all(efx);
2266 	if (efx->state == STATE_DISABLED || efx->reset_pending)
2267 		netif_device_detach(efx->net_dev);
2268 	efx_selftest_async_start(efx);
2269 	return 0;
2270 }
2271 
2272 /* Context: process, rtnl_lock() held.
2273  * Note that the kernel will ignore our return code; this method
2274  * should really return void.
2275  */
2276 int efx_net_stop(struct net_device *net_dev)
2277 {
2278 	struct efx_nic *efx = netdev_priv(net_dev);
2279 
2280 	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2281 		  raw_smp_processor_id());
2282 
2283 	/* Stop the device and flush all the channels */
2284 	efx_stop_all(efx);
2285 
2286 	return 0;
2287 }
2288 
2289 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
2290 static void efx_net_stats(struct net_device *net_dev,
2291 			  struct rtnl_link_stats64 *stats)
2292 {
2293 	struct efx_nic *efx = netdev_priv(net_dev);
2294 
2295 	spin_lock_bh(&efx->stats_lock);
2296 	efx->type->update_stats(efx, NULL, stats);
2297 	spin_unlock_bh(&efx->stats_lock);
2298 }
2299 
2300 /* Context: netif_tx_lock held, BHs disabled. */
2301 static void efx_watchdog(struct net_device *net_dev)
2302 {
2303 	struct efx_nic *efx = netdev_priv(net_dev);
2304 
2305 	netif_err(efx, tx_err, efx->net_dev,
2306 		  "TX stuck with port_enabled=%d: resetting channels\n",
2307 		  efx->port_enabled);
2308 
2309 	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2310 }
2311 
2312 
2313 /* Context: process, rtnl_lock() held. */
2314 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
2315 {
2316 	struct efx_nic *efx = netdev_priv(net_dev);
2317 	int rc;
2318 
2319 	rc = efx_check_disabled(efx);
2320 	if (rc)
2321 		return rc;
2322 
2323 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2324 
2325 	efx_device_detach_sync(efx);
2326 	efx_stop_all(efx);
2327 
2328 	mutex_lock(&efx->mac_lock);
2329 	net_dev->mtu = new_mtu;
2330 	efx_mac_reconfigure(efx);
2331 	mutex_unlock(&efx->mac_lock);
2332 
2333 	efx_start_all(efx);
2334 	efx_device_attach_if_not_resetting(efx);
2335 	return 0;
2336 }
2337 
2338 static int efx_set_mac_address(struct net_device *net_dev, void *data)
2339 {
2340 	struct efx_nic *efx = netdev_priv(net_dev);
2341 	struct sockaddr *addr = data;
2342 	u8 *new_addr = addr->sa_data;
2343 	u8 old_addr[6];
2344 	int rc;
2345 
2346 	if (!is_valid_ether_addr(new_addr)) {
2347 		netif_err(efx, drv, efx->net_dev,
2348 			  "invalid ethernet MAC address requested: %pM\n",
2349 			  new_addr);
2350 		return -EADDRNOTAVAIL;
2351 	}
2352 
2353 	/* save old address */
2354 	ether_addr_copy(old_addr, net_dev->dev_addr);
2355 	ether_addr_copy(net_dev->dev_addr, new_addr);
2356 	if (efx->type->set_mac_address) {
2357 		rc = efx->type->set_mac_address(efx);
2358 		if (rc) {
2359 			ether_addr_copy(net_dev->dev_addr, old_addr);
2360 			return rc;
2361 		}
2362 	}
2363 
2364 	/* Reconfigure the MAC */
2365 	mutex_lock(&efx->mac_lock);
2366 	efx_mac_reconfigure(efx);
2367 	mutex_unlock(&efx->mac_lock);
2368 
2369 	return 0;
2370 }
2371 
2372 /* Context: netif_addr_lock held, BHs disabled. */
2373 static void efx_set_rx_mode(struct net_device *net_dev)
2374 {
2375 	struct efx_nic *efx = netdev_priv(net_dev);
2376 
2377 	if (efx->port_enabled)
2378 		queue_work(efx->workqueue, &efx->mac_work);
2379 	/* Otherwise efx_start_port() will do this */
2380 }
2381 
2382 static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2383 {
2384 	struct efx_nic *efx = netdev_priv(net_dev);
2385 	int rc;
2386 
2387 	/* If disabling RX n-tuple filtering, clear existing filters */
2388 	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2389 		rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2390 		if (rc)
2391 			return rc;
2392 	}
2393 
2394 	/* If Rx VLAN filter is changed, update filters via mac_reconfigure.
2395 	 * If rx-fcs is changed, mac_reconfigure updates that too.
2396 	 */
2397 	if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
2398 					  NETIF_F_RXFCS)) {
2399 		/* efx_set_rx_mode() will schedule MAC work to update filters
2400 		 * when the new features are finally set in net_dev.
2401 		 */
2402 		efx_set_rx_mode(net_dev);
2403 	}
2404 
2405 	return 0;
2406 }
2407 
2408 static int efx_get_phys_port_id(struct net_device *net_dev,
2409 				struct netdev_phys_item_id *ppid)
2410 {
2411 	struct efx_nic *efx = netdev_priv(net_dev);
2412 
2413 	if (efx->type->get_phys_port_id)
2414 		return efx->type->get_phys_port_id(efx, ppid);
2415 	else
2416 		return -EOPNOTSUPP;
2417 }
2418 
2419 static int efx_get_phys_port_name(struct net_device *net_dev,
2420 				  char *name, size_t len)
2421 {
2422 	struct efx_nic *efx = netdev_priv(net_dev);
2423 
2424 	if (snprintf(name, len, "p%u", efx->port_num) >= len)
2425 		return -EINVAL;
2426 	return 0;
2427 }
2428 
2429 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2430 {
2431 	struct efx_nic *efx = netdev_priv(net_dev);
2432 
2433 	if (efx->type->vlan_rx_add_vid)
2434 		return efx->type->vlan_rx_add_vid(efx, proto, vid);
2435 	else
2436 		return -EOPNOTSUPP;
2437 }
2438 
2439 static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2440 {
2441 	struct efx_nic *efx = netdev_priv(net_dev);
2442 
2443 	if (efx->type->vlan_rx_kill_vid)
2444 		return efx->type->vlan_rx_kill_vid(efx, proto, vid);
2445 	else
2446 		return -EOPNOTSUPP;
2447 }
2448 
2449 static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
2450 {
2451 	switch (in) {
2452 	case UDP_TUNNEL_TYPE_VXLAN:
2453 		return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
2454 	case UDP_TUNNEL_TYPE_GENEVE:
2455 		return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
2456 	default:
2457 		return -1;
2458 	}
2459 }
2460 
2461 static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
2462 {
2463 	struct efx_nic *efx = netdev_priv(dev);
2464 	struct efx_udp_tunnel tnl;
2465 	int efx_tunnel_type;
2466 
2467 	efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2468 	if (efx_tunnel_type < 0)
2469 		return;
2470 
2471 	tnl.type = (u16)efx_tunnel_type;
2472 	tnl.port = ti->port;
2473 
2474 	if (efx->type->udp_tnl_add_port)
2475 		(void)efx->type->udp_tnl_add_port(efx, tnl);
2476 }
2477 
2478 static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
2479 {
2480 	struct efx_nic *efx = netdev_priv(dev);
2481 	struct efx_udp_tunnel tnl;
2482 	int efx_tunnel_type;
2483 
2484 	efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2485 	if (efx_tunnel_type < 0)
2486 		return;
2487 
2488 	tnl.type = (u16)efx_tunnel_type;
2489 	tnl.port = ti->port;
2490 
2491 	if (efx->type->udp_tnl_del_port)
2492 		(void)efx->type->udp_tnl_del_port(efx, tnl);
2493 }
2494 
2495 static const struct net_device_ops efx_netdev_ops = {
2496 	.ndo_open		= efx_net_open,
2497 	.ndo_stop		= efx_net_stop,
2498 	.ndo_get_stats64	= efx_net_stats,
2499 	.ndo_tx_timeout		= efx_watchdog,
2500 	.ndo_start_xmit		= efx_hard_start_xmit,
2501 	.ndo_validate_addr	= eth_validate_addr,
2502 	.ndo_do_ioctl		= efx_ioctl,
2503 	.ndo_change_mtu		= efx_change_mtu,
2504 	.ndo_set_mac_address	= efx_set_mac_address,
2505 	.ndo_set_rx_mode	= efx_set_rx_mode,
2506 	.ndo_set_features	= efx_set_features,
2507 	.ndo_vlan_rx_add_vid	= efx_vlan_rx_add_vid,
2508 	.ndo_vlan_rx_kill_vid	= efx_vlan_rx_kill_vid,
2509 #ifdef CONFIG_SFC_SRIOV
2510 	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
2511 	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
2512 	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
2513 	.ndo_get_vf_config	= efx_sriov_get_vf_config,
2514 	.ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
2515 #endif
2516 	.ndo_get_phys_port_id   = efx_get_phys_port_id,
2517 	.ndo_get_phys_port_name	= efx_get_phys_port_name,
2518 #ifdef CONFIG_NET_POLL_CONTROLLER
2519 	.ndo_poll_controller = efx_netpoll,
2520 #endif
2521 	.ndo_setup_tc		= efx_setup_tc,
2522 #ifdef CONFIG_RFS_ACCEL
2523 	.ndo_rx_flow_steer	= efx_filter_rfs,
2524 #endif
2525 	.ndo_udp_tunnel_add	= efx_udp_tunnel_add,
2526 	.ndo_udp_tunnel_del	= efx_udp_tunnel_del,
2527 };
2528 
2529 static void efx_update_name(struct efx_nic *efx)
2530 {
2531 	strcpy(efx->name, efx->net_dev->name);
2532 	efx_mtd_rename(efx);
2533 	efx_set_channel_names(efx);
2534 }
2535 
2536 static int efx_netdev_event(struct notifier_block *this,
2537 			    unsigned long event, void *ptr)
2538 {
2539 	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2540 
2541 	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
2542 	    event == NETDEV_CHANGENAME)
2543 		efx_update_name(netdev_priv(net_dev));
2544 
2545 	return NOTIFY_DONE;
2546 }
2547 
2548 static struct notifier_block efx_netdev_notifier = {
2549 	.notifier_call = efx_netdev_event,
2550 };
2551 
2552 static ssize_t
2553 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2554 {
2555 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2556 	return sprintf(buf, "%d\n", efx->phy_type);
2557 }
2558 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2559 
2560 #ifdef CONFIG_SFC_MCDI_LOGGING
2561 static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
2562 			     char *buf)
2563 {
2564 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2565 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2566 
2567 	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
2568 }
2569 static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
2570 			    const char *buf, size_t count)
2571 {
2572 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2573 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2574 	bool enable = count > 0 && *buf != '0';
2575 
2576 	mcdi->logging_enabled = enable;
2577 	return count;
2578 }
2579 static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
2580 #endif
2581 
2582 static int efx_register_netdev(struct efx_nic *efx)
2583 {
2584 	struct net_device *net_dev = efx->net_dev;
2585 	struct efx_channel *channel;
2586 	int rc;
2587 
2588 	net_dev->watchdog_timeo = 5 * HZ;
2589 	net_dev->irq = efx->pci_dev->irq;
2590 	net_dev->netdev_ops = &efx_netdev_ops;
2591 	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
2592 		net_dev->priv_flags |= IFF_UNICAST_FLT;
2593 	net_dev->ethtool_ops = &efx_ethtool_ops;
2594 	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2595 	net_dev->min_mtu = EFX_MIN_MTU;
2596 	net_dev->max_mtu = EFX_MAX_MTU;
2597 
2598 	rtnl_lock();
2599 
2600 	/* Enable resets to be scheduled and check whether any were
2601 	 * already requested.  If so, the NIC is probably hosed so we
2602 	 * abort.
2603 	 */
2604 	efx->state = STATE_READY;
2605 	smp_mb(); /* ensure we change state before checking reset_pending */
2606 	if (efx->reset_pending) {
2607 		netif_err(efx, probe, efx->net_dev,
2608 			  "aborting probe due to scheduled reset\n");
2609 		rc = -EIO;
2610 		goto fail_locked;
2611 	}
2612 
2613 	rc = dev_alloc_name(net_dev, net_dev->name);
2614 	if (rc < 0)
2615 		goto fail_locked;
2616 	efx_update_name(efx);
2617 
2618 	/* Always start with carrier off; PHY events will detect the link */
2619 	netif_carrier_off(net_dev);
2620 
2621 	rc = register_netdevice(net_dev);
2622 	if (rc)
2623 		goto fail_locked;
2624 
2625 	efx_for_each_channel(channel, efx) {
2626 		struct efx_tx_queue *tx_queue;
2627 		efx_for_each_channel_tx_queue(tx_queue, channel)
2628 			efx_init_tx_queue_core_txq(tx_queue);
2629 	}
2630 
2631 	efx_associate(efx);
2632 
2633 	rtnl_unlock();
2634 
2635 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2636 	if (rc) {
2637 		netif_err(efx, drv, efx->net_dev,
2638 			  "failed to init net dev attributes\n");
2639 		goto fail_registered;
2640 	}
2641 #ifdef CONFIG_SFC_MCDI_LOGGING
2642 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2643 	if (rc) {
2644 		netif_err(efx, drv, efx->net_dev,
2645 			  "failed to init net dev attributes\n");
2646 		goto fail_attr_mcdi_logging;
2647 	}
2648 #endif
2649 
2650 	return 0;
2651 
2652 #ifdef CONFIG_SFC_MCDI_LOGGING
2653 fail_attr_mcdi_logging:
2654 	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2655 #endif
2656 fail_registered:
2657 	rtnl_lock();
2658 	efx_dissociate(efx);
2659 	unregister_netdevice(net_dev);
2660 fail_locked:
2661 	efx->state = STATE_UNINIT;
2662 	rtnl_unlock();
2663 	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2664 	return rc;
2665 }
2666 
2667 static void efx_unregister_netdev(struct efx_nic *efx)
2668 {
2669 	if (!efx->net_dev)
2670 		return;
2671 
2672 	BUG_ON(netdev_priv(efx->net_dev) != efx);
2673 
2674 	if (efx_dev_registered(efx)) {
2675 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2676 #ifdef CONFIG_SFC_MCDI_LOGGING
2677 		device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2678 #endif
2679 		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2680 		unregister_netdev(efx->net_dev);
2681 	}
2682 }
2683 
2684 /**************************************************************************
2685  *
2686  * Device reset and suspend
2687  *
2688  **************************************************************************/
2689 
2690 /* Tears down the entire software state and most of the hardware state
2691  * before reset.  */
2692 void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2693 {
2694 	EFX_ASSERT_RESET_SERIALISED(efx);
2695 
2696 	if (method == RESET_TYPE_MCDI_TIMEOUT)
2697 		efx->type->prepare_flr(efx);
2698 
2699 	efx_stop_all(efx);
2700 	efx_disable_interrupts(efx);
2701 
2702 	mutex_lock(&efx->mac_lock);
2703 	mutex_lock(&efx->rss_lock);
2704 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2705 	    method != RESET_TYPE_DATAPATH)
2706 		efx->phy_op->fini(efx);
2707 	efx->type->fini(efx);
2708 }
2709 
2710 /* This function will always ensure that the locks acquired in
2711  * efx_reset_down() are released. A failure return code indicates
2712  * that we were unable to reinitialise the hardware, and the
2713  * driver should be disabled. If ok is false, then the rx and tx
2714  * engines are not restarted, pending a RESET_DISABLE. */
2715 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2716 {
2717 	int rc;
2718 
2719 	EFX_ASSERT_RESET_SERIALISED(efx);
2720 
2721 	if (method == RESET_TYPE_MCDI_TIMEOUT)
2722 		efx->type->finish_flr(efx);
2723 
2724 	/* Ensure that SRAM is initialised even if we're disabling the device */
2725 	rc = efx->type->init(efx);
2726 	if (rc) {
2727 		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2728 		goto fail;
2729 	}
2730 
2731 	if (!ok)
2732 		goto fail;
2733 
2734 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2735 	    method != RESET_TYPE_DATAPATH) {
2736 		rc = efx->phy_op->init(efx);
2737 		if (rc)
2738 			goto fail;
2739 		rc = efx->phy_op->reconfigure(efx);
2740 		if (rc && rc != -EPERM)
2741 			netif_err(efx, drv, efx->net_dev,
2742 				  "could not restore PHY settings\n");
2743 	}
2744 
2745 	rc = efx_enable_interrupts(efx);
2746 	if (rc)
2747 		goto fail;
2748 
2749 #ifdef CONFIG_SFC_SRIOV
2750 	rc = efx->type->vswitching_restore(efx);
2751 	if (rc) /* not fatal; the PF will still work fine */
2752 		netif_warn(efx, probe, efx->net_dev,
2753 			   "failed to restore vswitching rc=%d;"
2754 			   " VFs may not function\n", rc);
2755 #endif
2756 
2757 	if (efx->type->rx_restore_rss_contexts)
2758 		efx->type->rx_restore_rss_contexts(efx);
2759 	mutex_unlock(&efx->rss_lock);
2761 	efx_restore_filters(efx);
2763 	if (efx->type->sriov_reset)
2764 		efx->type->sriov_reset(efx);
2765 
2766 	mutex_unlock(&efx->mac_lock);
2767 
2768 	efx_start_all(efx);
2769 
2770 	if (efx->type->udp_tnl_push_ports)
2771 		efx->type->udp_tnl_push_ports(efx);
2772 
2773 	return 0;
2774 
2775 fail:
2776 	efx->port_initialized = false;
2777 
2778 	mutex_unlock(&efx->rss_lock);
2779 	mutex_unlock(&efx->mac_lock);
2780 
2781 	return rc;
2782 }
2783 
2784 /* Reset the NIC using the specified method.  Note that the reset may
2785  * fail, in which case the card will be left in an unusable state.
2786  *
2787  * Caller must hold the rtnl_lock.
2788  */
2789 int efx_reset(struct efx_nic *efx, enum reset_type method)
2790 {
2791 	int rc, rc2;
2792 	bool disabled;
2793 
2794 	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2795 		   RESET_TYPE(method));
2796 
2797 	efx_device_detach_sync(efx);
2798 	efx_reset_down(efx, method);
2799 
2800 	rc = efx->type->reset(efx, method);
2801 	if (rc) {
2802 		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2803 		goto out;
2804 	}
2805 
2806 	/* Clear flags for the scopes we covered.  We assume the NIC and
2807 	 * driver are now quiescent so that there is no race here.
2808 	 */
2809 	if (method < RESET_TYPE_MAX_METHOD)
2810 		efx->reset_pending &= -(1 << (method + 1));
2811 	else /* it doesn't fit into the well-ordered scope hierarchy */
2812 		__clear_bit(method, &efx->reset_pending);
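	/* Worked example of the scope mask above: for method == 2,
	 * -(1 << 3) == ~0b111, so pending bits 0-2 (this method and the
	 * narrower scopes it subsumes) are cleared, while wider-scoped,
	 * higher-numbered reset requests stay pending.
	 */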
2813 
2814 	/* Reinitialise bus-mastering, which may have been turned off before
2815 	 * the reset was scheduled. This is still appropriate, even in the
2816 	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
2817 	 * can respond to requests. */
2818 	pci_set_master(efx->pci_dev);
2819 
2820 out:
2821 	/* Leave device stopped if necessary */
2822 	disabled = rc ||
2823 		method == RESET_TYPE_DISABLE ||
2824 		method == RESET_TYPE_RECOVER_OR_DISABLE;
2825 	rc2 = efx_reset_up(efx, method, !disabled);
2826 	if (rc2) {
2827 		disabled = true;
2828 		if (!rc)
2829 			rc = rc2;
2830 	}
2831 
2832 	if (disabled) {
2833 		dev_close(efx->net_dev);
2834 		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2835 		efx->state = STATE_DISABLED;
2836 	} else {
2837 		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2838 		efx_device_attach_if_not_resetting(efx);
2839 	}
2840 	return rc;
2841 }
2842 
2843 /* Try recovery mechanisms.
2844  * For now only EEH is supported.
2845  * Returns 0 if the recovery mechanisms are unsuccessful.
2846  * Returns a non-zero value otherwise.
2847  */
2848 int efx_try_recovery(struct efx_nic *efx)
2849 {
2850 #ifdef CONFIG_EEH
2851 	/* A PCI error can occur and not be seen by EEH because nothing
2852 	 * happens on the PCI bus. In this case the driver may fail and
2853 	 * schedule a 'recover or reset', leading to this recovery handler.
2854 	 * Manually call the eeh failure check function.
2855 	 */
2856 	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2857 	if (eeh_dev_check_failure(eehdev)) {
2858 		/* The EEH mechanisms will handle the error and reset the
2859 		 * device if necessary.
2860 		 */
2861 		return 1;
2862 	}
2863 #endif
2864 	return 0;
2865 }
2866 
2867 static void efx_wait_for_bist_end(struct efx_nic *efx)
2868 {
2869 	int i;
2870 
2871 	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
2872 		if (efx_mcdi_poll_reboot(efx))
2873 			goto out;
2874 		msleep(BIST_WAIT_DELAY_MS);
2875 	}
2876 
2877 	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
2878 out:
2879 	/* Either way unset the BIST flag. If we found no reboot we probably
2880 	 * won't recover, but we should try.
2881 	 */
2882 	efx->mc_bist_for_other_fn = false;
2883 }
2884 
2885 /* The worker thread exists so that code that cannot sleep can
2886  * schedule a reset for later.
2887  */
2888 static void efx_reset_work(struct work_struct *data)
2889 {
2890 	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2891 	unsigned long pending;
2892 	enum reset_type method;
2893 
2894 	pending = READ_ONCE(efx->reset_pending);
2895 	method = fls(pending) - 1;
2896 
2897 	if (method == RESET_TYPE_MC_BIST)
2898 		efx_wait_for_bist_end(efx);
2899 
2900 	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2901 	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2902 	    efx_try_recovery(efx))
2903 		return;
2904 
2905 	if (!pending)
2906 		return;
2907 
2908 	rtnl_lock();
2909 
2910 	/* We checked the state in efx_schedule_reset() but it may
2911 	 * have changed by now.  Now that we have the RTNL lock,
2912 	 * it cannot change again.
2913 	 */
2914 	if (efx->state == STATE_READY)
2915 		(void)efx_reset(efx, method);
2916 
2917 	rtnl_unlock();
2918 }
2919 
2920 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2921 {
2922 	enum reset_type method;
2923 
2924 	if (efx->state == STATE_RECOVERY) {
2925 		netif_dbg(efx, drv, efx->net_dev,
2926 			  "recovering: skip scheduling %s reset\n",
2927 			  RESET_TYPE(type));
2928 		return;
2929 	}
2930 
2931 	switch (type) {
2932 	case RESET_TYPE_INVISIBLE:
2933 	case RESET_TYPE_ALL:
2934 	case RESET_TYPE_RECOVER_OR_ALL:
2935 	case RESET_TYPE_WORLD:
2936 	case RESET_TYPE_DISABLE:
2937 	case RESET_TYPE_RECOVER_OR_DISABLE:
2938 	case RESET_TYPE_DATAPATH:
2939 	case RESET_TYPE_MC_BIST:
2940 	case RESET_TYPE_MCDI_TIMEOUT:
2941 		method = type;
2942 		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2943 			  RESET_TYPE(method));
2944 		break;
2945 	default:
2946 		method = efx->type->map_reset_reason(type);
2947 		netif_dbg(efx, drv, efx->net_dev,
2948 			  "scheduling %s reset for %s\n",
2949 			  RESET_TYPE(method), RESET_TYPE(type));
2950 		break;
2951 	}
2952 
2953 	set_bit(method, &efx->reset_pending);
2954 	smp_mb(); /* ensure we change reset_pending before checking state */
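	/* (That barrier pairs with the smp_mb() between setting STATE_READY
	 * and re-checking reset_pending in efx_register_netdev().)
	 */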
2955 
2956 	/* If we're not READY then just leave the flags set as the cue
2957 	 * to abort probing or reschedule the reset later.
2958 	 */
2959 	if (READ_ONCE(efx->state) != STATE_READY)
2960 		return;
2961 
2962 	/* efx_process_channel() will no longer read events once a
2963 	 * reset is scheduled. So switch back to poll'd MCDI completions. */
2964 	efx_mcdi_mode_poll(efx);
2965 
2966 	queue_work(reset_workqueue, &efx->reset_work);
2967 }
2968 
2969 /**************************************************************************
2970  *
2971  * List of NICs we support
2972  *
2973  **************************************************************************/
2974 
2975 /* PCI device ID table */
2976 static const struct pci_device_id efx_pci_table[] = {
2977 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
2978 	 .driver_data = (unsigned long) &siena_a0_nic_type},
2979 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
2980 	 .driver_data = (unsigned long) &siena_a0_nic_type},
2981 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
2982 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2983 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
2984 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2985 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
2986 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2987 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
2988 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2989 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
2990 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2991 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
2992 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2993 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
2994 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2995 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
2996 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2997 	{0}			/* end of list */
2998 };
2999 
3000 /**************************************************************************
3001  *
3002  * Dummy PHY/MAC operations
3003  *
3004  * Can be used for some unimplemented operations
3005  * Needed so all function pointers are valid and do not have to be tested
3006  * before use
3007  *
3008  **************************************************************************/
3009 int efx_port_dummy_op_int(struct efx_nic *efx)
3010 {
3011 	return 0;
3012 }
3013 void efx_port_dummy_op_void(struct efx_nic *efx) {}
3014 
3015 static bool efx_port_dummy_op_poll(struct efx_nic *efx)
3016 {
3017 	return false;
3018 }
3019 
3020 static const struct efx_phy_operations efx_dummy_phy_operations = {
3021 	.init		 = efx_port_dummy_op_int,
3022 	.reconfigure	 = efx_port_dummy_op_int,
3023 	.poll		 = efx_port_dummy_op_poll,
3024 	.fini		 = efx_port_dummy_op_void,
3025 };
3026 
3027 /**************************************************************************
3028  *
3029  * Data housekeeping
3030  *
3031  **************************************************************************/
3032 
3033 /* This zeroes out and then fills in the invariants in a struct
3034  * efx_nic (including all sub-structures).
3035  */
3036 static int efx_init_struct(struct efx_nic *efx,
3037 			   struct pci_dev *pci_dev, struct net_device *net_dev)
3038 {
3039 	int rc = -ENOMEM, i;
3040 
3041 	/* Initialise common structures */
3042 	INIT_LIST_HEAD(&efx->node);
3043 	INIT_LIST_HEAD(&efx->secondary_list);
3044 	spin_lock_init(&efx->biu_lock);
3045 #ifdef CONFIG_SFC_MTD
3046 	INIT_LIST_HEAD(&efx->mtd_list);
3047 #endif
3048 	INIT_WORK(&efx->reset_work, efx_reset_work);
3049 	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
3050 	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
3051 	efx->pci_dev = pci_dev;
3052 	efx->msg_enable = debug;
3053 	efx->state = STATE_UNINIT;
3054 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
3055 
3056 	efx->net_dev = net_dev;
3057 	efx->rx_prefix_size = efx->type->rx_prefix_size;
3058 	efx->rx_ip_align =
3059 		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
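	/* e.g. with a hypothetical 14-byte RX prefix and NET_IP_ALIGN == 2,
	 * (14 + 2) % 4 == 0: the prefix already leaves the IP header
	 * aligned, so no extra receive buffer offset is needed.
	 */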
3060 	efx->rx_packet_hash_offset =
3061 		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
3062 	efx->rx_packet_ts_offset =
3063 		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
3064 	INIT_LIST_HEAD(&efx->rss_context.list);
3065 	mutex_init(&efx->rss_lock);
3066 	spin_lock_init(&efx->stats_lock);
3067 	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
3068 	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
3069 	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
3070 	mutex_init(&efx->mac_lock);
3071 #ifdef CONFIG_RFS_ACCEL
3072 	mutex_init(&efx->rps_mutex);
3073 	spin_lock_init(&efx->rps_hash_lock);
3074 	/* Failure to allocate is not fatal, but may degrade ARFS performance */
3075 	efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3076 				      sizeof(*efx->rps_hash_table), GFP_KERNEL);
3077 #endif
3078 	efx->phy_op = &efx_dummy_phy_operations;
3079 	efx->mdio.dev = net_dev;
3080 	INIT_WORK(&efx->mac_work, efx_mac_work);
3081 	init_waitqueue_head(&efx->flush_wq);
3082 
3083 	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
3084 		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
3085 		if (!efx->channel[i])
3086 			goto fail;
3087 		efx->msi_context[i].efx = efx;
3088 		efx->msi_context[i].index = i;
3089 	}
3090 
3091 	/* Higher numbered interrupt modes are less capable! */
3092 	if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
3093 			 efx->type->min_interrupt_mode)) {
3094 		rc = -EIO;
3095 		goto fail;
3096 	}
3097 	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
3098 				  interrupt_mode);
3099 	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
3100 				  interrupt_mode);
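	/* e.g. with the usual mode numbering (hypothetically MSI-X = 0,
	 * MSI = 1, legacy = 2), a NIC type with min_interrupt_mode == 1
	 * has a requested legacy mode (2) clamped down to MSI by the
	 * min() above.
	 */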
3101 
3102 	/* Would be good to use the net_dev name, but we're too early */
3103 	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
3104 		 pci_name(pci_dev));
3105 	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
3106 	if (!efx->workqueue)
3107 		goto fail;
3108 
3109 	return 0;
3110 
3111 fail:
3112 	efx_fini_struct(efx);
3113 	return rc;
3114 }
3115 
3116 static void efx_fini_struct(struct efx_nic *efx)
3117 {
3118 	int i;
3119 
3120 #ifdef CONFIG_RFS_ACCEL
3121 	kfree(efx->rps_hash_table);
3122 #endif
3123 
3124 	for (i = 0; i < EFX_MAX_CHANNELS; i++)
3125 		kfree(efx->channel[i]);
3126 
3127 	kfree(efx->vpd_sn);
3128 
3129 	if (efx->workqueue) {
3130 		destroy_workqueue(efx->workqueue);
3131 		efx->workqueue = NULL;
3132 	}
3133 }
3134 
3135 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3136 {
3137 	u64 n_rx_nodesc_trunc = 0;
3138 	struct efx_channel *channel;
3139 
3140 	efx_for_each_channel(channel, efx)
3141 		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
3142 	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
3143 	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3144 }
3145 
3146 bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3147 			   const struct efx_filter_spec *right)
3148 {
3149 	if ((left->match_flags ^ right->match_flags) |
3150 	    ((left->flags ^ right->flags) &
3151 	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3152 		return false;
3153 
3154 	return memcmp(&left->outer_vid, &right->outer_vid,
3155 		      sizeof(struct efx_filter_spec) -
3156 		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
3157 }
3158 
3159 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3160 {
3161 	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3162 	return jhash2((const u32 *)&spec->outer_vid,
3163 		      (sizeof(struct efx_filter_spec) -
3164 		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
3165 		      0);
3166 }
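
/* Both helpers above deliberately start at outer_vid: the fields of
 * struct efx_filter_spec before it (priority, dmaq_id and so on)
 * describe what to do with a match rather than what to match on, so
 * they are excluded from hashing and equality, apart from the explicit
 * match_flags and RX/TX flag checks in efx_filter_spec_equal().
 */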
3167 
3168 #ifdef CONFIG_RFS_ACCEL
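/* The special rule->filter_id values tested below mark the state of an
 * ARFS entry: _PENDING means an insertion is still in flight, _ERROR
 * means the last insertion attempt failed, and _REMOVING means a caller
 * holding the filter table lock has already claimed the entry for
 * removal.
 */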
3169 bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3170 			bool *force)
3171 {
3172 	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3173 		/* ARFS is currently updating this entry, leave it */
3174 		return false;
3175 	}
3176 	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3177 		/* ARFS tried and failed to update this, so it's probably out
3178 		 * of date.  Remove the filter and the ARFS rule entry.
3179 		 */
3180 		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3181 		*force = true;
3182 		return true;
3183 	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3184 		/* ARFS has moved on, so the old filter is not needed.  Since we did
3185 		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3186 		 * not be removed by efx_rps_hash_del() subsequently.
3187 		 */
3188 		*force = true;
3189 		return true;
3190 	}
3191 	/* Remove it iff ARFS wants to. */
3192 	return true;
3193 }
3194 
3195 static
3196 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3197 				       const struct efx_filter_spec *spec)
3198 {
3199 	u32 hash = efx_filter_spec_hash(spec);
3200 
3201 	WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
3202 	if (!efx->rps_hash_table)
3203 		return NULL;
3204 	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3205 }
3206 
3207 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3208 					const struct efx_filter_spec *spec)
3209 {
3210 	struct efx_arfs_rule *rule;
3211 	struct hlist_head *head;
3212 	struct hlist_node *node;
3213 
3214 	head = efx_rps_hash_bucket(efx, spec);
3215 	if (!head)
3216 		return NULL;
3217 	hlist_for_each(node, head) {
3218 		rule = container_of(node, struct efx_arfs_rule, node);
3219 		if (efx_filter_spec_equal(spec, &rule->spec))
3220 			return rule;
3221 	}
3222 	return NULL;
3223 }
3224 
3225 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3226 				       const struct efx_filter_spec *spec,
3227 				       bool *new)
3228 {
3229 	struct efx_arfs_rule *rule;
3230 	struct hlist_head *head;
3231 	struct hlist_node *node;
3232 
3233 	head = efx_rps_hash_bucket(efx, spec);
3234 	if (!head)
3235 		return NULL;
3236 	hlist_for_each(node, head) {
3237 		rule = container_of(node, struct efx_arfs_rule, node);
3238 		if (efx_filter_spec_equal(spec, &rule->spec)) {
3239 			*new = false;
3240 			return rule;
3241 		}
3242 	}
3243 	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3244 	*new = true;
3245 	if (rule) {
3246 		memcpy(&rule->spec, spec, sizeof(rule->spec));
3247 		hlist_add_head(&rule->node, head);
3248 	}
3249 	return rule;
3250 }
3251 
3252 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3253 {
3254 	struct efx_arfs_rule *rule;
3255 	struct hlist_head *head;
3256 	struct hlist_node *node;
3257 
3258 	head = efx_rps_hash_bucket(efx, spec);
3259 	if (WARN_ON(!head))
3260 		return;
3261 	hlist_for_each(node, head) {
3262 		rule = container_of(node, struct efx_arfs_rule, node);
3263 		if (efx_filter_spec_equal(spec, &rule->spec)) {
3264 			/* Someone already reused the entry.  We know that if
3265 			 * this check doesn't fire (i.e. filter_id == REMOVING)
3266 			 * then the REMOVING mark was put there by our caller,
3267 			 * because the caller holds a lock on the filter table and
3268 			 * only holders of that lock set REMOVING.
3269 			 */
3270 			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3271 				return;
3272 			hlist_del(node);
3273 			kfree(rule);
3274 			return;
3275 		}
3276 	}
3277 	/* We didn't find it. */
3278 	WARN_ON(1);
3279 }
3280 #endif
3281 
3282 /* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
3283  * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
3284  */
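/* For instance, if the list currently holds user_ids 1, 2 and 4, the gap
 * search below stops at the entry with user_id 4 while id == 3, and the
 * new context is inserted before that entry with user_id 3.
 */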
3285 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
3286 {
3287 	struct list_head *head = &efx->rss_context.list;
3288 	struct efx_rss_context *ctx, *new;
3289 	u32 id = 1; /* Don't use zero, that refers to the master RSS context */
3290 
3291 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
3292 
3293 	/* Search for first gap in the numbering */
3294 	list_for_each_entry(ctx, head, list) {
3295 		if (ctx->user_id != id)
3296 			break;
3297 		id++;
3298 		/* Check for wrap.  If this happens, we have nearly 2^32
3299 		 * allocated RSS contexts, which seems unlikely.
3300 		 */
3301 		if (WARN_ON_ONCE(!id))
3302 			return NULL;
3303 	}
3304 
3305 	/* Create the new entry */
3306 	new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
3307 	if (!new)
3308 		return NULL;
3309 	new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3310 	new->rx_hash_udp_4tuple = false;
3311 
3312 	/* Insert the new entry into the gap */
3313 	new->user_id = id;
3314 	list_add_tail(&new->list, &ctx->list);
3315 	return new;
3316 }
3317 
3318 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
3319 {
3320 	struct list_head *head = &efx->rss_context.list;
3321 	struct efx_rss_context *ctx;
3322 
3323 	WARN_ON(!mutex_is_locked(&efx->rss_lock));
3324 
3325 	list_for_each_entry(ctx, head, list)
3326 		if (ctx->user_id == id)
3327 			return ctx;
3328 	return NULL;
3329 }
3330 
3331 void efx_free_rss_context_entry(struct efx_rss_context *ctx)
3332 {
3333 	list_del(&ctx->list);
3334 	kfree(ctx);
3335 }
3336 
3337 /**************************************************************************
3338  *
3339  * PCI interface
3340  *
3341  **************************************************************************/
3342 
3343 /* Main body of final NIC shutdown code
3344  * This is called only at module unload (or hotplug removal).
3345  */
3346 static void efx_pci_remove_main(struct efx_nic *efx)
3347 {
3348 	/* Flush reset_work. It can no longer be scheduled since we
3349 	 * are not READY.
3350 	 */
3351 	BUG_ON(efx->state == STATE_READY);
3352 	cancel_work_sync(&efx->reset_work);
3353 
3354 	efx_disable_interrupts(efx);
3355 	efx_clear_interrupt_affinity(efx);
3356 	efx_nic_fini_interrupt(efx);
3357 	efx_fini_port(efx);
3358 	efx->type->fini(efx);
3359 	efx_fini_napi(efx);
3360 	efx_remove_all(efx);
3361 }
3362 
3363 /* Final NIC shutdown
3364  * This is called only at module unload (or hotplug removal).  A PF can call
3365  * this on its VFs to ensure they are unbound first.
3366  */
3367 static void efx_pci_remove(struct pci_dev *pci_dev)
3368 {
3369 	struct efx_nic *efx;
3370 
3371 	efx = pci_get_drvdata(pci_dev);
3372 	if (!efx)
3373 		return;
3374 
3375 	/* Mark the NIC as fini, then stop the interface */
3376 	rtnl_lock();
3377 	efx_dissociate(efx);
3378 	dev_close(efx->net_dev);
3379 	efx_disable_interrupts(efx);
3380 	efx->state = STATE_UNINIT;
3381 	rtnl_unlock();
3382 
3383 	if (efx->type->sriov_fini)
3384 		efx->type->sriov_fini(efx);
3385 
3386 	efx_unregister_netdev(efx);
3387 
3388 	efx_mtd_remove(efx);
3389 
3390 	efx_pci_remove_main(efx);
3391 
3392 	efx_fini_io(efx);
3393 	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
3394 
3395 	efx_fini_struct(efx);
3396 	free_netdev(efx->net_dev);
3397 
3398 	pci_disable_pcie_error_reporting(pci_dev);
3399 }
3400 
3401 /* NIC VPD information
3402  * Called during probe to display the part number of the
3403  * installed NIC.  VPD is potentially very large but this should
3404  * always appear within the first 512 bytes.
3405  */
3406 #define SFC_VPD_LEN 512
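/* A sketch of the standard PCI VPD layout parsed below: a sequence of
 * tagged resource records, where the large-resource read-only record
 * (PCI_VPD_LRDT_RO_DATA) holds keyword fields such as "PN" (part
 * number) and "SN" (serial number), each preceded by a 3-byte header
 * giving the two-character keyword and the field length.
 */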
3407 static void efx_probe_vpd_strings(struct efx_nic *efx)
3408 {
3409 	struct pci_dev *dev = efx->pci_dev;
3410 	char vpd_data[SFC_VPD_LEN];
3411 	ssize_t vpd_size;
3412 	int ro_start, ro_size, i, j;
3413 
3414 	/* Get the vpd data from the device */
3415 	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
3416 	if (vpd_size <= 0) {
3417 		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
3418 		return;
3419 	}
3420 
3421 	/* Get the Read only section */
3422 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
3423 	if (ro_start < 0) {
3424 		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
3425 		return;
3426 	}
3427 
3428 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
3429 	j = ro_size;
3430 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3431 	if (i + j > vpd_size)
3432 		j = vpd_size - i;
3433 
3434 	/* Get the Part number */
3435 	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
3436 	if (i < 0) {
3437 		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
3438 		return;
3439 	}
3440 
3441 	j = pci_vpd_info_field_size(&vpd_data[i]);
3442 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
3443 	if (i + j > vpd_size) {
3444 		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
3445 		return;
3446 	}
3447 
3448 	netif_info(efx, drv, efx->net_dev,
3449 		   "Part Number : %.*s\n", j, &vpd_data[i]);
3450 
3451 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3452 	j = ro_size;
3453 	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
3454 	if (i < 0) {
3455 		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
3456 		return;
3457 	}
3458 
3459 	j = pci_vpd_info_field_size(&vpd_data[i]);
3460 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
3461 	if (i + j > vpd_size) {
3462 		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
3463 		return;
3464 	}
3465 
3466 	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
3467 	if (!efx->vpd_sn)
3468 		return;
3469 
3470 	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
3471 }
3472 
3473 
3474 /* Main body of NIC initialisation
3475  * This is called at module load (or hotplug insertion, theoretically).
3476  */
3477 static int efx_pci_probe_main(struct efx_nic *efx)
3478 {
3479 	int rc;
3480 
3481 	/* Do start-of-day initialisation */
3482 	rc = efx_probe_all(efx);
3483 	if (rc)
3484 		goto fail1;
3485 
3486 	efx_init_napi(efx);
3487 
3488 	rc = efx->type->init(efx);
3489 	if (rc) {
3490 		netif_err(efx, probe, efx->net_dev,
3491 			  "failed to initialise NIC\n");
3492 		goto fail3;
3493 	}
3494 
3495 	rc = efx_init_port(efx);
3496 	if (rc) {
3497 		netif_err(efx, probe, efx->net_dev,
3498 			  "failed to initialise port\n");
3499 		goto fail4;
3500 	}
3501 
3502 	rc = efx_nic_init_interrupt(efx);
3503 	if (rc)
3504 		goto fail5;
3505 
3506 	efx_set_interrupt_affinity(efx);
3507 	rc = efx_enable_interrupts(efx);
3508 	if (rc)
3509 		goto fail6;
3510 
3511 	return 0;
3512 
3513  fail6:
3514 	efx_clear_interrupt_affinity(efx);
3515 	efx_nic_fini_interrupt(efx);
3516  fail5:
3517 	efx_fini_port(efx);
3518  fail4:
3519 	efx->type->fini(efx);
3520  fail3:
3521 	efx_fini_napi(efx);
3522 	efx_remove_all(efx);
3523  fail1:
3524 	return rc;
3525 }
3526 
3527 static int efx_pci_probe_post_io(struct efx_nic *efx)
3528 {
3529 	struct net_device *net_dev = efx->net_dev;
3530 	int rc = efx_pci_probe_main(efx);
3531 
3532 	if (rc)
3533 		return rc;
3534 
3535 	if (efx->type->sriov_init) {
3536 		rc = efx->type->sriov_init(efx);
3537 		if (rc)
3538 			netif_err(efx, probe, efx->net_dev,
3539 				  "SR-IOV can't be enabled rc %d\n", rc);
3540 	}
3541 
3542 	/* Determine netdevice features */
3543 	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
3544 			      NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
3545 	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
3546 		net_dev->features |= NETIF_F_TSO6;
3547 	/* Check whether device supports TSO */
3548 	if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
3549 		net_dev->features &= ~NETIF_F_ALL_TSO;
3550 	/* Mask for features that also apply to VLAN devices */
3551 	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
3552 				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
3553 				   NETIF_F_RXCSUM);
3554 
3555 	net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
3556 
3557 	/* Disable receiving frames with bad FCS, by default. */
3558 	net_dev->features &= ~NETIF_F_RXALL;
3559 
3560 	/* Disable VLAN filtering by default.  It may be enforced if
3561 	 * the feature is fixed (i.e. VLAN filters are required to
3562 	 * receive VLAN tagged packets due to vPort restrictions).
3563 	 */
3564 	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3565 	net_dev->features |= efx->fixed_features;
3566 
3567 	rc = efx_register_netdev(efx);
3568 	if (!rc)
3569 		return 0;
3570 
3571 	efx_pci_remove_main(efx);
3572 	return rc;
3573 }
3574 
3575 /* NIC initialisation
3576  *
3577  * This is called at module load (or hotplug insertion,
3578  * theoretically).  It sets up PCI mappings, resets the NIC,
3579  * sets up and registers the network devices with the kernel and hooks
3580  * the interrupt service routine.  It does not prepare the device for
3581  * transmission; this is left to the first time one of the network
3582  * interfaces is brought up (i.e. efx_net_open).
3583  */
3584 static int efx_pci_probe(struct pci_dev *pci_dev,
3585 			 const struct pci_device_id *entry)
3586 {
3587 	struct net_device *net_dev;
3588 	struct efx_nic *efx;
3589 	int rc;
3590 
3591 	/* Allocate and initialise a struct net_device and struct efx_nic */
3592 	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
3593 				     EFX_MAX_RX_QUEUES);
3594 	if (!net_dev)
3595 		return -ENOMEM;
3596 	efx = netdev_priv(net_dev);
3597 	efx->type = (const struct efx_nic_type *) entry->driver_data;
3598 	efx->fixed_features |= NETIF_F_HIGHDMA;
3599 
3600 	pci_set_drvdata(pci_dev, efx);
3601 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
3602 	rc = efx_init_struct(efx, pci_dev, net_dev);
3603 	if (rc)
3604 		goto fail1;
3605 
3606 	netif_info(efx, probe, efx->net_dev,
3607 		   "Solarflare NIC detected\n");
3608 
3609 	if (!efx->type->is_vf)
3610 		efx_probe_vpd_strings(efx);
3611 
3612 	/* Set up basic I/O (BAR mappings etc) */
3613 	rc = efx_init_io(efx);
3614 	if (rc)
3615 		goto fail2;
3616 
3617 	rc = efx_pci_probe_post_io(efx);
3618 	if (rc) {
3619 		/* On failure, retry once immediately.
3620 		 * If we aborted probe due to a scheduled reset, dismiss it.
3621 		 */
3622 		efx->reset_pending = 0;
3623 		rc = efx_pci_probe_post_io(efx);
3624 		if (rc) {
3625 			/* On a second failure, retry once more after
3626 			 * a randomised 50-305ms delay.
3627 			 */
3628 			unsigned char r;
3629 
3630 			get_random_bytes(&r, 1);
3631 			msleep((unsigned int)r + 50);
3632 			efx->reset_pending = 0;
3633 			rc = efx_pci_probe_post_io(efx);
3634 		}
3635 	}
3636 	if (rc)
3637 		goto fail3;
3638 
3639 	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
3640 
3641 	/* Try to create MTDs, but allow this to fail */
3642 	rtnl_lock();
3643 	rc = efx_mtd_probe(efx);
3644 	rtnl_unlock();
3645 	if (rc && rc != -EPERM)
3646 		netif_warn(efx, probe, efx->net_dev,
3647 			   "failed to create MTDs (%d)\n", rc);
3648 
3649 	rc = pci_enable_pcie_error_reporting(pci_dev);
3650 	if (rc && rc != -EINVAL)
3651 		netif_notice(efx, probe, efx->net_dev,
3652 			     "PCIe error reporting unavailable (%d).\n",
3653 			     rc);
3654 
3655 	if (efx->type->udp_tnl_push_ports)
3656 		efx->type->udp_tnl_push_ports(efx);
3657 
3658 	return 0;
3659 
3660  fail3:
3661 	efx_fini_io(efx);
3662  fail2:
3663 	efx_fini_struct(efx);
3664  fail1:
3665 	WARN_ON(rc > 0);
3666 	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
3667 	free_netdev(net_dev);
3668 	return rc;
3669 }
3670 
3671 /* efx_pci_sriov_configure returns the actual number of Virtual Functions
3672  * enabled on success
3673  */
3674 #ifdef CONFIG_SFC_SRIOV
3675 static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
3676 {
3677 	int rc;
3678 	struct efx_nic *efx = pci_get_drvdata(dev);
3679 
3680 	if (!efx->type->sriov_configure)
3681 		return -EOPNOTSUPP;
3682 
3683 	rc = efx->type->sriov_configure(efx, num_vfs);
3684 	if (rc)
3685 		return rc;
3686 
3687 	return num_vfs;
3688 }
3689 #endif
3690 
3691 static int efx_pm_freeze(struct device *dev)
3692 {
3693 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3694 
3695 	rtnl_lock();
3696 
3697 	if (efx->state != STATE_DISABLED) {
3698 		efx->state = STATE_UNINIT;
3699 
3700 		efx_device_detach_sync(efx);
3701 
3702 		efx_stop_all(efx);
3703 		efx_disable_interrupts(efx);
3704 	}
3705 
3706 	rtnl_unlock();
3707 
3708 	return 0;
3709 }
3710 
3711 static int efx_pm_thaw(struct device *dev)
3712 {
3713 	int rc;
3714 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3715 
3716 	rtnl_lock();
3717 
3718 	if (efx->state != STATE_DISABLED) {
3719 		rc = efx_enable_interrupts(efx);
3720 		if (rc)
3721 			goto fail;
3722 
3723 		mutex_lock(&efx->mac_lock);
3724 		efx->phy_op->reconfigure(efx);
3725 		mutex_unlock(&efx->mac_lock);
3726 
3727 		efx_start_all(efx);
3728 
3729 		efx_device_attach_if_not_resetting(efx);
3730 
3731 		efx->state = STATE_READY;
3732 
3733 		efx->type->resume_wol(efx);
3734 	}
3735 
3736 	rtnl_unlock();
3737 
3738 	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
3739 	queue_work(reset_workqueue, &efx->reset_work);
3740 
3741 	return 0;
3742 
3743 fail:
3744 	rtnl_unlock();
3745 
3746 	return rc;
3747 }
3748 
3749 static int efx_pm_poweroff(struct device *dev)
3750 {
3751 	struct pci_dev *pci_dev = to_pci_dev(dev);
3752 	struct efx_nic *efx = pci_get_drvdata(pci_dev);
3753 
3754 	efx->type->fini(efx);
3755 
3756 	efx->reset_pending = 0;
3757 
3758 	pci_save_state(pci_dev);
3759 	return pci_set_power_state(pci_dev, PCI_D3hot);
3760 }
3761 
3762 /* Used for both resume and restore */
3763 static int efx_pm_resume(struct device *dev)
3764 {
3765 	struct pci_dev *pci_dev = to_pci_dev(dev);
3766 	struct efx_nic *efx = pci_get_drvdata(pci_dev);
3767 	int rc;
3768 
3769 	rc = pci_set_power_state(pci_dev, PCI_D0);
3770 	if (rc)
3771 		return rc;
3772 	pci_restore_state(pci_dev);
3773 	rc = pci_enable_device(pci_dev);
3774 	if (rc)
3775 		return rc;
3776 	pci_set_master(efx->pci_dev);
3777 	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3778 	if (rc)
3779 		return rc;
3780 	rc = efx->type->init(efx);
3781 	if (rc)
3782 		return rc;
3783 	rc = efx_pm_thaw(dev);
3784 	return rc;
3785 }
3786 
3787 static int efx_pm_suspend(struct device *dev)
3788 {
3789 	int rc;
3790 
3791 	efx_pm_freeze(dev);
3792 	rc = efx_pm_poweroff(dev);
3793 	if (rc)
3794 		efx_pm_resume(dev);
3795 	return rc;
3796 }
3797 
3798 static const struct dev_pm_ops efx_pm_ops = {
3799 	.suspend	= efx_pm_suspend,
3800 	.resume		= efx_pm_resume,
3801 	.freeze		= efx_pm_freeze,
3802 	.thaw		= efx_pm_thaw,
3803 	.poweroff	= efx_pm_poweroff,
3804 	.restore	= efx_pm_resume,
3805 };
3806 
3807 /* A PCI error affecting this device was detected.
3808  * At this point MMIO and DMA may be disabled.
3809  * Stop the software path and request a slot reset.
3810  */
3811 static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
3812 					      enum pci_channel_state state)
3813 {
3814 	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3815 	struct efx_nic *efx = pci_get_drvdata(pdev);
3816 
3817 	if (state == pci_channel_io_perm_failure)
3818 		return PCI_ERS_RESULT_DISCONNECT;
3819 
3820 	rtnl_lock();
3821 
3822 	if (efx->state != STATE_DISABLED) {
3823 		efx->state = STATE_RECOVERY;
3824 		efx->reset_pending = 0;
3825 
3826 		efx_device_detach_sync(efx);
3827 
3828 		efx_stop_all(efx);
3829 		efx_disable_interrupts(efx);
3830 
3831 		status = PCI_ERS_RESULT_NEED_RESET;
3832 	} else {
3833 		/* If the interface is disabled we don't want to do anything
3834 		 * with it.
3835 		 */
3836 		status = PCI_ERS_RESULT_RECOVERED;
3837 	}
3838 
3839 	rtnl_unlock();
3840 
3841 	pci_disable_device(pdev);
3842 
3843 	return status;
3844 }
3845 
3846 /* Report the reset as successful here; the actual reset is deferred to efx_io_resume(). */
3847 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
3848 {
3849 	struct efx_nic *efx = pci_get_drvdata(pdev);
3850 	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3851 	int rc;
3852 
3853 	if (pci_enable_device(pdev)) {
3854 		netif_err(efx, hw, efx->net_dev,
3855 			  "Cannot re-enable PCI device after reset.\n");
3856 		status = PCI_ERS_RESULT_DISCONNECT;
3857 	}
3858 
3859 	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3860 	if (rc) {
3861 		netif_err(efx, hw, efx->net_dev,
3862 			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3863 		/* Non-fatal error. Continue. */
3864 	}
3865 
3866 	return status;
3867 }
3868 
3869 /* Perform the actual reset and resume I/O operations. */
3870 static void efx_io_resume(struct pci_dev *pdev)
3871 {
3872 	struct efx_nic *efx = pci_get_drvdata(pdev);
3873 	int rc;
3874 
3875 	rtnl_lock();
3876 
3877 	if (efx->state == STATE_DISABLED)
3878 		goto out;
3879 
3880 	rc = efx_reset(efx, RESET_TYPE_ALL);
3881 	if (rc) {
3882 		netif_err(efx, hw, efx->net_dev,
3883 			  "efx_reset failed after PCI error (%d)\n", rc);
3884 	} else {
3885 		efx->state = STATE_READY;
3886 		netif_dbg(efx, hw, efx->net_dev,
3887 			  "Done resetting and resuming IO after PCI error.\n");
3888 	}
3889 
3890 out:
3891 	rtnl_unlock();
3892 }
3893 
3894 /* For simplicity and reliability, we always require a slot reset and try to
3895  * reset the hardware when a PCI error affecting the device is detected.
3896  * We leave both the link_reset and mmio_enabled callbacks unimplemented:
3897  * with our request for slot reset the mmio_enabled callback will never be
3898  * called, and the link_reset callback is not used by AER or EEH mechanisms.
3899  */
3900 static const struct pci_error_handlers efx_err_handlers = {
3901 	.error_detected = efx_io_error_detected,
3902 	.slot_reset	= efx_io_slot_reset,
3903 	.resume		= efx_io_resume,
3904 };
3905 
3906 static struct pci_driver efx_pci_driver = {
3907 	.name		= KBUILD_MODNAME,
3908 	.id_table	= efx_pci_table,
3909 	.probe		= efx_pci_probe,
3910 	.remove		= efx_pci_remove,
3911 	.driver.pm	= &efx_pm_ops,
3912 	.err_handler	= &efx_err_handlers,
3913 #ifdef CONFIG_SFC_SRIOV
3914 	.sriov_configure = efx_pci_sriov_configure,
3915 #endif
3916 };
3917 
3918 /**************************************************************************
3919  *
3920  * Kernel module interface
3921  *
3922  *************************************************************************/
3923 
3924 module_param(interrupt_mode, uint, 0444);
3925 MODULE_PARM_DESC(interrupt_mode,
3926 		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3927 
3928 static int __init efx_init_module(void)
3929 {
3930 	int rc;
3931 
3932 	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
3933 
3934 	rc = register_netdevice_notifier(&efx_netdev_notifier);
3935 	if (rc)
3936 		goto err_notifier;
3937 
3938 #ifdef CONFIG_SFC_SRIOV
3939 	rc = efx_init_sriov();
3940 	if (rc)
3941 		goto err_sriov;
3942 #endif
3943 
3944 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3945 	if (!reset_workqueue) {
3946 		rc = -ENOMEM;
3947 		goto err_reset;
3948 	}
3949 
3950 	rc = pci_register_driver(&efx_pci_driver);
3951 	if (rc < 0)
3952 		goto err_pci;
3953 
3954 	return 0;
3955 
3956  err_pci:
3957 	destroy_workqueue(reset_workqueue);
3958  err_reset:
3959 #ifdef CONFIG_SFC_SRIOV
3960 	efx_fini_sriov();
3961  err_sriov:
3962 #endif
3963 	unregister_netdevice_notifier(&efx_netdev_notifier);
3964  err_notifier:
3965 	return rc;
3966 }
3967 
3968 static void __exit efx_exit_module(void)
3969 {
3970 	printk(KERN_INFO "Solarflare NET driver unloading\n");
3971 
3972 	pci_unregister_driver(&efx_pci_driver);
3973 	destroy_workqueue(reset_workqueue);
3974 #ifdef CONFIG_SFC_SRIOV
3975 	efx_fini_sriov();
3976 #endif
3977 	unregister_netdevice_notifier(&efx_netdev_notifier);
3979 }
3980 
3981 module_init(efx_init_module);
3982 module_exit(efx_exit_module);
3983 
3984 MODULE_AUTHOR("Solarflare Communications and "
3985 	      "Michael Brown <mbrown@fensystems.co.uk>");
3986 MODULE_DESCRIPTION("Solarflare network driver");
3987 MODULE_LICENSE("GPL");
3988 MODULE_DEVICE_TABLE(pci, efx_pci_table);
3989 MODULE_VERSION(EFX_DRIVER_VERSION);
3990