// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int rss_cpus;
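/* Illustrative usage (assumes this is registered as the "rss_cpus" parameter
 * of the "sfc" module, as in the upstream driver): booting with
 * "sfc.rss_cpus=4" caps the driver at four RSS channels regardless of the
 * core count.
 */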

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
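/* Since both parameters use mode 0644 they can also be tuned at runtime,
 * e.g. (path assumes the driver is built as the "sfc" module):
 *   echo 12000 > /sys/module/sfc/parameters/irq_adapt_high_thresh
 */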

static const struct efx_channel_type efx_default_channel_type;

/*************
 * INTERRUPTS
 *************/

static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node)
		cpumask_and(filter_mask, filter_mask,
			    cpumask_of_pcibus(efx->pci_dev->bus));

	count = 0;
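	/* Count each physical core once: after counting a CPU, remove its
	 * SMT siblings from the mask so hyperthreads are not counted again.
	 */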
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in the local node, fall back to any online CPUs */
		if (count == 0)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
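	/* Worked example (hypothetical sizes): with 16 possible CPUs and
	 * tx_per_ev = 4, we want n_xdp_tx = 16 XDP TX queues served by
	 * n_xdp_ev = DIV_ROUND_UP(16, 4) = 4 extra event queues.
	 */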

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
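	/* The checks below pick one of three XDP TX queue modes:
	 * DEDICATED (a queue per possible CPU on dedicated XDP channels),
	 * SHARED (fewer XDP channels than wanted, so several CPUs share
	 * each XDP TX queue) or BORROWED (no room for XDP channels at
	 * all; XDP reuses the net-stack TX queues).
	 */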
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);

		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}
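
/* Resulting channel layout (indices into efx->channel):
 *   [0, n_rx_channels)                    RX (and TX when combined)
 *   [tx_channel_offset, +n_tx_channels)   TX
 *   [xdp_channel_offset, +n_xdp_channels) XDP TX (unless borrowed)
 */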

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}

#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in the local node, fall back to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;

	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */

void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/***************
 * EVENT QUEUES
 ***************/

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
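	/* Scale the quota so that, on average, the whole filter table is
	 * scanned for expiry roughly once every 30 seconds.
	 */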
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

int efx_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

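	/* Reset state that must be private to the new channel instance:
	 * the NAPI bookkeeping below and the DMA ring pointers that follow.
	 */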
	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void efx_get_channel_name(struct efx_channel *channel, char *buf,
				 size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
				struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev,
		  "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
	return 0;
}

static void efx_set_xdp_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number = 0;
	int rc;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->tx_channel_offset)
			continue;

		if (efx_channel_is_xdp_tx(channel)) {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		} else {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				netif_dbg(efx, drv, efx->net_dev,
					  "Channel %u TXQ %u is HW %u\n",
					  channel->channel, tx_queue->label,
					  tx_queue->queue);
			}

			/* If XDP is borrowing queues from net stack, it must
			 * use the queue with no csum offload, which is the
			 * first one of the channel
			 * (note: tx_queue_by_type is not initialized yet)
			 */
			if (efx->xdp_txq_queues_mode ==
			    EFX_XDP_TX_QUEUES_BORROWED) {
				tx_queue = &channel->tx_queue[0];
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, share the
	 * existing queues among the remaining CPUs.
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}
}

int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
			   *ptp_channel = efx_ptp_channel(efx);
	struct efx_ptp_data *ptp_data = efx->ptp_data;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

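	/* Strategy: clone each resizable channel, swap the clones into
	 * efx->channel[], re-probe them with the new ring sizes, and on
	 * failure swap the originals back (see "rollback" below).
	 */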
	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	efx_set_xdp_channels(efx);
out:
	efx->ptp_data = NULL;
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	efx->ptp_data = ptp_data;
	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	efx_ptp_update_channel(efx, ptp_channel);
	goto out;
}

int efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;
	}

	efx_set_xdp_channels(efx);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

/*************
 * START/STOP
 *************/

int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}

int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

void efx_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel_rev(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

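	/* A low event score suggests light load, so shorten the moderation
	 * interval for lower latency; a high score suggests heavy load, so
	 * lengthen it (bounded by the configured RX moderation) to batch
	 * more events per interrupt.
	 */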
	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
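		/* Under budget, so all pending work is done; complete NAPI
		 * and acknowledge the event queue, which re-arms the
		 * channel interrupt.
		 */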
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
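	/* 64 is the conventional NAPI poll weight (NAPI_POLL_WEIGHT) */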
	netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
}

void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/***************
 * Housekeeping
 ***************/

static int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};