1 /*
2  * Copyright (C) 2015 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * nfp_net_common.c
36  * Netronome network device driver: Common functions between PF and VF
37  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38  *          Jason McMullan <jason.mcmullan@netronome.com>
39  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
40  *          Brad Petrus <brad.petrus@netronome.com>
41  *          Chris Telfer <chris.telfer@netronome.com>
42  */
43 
44 #include <linux/version.h>
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/fs.h>
49 #include <linux/netdevice.h>
50 #include <linux/etherdevice.h>
51 #include <linux/interrupt.h>
52 #include <linux/ip.h>
53 #include <linux/ipv6.h>
54 #include <linux/pci.h>
55 #include <linux/pci_regs.h>
56 #include <linux/msi.h>
57 #include <linux/ethtool.h>
58 #include <linux/log2.h>
59 #include <linux/if_vlan.h>
60 #include <linux/random.h>
61 
62 #include <linux/ktime.h>
63 
64 #include <net/vxlan.h>
65 
66 #include "nfp_net_ctrl.h"
67 #include "nfp_net.h"
68 
69 /**
70  * nfp_net_get_fw_version() - Read and parse the FW version
71  * @fw_ver:	Output fw_version structure to read to
72  * @ctrl_bar:	Mapped address of the control BAR
73  */
74 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
75 			    void __iomem *ctrl_bar)
76 {
77 	u32 reg;
78 
79 	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
80 	put_unaligned_le32(reg, fw_ver);
81 }
82 
83 /* Firmware reconfig
84  *
85  * Firmware reconfig may take a while so we have two versions of it -
86  * synchronous and asynchronous (posted).  All synchronous callers are holding
87  * RTNL so we don't have to worry about serializing them.
88  */
89 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
90 {
91 	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
92 	/* ensure update is written before pinging HW */
93 	nn_pci_flush(nn);
94 	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
95 }
96 
97 /* Pass 0 as update to run posted reconfigs. */
98 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
99 {
100 	update |= nn->reconfig_posted;
101 	nn->reconfig_posted = 0;
102 
103 	nfp_net_reconfig_start(nn, update);
104 
105 	nn->reconfig_timer_active = true;
106 	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
107 }
108 
109 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
110 {
111 	u32 reg;
112 
113 	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
114 	if (reg == 0)
115 		return true;
116 	if (reg & NFP_NET_CFG_UPDATE_ERR) {
117 		nn_err(nn, "Reconfig error: 0x%08x\n", reg);
118 		return true;
119 	} else if (last_check) {
120 		nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
121 		return true;
122 	}
123 
124 	return false;
125 }
126 
127 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
128 {
129 	bool timed_out = false;
130 
131 	/* Poll update field, waiting for NFP to ack the config */
132 	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
133 		msleep(1);
134 		timed_out = time_is_before_eq_jiffies(deadline);
135 	}
136 
137 	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
138 		return -EIO;
139 
140 	return timed_out ? -EIO : 0;
141 }
142 
143 static void nfp_net_reconfig_timer(unsigned long data)
144 {
145 	struct nfp_net *nn = (void *)data;
146 
147 	spin_lock_bh(&nn->reconfig_lock);
148 
149 	nn->reconfig_timer_active = false;
150 
151 	/* If sync caller is present it will take over from us */
152 	if (nn->reconfig_sync_present)
153 		goto done;
154 
155 	/* Read reconfig status and report errors */
156 	nfp_net_reconfig_check_done(nn, true);
157 
158 	if (nn->reconfig_posted)
159 		nfp_net_reconfig_start_async(nn, 0);
160 done:
161 	spin_unlock_bh(&nn->reconfig_lock);
162 }
163 
164 /**
165  * nfp_net_reconfig_post() - Post async reconfig request
166  * @nn:      NFP Net device to reconfigure
167  * @update:  The value for the update field in the BAR config
168  *
169  * Record FW reconfiguration request.  Reconfiguration will be kicked off
170  * whenever reconfiguration machinery is idle.  Multiple requests can be
171  * merged together!
172  */
173 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
174 {
175 	spin_lock_bh(&nn->reconfig_lock);
176 
177 	/* Sync caller will kick off async reconf when it's done, just post */
178 	if (nn->reconfig_sync_present) {
179 		nn->reconfig_posted |= update;
180 		goto done;
181 	}
182 
183 	/* Opportunistically check if the previous command is done */
184 	if (!nn->reconfig_timer_active ||
185 	    nfp_net_reconfig_check_done(nn, false))
186 		nfp_net_reconfig_start_async(nn, update);
187 	else
188 		nn->reconfig_posted |= update;
189 done:
190 	spin_unlock_bh(&nn->reconfig_lock);
191 }
192 
193 /**
194  * nfp_net_reconfig() - Reconfigure the firmware
195  * @nn:      NFP Net device to reconfigure
196  * @update:  The value for the update field in the BAR config
197  *
198  * Write the update word to the BAR and ping the reconfig queue.  The
199  * poll until the firmware has acknowledged the update by zeroing the
200  * update word.
201  *
202  * Return: Negative errno on error, 0 on success
203  */
204 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
205 {
206 	bool cancelled_timer = false;
207 	u32 pre_posted_requests;
208 	int ret;
209 
210 	spin_lock_bh(&nn->reconfig_lock);
211 
212 	nn->reconfig_sync_present = true;
213 
214 	if (nn->reconfig_timer_active) {
215 		del_timer(&nn->reconfig_timer);
216 		nn->reconfig_timer_active = false;
217 		cancelled_timer = true;
218 	}
219 	pre_posted_requests = nn->reconfig_posted;
220 	nn->reconfig_posted = 0;
221 
222 	spin_unlock_bh(&nn->reconfig_lock);
223 
224 	if (cancelled_timer)
225 		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
226 
227 	/* Run the posted reconfigs which were issued before we started */
228 	if (pre_posted_requests) {
229 		nfp_net_reconfig_start(nn, pre_posted_requests);
230 		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
231 	}
232 
233 	nfp_net_reconfig_start(nn, update);
234 	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
235 
236 	spin_lock_bh(&nn->reconfig_lock);
237 
238 	if (nn->reconfig_posted)
239 		nfp_net_reconfig_start_async(nn, 0);
240 
241 	nn->reconfig_sync_present = false;
242 
243 	spin_unlock_bh(&nn->reconfig_lock);
244 
245 	return ret;
246 }
247 
248 /* Interrupt configuration and handling
249  */
250 
251 /**
252  * nfp_net_irq_unmask_msix() - Unmask MSI-X after automasking
253  * @nn:       NFP Network structure
254  * @entry_nr: MSI-X table entry
255  *
256  * Clear the MSI-X table mask bit for the given entry bypassing Linux irq
257  * handling subsystem.  Use *only* to reenable automasked vectors.
258  */
259 static void nfp_net_irq_unmask_msix(struct nfp_net *nn, unsigned int entry_nr)
260 {
261 	struct list_head *msi_head = &nn->pdev->dev.msi_list;
262 	struct msi_desc *entry;
263 	u32 off;
264 
265 	/* All MSI-Xs have the same mask_base */
266 	entry = list_first_entry(msi_head, struct msi_desc, list);
267 
268 	off = (PCI_MSIX_ENTRY_SIZE * entry_nr) +
269 		PCI_MSIX_ENTRY_VECTOR_CTRL;
270 	writel(0, entry->mask_base + off);
271 	readl(entry->mask_base);
272 }
273 
274 /**
275  * nfp_net_irq_unmask() - Unmask automasked interrupt
276  * @nn:       NFP Network structure
277  * @entry_nr: MSI-X table entry
278  *
279  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
280  * clear the ICR for the entry.
281  */
282 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
283 {
284 	if (nn->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
285 		nfp_net_irq_unmask_msix(nn, entry_nr);
286 		return;
287 	}
288 
289 	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
290 	nn_pci_flush(nn);
291 }
292 
293 /**
294  * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
295  * @nn:       NFP Network structure
296  * @nr_vecs:  Number of MSI-X vectors to allocate
297  *
298  * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
299  *
300  * Return: Number of MSI-X vectors obtained or 0 on error.
301  */
302 static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
303 {
304 	struct pci_dev *pdev = nn->pdev;
305 	int nvecs;
306 	int i;
307 
308 	for (i = 0; i < nr_vecs; i++)
309 		nn->irq_entries[i].entry = i;
310 
311 	nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
312 				      NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
313 	if (nvecs < 0) {
314 		nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
315 			NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
316 		return 0;
317 	}
318 
319 	return nvecs;
320 }
321 
322 /**
323  * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want
324  * @nn:       NFP Network structure
325  *
326  * We want a vector per CPU (or ring), whatever is smaller plus
327  * NFP_NET_NON_Q_VECTORS for LSC etc.
328  *
329  * Return: Number of interrupts wanted
330  */
331 static int nfp_net_irqs_wanted(struct nfp_net *nn)
332 {
333 	int ncpus;
334 	int vecs;
335 
336 	ncpus = num_online_cpus();
337 
338 	vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings);
339 	vecs = min_t(int, vecs, ncpus);
340 
341 	return vecs + NFP_NET_NON_Q_VECTORS;
342 }
343 
344 /**
345  * nfp_net_irqs_alloc() - allocates MSI-X irqs
346  * @nn:       NFP Network structure
347  *
348  * Return: Number of irqs obtained or 0 on error.
349  */
350 int nfp_net_irqs_alloc(struct nfp_net *nn)
351 {
352 	int wanted_irqs;
353 
354 	wanted_irqs = nfp_net_irqs_wanted(nn);
355 
356 	nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs);
357 	if (nn->num_irqs == 0) {
358 		nn_err(nn, "Failed to allocate MSI-X IRQs\n");
359 		return 0;
360 	}
361 
362 	nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS;
363 
364 	if (nn->num_irqs < wanted_irqs)
365 		nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
366 			wanted_irqs, nn->num_irqs);
367 
368 	return nn->num_irqs;
369 }
370 
371 /**
372  * nfp_net_irqs_disable() - Disable interrupts
373  * @nn:       NFP Network structure
374  *
375  * Undoes what @nfp_net_irqs_alloc() does.
376  */
377 void nfp_net_irqs_disable(struct nfp_net *nn)
378 {
379 	pci_disable_msix(nn->pdev);
380 }
381 
382 /**
383  * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
384  * @irq:      Interrupt
385  * @data:     Opaque data structure
386  *
387  * Return: Indicate if the interrupt has been handled.
388  */
389 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
390 {
391 	struct nfp_net_r_vector *r_vec = data;
392 
393 	napi_schedule_irqoff(&r_vec->napi);
394 
395 	/* The FW auto-masks any interrupt, either via the MASK bit in
396 	 * the MSI-X table or via the per entry ICR field.  So there
397 	 * is no need to disable interrupts here.
398 	 */
399 	return IRQ_HANDLED;
400 }
401 
402 /**
403  * nfp_net_read_link_status() - Reread link status from control BAR
404  * @nn:       NFP Network structure
405  */
406 static void nfp_net_read_link_status(struct nfp_net *nn)
407 {
408 	unsigned long flags;
409 	bool link_up;
410 	u32 sts;
411 
412 	spin_lock_irqsave(&nn->link_status_lock, flags);
413 
414 	sts = nn_readl(nn, NFP_NET_CFG_STS);
415 	link_up = !!(sts & NFP_NET_CFG_STS_LINK);
416 
417 	if (nn->link_up == link_up)
418 		goto out;
419 
420 	nn->link_up = link_up;
421 
422 	if (nn->link_up) {
423 		netif_carrier_on(nn->netdev);
424 		netdev_info(nn->netdev, "NIC Link is Up\n");
425 	} else {
426 		netif_carrier_off(nn->netdev);
427 		netdev_info(nn->netdev, "NIC Link is Down\n");
428 	}
429 out:
430 	spin_unlock_irqrestore(&nn->link_status_lock, flags);
431 }
432 
433 /**
434  * nfp_net_irq_lsc() - Interrupt service routine for link state changes
435  * @irq:      Interrupt
436  * @data:     Opaque data structure
437  *
438  * Return: Indicate if the interrupt has been handled.
439  */
440 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
441 {
442 	struct nfp_net *nn = data;
443 
444 	nfp_net_read_link_status(nn);
445 
446 	nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
447 
448 	return IRQ_HANDLED;
449 }
450 
451 /**
452  * nfp_net_irq_exn() - Interrupt service routine for exceptions
453  * @irq:      Interrupt
454  * @data:     Opaque data structure
455  *
456  * Return: Indicate if the interrupt has been handled.
457  */
458 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
459 {
460 	struct nfp_net *nn = data;
461 
462 	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
463 	/* XXX TO BE IMPLEMENTED */
464 	return IRQ_HANDLED;
465 }
466 
467 /**
468  * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
469  * @tx_ring:  TX ring structure
470  * @r_vec:    IRQ vector servicing this ring
471  * @idx:      Ring index
472  */
473 static void
474 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
475 		     struct nfp_net_r_vector *r_vec, unsigned int idx)
476 {
477 	struct nfp_net *nn = r_vec->nfp_net;
478 
479 	tx_ring->idx = idx;
480 	tx_ring->r_vec = r_vec;
481 
482 	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
483 	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
484 }
485 
486 /**
487  * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
488  * @rx_ring:  RX ring structure
489  * @r_vec:    IRQ vector servicing this ring
490  * @idx:      Ring index
491  */
492 static void
493 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
494 		     struct nfp_net_r_vector *r_vec, unsigned int idx)
495 {
496 	struct nfp_net *nn = r_vec->nfp_net;
497 
498 	rx_ring->idx = idx;
499 	rx_ring->r_vec = r_vec;
500 
501 	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
502 	rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
503 
504 	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
505 	rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
506 }
507 
508 /**
509  * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
510  * @netdev:   netdev structure
511  */
512 static void nfp_net_irqs_assign(struct net_device *netdev)
513 {
514 	struct nfp_net *nn = netdev_priv(netdev);
515 	struct nfp_net_r_vector *r_vec;
516 	int r;
517 
518 	/* Assumes nn->num_tx_rings == nn->num_rx_rings */
519 	if (nn->num_tx_rings > nn->num_r_vecs) {
520 		nn_warn(nn, "More rings (%d) than vectors (%d).\n",
521 			nn->num_tx_rings, nn->num_r_vecs);
522 		nn->num_tx_rings = nn->num_r_vecs;
523 		nn->num_rx_rings = nn->num_r_vecs;
524 	}
525 
526 	nn->lsc_handler = nfp_net_irq_lsc;
527 	nn->exn_handler = nfp_net_irq_exn;
528 
529 	for (r = 0; r < nn->num_r_vecs; r++) {
530 		r_vec = &nn->r_vecs[r];
531 		r_vec->nfp_net = nn;
532 		r_vec->handler = nfp_net_irq_rxtx;
533 		r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
534 
535 		cpumask_set_cpu(r, &r_vec->affinity_mask);
536 	}
537 }
538 
539 /**
540  * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
541  * @nn:		NFP Network structure
542  * @ctrl_offset: Control BAR offset where IRQ configuration should be written
543  * @format:	printf-style format to construct the interrupt name
544  * @name:	Pointer to allocated space for interrupt name
545  * @name_sz:	Size of space for interrupt name
546  * @vector_idx:	Index of MSI-X vector used for this interrupt
547  * @handler:	IRQ handler to register for this interrupt
548  */
549 static int
550 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
551 			const char *format, char *name, size_t name_sz,
552 			unsigned int vector_idx, irq_handler_t handler)
553 {
554 	struct msix_entry *entry;
555 	int err;
556 
557 	entry = &nn->irq_entries[vector_idx];
558 
559 	snprintf(name, name_sz, format, netdev_name(nn->netdev));
560 	err = request_irq(entry->vector, handler, 0, name, nn);
561 	if (err) {
562 		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
563 		       entry->vector, err);
564 		return err;
565 	}
566 	nn_writeb(nn, ctrl_offset, vector_idx);
567 
568 	return 0;
569 }
570 
571 /**
572  * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
573  * @nn:		NFP Network structure
574  * @ctrl_offset: Control BAR offset where IRQ configuration should be written
575  * @vector_idx:	Index of MSI-X vector used for this interrupt
576  */
577 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
578 				 unsigned int vector_idx)
579 {
580 	nn_writeb(nn, ctrl_offset, 0xff);
581 	free_irq(nn->irq_entries[vector_idx].vector, nn);
582 }
583 
584 /* Transmit
585  *
586  * One queue controller peripheral queue is used for transmit.  The
587  * driver en-queues packets for transmit by advancing the write
588  * pointer.  The device indicates that packets have transmitted by
589  * advancing the read pointer.  The driver maintains a local copy of
590  * the read and write pointer in @struct nfp_net_tx_ring.  The driver
591  * keeps @wr_p in sync with the queue controller write pointer and can
592  * determine how many packets have been transmitted by comparing its
593  * copy of the read pointer @rd_p with the read pointer maintained by
594  * the queue controller peripheral.
595  */
596 
597 /**
598  * nfp_net_tx_full() - Check if the TX ring is full
599  * @tx_ring: TX ring to check
600  * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
601  *
602  * This function checks, based on the *host copy* of read/write
603  * pointer if a given TX ring is full.  The real TX queue may have
604  * some newly made available slots.
605  *
606  * Return: True if the ring is full.
607  */
608 static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
609 {
610 	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
611 }
612 
613 /* Wrappers for deciding when to stop and restart TX queues */
614 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
615 {
616 	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
617 }
618 
619 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
620 {
621 	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
622 }
623 
624 /**
625  * nfp_net_tx_ring_stop() - stop tx ring
626  * @nd_q:    netdev queue
627  * @tx_ring: driver tx queue structure
628  *
629  * Safely stop TX ring.  Remember that while we are running .start_xmit()
630  * someone else may be cleaning the TX ring completions so we need to be
631  * extra careful here.
632  */
633 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
634 				 struct nfp_net_tx_ring *tx_ring)
635 {
636 	netif_tx_stop_queue(nd_q);
637 
638 	/* We can race with the TX completion out of NAPI so recheck */
639 	smp_mb();
640 	if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
641 		netif_tx_start_queue(nd_q);
642 }
643 
644 /**
645  * nfp_net_tx_tso() - Set up Tx descriptor for LSO
646  * @nn:  NFP Net device
647  * @r_vec: per-ring structure
648  * @txbuf: Pointer to driver soft TX descriptor
649  * @txd: Pointer to HW TX descriptor
650  * @skb: Pointer to SKB
651  *
652  * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
653  * Return error on packet header greater than maximum supported LSO header size.
654  */
655 static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
656 			   struct nfp_net_tx_buf *txbuf,
657 			   struct nfp_net_tx_desc *txd, struct sk_buff *skb)
658 {
659 	u32 hdrlen;
660 	u16 mss;
661 
662 	if (!skb_is_gso(skb))
663 		return;
664 
665 	if (!skb->encapsulation)
666 		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
667 	else
668 		hdrlen = skb_inner_transport_header(skb) - skb->data +
669 			inner_tcp_hdrlen(skb);
670 
671 	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
672 	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
673 
674 	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
675 	txd->l4_offset = hdrlen;
676 	txd->mss = cpu_to_le16(mss);
677 	txd->flags |= PCIE_DESC_TX_LSO;
678 
679 	u64_stats_update_begin(&r_vec->tx_sync);
680 	r_vec->tx_lso++;
681 	u64_stats_update_end(&r_vec->tx_sync);
682 }
683 
684 /**
685  * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
686  * @nn:  NFP Net device
687  * @r_vec: per-ring structure
688  * @txbuf: Pointer to driver soft TX descriptor
689  * @txd: Pointer to TX descriptor
690  * @skb: Pointer to SKB
691  *
692  * This function sets the TX checksum flags in the TX descriptor based
693  * on the configuration and the protocol of the packet to be transmitted.
694  */
695 static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
696 			    struct nfp_net_tx_buf *txbuf,
697 			    struct nfp_net_tx_desc *txd, struct sk_buff *skb)
698 {
699 	struct ipv6hdr *ipv6h;
700 	struct iphdr *iph;
701 	u8 l4_hdr;
702 
703 	if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
704 		return;
705 
706 	if (skb->ip_summed != CHECKSUM_PARTIAL)
707 		return;
708 
709 	txd->flags |= PCIE_DESC_TX_CSUM;
710 	if (skb->encapsulation)
711 		txd->flags |= PCIE_DESC_TX_ENCAP;
712 
713 	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
714 	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
715 
716 	if (iph->version == 4) {
717 		txd->flags |= PCIE_DESC_TX_IP4_CSUM;
718 		l4_hdr = iph->protocol;
719 	} else if (ipv6h->version == 6) {
720 		l4_hdr = ipv6h->nexthdr;
721 	} else {
722 		nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
723 				  iph->version);
724 		return;
725 	}
726 
727 	switch (l4_hdr) {
728 	case IPPROTO_TCP:
729 		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
730 		break;
731 	case IPPROTO_UDP:
732 		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
733 		break;
734 	default:
735 		nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
736 				  l4_hdr);
737 		return;
738 	}
739 
740 	u64_stats_update_begin(&r_vec->tx_sync);
741 	if (skb->encapsulation)
742 		r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
743 	else
744 		r_vec->hw_csum_tx += txbuf->pkt_cnt;
745 	u64_stats_update_end(&r_vec->tx_sync);
746 }
747 
748 /**
749  * nfp_net_tx() - Main transmit entry point
750  * @skb:    SKB to transmit
751  * @netdev: netdev structure
752  *
753  * Return: NETDEV_TX_OK on success.
754  */
755 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
756 {
757 	struct nfp_net *nn = netdev_priv(netdev);
758 	const struct skb_frag_struct *frag;
759 	struct nfp_net_r_vector *r_vec;
760 	struct nfp_net_tx_desc *txd, txdg;
761 	struct nfp_net_tx_buf *txbuf;
762 	struct nfp_net_tx_ring *tx_ring;
763 	struct netdev_queue *nd_q;
764 	dma_addr_t dma_addr;
765 	unsigned int fsize;
766 	int f, nr_frags;
767 	int wr_idx;
768 	u16 qidx;
769 
770 	qidx = skb_get_queue_mapping(skb);
771 	tx_ring = &nn->tx_rings[qidx];
772 	r_vec = tx_ring->r_vec;
773 	nd_q = netdev_get_tx_queue(nn->netdev, qidx);
774 
775 	nr_frags = skb_shinfo(skb)->nr_frags;
776 
777 	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
778 		nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
779 				  qidx, tx_ring->wr_p, tx_ring->rd_p);
780 		netif_tx_stop_queue(nd_q);
781 		u64_stats_update_begin(&r_vec->tx_sync);
782 		r_vec->tx_busy++;
783 		u64_stats_update_end(&r_vec->tx_sync);
784 		return NETDEV_TX_BUSY;
785 	}
786 
787 	/* Start with the head skbuf */
788 	dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
789 				  DMA_TO_DEVICE);
790 	if (dma_mapping_error(&nn->pdev->dev, dma_addr))
791 		goto err_free;
792 
793 	wr_idx = tx_ring->wr_p % tx_ring->cnt;
794 
795 	/* Stash the soft descriptor of the head then initialize it */
796 	txbuf = &tx_ring->txbufs[wr_idx];
797 	txbuf->skb = skb;
798 	txbuf->dma_addr = dma_addr;
799 	txbuf->fidx = -1;
800 	txbuf->pkt_cnt = 1;
801 	txbuf->real_len = skb->len;
802 
803 	/* Build TX descriptor */
804 	txd = &tx_ring->txds[wr_idx];
805 	txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
806 	txd->dma_len = cpu_to_le16(skb_headlen(skb));
807 	nfp_desc_set_dma_addr(txd, dma_addr);
808 	txd->data_len = cpu_to_le16(skb->len);
809 
810 	txd->flags = 0;
811 	txd->mss = 0;
812 	txd->l4_offset = 0;
813 
814 	nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
815 
816 	nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
817 
818 	if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
819 		txd->flags |= PCIE_DESC_TX_VLAN;
820 		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
821 	}
822 
823 	/* Gather DMA */
824 	if (nr_frags > 0) {
825 		/* all descs must match except for in addr, length and eop */
826 		txdg = *txd;
827 
828 		for (f = 0; f < nr_frags; f++) {
829 			frag = &skb_shinfo(skb)->frags[f];
830 			fsize = skb_frag_size(frag);
831 
832 			dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
833 						    fsize, DMA_TO_DEVICE);
834 			if (dma_mapping_error(&nn->pdev->dev, dma_addr))
835 				goto err_unmap;
836 
837 			wr_idx = (wr_idx + 1) % tx_ring->cnt;
838 			tx_ring->txbufs[wr_idx].skb = skb;
839 			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
840 			tx_ring->txbufs[wr_idx].fidx = f;
841 
842 			txd = &tx_ring->txds[wr_idx];
843 			*txd = txdg;
844 			txd->dma_len = cpu_to_le16(fsize);
845 			nfp_desc_set_dma_addr(txd, dma_addr);
846 			txd->offset_eop =
847 				(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
848 		}
849 
850 		u64_stats_update_begin(&r_vec->tx_sync);
851 		r_vec->tx_gather++;
852 		u64_stats_update_end(&r_vec->tx_sync);
853 	}
854 
855 	netdev_tx_sent_queue(nd_q, txbuf->real_len);
856 
857 	tx_ring->wr_p += nr_frags + 1;
858 	if (nfp_net_tx_ring_should_stop(tx_ring))
859 		nfp_net_tx_ring_stop(nd_q, tx_ring);
860 
861 	tx_ring->wr_ptr_add += nr_frags + 1;
862 	if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
863 		/* force memory write before we let HW know */
864 		wmb();
865 		nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
866 		tx_ring->wr_ptr_add = 0;
867 	}
868 
869 	skb_tx_timestamp(skb);
870 
871 	return NETDEV_TX_OK;
872 
873 err_unmap:
874 	--f;
875 	while (f >= 0) {
876 		frag = &skb_shinfo(skb)->frags[f];
877 		dma_unmap_page(&nn->pdev->dev,
878 			       tx_ring->txbufs[wr_idx].dma_addr,
879 			       skb_frag_size(frag), DMA_TO_DEVICE);
880 		tx_ring->txbufs[wr_idx].skb = NULL;
881 		tx_ring->txbufs[wr_idx].dma_addr = 0;
882 		tx_ring->txbufs[wr_idx].fidx = -2;
883 		wr_idx = wr_idx - 1;
884 		if (wr_idx < 0)
885 			wr_idx += tx_ring->cnt;
886 	}
887 	dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
888 			 skb_headlen(skb), DMA_TO_DEVICE);
889 	tx_ring->txbufs[wr_idx].skb = NULL;
890 	tx_ring->txbufs[wr_idx].dma_addr = 0;
891 	tx_ring->txbufs[wr_idx].fidx = -2;
892 err_free:
893 	nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
894 	u64_stats_update_begin(&r_vec->tx_sync);
895 	r_vec->tx_errors++;
896 	u64_stats_update_end(&r_vec->tx_sync);
897 	dev_kfree_skb_any(skb);
898 	return NETDEV_TX_OK;
899 }
900 
901 /**
902  * nfp_net_tx_complete() - Handled completed TX packets
903  * @tx_ring:   TX ring structure
904  *
905  * Return: Number of completed TX descriptors
906  */
907 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
908 {
909 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
910 	struct nfp_net *nn = r_vec->nfp_net;
911 	const struct skb_frag_struct *frag;
912 	struct netdev_queue *nd_q;
913 	u32 done_pkts = 0, done_bytes = 0;
914 	struct sk_buff *skb;
915 	int todo, nr_frags;
916 	u32 qcp_rd_p;
917 	int fidx;
918 	int idx;
919 
920 	/* Work out how many descriptors have been transmitted */
921 	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
922 
923 	if (qcp_rd_p == tx_ring->qcp_rd_p)
924 		return;
925 
926 	if (qcp_rd_p > tx_ring->qcp_rd_p)
927 		todo = qcp_rd_p - tx_ring->qcp_rd_p;
928 	else
929 		todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
930 
931 	while (todo--) {
932 		idx = tx_ring->rd_p % tx_ring->cnt;
933 		tx_ring->rd_p++;
934 
935 		skb = tx_ring->txbufs[idx].skb;
936 		if (!skb)
937 			continue;
938 
939 		nr_frags = skb_shinfo(skb)->nr_frags;
940 		fidx = tx_ring->txbufs[idx].fidx;
941 
942 		if (fidx == -1) {
943 			/* unmap head */
944 			dma_unmap_single(&nn->pdev->dev,
945 					 tx_ring->txbufs[idx].dma_addr,
946 					 skb_headlen(skb), DMA_TO_DEVICE);
947 
948 			done_pkts += tx_ring->txbufs[idx].pkt_cnt;
949 			done_bytes += tx_ring->txbufs[idx].real_len;
950 		} else {
951 			/* unmap fragment */
952 			frag = &skb_shinfo(skb)->frags[fidx];
953 			dma_unmap_page(&nn->pdev->dev,
954 				       tx_ring->txbufs[idx].dma_addr,
955 				       skb_frag_size(frag), DMA_TO_DEVICE);
956 		}
957 
958 		/* check for last gather fragment */
959 		if (fidx == nr_frags - 1)
960 			dev_kfree_skb_any(skb);
961 
962 		tx_ring->txbufs[idx].dma_addr = 0;
963 		tx_ring->txbufs[idx].skb = NULL;
964 		tx_ring->txbufs[idx].fidx = -2;
965 	}
966 
967 	tx_ring->qcp_rd_p = qcp_rd_p;
968 
969 	u64_stats_update_begin(&r_vec->tx_sync);
970 	r_vec->tx_bytes += done_bytes;
971 	r_vec->tx_pkts += done_pkts;
972 	u64_stats_update_end(&r_vec->tx_sync);
973 
974 	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
975 	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
976 	if (nfp_net_tx_ring_should_wake(tx_ring)) {
977 		/* Make sure TX thread will see updated tx_ring->rd_p */
978 		smp_mb();
979 
980 		if (unlikely(netif_tx_queue_stopped(nd_q)))
981 			netif_tx_wake_queue(nd_q);
982 	}
983 
984 	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
985 		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
986 		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
987 }
988 
989 /**
990  * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
991  * @nn:		NFP Net device
992  * @tx_ring:	TX ring structure
993  *
994  * Assumes that the device is stopped
995  */
996 static void
997 nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
998 {
999 	const struct skb_frag_struct *frag;
1000 	struct netdev_queue *nd_q;
1001 	struct pci_dev *pdev = nn->pdev;
1002 
1003 	while (tx_ring->rd_p != tx_ring->wr_p) {
1004 		int nr_frags, fidx, idx;
1005 		struct sk_buff *skb;
1006 
1007 		idx = tx_ring->rd_p % tx_ring->cnt;
1008 		skb = tx_ring->txbufs[idx].skb;
1009 		nr_frags = skb_shinfo(skb)->nr_frags;
1010 		fidx = tx_ring->txbufs[idx].fidx;
1011 
1012 		if (fidx == -1) {
1013 			/* unmap head */
1014 			dma_unmap_single(&pdev->dev,
1015 					 tx_ring->txbufs[idx].dma_addr,
1016 					 skb_headlen(skb), DMA_TO_DEVICE);
1017 		} else {
1018 			/* unmap fragment */
1019 			frag = &skb_shinfo(skb)->frags[fidx];
1020 			dma_unmap_page(&pdev->dev,
1021 				       tx_ring->txbufs[idx].dma_addr,
1022 				       skb_frag_size(frag), DMA_TO_DEVICE);
1023 		}
1024 
1025 		/* check for last gather fragment */
1026 		if (fidx == nr_frags - 1)
1027 			dev_kfree_skb_any(skb);
1028 
1029 		tx_ring->txbufs[idx].dma_addr = 0;
1030 		tx_ring->txbufs[idx].skb = NULL;
1031 		tx_ring->txbufs[idx].fidx = -2;
1032 
1033 		tx_ring->qcp_rd_p++;
1034 		tx_ring->rd_p++;
1035 	}
1036 
1037 	memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1038 	tx_ring->wr_p = 0;
1039 	tx_ring->rd_p = 0;
1040 	tx_ring->qcp_rd_p = 0;
1041 	tx_ring->wr_ptr_add = 0;
1042 
1043 	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
1044 	netdev_tx_reset_queue(nd_q);
1045 }
1046 
1047 static void nfp_net_tx_timeout(struct net_device *netdev)
1048 {
1049 	struct nfp_net *nn = netdev_priv(netdev);
1050 	int i;
1051 
1052 	for (i = 0; i < nn->num_tx_rings; i++) {
1053 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1054 			continue;
1055 		nn_warn(nn, "TX timeout on ring: %d\n", i);
1056 	}
1057 	nn_warn(nn, "TX watchdog timeout\n");
1058 }
1059 
1060 /* Receive processing
1061  */
1062 
1063 /**
1064  * nfp_net_rx_space() - return the number of free slots on the RX ring
1065  * @rx_ring:   RX ring structure
1066  *
1067  * Make sure we leave at least one slot free.
1068  *
1069  * Return: True if there is space on the RX ring
1070  */
1071 static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
1072 {
1073 	return (rx_ring->cnt - 1) - (rx_ring->wr_p - rx_ring->rd_p);
1074 }
1075 
1076 /**
1077  * nfp_net_rx_alloc_one() - Allocate and map skb for RX
1078  * @rx_ring:	RX ring structure of the skb
1079  * @dma_addr:	Pointer to storage for DMA address (output param)
1080  * @fl_bufsz:	size of freelist buffers
1081  *
1082  * This function will allcate a new skb, map it for DMA.
1083  *
1084  * Return: allocated skb or NULL on failure.
1085  */
1086 static struct sk_buff *
1087 nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
1088 		     unsigned int fl_bufsz)
1089 {
1090 	struct nfp_net *nn = rx_ring->r_vec->nfp_net;
1091 	struct sk_buff *skb;
1092 
1093 	skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
1094 	if (!skb) {
1095 		nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
1096 		return NULL;
1097 	}
1098 
1099 	*dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
1100 				   fl_bufsz, DMA_FROM_DEVICE);
1101 	if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
1102 		dev_kfree_skb_any(skb);
1103 		nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
1104 		return NULL;
1105 	}
1106 
1107 	return skb;
1108 }
1109 
1110 /**
1111  * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1112  * @rx_ring:	RX ring structure
1113  * @skb:	Skb to put on rings
1114  * @dma_addr:	DMA address of skb mapping
1115  */
1116 static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
1117 				struct sk_buff *skb, dma_addr_t dma_addr)
1118 {
1119 	unsigned int wr_idx;
1120 
1121 	wr_idx = rx_ring->wr_p % rx_ring->cnt;
1122 
1123 	/* Stash SKB and DMA address away */
1124 	rx_ring->rxbufs[wr_idx].skb = skb;
1125 	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1126 
1127 	/* Fill freelist descriptor */
1128 	rx_ring->rxds[wr_idx].fld.reserved = 0;
1129 	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1130 	nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
1131 
1132 	rx_ring->wr_p++;
1133 	rx_ring->wr_ptr_add++;
1134 	if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
1135 		/* Update write pointer of the freelist queue. Make
1136 		 * sure all writes are flushed before telling the hardware.
1137 		 */
1138 		wmb();
1139 		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
1140 		rx_ring->wr_ptr_add = 0;
1141 	}
1142 }
1143 
1144 /**
1145  * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
1146  * @rx_ring:	RX ring structure
1147  *
1148  * Warning: Do *not* call if ring buffers were never put on the FW freelist
1149  *	    (i.e. device was not enabled)!
1150  */
1151 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1152 {
1153 	unsigned int wr_idx, last_idx;
1154 
1155 	/* Move the empty entry to the end of the list */
1156 	wr_idx = rx_ring->wr_p % rx_ring->cnt;
1157 	last_idx = rx_ring->cnt - 1;
1158 	rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1159 	rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
1160 	rx_ring->rxbufs[last_idx].dma_addr = 0;
1161 	rx_ring->rxbufs[last_idx].skb = NULL;
1162 
1163 	memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1164 	rx_ring->wr_p = 0;
1165 	rx_ring->rd_p = 0;
1166 	rx_ring->wr_ptr_add = 0;
1167 }
1168 
1169 /**
1170  * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1171  * @nn:		NFP Net device
1172  * @rx_ring:	RX ring to remove buffers from
1173  *
1174  * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1175  * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
1176  * to restore required ring geometry.
1177  */
1178 static void
1179 nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1180 {
1181 	struct pci_dev *pdev = nn->pdev;
1182 	unsigned int i;
1183 
1184 	for (i = 0; i < rx_ring->cnt - 1; i++) {
1185 		/* NULL skb can only happen when initial filling of the ring
1186 		 * fails to allocate enough buffers and calls here to free
1187 		 * already allocated ones.
1188 		 */
1189 		if (!rx_ring->rxbufs[i].skb)
1190 			continue;
1191 
1192 		dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
1193 				 rx_ring->bufsz, DMA_FROM_DEVICE);
1194 		dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
1195 		rx_ring->rxbufs[i].dma_addr = 0;
1196 		rx_ring->rxbufs[i].skb = NULL;
1197 	}
1198 }
1199 
1200 /**
1201  * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1202  * @nn:		NFP Net device
1203  * @rx_ring:	RX ring to remove buffers from
1204  */
1205 static int
1206 nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1207 {
1208 	struct nfp_net_rx_buf *rxbufs;
1209 	unsigned int i;
1210 
1211 	rxbufs = rx_ring->rxbufs;
1212 
1213 	for (i = 0; i < rx_ring->cnt - 1; i++) {
1214 		rxbufs[i].skb =
1215 			nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
1216 					     rx_ring->bufsz);
1217 		if (!rxbufs[i].skb) {
1218 			nfp_net_rx_ring_bufs_free(nn, rx_ring);
1219 			return -ENOMEM;
1220 		}
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 /**
1227  * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1228  * @rx_ring: RX ring to fill
1229  */
1230 static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
1231 {
1232 	unsigned int i;
1233 
1234 	for (i = 0; i < rx_ring->cnt - 1; i++)
1235 		nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
1236 				    rx_ring->rxbufs[i].dma_addr);
1237 }
1238 
1239 /**
1240  * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1241  * @flags: RX descriptor flags field in CPU byte order
1242  */
1243 static int nfp_net_rx_csum_has_errors(u16 flags)
1244 {
1245 	u16 csum_all_checked, csum_all_ok;
1246 
1247 	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1248 	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1249 
1250 	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
1251 }
1252 
1253 /**
1254  * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1255  * @nn:  NFP Net device
1256  * @r_vec: per-ring structure
1257  * @rxd: Pointer to RX descriptor
1258  * @skb: Pointer to SKB
1259  */
1260 static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1261 			    struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
1262 {
1263 	skb_checksum_none_assert(skb);
1264 
1265 	if (!(nn->netdev->features & NETIF_F_RXCSUM))
1266 		return;
1267 
1268 	if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1269 		u64_stats_update_begin(&r_vec->rx_sync);
1270 		r_vec->hw_csum_rx_error++;
1271 		u64_stats_update_end(&r_vec->rx_sync);
1272 		return;
1273 	}
1274 
1275 	/* Assume that the firmware will never report inner CSUM_OK unless outer
1276 	 * L4 headers were successfully parsed. FW will always report zero UDP
1277 	 * checksum as CSUM_OK.
1278 	 */
1279 	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1280 	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1281 		__skb_incr_checksum_unnecessary(skb);
1282 		u64_stats_update_begin(&r_vec->rx_sync);
1283 		r_vec->hw_csum_rx_ok++;
1284 		u64_stats_update_end(&r_vec->rx_sync);
1285 	}
1286 
1287 	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1288 	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1289 		__skb_incr_checksum_unnecessary(skb);
1290 		u64_stats_update_begin(&r_vec->rx_sync);
1291 		r_vec->hw_csum_rx_inner_ok++;
1292 		u64_stats_update_end(&r_vec->rx_sync);
1293 	}
1294 }
1295 
1296 /**
1297  * nfp_net_set_hash() - Set SKB hash data
1298  * @netdev: adapter's net_device structure
1299  * @skb:   SKB to set the hash data on
1300  * @rxd:   RX descriptor
1301  *
1302  * The RSS hash and hash-type are pre-pended to the packet data.
1303  * Extract and decode it and set the skb fields.
1304  */
1305 static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
1306 			     struct nfp_net_rx_desc *rxd)
1307 {
1308 	struct nfp_net_rx_hash *rx_hash;
1309 
1310 	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) ||
1311 	    !(netdev->features & NETIF_F_RXHASH))
1312 		return;
1313 
1314 	rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
1315 
1316 	switch (be32_to_cpu(rx_hash->hash_type)) {
1317 	case NFP_NET_RSS_IPV4:
1318 	case NFP_NET_RSS_IPV6:
1319 	case NFP_NET_RSS_IPV6_EX:
1320 		skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3);
1321 		break;
1322 	default:
1323 		skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4);
1324 		break;
1325 	}
1326 }
1327 
1328 /**
1329  * nfp_net_rx() - receive up to @budget packets on @rx_ring
1330  * @rx_ring:   RX ring to receive from
1331  * @budget:    NAPI budget
1332  *
1333  * Note, this function is separated out from the napi poll function to
1334  * more cleanly separate packet receive code from other bookkeeping
1335  * functions performed in the napi poll function.
1336  *
1337  * There are differences between the NFP-3200 firmware and the
1338  * NFP-6000 firmware.  The NFP-3200 firmware uses a dedicated RX queue
1339  * to indicate that new packets have arrived.  The NFP-6000 does not
1340  * have this queue and uses the DD bit in the RX descriptor. This
1341  * method cannot be used on the NFP-3200 as it causes a race
1342  * condition: The RX ring write pointer on the NFP-3200 is updated
1343  * after packets (and descriptors) have been DMAed.  If the DD bit is
1344  * used and subsequently the read pointer is updated this may lead to
1345  * the RX queue to underflow (if the firmware has not yet update the
1346  * write pointer).  Therefore we use slightly ugly conditional code
1347  * below to handle the differences.  We may, in the future update the
1348  * NFP-3200 firmware to behave the same as the firmware on the
1349  * NFP-6000.
1350  *
1351  * Return: Number of packets received.
1352  */
1353 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1354 {
1355 	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1356 	struct nfp_net *nn = r_vec->nfp_net;
1357 	unsigned int data_len, meta_len;
1358 	int avail = 0, pkts_polled = 0;
1359 	struct sk_buff *skb, *new_skb;
1360 	struct nfp_net_rx_desc *rxd;
1361 	dma_addr_t new_dma_addr;
1362 	u32 qcp_wr_p;
1363 	int idx;
1364 
1365 	if (nn->is_nfp3200) {
1366 		/* Work out how many packets arrived */
1367 		qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
1368 		idx = rx_ring->rd_p % rx_ring->cnt;
1369 
1370 		if (qcp_wr_p == idx)
1371 			/* No new packets */
1372 			return 0;
1373 
1374 		if (qcp_wr_p > idx)
1375 			avail = qcp_wr_p - idx;
1376 		else
1377 			avail = qcp_wr_p + rx_ring->cnt - idx;
1378 	} else {
1379 		avail = budget + 1;
1380 	}
1381 
1382 	while (avail > 0 && pkts_polled < budget) {
1383 		idx = rx_ring->rd_p % rx_ring->cnt;
1384 
1385 		rxd = &rx_ring->rxds[idx];
1386 		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
1387 			if (nn->is_nfp3200)
1388 				nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
1389 				       rx_ring->idx, idx,
1390 				       rxd->vals[0], rxd->vals[1]);
1391 			break;
1392 		}
1393 		/* Memory barrier to ensure that we won't do other reads
1394 		 * before the DD bit.
1395 		 */
1396 		dma_rmb();
1397 
1398 		rx_ring->rd_p++;
1399 		pkts_polled++;
1400 		avail--;
1401 
1402 		skb = rx_ring->rxbufs[idx].skb;
1403 
1404 		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
1405 					       nn->fl_bufsz);
1406 		if (!new_skb) {
1407 			nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
1408 					    rx_ring->rxbufs[idx].dma_addr);
1409 			u64_stats_update_begin(&r_vec->rx_sync);
1410 			r_vec->rx_drops++;
1411 			u64_stats_update_end(&r_vec->rx_sync);
1412 			continue;
1413 		}
1414 
1415 		dma_unmap_single(&nn->pdev->dev,
1416 				 rx_ring->rxbufs[idx].dma_addr,
1417 				 nn->fl_bufsz, DMA_FROM_DEVICE);
1418 
1419 		nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
1420 
1421 		/*         < meta_len >
1422 		 *  <-- [rx_offset] -->
1423 		 *  ---------------------------------------------------------
1424 		 * | [XX] |  metadata  |             packet           | XXXX |
1425 		 *  ---------------------------------------------------------
1426 		 *         <---------------- data_len --------------->
1427 		 *
1428 		 * The rx_offset is fixed for all packets, the meta_len can vary
1429 		 * on a packet by packet basis. If rx_offset is set to zero
1430 		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1431 		 * buffer and is immediately followed by the packet (no [XX]).
1432 		 */
1433 		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1434 		data_len = le16_to_cpu(rxd->rxd.data_len);
1435 
1436 		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1437 			skb_reserve(skb, meta_len);
1438 		else
1439 			skb_reserve(skb, nn->rx_offset);
1440 		skb_put(skb, data_len - meta_len);
1441 
1442 		nfp_net_set_hash(nn->netdev, skb, rxd);
1443 
1444 		/* Pad small frames to minimum */
1445 		if (skb_put_padto(skb, 60))
1446 			break;
1447 
1448 		/* Stats update */
1449 		u64_stats_update_begin(&r_vec->rx_sync);
1450 		r_vec->rx_pkts++;
1451 		r_vec->rx_bytes += skb->len;
1452 		u64_stats_update_end(&r_vec->rx_sync);
1453 
1454 		skb_record_rx_queue(skb, rx_ring->idx);
1455 		skb->protocol = eth_type_trans(skb, nn->netdev);
1456 
1457 		nfp_net_rx_csum(nn, r_vec, rxd, skb);
1458 
1459 		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1460 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1461 					       le16_to_cpu(rxd->rxd.vlan));
1462 
1463 		napi_gro_receive(&rx_ring->r_vec->napi, skb);
1464 	}
1465 
1466 	if (nn->is_nfp3200)
1467 		nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);
1468 
1469 	return pkts_polled;
1470 }
1471 
1472 /**
1473  * nfp_net_poll() - napi poll function
1474  * @napi:    NAPI structure
1475  * @budget:  NAPI budget
1476  *
1477  * Return: number of packets polled.
1478  */
1479 static int nfp_net_poll(struct napi_struct *napi, int budget)
1480 {
1481 	struct nfp_net_r_vector *r_vec =
1482 		container_of(napi, struct nfp_net_r_vector, napi);
1483 	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
1484 	struct nfp_net_tx_ring *tx_ring = r_vec->tx_ring;
1485 	struct nfp_net *nn = r_vec->nfp_net;
1486 	struct netdev_queue *txq;
1487 	unsigned int pkts_polled;
1488 
1489 	tx_ring = &nn->tx_rings[rx_ring->idx];
1490 	txq = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
1491 	nfp_net_tx_complete(tx_ring);
1492 
1493 	pkts_polled = nfp_net_rx(rx_ring, budget);
1494 
1495 	if (pkts_polled < budget) {
1496 		napi_complete_done(napi, pkts_polled);
1497 		nfp_net_irq_unmask(nn, r_vec->irq_idx);
1498 	}
1499 
1500 	return pkts_polled;
1501 }
1502 
1503 /* Setup and Configuration
1504  */
1505 
1506 /**
1507  * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
1508  * @tx_ring:   TX ring to free
1509  */
1510 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1511 {
1512 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1513 	struct nfp_net *nn = r_vec->nfp_net;
1514 	struct pci_dev *pdev = nn->pdev;
1515 
1516 	kfree(tx_ring->txbufs);
1517 
1518 	if (tx_ring->txds)
1519 		dma_free_coherent(&pdev->dev, tx_ring->size,
1520 				  tx_ring->txds, tx_ring->dma);
1521 
1522 	tx_ring->cnt = 0;
1523 	tx_ring->txbufs = NULL;
1524 	tx_ring->txds = NULL;
1525 	tx_ring->dma = 0;
1526 	tx_ring->size = 0;
1527 }
1528 
1529 /**
1530  * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1531  * @tx_ring:   TX Ring structure to allocate
1532  * @cnt:       Ring buffer count
1533  *
1534  * Return: 0 on success, negative errno otherwise.
1535  */
1536 static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
1537 {
1538 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1539 	struct nfp_net *nn = r_vec->nfp_net;
1540 	struct pci_dev *pdev = nn->pdev;
1541 	int sz;
1542 
1543 	tx_ring->cnt = cnt;
1544 
1545 	tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1546 	tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
1547 					    &tx_ring->dma, GFP_KERNEL);
1548 	if (!tx_ring->txds)
1549 		goto err_alloc;
1550 
1551 	sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
1552 	tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
1553 	if (!tx_ring->txbufs)
1554 		goto err_alloc;
1555 
1556 	netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
1557 
1558 	nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
1559 	       tx_ring->idx, tx_ring->qcidx,
1560 	       tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);
1561 
1562 	return 0;
1563 
1564 err_alloc:
1565 	nfp_net_tx_ring_free(tx_ring);
1566 	return -ENOMEM;
1567 }
1568 
1569 static struct nfp_net_tx_ring *
1570 nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
1571 {
1572 	struct nfp_net_tx_ring *rings;
1573 	unsigned int r;
1574 
1575 	rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
1576 	if (!rings)
1577 		return NULL;
1578 
1579 	for (r = 0; r < nn->num_tx_rings; r++) {
1580 		nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
1581 
1582 		if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
1583 			goto err_free_prev;
1584 	}
1585 
1586 	return rings;
1587 
1588 err_free_prev:
1589 	while (r--)
1590 		nfp_net_tx_ring_free(&rings[r]);
1591 	kfree(rings);
1592 	return NULL;
1593 }
1594 
1595 static struct nfp_net_tx_ring *
1596 nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1597 {
1598 	struct nfp_net_tx_ring *old = nn->tx_rings;
1599 	unsigned int r;
1600 
1601 	for (r = 0; r < nn->num_tx_rings; r++)
1602 		old[r].r_vec->tx_ring = &rings[r];
1603 
1604 	nn->tx_rings = rings;
1605 	return old;
1606 }
1607 
1608 static void
1609 nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1610 {
1611 	unsigned int r;
1612 
1613 	if (!rings)
1614 		return;
1615 
1616 	for (r = 0; r < nn->num_tx_rings; r++)
1617 		nfp_net_tx_ring_free(&rings[r]);
1618 
1619 	kfree(rings);
1620 }
1621 
1622 /**
1623  * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
1624  * @rx_ring:  RX ring to free
1625  */
1626 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1627 {
1628 	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1629 	struct nfp_net *nn = r_vec->nfp_net;
1630 	struct pci_dev *pdev = nn->pdev;
1631 
1632 	kfree(rx_ring->rxbufs);
1633 
1634 	if (rx_ring->rxds)
1635 		dma_free_coherent(&pdev->dev, rx_ring->size,
1636 				  rx_ring->rxds, rx_ring->dma);
1637 
1638 	rx_ring->cnt = 0;
1639 	rx_ring->rxbufs = NULL;
1640 	rx_ring->rxds = NULL;
1641 	rx_ring->dma = 0;
1642 	rx_ring->size = 0;
1643 }
1644 
1645 /**
1646  * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1647  * @rx_ring:  RX ring to allocate
1648  * @fl_bufsz: Size of buffers to allocate
1649  * @cnt:      Ring buffer count
1650  *
1651  * Return: 0 on success, negative errno otherwise.
1652  */
1653 static int
1654 nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
1655 		      u32 cnt)
1656 {
1657 	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1658 	struct nfp_net *nn = r_vec->nfp_net;
1659 	struct pci_dev *pdev = nn->pdev;
1660 	int sz;
1661 
1662 	rx_ring->cnt = cnt;
1663 	rx_ring->bufsz = fl_bufsz;
1664 
1665 	rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1666 	rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
1667 					    &rx_ring->dma, GFP_KERNEL);
1668 	if (!rx_ring->rxds)
1669 		goto err_alloc;
1670 
1671 	sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
1672 	rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
1673 	if (!rx_ring->rxbufs)
1674 		goto err_alloc;
1675 
1676 	nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
1677 	       rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
1678 	       rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
1679 
1680 	return 0;
1681 
1682 err_alloc:
1683 	nfp_net_rx_ring_free(rx_ring);
1684 	return -ENOMEM;
1685 }
1686 
1687 static struct nfp_net_rx_ring *
1688 nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
1689 				u32 buf_cnt)
1690 {
1691 	struct nfp_net_rx_ring *rings;
1692 	unsigned int r;
1693 
1694 	rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
1695 	if (!rings)
1696 		return NULL;
1697 
1698 	for (r = 0; r < nn->num_rx_rings; r++) {
1699 		nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
1700 
1701 		if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
1702 			goto err_free_prev;
1703 
1704 		if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
1705 			goto err_free_ring;
1706 	}
1707 
1708 	return rings;
1709 
1710 err_free_prev:
1711 	while (r--) {
1712 		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1713 err_free_ring:
1714 		nfp_net_rx_ring_free(&rings[r]);
1715 	}
1716 	kfree(rings);
1717 	return NULL;
1718 }
1719 
1720 static struct nfp_net_rx_ring *
1721 nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1722 {
1723 	struct nfp_net_rx_ring *old = nn->rx_rings;
1724 	unsigned int r;
1725 
1726 	for (r = 0; r < nn->num_rx_rings; r++)
1727 		old[r].r_vec->rx_ring = &rings[r];
1728 
1729 	nn->rx_rings = rings;
1730 	return old;
1731 }
1732 
1733 static void
1734 nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1735 {
1736 	unsigned int r;
1737 
1738 	if (!rings)
1739 		return;
1740 
1741 	for (r = 0; r < nn->num_r_vecs; r++) {
1742 		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1743 		nfp_net_rx_ring_free(&rings[r]);
1744 	}
1745 
1746 	kfree(rings);
1747 }
1748 
1749 static int
1750 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1751 		       int idx)
1752 {
1753 	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1754 	int err;
1755 
1756 	r_vec->tx_ring = &nn->tx_rings[idx];
1757 	nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
1758 
1759 	r_vec->rx_ring = &nn->rx_rings[idx];
1760 	nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
1761 
1762 	snprintf(r_vec->name, sizeof(r_vec->name),
1763 		 "%s-rxtx-%d", nn->netdev->name, idx);
1764 	err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
1765 	if (err) {
1766 		nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
1767 		return err;
1768 	}
1769 	disable_irq(entry->vector);
1770 
1771 	/* Setup NAPI */
1772 	netif_napi_add(nn->netdev, &r_vec->napi,
1773 		       nfp_net_poll, NAPI_POLL_WEIGHT);
1774 
1775 	irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
1776 
1777 	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
1778 
1779 	return 0;
1780 }
1781 
1782 static void
1783 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
1784 {
1785 	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1786 
1787 	irq_set_affinity_hint(entry->vector, NULL);
1788 	netif_napi_del(&r_vec->napi);
1789 	free_irq(entry->vector, r_vec);
1790 }
1791 
1792 /**
1793  * nfp_net_rss_write_itbl() - Write RSS indirection table to device
1794  * @nn:      NFP Net device to reconfigure
1795  */
1796 void nfp_net_rss_write_itbl(struct nfp_net *nn)
1797 {
1798 	int i;
1799 
1800 	for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
1801 		nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
1802 			  get_unaligned_le32(nn->rss_itbl + i));
1803 }
1804 
1805 /**
1806  * nfp_net_rss_write_key() - Write RSS hash key to device
1807  * @nn:      NFP Net device to reconfigure
1808  */
1809 void nfp_net_rss_write_key(struct nfp_net *nn)
1810 {
1811 	int i;
1812 
1813 	for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
1814 		nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
1815 			  get_unaligned_le32(nn->rss_key + i));
1816 }
1817 
1818 /**
1819  * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
1820  * @nn:      NFP Net device to reconfigure
1821  */
1822 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
1823 {
1824 	u8 i;
1825 	u32 factor;
1826 	u32 value;
1827 
1828 	/* Compute factor used to convert coalesce '_usecs' parameters to
1829 	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
1830 	 * count.
1831 	 */
1832 	factor = nn->me_freq_mhz / 16;
1833 
1834 	/* copy RX interrupt coalesce parameters */
1835 	value = (nn->rx_coalesce_max_frames << 16) |
1836 		(factor * nn->rx_coalesce_usecs);
1837 	for (i = 0; i < nn->num_r_vecs; i++)
1838 		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
1839 
1840 	/* copy TX interrupt coalesce parameters */
1841 	value = (nn->tx_coalesce_max_frames << 16) |
1842 		(factor * nn->tx_coalesce_usecs);
1843 	for (i = 0; i < nn->num_r_vecs; i++)
1844 		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
1845 }
1846 
1847 /**
1848  * nfp_net_write_mac_addr() - Write mac address to device registers
1849  * @nn:      NFP Net device to reconfigure
1850  * @mac:     Six-byte MAC address to be written
1851  *
1852  * We do a bit of byte swapping dance because firmware is LE.
1853  */
1854 static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
1855 {
1856 	nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
1857 		  get_unaligned_be32(nn->netdev->dev_addr));
1858 	/* We can't do writew for NFP-3200 compatibility */
1859 	nn_writel(nn, NFP_NET_CFG_MACADDR + 4,
1860 		  get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
1861 }
1862 
1863 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
1864 {
1865 	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
1866 	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
1867 	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
1868 
1869 	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
1870 	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
1871 	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
1872 }
1873 
1874 /**
1875  * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
1876  * @nn:      NFP Net device to reconfigure
1877  */
1878 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
1879 {
1880 	u32 new_ctrl, update;
1881 	unsigned int r;
1882 	int err;
1883 
1884 	new_ctrl = nn->ctrl;
1885 	new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
1886 	update = NFP_NET_CFG_UPDATE_GEN;
1887 	update |= NFP_NET_CFG_UPDATE_MSIX;
1888 	update |= NFP_NET_CFG_UPDATE_RING;
1889 
1890 	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1891 		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
1892 
1893 	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
1894 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
1895 
1896 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1897 	err = nfp_net_reconfig(nn, update);
1898 	if (err)
1899 		nn_err(nn, "Could not disable device: %d\n", err);
1900 
1901 	for (r = 0; r < nn->num_r_vecs; r++) {
1902 		nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
1903 		nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
1904 		nfp_net_vec_clear_ring_data(nn, r);
1905 	}
1906 
1907 	nn->ctrl = new_ctrl;
1908 }
1909 
1910 static void
1911 nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1912 			    unsigned int idx)
1913 {
1914 	/* Write the DMA address, size and MSI-X info to the device */
1915 	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
1916 	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
1917 	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
1918 
1919 	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
1920 	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
1921 	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
1922 }
1923 
1924 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
1925 {
1926 	u32 new_ctrl, update = 0;
1927 	unsigned int r;
1928 	int err;
1929 
1930 	new_ctrl = nn->ctrl;
1931 
1932 	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
1933 		nfp_net_rss_write_key(nn);
1934 		nfp_net_rss_write_itbl(nn);
1935 		nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
1936 		update |= NFP_NET_CFG_UPDATE_RSS;
1937 	}
1938 
1939 	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
1940 		nfp_net_coalesce_write_cfg(nn);
1941 
1942 		new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
1943 		update |= NFP_NET_CFG_UPDATE_IRQMOD;
1944 	}
1945 
1946 	for (r = 0; r < nn->num_r_vecs; r++)
1947 		nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
1948 
1949 	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
1950 		  0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
1951 
1952 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
1953 		  0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
1954 
1955 	nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
1956 
1957 	nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
1958 	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
1959 
1960 	/* Enable device */
1961 	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
1962 	update |= NFP_NET_CFG_UPDATE_GEN;
1963 	update |= NFP_NET_CFG_UPDATE_MSIX;
1964 	update |= NFP_NET_CFG_UPDATE_RING;
1965 	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1966 		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
1967 
1968 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1969 	err = nfp_net_reconfig(nn, update);
1970 
1971 	nn->ctrl = new_ctrl;
1972 
1973 	for (r = 0; r < nn->num_r_vecs; r++)
1974 		nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
1975 
1976 	/* Since reconfiguration requests while NFP is down are ignored we
1977 	 * have to wipe the entire VXLAN configuration and reinitialize it.
1978 	 */
1979 	if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
1980 		memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
1981 		memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
1982 		vxlan_get_rx_port(nn->netdev);
1983 	}
1984 
1985 	return err;
1986 }
1987 
1988 /**
1989  * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
1990  * @nn:      NFP Net device to reconfigure
1991  */
1992 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
1993 {
1994 	int err;
1995 
1996 	err = __nfp_net_set_config_and_enable(nn);
1997 	if (err)
1998 		nfp_net_clear_config_and_disable(nn);
1999 
2000 	return err;
2001 }
2002 
2003 /**
2004  * nfp_net_open_stack() - Start the device from stack's perspective
2005  * @nn:      NFP Net device to reconfigure
2006  */
2007 static void nfp_net_open_stack(struct nfp_net *nn)
2008 {
2009 	unsigned int r;
2010 
2011 	for (r = 0; r < nn->num_r_vecs; r++) {
2012 		napi_enable(&nn->r_vecs[r].napi);
2013 		enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2014 	}
2015 
2016 	netif_tx_wake_all_queues(nn->netdev);
2017 
2018 	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2019 	nfp_net_read_link_status(nn);
2020 }
2021 
2022 static int nfp_net_netdev_open(struct net_device *netdev)
2023 {
2024 	struct nfp_net *nn = netdev_priv(netdev);
2025 	int err, r;
2026 
2027 	if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
2028 		nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
2029 		return -EBUSY;
2030 	}
2031 
2032 	/* Step 1: Allocate resources for rings and the like
2033 	 * - Request interrupts
2034 	 * - Allocate RX and TX ring resources
2035 	 * - Setup initial RSS table
2036 	 */
2037 	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2038 				      nn->exn_name, sizeof(nn->exn_name),
2039 				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2040 	if (err)
2041 		return err;
2042 	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2043 				      nn->lsc_name, sizeof(nn->lsc_name),
2044 				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2045 	if (err)
2046 		goto err_free_exn;
2047 	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2048 
2049 	nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
2050 			       GFP_KERNEL);
2051 	if (!nn->rx_rings)
2052 		goto err_free_lsc;
2053 	nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
2054 			       GFP_KERNEL);
2055 	if (!nn->tx_rings)
2056 		goto err_free_rx_rings;
2057 
2058 	for (r = 0; r < nn->num_r_vecs; r++) {
2059 		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2060 		if (err)
2061 			goto err_free_prev_vecs;
2062 
2063 		err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
2064 		if (err)
2065 			goto err_cleanup_vec_p;
2066 
2067 		err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
2068 					    nn->fl_bufsz, nn->rxd_cnt);
2069 		if (err)
2070 			goto err_free_tx_ring_p;
2071 
2072 		err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
2073 		if (err)
2074 			goto err_flush_rx_ring_p;
2075 	}
2076 
2077 	err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
2078 	if (err)
2079 		goto err_free_rings;
2080 
2081 	err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
2082 	if (err)
2083 		goto err_free_rings;
2084 
2085 	/* Step 2: Configure the NFP
2086 	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2087 	 * - Write MAC address (in case it changed)
2088 	 * - Set the MTU
2089 	 * - Set the Freelist buffer size
2090 	 * - Enable the FW
2091 	 */
2092 	err = nfp_net_set_config_and_enable(nn);
2093 	if (err)
2094 		goto err_free_rings;
2095 
2096 	/* Step 3: Enable for kernel
2097 	 * - put some freelist descriptors on each RX ring
2098 	 * - enable NAPI on each ring
2099 	 * - enable all TX queues
2100 	 * - set link state
2101 	 */
2102 	nfp_net_open_stack(nn);
2103 
2104 	return 0;
2105 
2106 err_free_rings:
2107 	r = nn->num_r_vecs;
2108 err_free_prev_vecs:
2109 	while (r--) {
2110 		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2111 err_flush_rx_ring_p:
2112 		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2113 err_free_tx_ring_p:
2114 		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2115 err_cleanup_vec_p:
2116 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2117 	}
2118 	kfree(nn->tx_rings);
2119 err_free_rx_rings:
2120 	kfree(nn->rx_rings);
2121 err_free_lsc:
2122 	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2123 err_free_exn:
2124 	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2125 	return err;
2126 }
2127 
2128 /**
2129  * nfp_net_close_stack() - Quiescent the stack (part of close)
2130  * @nn:	     NFP Net device to reconfigure
2131  */
2132 static void nfp_net_close_stack(struct nfp_net *nn)
2133 {
2134 	unsigned int r;
2135 
2136 	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2137 	netif_carrier_off(nn->netdev);
2138 	nn->link_up = false;
2139 
2140 	for (r = 0; r < nn->num_r_vecs; r++) {
2141 		disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2142 		napi_disable(&nn->r_vecs[r].napi);
2143 	}
2144 
2145 	netif_tx_disable(nn->netdev);
2146 }
2147 
2148 /**
2149  * nfp_net_close_free_all() - Free all runtime resources
2150  * @nn:      NFP Net device to reconfigure
2151  */
2152 static void nfp_net_close_free_all(struct nfp_net *nn)
2153 {
2154 	unsigned int r;
2155 
2156 	for (r = 0; r < nn->num_r_vecs; r++) {
2157 		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2158 		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2159 		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2160 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2161 	}
2162 
2163 	kfree(nn->rx_rings);
2164 	kfree(nn->tx_rings);
2165 
2166 	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2167 	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2168 }
2169 
2170 /**
2171  * nfp_net_netdev_close() - Called when the device is downed
2172  * @netdev:      netdev structure
2173  */
2174 static int nfp_net_netdev_close(struct net_device *netdev)
2175 {
2176 	struct nfp_net *nn = netdev_priv(netdev);
2177 
2178 	if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
2179 		nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
2180 		return 0;
2181 	}
2182 
2183 	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
2184 	 */
2185 	nfp_net_close_stack(nn);
2186 
2187 	/* Step 2: Tell NFP
2188 	 */
2189 	nfp_net_clear_config_and_disable(nn);
2190 
2191 	/* Step 3: Free resources
2192 	 */
2193 	nfp_net_close_free_all(nn);
2194 
2195 	nn_dbg(nn, "%s down", netdev->name);
2196 	return 0;
2197 }
2198 
2199 static void nfp_net_set_rx_mode(struct net_device *netdev)
2200 {
2201 	struct nfp_net *nn = netdev_priv(netdev);
2202 	u32 new_ctrl;
2203 
2204 	new_ctrl = nn->ctrl;
2205 
2206 	if (netdev->flags & IFF_PROMISC) {
2207 		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2208 			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2209 		else
2210 			nn_warn(nn, "FW does not support promiscuous mode\n");
2211 	} else {
2212 		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2213 	}
2214 
2215 	if (new_ctrl == nn->ctrl)
2216 		return;
2217 
2218 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2219 	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2220 
2221 	nn->ctrl = new_ctrl;
2222 }
2223 
2224 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
2225 {
2226 	unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
2227 	struct nfp_net *nn = netdev_priv(netdev);
2228 	struct nfp_net_rx_ring *tmp_rings;
2229 	int err;
2230 
2231 	if (new_mtu < 68 || new_mtu > nn->max_mtu) {
2232 		nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
2233 		return -EINVAL;
2234 	}
2235 
2236 	old_mtu = netdev->mtu;
2237 	old_fl_bufsz = nn->fl_bufsz;
2238 	new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
2239 
2240 	if (!netif_running(netdev)) {
2241 		netdev->mtu = new_mtu;
2242 		nn->fl_bufsz = new_fl_bufsz;
2243 		return 0;
2244 	}
2245 
2246 	/* Prepare new rings */
2247 	tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
2248 						    nn->rxd_cnt);
2249 	if (!tmp_rings)
2250 		return -ENOMEM;
2251 
2252 	/* Stop device, swap in new rings, try to start the firmware */
2253 	nfp_net_close_stack(nn);
2254 	nfp_net_clear_config_and_disable(nn);
2255 
2256 	tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2257 
2258 	netdev->mtu = new_mtu;
2259 	nn->fl_bufsz = new_fl_bufsz;
2260 
2261 	err = nfp_net_set_config_and_enable(nn);
2262 	if (err) {
2263 		const int err_new = err;
2264 
2265 		/* Try with old configuration and old rings */
2266 		tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2267 
2268 		netdev->mtu = old_mtu;
2269 		nn->fl_bufsz = old_fl_bufsz;
2270 
2271 		err = __nfp_net_set_config_and_enable(nn);
2272 		if (err)
2273 			nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
2274 			       err_new, err);
2275 	}
2276 
2277 	nfp_net_shadow_rx_rings_free(nn, tmp_rings);
2278 
2279 	nfp_net_open_stack(nn);
2280 
2281 	return err;
2282 }
2283 
2284 int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
2285 {
2286 	struct nfp_net_tx_ring *tx_rings = NULL;
2287 	struct nfp_net_rx_ring *rx_rings = NULL;
2288 	u32 old_rxd_cnt, old_txd_cnt;
2289 	int err;
2290 
2291 	if (!netif_running(nn->netdev)) {
2292 		nn->rxd_cnt = rxd_cnt;
2293 		nn->txd_cnt = txd_cnt;
2294 		return 0;
2295 	}
2296 
2297 	old_rxd_cnt = nn->rxd_cnt;
2298 	old_txd_cnt = nn->txd_cnt;
2299 
2300 	/* Prepare new rings */
2301 	if (nn->rxd_cnt != rxd_cnt) {
2302 		rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
2303 							   rxd_cnt);
2304 		if (!rx_rings)
2305 			return -ENOMEM;
2306 	}
2307 	if (nn->txd_cnt != txd_cnt) {
2308 		tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
2309 		if (!tx_rings) {
2310 			nfp_net_shadow_rx_rings_free(nn, rx_rings);
2311 			return -ENOMEM;
2312 		}
2313 	}
2314 
2315 	/* Stop device, swap in new rings, try to start the firmware */
2316 	nfp_net_close_stack(nn);
2317 	nfp_net_clear_config_and_disable(nn);
2318 
2319 	if (rx_rings)
2320 		rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
2321 	if (tx_rings)
2322 		tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
2323 
2324 	nn->rxd_cnt = rxd_cnt;
2325 	nn->txd_cnt = txd_cnt;
2326 
2327 	err = nfp_net_set_config_and_enable(nn);
2328 	if (err) {
2329 		const int err_new = err;
2330 
2331 		/* Try with old configuration and old rings */
2332 		if (rx_rings)
2333 			rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
2334 		if (tx_rings)
2335 			tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
2336 
2337 		nn->rxd_cnt = old_rxd_cnt;
2338 		nn->txd_cnt = old_txd_cnt;
2339 
2340 		err = __nfp_net_set_config_and_enable(nn);
2341 		if (err)
2342 			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
2343 			       err_new, err);
2344 	}
2345 
2346 	nfp_net_shadow_rx_rings_free(nn, rx_rings);
2347 	nfp_net_shadow_tx_rings_free(nn, tx_rings);
2348 
2349 	nfp_net_open_stack(nn);
2350 
2351 	return err;
2352 }
2353 
2354 static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
2355 						struct rtnl_link_stats64 *stats)
2356 {
2357 	struct nfp_net *nn = netdev_priv(netdev);
2358 	int r;
2359 
2360 	for (r = 0; r < nn->num_r_vecs; r++) {
2361 		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
2362 		u64 data[3];
2363 		unsigned int start;
2364 
2365 		do {
2366 			start = u64_stats_fetch_begin(&r_vec->rx_sync);
2367 			data[0] = r_vec->rx_pkts;
2368 			data[1] = r_vec->rx_bytes;
2369 			data[2] = r_vec->rx_drops;
2370 		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
2371 		stats->rx_packets += data[0];
2372 		stats->rx_bytes += data[1];
2373 		stats->rx_dropped += data[2];
2374 
2375 		do {
2376 			start = u64_stats_fetch_begin(&r_vec->tx_sync);
2377 			data[0] = r_vec->tx_pkts;
2378 			data[1] = r_vec->tx_bytes;
2379 			data[2] = r_vec->tx_errors;
2380 		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
2381 		stats->tx_packets += data[0];
2382 		stats->tx_bytes += data[1];
2383 		stats->tx_errors += data[2];
2384 	}
2385 
2386 	return stats;
2387 }
2388 
2389 static int nfp_net_set_features(struct net_device *netdev,
2390 				netdev_features_t features)
2391 {
2392 	netdev_features_t changed = netdev->features ^ features;
2393 	struct nfp_net *nn = netdev_priv(netdev);
2394 	u32 new_ctrl;
2395 	int err;
2396 
2397 	/* Assume this is not called with features we have not advertised */
2398 
2399 	new_ctrl = nn->ctrl;
2400 
2401 	if (changed & NETIF_F_RXCSUM) {
2402 		if (features & NETIF_F_RXCSUM)
2403 			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
2404 		else
2405 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
2406 	}
2407 
2408 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2409 		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
2410 			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
2411 		else
2412 			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
2413 	}
2414 
2415 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
2416 		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
2417 			new_ctrl |= NFP_NET_CFG_CTRL_LSO;
2418 		else
2419 			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
2420 	}
2421 
2422 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2423 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
2424 			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
2425 		else
2426 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
2427 	}
2428 
2429 	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
2430 		if (features & NETIF_F_HW_VLAN_CTAG_TX)
2431 			new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
2432 		else
2433 			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
2434 	}
2435 
2436 	if (changed & NETIF_F_SG) {
2437 		if (features & NETIF_F_SG)
2438 			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
2439 		else
2440 			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
2441 	}
2442 
2443 	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
2444 	       netdev->features, features, changed);
2445 
2446 	if (new_ctrl == nn->ctrl)
2447 		return 0;
2448 
2449 	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
2450 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2451 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
2452 	if (err)
2453 		return err;
2454 
2455 	nn->ctrl = new_ctrl;
2456 
2457 	return 0;
2458 }
2459 
2460 static netdev_features_t
2461 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
2462 		       netdev_features_t features)
2463 {
2464 	u8 l4_hdr;
2465 
2466 	/* We can't do TSO over double tagged packets (802.1AD) */
2467 	features &= vlan_features_check(skb, features);
2468 
2469 	if (!skb->encapsulation)
2470 		return features;
2471 
2472 	/* Ensure that inner L4 header offset fits into TX descriptor field */
2473 	if (skb_is_gso(skb)) {
2474 		u32 hdrlen;
2475 
2476 		hdrlen = skb_inner_transport_header(skb) - skb->data +
2477 			inner_tcp_hdrlen(skb);
2478 
2479 		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
2480 			features &= ~NETIF_F_GSO_MASK;
2481 	}
2482 
2483 	/* VXLAN/GRE check */
2484 	switch (vlan_get_protocol(skb)) {
2485 	case htons(ETH_P_IP):
2486 		l4_hdr = ip_hdr(skb)->protocol;
2487 		break;
2488 	case htons(ETH_P_IPV6):
2489 		l4_hdr = ipv6_hdr(skb)->nexthdr;
2490 		break;
2491 	default:
2492 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2493 	}
2494 
2495 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
2496 	    skb->inner_protocol != htons(ETH_P_TEB) ||
2497 	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
2498 	    (l4_hdr == IPPROTO_UDP &&
2499 	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
2500 	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
2501 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2502 
2503 	return features;
2504 }
2505 
2506 /**
2507  * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
2508  * @nn:   NFP Net device to reconfigure
2509  * @idx:  Index into the port table where new port should be written
2510  * @port: UDP port to configure (pass zero to remove VXLAN port)
2511  */
2512 static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
2513 {
2514 	int i;
2515 
2516 	nn->vxlan_ports[idx] = port;
2517 
2518 	if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
2519 		return;
2520 
2521 	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
2522 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
2523 		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2524 			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
2525 			  be16_to_cpu(nn->vxlan_ports[i]));
2526 
2527 	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
2528 }
2529 
2530 /**
2531  * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
2532  * @nn:   NFP Network structure
2533  * @port: UDP port to look for
2534  *
2535  * Return: if the port is already in the table -- it's position;
2536  *	   if the port is not in the table -- free position to use;
2537  *	   if the table is full -- -ENOSPC.
2538  */
2539 static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
2540 {
2541 	int i, free_idx = -ENOSPC;
2542 
2543 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
2544 		if (nn->vxlan_ports[i] == port)
2545 			return i;
2546 		if (!nn->vxlan_usecnt[i])
2547 			free_idx = i;
2548 	}
2549 
2550 	return free_idx;
2551 }
2552 
2553 static void nfp_net_add_vxlan_port(struct net_device *netdev,
2554 				   sa_family_t sa_family, __be16 port)
2555 {
2556 	struct nfp_net *nn = netdev_priv(netdev);
2557 	int idx;
2558 
2559 	idx = nfp_net_find_vxlan_idx(nn, port);
2560 	if (idx == -ENOSPC)
2561 		return;
2562 
2563 	if (!nn->vxlan_usecnt[idx]++)
2564 		nfp_net_set_vxlan_port(nn, idx, port);
2565 }
2566 
2567 static void nfp_net_del_vxlan_port(struct net_device *netdev,
2568 				   sa_family_t sa_family, __be16 port)
2569 {
2570 	struct nfp_net *nn = netdev_priv(netdev);
2571 	int idx;
2572 
2573 	idx = nfp_net_find_vxlan_idx(nn, port);
2574 	if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC)
2575 		return;
2576 
2577 	if (!--nn->vxlan_usecnt[idx])
2578 		nfp_net_set_vxlan_port(nn, idx, 0);
2579 }
2580 
2581 static const struct net_device_ops nfp_net_netdev_ops = {
2582 	.ndo_open		= nfp_net_netdev_open,
2583 	.ndo_stop		= nfp_net_netdev_close,
2584 	.ndo_start_xmit		= nfp_net_tx,
2585 	.ndo_get_stats64	= nfp_net_stat64,
2586 	.ndo_tx_timeout		= nfp_net_tx_timeout,
2587 	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
2588 	.ndo_change_mtu		= nfp_net_change_mtu,
2589 	.ndo_set_mac_address	= eth_mac_addr,
2590 	.ndo_set_features	= nfp_net_set_features,
2591 	.ndo_features_check	= nfp_net_features_check,
2592 	.ndo_add_vxlan_port     = nfp_net_add_vxlan_port,
2593 	.ndo_del_vxlan_port     = nfp_net_del_vxlan_port,
2594 };
2595 
2596 /**
2597  * nfp_net_info() - Print general info about the NIC
2598  * @nn:      NFP Net device to reconfigure
2599  */
2600 void nfp_net_info(struct nfp_net *nn)
2601 {
2602 	nn_info(nn, "Netronome %s %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
2603 		nn->is_nfp3200 ? "NFP-32xx" : "NFP-6xxx",
2604 		nn->is_vf ? "VF " : "",
2605 		nn->num_tx_rings, nn->max_tx_rings,
2606 		nn->num_rx_rings, nn->max_rx_rings);
2607 	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
2608 		nn->fw_ver.resv, nn->fw_ver.class,
2609 		nn->fw_ver.major, nn->fw_ver.minor,
2610 		nn->max_mtu);
2611 	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2612 		nn->cap,
2613 		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
2614 		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
2615 		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
2616 		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
2617 		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
2618 		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
2619 		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
2620 		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
2621 		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
2622 		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO "      : "",
2623 		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS "      : "",
2624 		nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
2625 		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
2626 		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
2627 		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
2628 		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "	  : "");
2629 }
2630 
2631 /**
2632  * nfp_net_netdev_alloc() - Allocate netdev and related structure
2633  * @pdev:         PCI device
2634  * @max_tx_rings: Maximum number of TX rings supported by device
2635  * @max_rx_rings: Maximum number of RX rings supported by device
2636  *
2637  * This function allocates a netdev device and fills in the initial
2638  * part of the @struct nfp_net structure.
2639  *
2640  * Return: NFP Net device structure, or ERR_PTR on error.
2641  */
2642 struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
2643 				     int max_tx_rings, int max_rx_rings)
2644 {
2645 	struct net_device *netdev;
2646 	struct nfp_net *nn;
2647 	int nqs;
2648 
2649 	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
2650 				    max_tx_rings, max_rx_rings);
2651 	if (!netdev)
2652 		return ERR_PTR(-ENOMEM);
2653 
2654 	SET_NETDEV_DEV(netdev, &pdev->dev);
2655 	nn = netdev_priv(netdev);
2656 
2657 	nn->netdev = netdev;
2658 	nn->pdev = pdev;
2659 
2660 	nn->max_tx_rings = max_tx_rings;
2661 	nn->max_rx_rings = max_rx_rings;
2662 
2663 	nqs = netif_get_num_default_rss_queues();
2664 	nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
2665 	nn->num_rx_rings = min_t(int, nqs, max_rx_rings);
2666 
2667 	nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
2668 	nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
2669 
2670 	spin_lock_init(&nn->reconfig_lock);
2671 	spin_lock_init(&nn->link_status_lock);
2672 
2673 	setup_timer(&nn->reconfig_timer,
2674 		    nfp_net_reconfig_timer, (unsigned long)nn);
2675 
2676 	return nn;
2677 }
2678 
2679 /**
2680  * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
2681  * @nn:      NFP Net device to reconfigure
2682  */
2683 void nfp_net_netdev_free(struct nfp_net *nn)
2684 {
2685 	free_netdev(nn->netdev);
2686 }
2687 
2688 /**
2689  * nfp_net_rss_init() - Set the initial RSS parameters
2690  * @nn:	     NFP Net device to reconfigure
2691  */
2692 static void nfp_net_rss_init(struct nfp_net *nn)
2693 {
2694 	int i;
2695 
2696 	netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
2697 
2698 	for (i = 0; i < sizeof(nn->rss_itbl); i++)
2699 		nn->rss_itbl[i] =
2700 			ethtool_rxfh_indir_default(i, nn->num_rx_rings);
2701 
2702 	/* Enable IPv4/IPv6 TCP by default */
2703 	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
2704 		      NFP_NET_CFG_RSS_IPV6_TCP |
2705 		      NFP_NET_CFG_RSS_TOEPLITZ |
2706 		      NFP_NET_CFG_RSS_MASK;
2707 }
2708 
2709 /**
2710  * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
2711  * @nn:	     NFP Net device to reconfigure
2712  */
2713 static void nfp_net_irqmod_init(struct nfp_net *nn)
2714 {
2715 	nn->rx_coalesce_usecs      = 50;
2716 	nn->rx_coalesce_max_frames = 64;
2717 	nn->tx_coalesce_usecs      = 50;
2718 	nn->tx_coalesce_max_frames = 64;
2719 }
2720 
2721 /**
2722  * nfp_net_netdev_init() - Initialise/finalise the netdev structure
2723  * @netdev:      netdev structure
2724  *
2725  * Return: 0 on success or negative errno on error.
2726  */
2727 int nfp_net_netdev_init(struct net_device *netdev)
2728 {
2729 	struct nfp_net *nn = netdev_priv(netdev);
2730 	int err;
2731 
2732 	/* Get some of the read-only fields from the BAR */
2733 	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
2734 	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
2735 
2736 	nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
2737 
2738 	/* Set default MTU and Freelist buffer size */
2739 	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
2740 		netdev->mtu = nn->max_mtu;
2741 	else
2742 		netdev->mtu = NFP_NET_DEFAULT_MTU;
2743 	nn->fl_bufsz = NFP_NET_DEFAULT_RX_BUFSZ;
2744 
2745 	/* Advertise/enable offloads based on capabilities
2746 	 *
2747 	 * Note: netdev->features show the currently enabled features
2748 	 * and netdev->hw_features advertises which features are
2749 	 * supported.  By default we enable most features.
2750 	 */
2751 	netdev->hw_features = NETIF_F_HIGHDMA;
2752 	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
2753 		netdev->hw_features |= NETIF_F_RXCSUM;
2754 		nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
2755 	}
2756 	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
2757 		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2758 		nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
2759 	}
2760 	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
2761 		netdev->hw_features |= NETIF_F_SG;
2762 		nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
2763 	}
2764 	if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
2765 		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2766 		nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
2767 	}
2768 	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
2769 		netdev->hw_features |= NETIF_F_RXHASH;
2770 		nfp_net_rss_init(nn);
2771 		nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
2772 	}
2773 	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
2774 	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
2775 		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
2776 			netdev->hw_features |= NETIF_F_GSO_GRE |
2777 					       NETIF_F_GSO_UDP_TUNNEL;
2778 		nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
2779 
2780 		netdev->hw_enc_features = netdev->hw_features;
2781 	}
2782 
2783 	netdev->vlan_features = netdev->hw_features;
2784 
2785 	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
2786 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
2787 		nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
2788 	}
2789 	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
2790 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
2791 		nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
2792 	}
2793 
2794 	netdev->features = netdev->hw_features;
2795 
2796 	/* Advertise but disable TSO by default. */
2797 	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2798 
2799 	/* Allow L2 Broadcast and Multicast through by default, if supported */
2800 	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
2801 		nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
2802 	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
2803 		nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;
2804 
2805 	/* Allow IRQ moderation, if supported */
2806 	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
2807 		nfp_net_irqmod_init(nn);
2808 		nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
2809 	}
2810 
2811 	/* On NFP-3200 enable MSI-X auto-masking, if supported and the
2812 	 * interrupts are not shared.
2813 	 */
2814 	if (nn->is_nfp3200 && nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO)
2815 		nn->ctrl |= NFP_NET_CFG_CTRL_MSIXAUTO;
2816 
2817 	/* On NFP4000/NFP6000, determine RX packet/metadata boundary offset */
2818 	if (nn->fw_ver.major >= 2)
2819 		nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
2820 	else
2821 		nn->rx_offset = NFP_NET_RX_OFFSET;
2822 
2823 	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
2824 	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
2825 
2826 	/* Make sure the FW knows the netdev is supposed to be disabled here */
2827 	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
2828 	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2829 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2830 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
2831 				   NFP_NET_CFG_UPDATE_GEN);
2832 	if (err)
2833 		return err;
2834 
2835 	/* Finalise the netdev setup */
2836 	ether_setup(netdev);
2837 	netdev->netdev_ops = &nfp_net_netdev_ops;
2838 	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
2839 	netif_carrier_off(netdev);
2840 
2841 	nfp_net_set_ethtool_ops(netdev);
2842 	nfp_net_irqs_assign(netdev);
2843 
2844 	return register_netdev(netdev);
2845 }
2846 
2847 /**
2848  * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
2849  * @netdev:      netdev structure
2850  */
2851 void nfp_net_netdev_clean(struct net_device *netdev)
2852 {
2853 	unregister_netdev(netdev);
2854 }
2855