// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

/*
 * nfp_net_common.c
 * Netronome network device driver: Common functions between PF and VF
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 *          Chris Telfer <chris.telfer@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>

#include <net/tls.h>
#include <net/vxlan.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>

#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_dp.h"
#include "nfp_net_sriov.h"
#include "nfp_net_xsk.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
#include "crypto/fw.h"

static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr);

/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:	Output fw_version structure to read into
 * @ctrl_bar:	Mapped address of the control BAR
 */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}

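/**
 * nfp_qcp_queue_offset() - Calculate the offset of a QCP queue
 * @dev_info:	NFP device information
 * @queue:	Queue index
 *
 * Return: BAR offset of the queue controller peripheral queue.
 */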
u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
{
	queue &= dev_info->qc_idx_mask;
	return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
}

/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted).  All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
	nn->reconfig_in_progress_update = update;
}

/* Pass 0 as update to run posted reconfigs. */
static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
{
	update |= nn->reconfig_posted;
	nn->reconfig_posted = 0;

	nfp_net_reconfig_start(nn, update);

	nn->reconfig_timer_active = true;
	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
}

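/* Check whether the last reconfig was acked by the firmware.  Returns true
 * once the update word reads back as zero or an error was reported; with
 * @last_check set a still-pending update is logged as a timeout.
 */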
static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 reg;

	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
	if (reg == 0)
		return true;
	if (reg & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	} else if (last_check) {
		nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	}

	return false;
}

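/* Wait for the firmware to ack a reconfig, return true on timeout */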
static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	bool timed_out = false;
	int i;

	/* Poll update field, waiting for NFP to ack the config.
	 * Do an opportunistic busy-wait loop first, afterwards sleep.
	 */
	for (i = 0; i < 50; i++) {
		if (nfp_net_reconfig_check_done(nn, false))
			return false;
		udelay(4);
	}

	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
		usleep_range(250, 500);
		timed_out = time_is_before_eq_jiffies(deadline);
	}

	return timed_out;
}

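/* Wait for a reconfig to complete, return -EIO on timeout or FW error */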
static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	if (__nfp_net_reconfig_wait(nn, deadline))
		return -EIO;

	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
		return -EIO;

	return 0;
}

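/* Timer callback for posted reconfigs - check the result of the previous
 * update and kick off any requests posted in the meantime.
 */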
static void nfp_net_reconfig_timer(struct timer_list *t)
{
	struct nfp_net *nn = from_timer(nn, t, reconfig_timer);

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_timer_active = false;

	/* If sync caller is present it will take over from us */
	if (nn->reconfig_sync_present)
		goto done;

	/* Read reconfig status and report errors */
	nfp_net_reconfig_check_done(nn, true);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Multiple requests can be
 * merged together!
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconf when it's done, just post */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	if (!nn->reconfig_timer_active ||
	    nfp_net_reconfig_check_done(nn, false))
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

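/* Take the reconfig machinery over from async callers - cancel the posted
 * reconfig timer, wait for any update still in flight and run the reconfigs
 * which were posted before we entered.
 */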
static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
{
	bool cancelled_timer = false;
	u32 pre_posted_requests;

	spin_lock_bh(&nn->reconfig_lock);

	WARN_ON(nn->reconfig_sync_present);
	nn->reconfig_sync_present = true;

	if (nn->reconfig_timer_active) {
		nn->reconfig_timer_active = false;
		cancelled_timer = true;
	}
	pre_posted_requests = nn->reconfig_posted;
	nn->reconfig_posted = 0;

	spin_unlock_bh(&nn->reconfig_lock);

	if (cancelled_timer) {
		del_timer_sync(&nn->reconfig_timer);
		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
	}

	/* Run the posted reconfigs which were issued before we started */
	if (pre_posted_requests) {
		nfp_net_reconfig_start(nn, pre_posted_requests);
		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
	}
}

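/* Flush all posted reconfigs and hand the machinery back to async use */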
static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
{
	nfp_net_reconfig_sync_enter(nn);

	spin_lock_bh(&nn->reconfig_lock);
	nn->reconfig_sync_present = false;
	spin_unlock_bh(&nn->reconfig_lock);
}

/**
 * __nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nfp_net_reconfig_sync_enter(nn);

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);

	nn->reconfig_sync_present = false;

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}

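/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Locked version of __nfp_net_reconfig() - takes the control BAR lock.
 *
 * Return: Negative errno on error, 0 on success
 */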
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nn_ctrl_bar_lock(nn);
	ret = __nfp_net_reconfig(nn, update);
	nn_ctrl_bar_unlock(nn);

	return ret;
}

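/* Check that the mailbox can fit @data_size bytes of command data and take
 * the control BAR lock, which the caller must release (e.g. via
 * nfp_net_mbox_reconfig_and_unlock()).
 */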
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
{
	if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
		nn_err(nn, "mailbox too small for %u of data (%u)\n",
		       data_size, nn->tlv_caps.mbox_len);
		return -EIO;
	}

	nn_ctrl_bar_lock(nn);
	return 0;
}

/**
 * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
 * @nn:        NFP Net device to reconfigure
 * @mbox_cmd:  The value for the mailbox command
 *
 * Helper function for mailbox updates
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;
	int ret;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
	if (ret) {
		nn_err(nn, "Mailbox update error\n");
		return ret;
	}

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}

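/* Post a mailbox command asynchronously, the result must be collected
 * later with nfp_net_mbox_reconfig_wait_posted().
 */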
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
}

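/* Wait for posted mailbox commands to finish, return negated FW result */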
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nfp_net_reconfig_wait_posted(nn);

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}

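/* Run a mailbox command and drop the control BAR lock taken by
 * nfp_net_mbox_lock().
 */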
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
	int ret;

	ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
	nn_ctrl_bar_unlock(nn);
	return ret;
}

/* Interrupt configuration and handling
 */

/**
 * nfp_net_irqs_alloc() - allocates MSI-X irqs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the irq entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: Number of irqs obtained or 0 on error.
 */
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int wanted_irqs)
{
	unsigned int i;
	int got_irqs;

	for (i = 0; i < wanted_irqs; i++)
		irq_entries[i].entry = i;

	got_irqs = pci_enable_msix_range(pdev, irq_entries,
					 min_irqs, wanted_irqs);
	if (got_irqs < 0) {
		dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
			min_irqs, wanted_irqs, got_irqs);
		return 0;
	}

	if (got_irqs < wanted_irqs)
		dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
			 wanted_irqs, got_irqs);

	return got_irqs;
}

/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:		 NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:		 Size of @irq_entries (number of entries to grab)
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n)
{
	struct nfp_net_dp *dp = &nn->dp;

	nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
	dp->num_r_vecs = nn->max_r_vecs;

	memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);

	if (dp->num_rx_rings > dp->num_r_vecs ||
	    dp->num_tx_rings > dp->num_r_vecs)
		dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
			 dp->num_rx_rings, dp->num_tx_rings,
			 dp->num_r_vecs);

	dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
	dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
	dp->num_stack_tx_rings = dp->num_tx_rings;
}

/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev:        PCI device structure
 *
 * Undoes what nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct pci_dev *pdev)
{
	pci_disable_msix(pdev);
}

/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	/* Currently we cannot tell whether it is an RX or a TX interrupt.
	 * Since DIM does not need an exact event_ctr for its calculations,
	 * we simply use this one counter for both RX and TX DIM.
	 */
	r_vec->event_ctr++;

	napi_schedule_irqoff(&r_vec->napi);

	/* The FW auto-masks any interrupt, either via the MASK bit in
	 * the MSI-X table or via the per entry ICR field.  So there
	 * is no need to disable interrupts here.
	 */
	return IRQ_HANDLED;
}

static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	tasklet_schedule(&r_vec->tasklet);

	return IRQ_HANDLED;
}

/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:       NFP Network structure
 */
static void nfp_net_read_link_status(struct nfp_net *nn)
{
	unsigned long flags;
	bool link_up;
	u16 sts;

	spin_lock_irqsave(&nn->link_status_lock, flags);

	sts = nn_readw(nn, NFP_NET_CFG_STS);
	link_up = !!(sts & NFP_NET_CFG_STS_LINK);

	if (nn->link_up == link_up)
		goto out;

	nn->link_up = link_up;
	if (nn->port) {
		set_bit(NFP_PORT_CHANGED, &nn->port->flags);
		if (nn->port->link_cb)
			nn->port->link_cb(nn->port);
	}

	if (nn->link_up) {
		netif_carrier_on(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Up\n");
	} else {
		netif_carrier_off(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Down\n");
	}
out:
	spin_unlock_irqrestore(&nn->link_status_lock, flags);
}

/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
	struct nfp_net *nn = data;
	struct msix_entry *entry;

	entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];

	nfp_net_read_link_status(nn);

	nfp_net_irq_unmask(nn, entry->entry);

	return IRQ_HANDLED;
}

/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
{
	struct nfp_net *nn = data;

	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
	/* XXX TO BE IMPLEMENTED */
	return IRQ_HANDLED;
}

/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:	printf-style format to construct the interrupt name
 * @name:	Pointer to allocated space for interrupt name
 * @name_sz:	Size of space for interrupt name
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 * @handler:	IRQ handler to register for this interrupt
 */
static int
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
			const char *format, char *name, size_t name_sz,
			unsigned int vector_idx, irq_handler_t handler)
{
	struct msix_entry *entry;
	int err;

	entry = &nn->irq_entries[vector_idx];

	snprintf(name, name_sz, format, nfp_net_name(nn));
	err = request_irq(entry->vector, handler, 0, name, nn);
	if (err) {
		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
		       entry->vector, err);
		return err;
	}
	nn_writeb(nn, ctrl_offset, entry->entry);
	nfp_net_irq_unmask(nn, entry->entry);

	return 0;
}

/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 */
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
				 unsigned int vector_idx)
{
	nn_writeb(nn, ctrl_offset, 0xff);
	nn_pci_flush(nn);
	free_irq(nn->irq_entries[vector_idx].vector, nn);
}

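/**
 * nfp_net_tls_tx() - Handle kTLS TX offload state for an outgoing frame
 * @dp:		NFP Net data path struct
 * @r_vec:	Ring vector the frame is queued on
 * @skb:	Frame to transmit
 * @tls_handle:	Output; FW connection handle for the TLS record
 * @nr_frags:	Number of frags, zeroed when the SW fallback linearizes @skb
 *
 * Return: skb to transmit (possibly SW-encrypted by the fallback path),
 *	   or NULL on error.
 */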
struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	struct sk_buff *nskb;
	bool resync_pending;
	u32 datalen, seq;

	if (likely(!dp->ktls_tx))
		return skb;
	if (!tls_is_skb_tx_device_offloaded(skb))
		return skb;

	datalen = skb->len - skb_tcp_all_headers(skb);
	seq = ntohl(tcp_hdr(skb)->seq);
	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	resync_pending = tls_offload_tx_resync_pending(skb->sk);
	if (unlikely(resync_pending || ntls->next_seq != seq)) {
		/* Pure ACK out of order already */
		if (!datalen)
			return skb;

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tls_tx_fallback++;
		u64_stats_update_end(&r_vec->tx_sync);

		nskb = tls_encrypt_skb(skb);
		if (!nskb) {
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tls_tx_no_fallback++;
			u64_stats_update_end(&r_vec->tx_sync);
			return NULL;
		}
		/* encryption wasn't necessary */
		if (nskb == skb)
			return skb;
		/* we don't re-check ring space */
		if (unlikely(skb_is_nonlinear(nskb))) {
			nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tx_errors++;
			u64_stats_update_end(&r_vec->tx_sync);
			dev_kfree_skb_any(nskb);
			return NULL;
		}

		/* jump forward, a TX may have gotten lost, need to sync TX */
		if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
			tls_offload_tx_resync_request(nskb->sk, seq,
						      ntls->next_seq);

		*nr_frags = 0;
		return nskb;
	}

	if (datalen) {
		u64_stats_update_begin(&r_vec->tx_sync);
		if (!skb_is_gso(skb))
			r_vec->hw_tls_tx++;
		else
			r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	ntls->next_seq += datalen;
#endif
	return skb;
}

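/* Roll back the TLS TX sequence number advance done in nfp_net_tls_tx()
 * for a frame which ended up not being transmitted.
 */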
void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	u32 datalen, seq;

	if (!tls_handle)
		return;
	if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb)))
		return;

	datalen = skb->len - skb_tcp_all_headers(skb);
	seq = ntohl(tcp_hdr(skb)->seq);

	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	if (ntls->next_seq == seq + datalen)
		ntls->next_seq = seq;
	else
		WARN_ON_ONCE(1);
#endif
}

static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct nfp_net *nn = netdev_priv(netdev);

	nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}

/* Receive processing */
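/* Size of the packet data portion of an RX buffer - prepend space (fixed
 * or worst case for the dynamic offset), Ethernet and VLAN headers and MTU.
 */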
static unsigned int
nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz = 0;

	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	return fl_bufsz;
}

static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);

	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}

static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = XDP_PACKET_HEADROOM;
	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);

	return fl_bufsz;
}

/* Setup and Configuration
 */

/**
 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
 * @nn:		NFP Network structure
 */
static void nfp_net_vecs_init(struct nfp_net *nn)
{
	int numa_node = dev_to_node(&nn->pdev->dev);
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	nn->lsc_handler = nfp_net_irq_lsc;
	nn->exn_handler = nfp_net_irq_exn;

	for (r = 0; r < nn->max_r_vecs; r++) {
		struct msix_entry *entry;

		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];

		r_vec = &nn->r_vecs[r];
		r_vec->nfp_net = nn;
		r_vec->irq_entry = entry->entry;
		r_vec->irq_vector = entry->vector;

		if (nn->dp.netdev) {
			r_vec->handler = nfp_net_irq_rxtx;
		} else {
			r_vec->handler = nfp_ctrl_irq_rxtx;

			__skb_queue_head_init(&r_vec->queue);
			spin_lock_init(&r_vec->lock);
			tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
			tasklet_disable(&r_vec->tasklet);
		}

		cpumask_set_cpu(cpumask_local_spread(r, numa_node), &r_vec->affinity_mask);
	}
}

static void
nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
	if (dp->netdev)
		netif_napi_add(dp->netdev, &r_vec->napi,
			       nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll);
	else
		tasklet_enable(&r_vec->tasklet);
}

static void
nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
{
	if (dp->netdev)
		netif_napi_del(&r_vec->napi);
	else
		tasklet_disable(&r_vec->tasklet);
}

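/* Point a ring vector at its RX/TX/XDP rings and XSK pool (if any) */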
static void
nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec, int idx)
{
	r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
	r_vec->tx_ring =
		idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;

	r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
		&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;

	if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
		r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;

		if (r_vec->xsk_pool)
			xsk_pool_set_rxq_info(r_vec->xsk_pool,
					      &r_vec->rx_ring->xdp_rxq);

		nfp_net_napi_del(dp, r_vec);
		nfp_net_napi_add(dp, r_vec, idx);
	}
}

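/* Add the vector's NAPI context and register its IRQ (left disabled until
 * the stack is brought up).
 */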
static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		       int idx)
{
	int err;

	nfp_net_napi_add(&nn->dp, r_vec, idx);

	snprintf(r_vec->name, sizeof(r_vec->name),
		 "%s-rxtx-%d", nfp_net_name(nn), idx);
	err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
			  r_vec->name, r_vec);
	if (err) {
		nfp_net_napi_del(&nn->dp, r_vec);
		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
		return err;
	}

	irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);

	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
	       r_vec->irq_entry);

	return 0;
}

static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
	irq_set_affinity_hint(r_vec->irq_vector, NULL);
	nfp_net_napi_del(&nn->dp, r_vec);
	free_irq(r_vec->irq_vector, r_vec);
}

/**
 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_rss_write_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
			  get_unaligned_le32(nn->rss_itbl + i));
}

/**
 * nfp_net_rss_write_key() - Write RSS hash key to device
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_rss_write_key(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
			  get_unaligned_le32(nn->rss_key + i));
}

/**
 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
{
	u8 i;
	u32 factor;
	u32 value;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;

	/* copy RX interrupt coalesce parameters */
	value = (nn->rx_coalesce_max_frames << 16) |
		(factor * nn->rx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_rx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);

	/* copy TX interrupt coalesce parameters */
	value = (nn->tx_coalesce_max_frames << 16) |
		(factor * nn->tx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_tx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}

/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:      NFP Net device to reconfigure
 * @addr:    MAC address to write
 *
 * Writes the MAC address from the netdev to the device control BAR.  Does not
 * perform the required reconfig.  We do a bit of a byte-swapping dance
 * because the firmware is LE.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
{
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
	nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}

/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 *
 * Warning: must be fully idempotent.
 */
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
	u32 new_ctrl, new_ctrl_w1, update;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;

	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)) {
		nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
		nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	}

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err)
		nn_err(nn, "Could not disable device: %d\n", err);

	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
		new_ctrl_w1 = nn->dp.ctrl_w1;
		new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_FREELIST_EN;
		nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
		nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);

		nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
		err = nfp_net_reconfig(nn, update);
		if (err)
			nn_err(nn, "Could not disable FREELIST_EN: %d\n", err);
		nn->dp.ctrl_w1 = new_ctrl_w1;
	}

	for (r = 0; r < nn->dp.num_rx_rings; r++) {
		nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
		if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
			nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
	}
	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_vec_clear_ring_data(nn, r);

	nn->dp.ctrl = new_ctrl;
}

/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn:      NFP Net device to reconfigure
 */
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
	u32 bufsz, new_ctrl, new_ctrl_w1, update = 0;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl_w1 = nn->dp.ctrl_w1;

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_write_key(nn);
		nfp_net_rss_write_itbl(nn);
		nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
		update |= NFP_NET_CFG_UPDATE_RSS;
	}

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_coalesce_write_cfg(nn);
		update |= NFP_NET_CFG_UPDATE_IRQMOD;
	}

	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
		  U64_MAX >> (64 - nn->dp.num_tx_rings));

	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
		  U64_MAX >> (64 - nn->dp.num_rx_rings));

	if (nn->dp.netdev)
		nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);

	bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);

	/* Enable device
	 * Step 1: Replace the CTRL_ENABLE by NFP_NET_CFG_CTRL_FREELIST_EN if
	 * FREELIST_EN exists.
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)
		new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN;
	else
		new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
	update |= NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;
	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	/* Step 2: Send the configuration and write the freelist.
	 * - The freelist needs to be written only once.
	 */
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
	err = nfp_net_reconfig(nn, update);
	if (err) {
		nfp_net_clear_config_and_disable(nn);
		return err;
	}

	nn->dp.ctrl = new_ctrl;
	nn->dp.ctrl_w1 = new_ctrl_w1;

	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);

	/* Step 3: Do the NFP_NET_CFG_CTRL_ENABLE.  Send the configuration.
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
		new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);

		err = nfp_net_reconfig(nn, update);
		if (err) {
			nfp_net_clear_config_and_disable(nn);
			return err;
		}
		nn->dp.ctrl = new_ctrl;
	}

	return 0;
}

/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:	     NFP Net device to reconfigure
 */
static void nfp_net_close_stack(struct nfp_net *nn)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	netif_carrier_off(nn->dp.netdev);
	nn->link_up = false;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		r_vec = &nn->r_vecs[r];

		disable_irq(r_vec->irq_vector);
		napi_disable(&r_vec->napi);

		if (r_vec->rx_ring)
			cancel_work_sync(&r_vec->rx_dim.work);

		if (r_vec->tx_ring)
			cancel_work_sync(&r_vec->tx_dim.work);
	}

	netif_tx_disable(nn->dp.netdev);
}

/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_close_free_all(struct nfp_net *nn)
{
	unsigned int r;

	nfp_net_tx_rings_free(&nn->dp);
	nfp_net_rx_rings_free(&nn->dp);

	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
}

/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:      netdev structure
 */
static int nfp_net_netdev_close(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
	nfp_net_close_stack(nn);

	/* Step 2: Tell NFP
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
		__dev_mc_unsync(netdev, nfp_net_mc_unsync);

	nfp_net_clear_config_and_disable(nn);
	nfp_port_configure(netdev, false);

	/* Step 3: Free resources
	 */
	nfp_net_close_free_all(nn);

	nn_dbg(nn, "%s down", netdev->name);
	return 0;
}

void nfp_ctrl_close(struct nfp_net *nn)
{
	int r;

	rtnl_lock();

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		disable_irq(nn->r_vecs[r].irq_vector);
		tasklet_disable(&nn->r_vecs[r].tasklet);
	}

	nfp_net_clear_config_and_disable(nn);

	nfp_net_close_free_all(nn);

	rtnl_unlock();
}

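/* Dynamic interrupt moderation worker - apply new RX coalescing parameters
 * computed by the DIM library and kick an IRQMOD reconfig.
 */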
static void nfp_net_rx_dim_work(struct work_struct *work)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int factor, value;
	struct dim_cq_moder moder;
	struct nfp_net *nn;
	struct dim *dim;

	dim = container_of(work, struct dim, work);
	moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
	nn = r_vec->nfp_net;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;
	if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
		return;

	/* copy RX interrupt coalesce parameters */
	value = (moder.pkts << 16) | (factor * moder.usec);
	nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
	(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);

	dim->state = DIM_START_MEASURE;
}

static void nfp_net_tx_dim_work(struct work_struct *work)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int factor, value;
	struct dim_cq_moder moder;
	struct nfp_net *nn;
	struct dim *dim;

	dim = container_of(work, struct dim, work);
	moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
	r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
	nn = r_vec->nfp_net;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;
	if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
		return;

	/* copy TX interrupt coalesce parameters */
	value = (moder.pkts << 16) | (factor * moder.usec);
	nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
	(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);

	dim->state = DIM_START_MEASURE;
}

/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_open_stack(struct nfp_net *nn)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		r_vec = &nn->r_vecs[r];

		if (r_vec->rx_ring) {
			INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
			r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}

		if (r_vec->tx_ring) {
			INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
			r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}

		napi_enable(&r_vec->napi);
		enable_irq(r_vec->irq_vector);
	}

	netif_tx_wake_all_queues(nn->dp.netdev);

	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	nfp_net_read_link_status(nn);
}

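/* Request all auxiliary and ring IRQs and allocate the ring resources
 * needed to open the device.
 */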
static int nfp_net_open_alloc_all(struct nfp_net *nn)
{
	int err, r;

	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
				      nn->exn_name, sizeof(nn->exn_name),
				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
	if (err)
		return err;
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
				      nn->lsc_name, sizeof(nn->lsc_name),
				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
	if (err)
		goto err_free_exn;
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err)
			goto err_cleanup_vec_p;
	}

	err = nfp_net_rx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_cleanup_vec;

	err = nfp_net_tx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_free_rx_rings;

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	return 0;

err_free_rx_rings:
	nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
	r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
	while (r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
	return err;
}

static int nfp_net_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
	err = nfp_net_open_alloc_all(nn);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
	if (err)
		goto err_free_all;

	err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
	if (err)
		goto err_free_all;

	/* Step 2: Configure the NFP
	 * - Ifup the physical interface if it exists
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
	err = nfp_port_configure(netdev, true);
	if (err)
		goto err_free_all;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_port_disable;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
	nfp_net_open_stack(nn);

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
err_free_all:
	nfp_net_close_free_all(nn);
	return err;
}

int nfp_ctrl_open(struct nfp_net *nn)
{
	int err, r;

	/* ring dumping depends on vNICs being opened/closed under rtnl */
	rtnl_lock();

	err = nfp_net_open_alloc_all(nn);
	if (err)
		goto err_unlock;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_free_all;

	for (r = 0; r < nn->dp.num_r_vecs; r++)
		enable_irq(nn->r_vecs[r].irq_vector);

	rtnl_unlock();

	return 0;

err_free_all:
	nfp_net_close_free_all(nn);
err_unlock:
	rtnl_unlock();
	return err;
}

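/* Queue a mailbox message for asynchronous execution - @cb is called from
 * the mbox_amsg work item to actually issue the command to the device.
 */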
int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
				 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
{
	struct nfp_mbox_amsg_entry *entry;

	entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->msg, data, len);
	entry->cmd = cmd;
	entry->cfg = cb;

	spin_lock_bh(&nn->mbox_amsg.lock);
	list_add_tail(&entry->list, &nn->mbox_amsg.list);
	spin_unlock_bh(&nn->mbox_amsg.lock);

	schedule_work(&nn->mbox_amsg.work);

	return 0;
}

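/* Work item draining the list of posted mailbox messages */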
static void nfp_net_mbox_amsg_work(struct work_struct *work)
{
	struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
	struct nfp_mbox_amsg_entry *entry, *tmp;
	struct list_head tmp_list;

	INIT_LIST_HEAD(&tmp_list);

	spin_lock_bh(&nn->mbox_amsg.lock);
	list_splice_init(&nn->mbox_amsg.list, &tmp_list);
	spin_unlock_bh(&nn->mbox_amsg.lock);

	list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
		int err = entry->cfg(nn, entry);

		if (err)
			nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);

		list_del(&entry->list);
		kfree(entry);
	}
}

static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
	unsigned char *addr = entry->msg;
	int ret;

	ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
	if (ret)
		return ret;

	nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
		  get_unaligned_be32(addr));
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
		  get_unaligned_be16(addr + 4));

	return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
}

static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
		nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
		       netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
		return -EINVAL;
	}

	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}

static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}

static void nfp_net_set_rx_mode(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl, new_ctrl_w1;

	new_ctrl = nn->dp.ctrl;
	new_ctrl_w1 = nn->dp.ctrl_w1;

	if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
		new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
	else
		new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;

	if (netdev->flags & IFF_ALLMULTI)
		new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_MCAST_FILTER;
	else
		new_ctrl_w1 |= nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER;

	if (netdev->flags & IFF_PROMISC) {
		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
		else
			nn_warn(nn, "FW does not support promiscuous mode\n");
	} else {
		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
	}

	if ((nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) &&
	    __dev_mc_sync(netdev, nfp_net_mc_sync, nfp_net_mc_unsync))
		netdev_err(netdev, "Sync mc address failed\n");

	if (new_ctrl == nn->dp.ctrl && new_ctrl_w1 == nn->dp.ctrl_w1)
		return;

	if (new_ctrl != nn->dp.ctrl)
		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	if (new_ctrl_w1 != nn->dp.ctrl_w1)
		nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);

	nn->dp.ctrl = new_ctrl;
	nn->dp.ctrl_w1 = new_ctrl_w1;
}

static void nfp_net_rss_init_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
}

static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	struct nfp_net_dp new_dp = *dp;

	*dp = nn->dp;
	nn->dp = new_dp;

	nn->dp.netdev->mtu = new_dp.mtu;

	if (!netif_is_rxfh_configured(nn->dp.netdev))
		nfp_net_rss_init_itbl(nn);
}

static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;
	int err;

	nfp_net_dp_swap(nn, dp);

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	err = netif_set_real_num_queues(nn->dp.netdev,
					nn->dp.num_stack_tx_rings,
					nn->dp.num_rx_rings);
	if (err)
		return err;

	return nfp_net_set_config_and_enable(nn);
}

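/**
 * nfp_net_clone_dp() - Clone the current data path configuration
 * @nn:      NFP Net device
 *
 * Allocate a copy of the data path structure to use as a staging area for
 * reconfiguration, clearing the fields which will be recomputed.
 *
 * Return: Cloned data path struct, or NULL on allocation failure.
 */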
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
{
	struct nfp_net_dp *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	*new = nn->dp;

	new->xsk_pools = kmemdup(new->xsk_pools,
				 array_size(nn->max_r_vecs,
					    sizeof(new->xsk_pools)),
				 GFP_KERNEL);
	if (!new->xsk_pools) {
		kfree(new);
		return NULL;
	}

	/* Clear things which need to be recomputed */
	new->fl_bufsz = 0;
	new->tx_rings = NULL;
	new->rx_rings = NULL;
	new->num_r_vecs = 0;
	new->num_stack_tx_rings = 0;
	new->txrwb = NULL;
	new->txrwb_dma = 0;

	return new;
}

static void nfp_net_free_dp(struct nfp_net_dp *dp)
{
	kfree(dp->xsk_pools);
	kfree(dp);
}

static int
nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
		     struct netlink_ext_ack *extack)
{
	unsigned int r, xsk_min_fl_bufsz;

	/* XDP-enabled tests */
	if (!dp->xdp_prog)
		return 0;
	if (dp->fl_bufsz > PAGE_SIZE) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
		return -EINVAL;
	}
	if (dp->num_tx_rings > nn->max_tx_rings) {
		NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
		return -EINVAL;
	}

	xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
	for (r = 0; r < nn->max_r_vecs; r++) {
		if (!dp->xsk_pools[r])
			continue;

		if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
			NL_SET_ERR_MSG_MOD(extack,
					   "XSK buffer pool chunk size too small");
			return -EINVAL;
		}
	}

	return 0;
}

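/**
 * nfp_net_ring_reconfig() - Apply a new data path configuration
 * @nn:      NFP Net device to reconfigure
 * @dp:      New data path configuration, freed before return
 * @extack:  Netlink extended ACK for error reporting
 *
 * If the device is down the new configuration is simply recorded; otherwise
 * new rings are allocated, the device is stopped and the new configuration
 * is swapped in, falling back to the old rings if the firmware rejects it.
 *
 * Return: Negative errno on error, 0 on success.
 */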
nfp_net_ring_reconfig(struct nfp_net * nn,struct nfp_net_dp * dp,struct netlink_ext_ack * extack)1621 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
1622 			  struct netlink_ext_ack *extack)
1623 {
1624 	int r, err;
1625 
1626 	dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
1627 
1628 	dp->num_stack_tx_rings = dp->num_tx_rings;
1629 	if (dp->xdp_prog)
1630 		dp->num_stack_tx_rings -= dp->num_rx_rings;
1631 
1632 	dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
1633 
1634 	err = nfp_net_check_config(nn, dp, extack);
1635 	if (err)
1636 		goto exit_free_dp;
1637 
1638 	if (!netif_running(dp->netdev)) {
1639 		nfp_net_dp_swap(nn, dp);
1640 		err = 0;
1641 		goto exit_free_dp;
1642 	}
1643 
1644 	/* Prepare new rings */
1645 	for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
1646 		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
1647 		if (err) {
1648 			dp->num_r_vecs = r;
1649 			goto err_cleanup_vecs;
1650 		}
1651 	}
1652 
1653 	err = nfp_net_rx_rings_prepare(nn, dp);
1654 	if (err)
1655 		goto err_cleanup_vecs;
1656 
1657 	err = nfp_net_tx_rings_prepare(nn, dp);
1658 	if (err)
1659 		goto err_free_rx;
1660 
1661 	/* Stop device, swap in new rings, try to start the firmware */
1662 	nfp_net_close_stack(nn);
1663 	nfp_net_clear_config_and_disable(nn);
1664 
1665 	err = nfp_net_dp_swap_enable(nn, dp);
1666 	if (err) {
1667 		int err2;
1668 
1669 		nfp_net_clear_config_and_disable(nn);
1670 
1671 		/* Try with old configuration and old rings */
1672 		err2 = nfp_net_dp_swap_enable(nn, dp);
1673 		if (err2)
1674 			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
1675 			       err, err2);
1676 	}
1677 	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
1678 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1679 
1680 	nfp_net_rx_rings_free(dp);
1681 	nfp_net_tx_rings_free(dp);
1682 
1683 	nfp_net_open_stack(nn);
1684 exit_free_dp:
1685 	nfp_net_free_dp(dp);
1686 
1687 	return err;
1688 
1689 err_free_rx:
1690 	nfp_net_rx_rings_free(dp);
1691 err_cleanup_vecs:
1692 	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
1693 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1694 	nfp_net_free_dp(dp);
1695 	return err;
1696 }
1697 
nfp_net_change_mtu(struct net_device * netdev,int new_mtu)1698 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
1699 {
1700 	struct nfp_net *nn = netdev_priv(netdev);
1701 	struct nfp_net_dp *dp;
1702 	int err;
1703 
1704 	err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
1705 	if (err)
1706 		return err;
1707 
1708 	dp = nfp_net_clone_dp(nn);
1709 	if (!dp)
1710 		return -ENOMEM;
1711 
1712 	dp->mtu = new_mtu;
1713 
1714 	return nfp_net_ring_reconfig(nn, dp, NULL);
1715 }
1716 
1717 static int
nfp_net_vlan_rx_add_vid(struct net_device * netdev,__be16 proto,u16 vid)1718 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1719 {
1720 	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
1721 	struct nfp_net *nn = netdev_priv(netdev);
1722 	int err;
1723 
1724 	/* Priority tagged packets with vlan id 0 are processed by the
1725 	 * NFP as untagged packets
1726 	 */
1727 	if (!vid)
1728 		return 0;
1729 
1730 	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
1731 	if (err)
1732 		return err;
1733 
1734 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
1735 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
1736 		  ETH_P_8021Q);
1737 
1738 	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
1739 }
1740 
1741 static int
nfp_net_vlan_rx_kill_vid(struct net_device * netdev,__be16 proto,u16 vid)1742 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
1743 {
1744 	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
1745 	struct nfp_net *nn = netdev_priv(netdev);
1746 	int err;
1747 
1748 	/* Priority tagged packets with vlan id 0 are processed by the
1749 	 * NFP as untagged packets
1750 	 */
1751 	if (!vid)
1752 		return 0;
1753 
1754 	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
1755 	if (err)
1756 		return err;
1757 
1758 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
1759 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
1760 		  ETH_P_8021Q);
1761 
1762 	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
1763 }
1764 
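/* Software per-ring counters are sampled under a u64_stats seqcount
 * so that a consistent {packets, bytes, drops/errors} snapshot is
 * obtained even on 32-bit hosts; the loop retries if a writer raced
 * with the read.  Device-level counters from the control BAR are
 * then added on top.
 */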
1765 static void nfp_net_stat64(struct net_device *netdev,
1766 			   struct rtnl_link_stats64 *stats)
1767 {
1768 	struct nfp_net *nn = netdev_priv(netdev);
1769 	int r;
1770 
1771 	/* Collect software stats */
1772 	for (r = 0; r < nn->max_r_vecs; r++) {
1773 		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
1774 		u64 data[3];
1775 		unsigned int start;
1776 
1777 		do {
1778 			start = u64_stats_fetch_begin(&r_vec->rx_sync);
1779 			data[0] = r_vec->rx_pkts;
1780 			data[1] = r_vec->rx_bytes;
1781 			data[2] = r_vec->rx_drops;
1782 		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
1783 		stats->rx_packets += data[0];
1784 		stats->rx_bytes += data[1];
1785 		stats->rx_dropped += data[2];
1786 
1787 		do {
1788 			start = u64_stats_fetch_begin(&r_vec->tx_sync);
1789 			data[0] = r_vec->tx_pkts;
1790 			data[1] = r_vec->tx_bytes;
1791 			data[2] = r_vec->tx_errors;
1792 		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
1793 		stats->tx_packets += data[0];
1794 		stats->tx_bytes += data[1];
1795 		stats->tx_errors += data[2];
1796 	}
1797 
1798 	/* Add in device stats */
1799 	stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
1800 	stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
1801 	stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
1802 
1803 	stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
1804 	stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
1805 }
1806 
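/* Translate changed netdev feature bits into NFP_NET_CFG_CTRL_* bits
 * and push the resulting control word to the firmware.  Note the
 * GNU "a ?: b" extension: where the device advertises a v2
 * capability (e.g. LSO2, TXVLAN_V2) it is preferred over the v1 bit.
 */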
1807 static int nfp_net_set_features(struct net_device *netdev,
1808 				netdev_features_t features)
1809 {
1810 	netdev_features_t changed = netdev->features ^ features;
1811 	struct nfp_net *nn = netdev_priv(netdev);
1812 	u32 new_ctrl;
1813 	int err;
1814 
1815 	/* Assume this is not called with features we have not advertised */
1816 
1817 	new_ctrl = nn->dp.ctrl;
1818 
1819 	if (changed & NETIF_F_RXCSUM) {
1820 		if (features & NETIF_F_RXCSUM)
1821 			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
1822 		else
1823 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
1824 	}
1825 
1826 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1827 		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
1828 			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
1829 		else
1830 			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
1831 	}
1832 
1833 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1834 		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1835 			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
1836 					      NFP_NET_CFG_CTRL_LSO;
1837 		else
1838 			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
1839 	}
1840 
1841 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1842 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1843 			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
1844 				    NFP_NET_CFG_CTRL_RXVLAN;
1845 		else
1846 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
1847 	}
1848 
1849 	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
1850 		if (features & NETIF_F_HW_VLAN_CTAG_TX)
1851 			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
1852 				    NFP_NET_CFG_CTRL_TXVLAN;
1853 		else
1854 			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
1855 	}
1856 
1857 	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
1858 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1859 			new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
1860 		else
1861 			new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
1862 	}
1863 
1864 	if (changed & NETIF_F_HW_VLAN_STAG_RX) {
1865 		if (features & NETIF_F_HW_VLAN_STAG_RX)
1866 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1867 		else
1868 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1869 	}
1870 
1871 	if (changed & NETIF_F_SG) {
1872 		if (features & NETIF_F_SG)
1873 			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
1874 		else
1875 			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
1876 	}
1877 
1878 	err = nfp_port_set_features(netdev, features);
1879 	if (err)
1880 		return err;
1881 
1882 	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
1883 	       netdev->features, features, changed);
1884 
1885 	if (new_ctrl == nn->dp.ctrl)
1886 		return 0;
1887 
1888 	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
1889 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1890 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
1891 	if (err)
1892 		return err;
1893 
1894 	nn->dp.ctrl = new_ctrl;
1895 
1896 	return 0;
1897 }
1898 
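/* C-tag and S-tag stripping cannot be enabled at the same time, so
 * whichever of the two is currently enabled wins and the other is
 * masked out of the requested feature set.
 */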
1899 static netdev_features_t
1900 nfp_net_fix_features(struct net_device *netdev,
1901 		     netdev_features_t features)
1902 {
1903 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1904 	    (features & NETIF_F_HW_VLAN_STAG_RX)) {
1905 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
1906 			features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1907 			netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1908 			netdev_warn(netdev,
1909 				    "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
1910 		} else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
1911 			features &= ~NETIF_F_HW_VLAN_STAG_RX;
1912 			netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
1913 			netdev_warn(netdev,
1914 				    "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
1915 		}
1916 	}
1917 	return features;
1918 }
1919 
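/* Per-skb offload check: for encapsulated traffic only VXLAN and
 * GRE tunnels carrying an inner Ethernet header are supported, and
 * GSO headers must fit the TX descriptor's header-length field.
 */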
1920 static netdev_features_t
1921 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
1922 		       netdev_features_t features)
1923 {
1924 	u8 l4_hdr;
1925 
1926 	/* We can't do TSO over double tagged packets (802.1AD) */
1927 	features &= vlan_features_check(skb, features);
1928 
1929 	if (!skb->encapsulation)
1930 		return features;
1931 
1932 	/* Ensure that inner L4 header offset fits into TX descriptor field */
1933 	if (skb_is_gso(skb)) {
1934 		u32 hdrlen;
1935 
1936 		hdrlen = skb_inner_tcp_all_headers(skb);
1937 
1938 		/* Assume worst case scenario of having longest possible
1939 		 * metadata prepend - 8B
1940 		 */
1941 		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
1942 			features &= ~NETIF_F_GSO_MASK;
1943 	}
1944 
1945 	if (xfrm_offload(skb))
1946 		return features;
1947 
1948 	/* VXLAN/GRE check */
1949 	switch (vlan_get_protocol(skb)) {
1950 	case htons(ETH_P_IP):
1951 		l4_hdr = ip_hdr(skb)->protocol;
1952 		break;
1953 	case htons(ETH_P_IPV6):
1954 		l4_hdr = ipv6_hdr(skb)->nexthdr;
1955 		break;
1956 	default:
1957 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1958 	}
1959 
1960 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1961 	    skb->inner_protocol != htons(ETH_P_TEB) ||
1962 	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
1963 	    (l4_hdr == IPPROTO_UDP &&
1964 	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
1965 	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
1966 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1967 
1968 	return features;
1969 }
1970 
1971 static int
1972 nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
1973 {
1974 	struct nfp_net *nn = netdev_priv(netdev);
1975 	int n;
1976 
1977 	/* If port is defined, devlink_port is registered and devlink core
1978 	 * is taking care of name formatting.
1979 	 */
1980 	if (nn->port)
1981 		return -EOPNOTSUPP;
1982 
1983 	if (nn->dp.is_vf || nn->vnic_no_name)
1984 		return -EOPNOTSUPP;
1985 
1986 	n = snprintf(name, len, "n%d", nn->id);
1987 	if (n >= len)
1988 		return -EINVAL;
1989 
1990 	return 0;
1991 }
1992 
1993 static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
1994 {
1995 	struct bpf_prog *prog = bpf->prog;
1996 	struct nfp_net_dp *dp;
1997 	int err;
1998 
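	/* If the XDP enabled/disabled state is unchanged, just swap
	 * the program pointer in place - no ring reconfig is needed.
	 */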
1999 	if (!prog == !nn->dp.xdp_prog) {
2000 		WRITE_ONCE(nn->dp.xdp_prog, prog);
2001 		xdp_attachment_setup(&nn->xdp, bpf);
2002 		return 0;
2003 	}
2004 
2005 	dp = nfp_net_clone_dp(nn);
2006 	if (!dp)
2007 		return -ENOMEM;
2008 
2009 	dp->xdp_prog = prog;
2010 	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
2011 	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2012 	dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
2013 
2014 	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
2015 	err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
2016 	if (err)
2017 		return err;
2018 
2019 	xdp_attachment_setup(&nn->xdp, bpf);
2020 	return 0;
2021 }
2022 
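/* Hardware offload of the XDP program is delegated to the app
 * (BPF offload); the attachment is tracked separately in xdp_hw.
 */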
2023 static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
2024 {
2025 	int err;
2026 
2027 	err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
2028 	if (err)
2029 		return err;
2030 
2031 	xdp_attachment_setup(&nn->xdp_hw, bpf);
2032 	return 0;
2033 }
2034 
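/* ndo_bpf entry point - dispatches driver/HW program attach and
 * AF_XDP pool setup, and passes anything else through to the app.
 */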
2035 static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
2036 {
2037 	struct nfp_net *nn = netdev_priv(netdev);
2038 
2039 	switch (xdp->command) {
2040 	case XDP_SETUP_PROG:
2041 		return nfp_net_xdp_setup_drv(nn, xdp);
2042 	case XDP_SETUP_PROG_HW:
2043 		return nfp_net_xdp_setup_hw(nn, xdp);
2044 	case XDP_SETUP_XSK_POOL:
2045 		return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
2046 					      xdp->xsk.queue_id);
2047 	default:
2048 		return nfp_app_bpf(nn->app, nn, xdp);
2049 	}
2050 }
2051 
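/* Write the new MAC address to the device first; it is only
 * committed to the netdev once the firmware has acked the MACADDR
 * update.
 */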
2052 static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
2053 {
2054 	struct nfp_net *nn = netdev_priv(netdev);
2055 	struct sockaddr *saddr = addr;
2056 	int err;
2057 
2058 	err = eth_prepare_mac_addr_change(netdev, addr);
2059 	if (err)
2060 		return err;
2061 
2062 	nfp_net_write_mac_addr(nn, saddr->sa_data);
2063 
2064 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
2065 	if (err)
2066 		return err;
2067 
2068 	eth_commit_mac_addr_change(netdev, addr);
2069 
2070 	return 0;
2071 }
2072 
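/* Report VEPA vs VEB operation from the current control word; only
 * devices advertising the VEPA capability support these callbacks.
 */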
2073 static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2074 				  struct net_device *dev, u32 filter_mask,
2075 				  int nlflags)
2076 {
2077 	struct nfp_net *nn = netdev_priv(dev);
2078 	u16 mode;
2079 
2080 	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
2081 		return -EOPNOTSUPP;
2082 
2083 	mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ?
2084 	       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
2085 
2086 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
2087 				       nlflags, filter_mask, NULL);
2088 }
2089 
2090 static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2091 				  u16 flags, struct netlink_ext_ack *extack)
2092 {
2093 	struct nfp_net *nn = netdev_priv(dev);
2094 	struct nlattr *attr, *br_spec;
2095 	int rem, err;
2096 	u32 new_ctrl;
2097 	u16 mode;
2098 
2099 	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
2100 		return -EOPNOTSUPP;
2101 
2102 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
2103 	if (!br_spec)
2104 		return -EINVAL;
2105 
2106 	nla_for_each_nested(attr, br_spec, rem) {
2107 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
2108 			continue;
2109 
2110 		new_ctrl = nn->dp.ctrl;
2111 		mode = nla_get_u16(attr);
2112 		if (mode == BRIDGE_MODE_VEPA)
2113 			new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
2114 		else if (mode == BRIDGE_MODE_VEB)
2115 			new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
2116 		else
2117 			return -EOPNOTSUPP;
2118 
2119 		if (new_ctrl == nn->dp.ctrl)
2120 			return 0;
2121 
2122 		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2123 		err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
2124 		if (!err)
2125 			nn->dp.ctrl = new_ctrl;
2126 
2127 		return err;
2128 	}
2129 
2130 	return -EINVAL;
2131 }
2132 
2133 const struct net_device_ops nfp_nfd3_netdev_ops = {
2134 	.ndo_init		= nfp_app_ndo_init,
2135 	.ndo_uninit		= nfp_app_ndo_uninit,
2136 	.ndo_open		= nfp_net_netdev_open,
2137 	.ndo_stop		= nfp_net_netdev_close,
2138 	.ndo_start_xmit		= nfp_net_tx,
2139 	.ndo_get_stats64	= nfp_net_stat64,
2140 	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
2141 	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
2142 	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
2143 	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
2144 	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
2145 	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
2146 	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
2147 	.ndo_get_vf_config	= nfp_app_get_vf_config,
2148 	.ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
2149 	.ndo_setup_tc		= nfp_port_setup_tc,
2150 	.ndo_tx_timeout		= nfp_net_tx_timeout,
2151 	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
2152 	.ndo_change_mtu		= nfp_net_change_mtu,
2153 	.ndo_set_mac_address	= nfp_net_set_mac_address,
2154 	.ndo_set_features	= nfp_net_set_features,
2155 	.ndo_fix_features	= nfp_net_fix_features,
2156 	.ndo_features_check	= nfp_net_features_check,
2157 	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
2158 	.ndo_bpf		= nfp_net_xdp,
2159 	.ndo_xsk_wakeup		= nfp_net_xsk_wakeup,
2160 	.ndo_bridge_getlink     = nfp_net_bridge_getlink,
2161 	.ndo_bridge_setlink     = nfp_net_bridge_setlink,
2162 };
2163 
2164 const struct net_device_ops nfp_nfdk_netdev_ops = {
2165 	.ndo_init		= nfp_app_ndo_init,
2166 	.ndo_uninit		= nfp_app_ndo_uninit,
2167 	.ndo_open		= nfp_net_netdev_open,
2168 	.ndo_stop		= nfp_net_netdev_close,
2169 	.ndo_start_xmit		= nfp_net_tx,
2170 	.ndo_get_stats64	= nfp_net_stat64,
2171 	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
2172 	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
2173 	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
2174 	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
2175 	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
2176 	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
2177 	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
2178 	.ndo_get_vf_config	= nfp_app_get_vf_config,
2179 	.ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
2180 	.ndo_setup_tc		= nfp_port_setup_tc,
2181 	.ndo_tx_timeout		= nfp_net_tx_timeout,
2182 	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
2183 	.ndo_change_mtu		= nfp_net_change_mtu,
2184 	.ndo_set_mac_address	= nfp_net_set_mac_address,
2185 	.ndo_set_features	= nfp_net_set_features,
2186 	.ndo_fix_features	= nfp_net_fix_features,
2187 	.ndo_features_check	= nfp_net_features_check,
2188 	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
2189 	.ndo_bpf		= nfp_net_xdp,
2190 	.ndo_bridge_getlink     = nfp_net_bridge_getlink,
2191 	.ndo_bridge_setlink     = nfp_net_bridge_setlink,
2192 };
2193 
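/* VXLAN ports are stored two to a 32-bit register, so the table is
 * walked in pairs and must have an even number of entries (enforced
 * by the BUILD_BUG_ON below).
 */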
2194 static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
2195 {
2196 	struct nfp_net *nn = netdev_priv(netdev);
2197 	int i;
2198 
2199 	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
2200 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2201 		struct udp_tunnel_info ti0, ti1;
2202 
2203 		udp_tunnel_nic_get_port(netdev, table, i, &ti0);
2204 		udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
2205 
2206 		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
2207 			  be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
2208 	}
2209 
2210 	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
2211 }
2212 
2213 static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
2214 	.sync_table     = nfp_udp_tunnel_sync,
2215 	.flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
2216 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
2217 	.tables         = {
2218 		{
2219 			.n_entries      = NFP_NET_N_VXLAN_PORTS,
2220 			.tunnel_types   = UDP_TUNNEL_TYPE_VXLAN,
2221 		},
2222 	},
2223 };
2224 
2225 /**
2226  * nfp_net_info() - Print general info about the NIC
2227  * @nn:      NFP Net device to get info about
2228  */
2229 void nfp_net_info(struct nfp_net *nn)
2230 {
2231 	nn_info(nn, "NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
2232 		nn->dp.is_vf ? "VF " : "",
2233 		nn->dp.num_tx_rings, nn->max_tx_rings,
2234 		nn->dp.num_rx_rings, nn->max_rx_rings);
2235 	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
2236 		nn->fw_ver.extend, nn->fw_ver.class,
2237 		nn->fw_ver.major, nn->fw_ver.minor,
2238 		nn->max_mtu);
2239 	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2240 		nn->cap,
2241 		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
2242 		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
2243 		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
2244 		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
2245 		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
2246 		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
2247 		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
2248 		nn->cap & NFP_NET_CFG_CTRL_RXQINQ   ? "RXQINQ "   : "",
2249 		nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 "   : "",
2250 		nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2   ? "TXVLANv2 "   : "",
2251 		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
2252 		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
2253 		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO1 "     : "",
2254 		nn->cap & NFP_NET_CFG_CTRL_LSO2     ? "TSO2 "     : "",
2255 		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS1 "     : "",
2256 		nn->cap & NFP_NET_CFG_CTRL_RSS2     ? "RSS2 "     : "",
2257 		nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
2258 		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
2259 		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
2260 		nn->cap & NFP_NET_CFG_CTRL_TXRWB    ? "TXRWB "    : "",
2261 		nn->cap & NFP_NET_CFG_CTRL_VEPA     ? "VEPA "     : "",
2262 		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
2263 		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "	  : "",
2264 		nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
2265 						      "RXCSUM_COMPLETE " : "",
2266 		nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
2267 		nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
2268 		nfp_app_extra_cap(nn->app, nn));
2269 }
2270 
2271 /**
2272  * nfp_net_alloc() - Allocate netdev and related structure
2273  * @pdev:         PCI device
2274  * @dev_info:     NFP ASIC params
2275  * @ctrl_bar:     PCI IOMEM with vNIC config memory
2276  * @needs_netdev: Whether to allocate a netdev for this vNIC
2277  * @max_tx_rings: Maximum number of TX rings supported by device
2278  * @max_rx_rings: Maximum number of RX rings supported by device
2279  *
2280  * This function allocates a netdev device and fills in the initial
2281  * part of the &struct nfp_net structure.  In the case of a control
2282  * vNIC the nfp_net structure is allocated without a netdev.
2283  *
2284  * Return: NFP Net device structure, or ERR_PTR on error.
2285  */
2286 struct nfp_net *
2287 nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
2288 	      void __iomem *ctrl_bar, bool needs_netdev,
2289 	      unsigned int max_tx_rings, unsigned int max_rx_rings)
2290 {
2291 	u64 dma_mask = dma_get_mask(&pdev->dev);
2292 	struct nfp_net *nn;
2293 	int err;
2294 
2295 	if (needs_netdev) {
2296 		struct net_device *netdev;
2297 
2298 		netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
2299 					    max_tx_rings, max_rx_rings);
2300 		if (!netdev)
2301 			return ERR_PTR(-ENOMEM);
2302 
2303 		SET_NETDEV_DEV(netdev, &pdev->dev);
2304 		nn = netdev_priv(netdev);
2305 		nn->dp.netdev = netdev;
2306 	} else {
2307 		nn = vzalloc(sizeof(*nn));
2308 		if (!nn)
2309 			return ERR_PTR(-ENOMEM);
2310 	}
2311 
2312 	nn->dp.dev = &pdev->dev;
2313 	nn->dp.ctrl_bar = ctrl_bar;
2314 	nn->dev_info = dev_info;
2315 	nn->pdev = pdev;
2316 	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
2317 
2318 	switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
2319 	case NFP_NET_CFG_VERSION_DP_NFD3:
2320 		nn->dp.ops = &nfp_nfd3_ops;
2321 		break;
2322 	case NFP_NET_CFG_VERSION_DP_NFDK:
2323 		if (nn->fw_ver.major < 5) {
2324 			dev_err(&pdev->dev,
2325 				"NFDK must use ABI 5 or newer, found: %d\n",
2326 				nn->fw_ver.major);
2327 			err = -EINVAL;
2328 			goto err_free_nn;
2329 		}
2330 		nn->dp.ops = &nfp_nfdk_ops;
2331 		break;
2332 	default:
2333 		err = -EINVAL;
2334 		goto err_free_nn;
2335 	}
2336 
2337 	if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
2338 		dev_err(&pdev->dev,
2339 			"DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
2340 			nn->dp.ops->dma_mask, dma_mask);
2341 		err = -EINVAL;
2342 		goto err_free_nn;
2343 	}
2344 
2345 	nn->max_tx_rings = max_tx_rings;
2346 	nn->max_rx_rings = max_rx_rings;
2347 
2348 	nn->dp.num_tx_rings = min_t(unsigned int,
2349 				    max_tx_rings, num_online_cpus());
2350 	nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
2351 				 netif_get_num_default_rss_queues());
2352 
2353 	nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
2354 	nn->dp.num_r_vecs = min_t(unsigned int,
2355 				  nn->dp.num_r_vecs, num_online_cpus());
2356 	nn->max_r_vecs = nn->dp.num_r_vecs;
2357 
2358 	nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools[0]),
2359 				   GFP_KERNEL);
2360 	if (!nn->dp.xsk_pools) {
2361 		err = -ENOMEM;
2362 		goto err_free_nn;
2363 	}
2364 
2365 	nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
2366 	nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
2367 
2368 	sema_init(&nn->bar_lock, 1);
2369 
2370 	spin_lock_init(&nn->reconfig_lock);
2371 	spin_lock_init(&nn->link_status_lock);
2372 
2373 	timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
2374 
2375 	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
2376 				     &nn->tlv_caps);
2377 	if (err)
2378 		goto err_free_nn;
2379 
2380 	err = nfp_ccm_mbox_alloc(nn);
2381 	if (err)
2382 		goto err_free_nn;
2383 
2384 	return nn;
2385 
2386 err_free_nn:
2387 	if (nn->dp.netdev)
2388 		free_netdev(nn->dp.netdev);
2389 	else
2390 		vfree(nn);
2391 	return ERR_PTR(err);
2392 }
2393 
2394 /**
2395  * nfp_net_free() - Undo what nfp_net_alloc() did
2396  * @nn:      NFP Net device to free
2397  */
2398 void nfp_net_free(struct nfp_net *nn)
2399 {
2400 	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
2401 	nfp_ccm_mbox_free(nn);
2402 
2403 	kfree(nn->dp.xsk_pools);
2404 	if (nn->dp.netdev)
2405 		free_netdev(nn->dp.netdev);
2406 	else
2407 		vfree(nn);
2408 }
2409 
2410 /**
2411  * nfp_net_rss_key_sz() - Get current size of the RSS key
2412  * @nn:		NFP Net device instance
2413  *
2414  * Return: size of the RSS key for currently selected hash function.
2415  */
2416 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
2417 {
2418 	switch (nn->rss_hfunc) {
2419 	case ETH_RSS_HASH_TOP:
2420 		return NFP_NET_CFG_RSS_KEY_SZ;
2421 	case ETH_RSS_HASH_XOR:
2422 		return 0;
2423 	case ETH_RSS_HASH_CRC32:
2424 		return 4;
2425 	}
2426 
2427 	nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
2428 	return 0;
2429 }
2430 
2431 /**
2432  * nfp_net_rss_init() - Set the initial RSS parameters
2433  * @nn:	     NFP Net device to reconfigure
2434  */
2435 static void nfp_net_rss_init(struct nfp_net *nn)
2436 {
2437 	unsigned long func_bit, rss_cap_hfunc;
2438 	u32 reg;
2439 
2440 	/* Read the RSS function capability and select first supported func */
2441 	reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
2442 	rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
2443 	if (!rss_cap_hfunc)
2444 		rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
2445 					  NFP_NET_CFG_RSS_TOEPLITZ);
2446 
2447 	func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
2448 	if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
2449 		dev_warn(nn->dp.dev,
2450 			 "Bad RSS config, defaulting to Toeplitz hash\n");
2451 		func_bit = ETH_RSS_HASH_TOP_BIT;
2452 	}
2453 	nn->rss_hfunc = 1 << func_bit;
2454 
2455 	netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
2456 
2457 	nfp_net_rss_init_itbl(nn);
2458 
2459 	/* Enable IPv4/IPv6 TCP by default */
2460 	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
2461 		      NFP_NET_CFG_RSS_IPV6_TCP |
2462 		      NFP_NET_CFG_RSS_IPV4_UDP |
2463 		      NFP_NET_CFG_RSS_IPV6_UDP |
2464 		      FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
2465 		      NFP_NET_CFG_RSS_MASK;
2466 }
2467 
2468 /**
2469  * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
2470  * @nn:	     NFP Net device to reconfigure
2471  */
2472 static void nfp_net_irqmod_init(struct nfp_net *nn)
2473 {
2474 	nn->rx_coalesce_usecs      = 50;
2475 	nn->rx_coalesce_max_frames = 64;
2476 	nn->tx_coalesce_usecs      = 50;
2477 	nn->tx_coalesce_max_frames = 64;
2478 
2479 	nn->rx_coalesce_adapt_on   = true;
2480 	nn->tx_coalesce_adapt_on   = true;
2481 }
2482 
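/* Map device capabilities onto netdev features; by default most of
 * what the firmware advertises is enabled.
 */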
2483 static void nfp_net_netdev_init(struct nfp_net *nn)
2484 {
2485 	struct net_device *netdev = nn->dp.netdev;
2486 
2487 	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
2488 
2489 	netdev->mtu = nn->dp.mtu;
2490 
2491 	/* Advertise/enable offloads based on capabilities
2492 	 *
2493 	 * Note: netdev->features show the currently enabled features
2494 	 * and netdev->hw_features advertises which features are
2495 	 * supported.  By default we enable most features.
2496 	 */
2497 	if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
2498 		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2499 
2500 	netdev->hw_features = NETIF_F_HIGHDMA;
2501 	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
2502 		netdev->hw_features |= NETIF_F_RXCSUM;
2503 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
2504 	}
2505 	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
2506 		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2507 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
2508 	}
2509 	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
2510 		netdev->hw_features |= NETIF_F_SG;
2511 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
2512 	}
2513 	if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
2514 	    nn->cap & NFP_NET_CFG_CTRL_LSO2) {
2515 		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2516 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
2517 					 NFP_NET_CFG_CTRL_LSO;
2518 	}
2519 	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
2520 		netdev->hw_features |= NETIF_F_RXHASH;
2521 
2522 #ifdef CONFIG_NFP_NET_IPSEC
2523 	if (nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC)
2524 		netdev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
2525 #endif
2526 
2527 	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
2528 		if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
2529 			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
2530 					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
2531 					       NETIF_F_GSO_PARTIAL;
2532 			netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
2533 		}
2534 		netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
2535 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
2536 	}
2537 	if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
2538 		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
2539 			netdev->hw_features |= NETIF_F_GSO_GRE;
2540 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
2541 	}
2542 	if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
2543 		netdev->hw_enc_features = netdev->hw_features;
2544 
2545 	netdev->vlan_features = netdev->hw_features;
2546 
2547 	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
2548 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
2549 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
2550 			       NFP_NET_CFG_CTRL_RXVLAN;
2551 	}
2552 	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
2553 		if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
2554 			nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
2555 		} else {
2556 			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
2557 			nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
2558 				       NFP_NET_CFG_CTRL_TXVLAN;
2559 		}
2560 	}
2561 	if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
2562 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2563 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
2564 	}
2565 	if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
2566 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
2567 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
2568 	}
2569 
2570 	netdev->features = netdev->hw_features;
2571 
2572 	if (nfp_app_has_tc(nn->app) && nn->port)
2573 		netdev->hw_features |= NETIF_F_HW_TC;
2574 
2575 	/* C-Tag strip and S-Tag strip can't be supported simultaneously,
2576 	 * so enable C-Tag strip and disable S-Tag strip by default.
2577 	 */
2578 	netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
2579 	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
2580 
2581 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
2582 	if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
2583 		netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;
2584 
2585 	/* Finalise the netdev setup */
2586 	switch (nn->dp.ops->version) {
2587 	case NFP_NFD_VER_NFD3:
2588 		netdev->netdev_ops = &nfp_nfd3_netdev_ops;
2589 		netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
2590 		netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
2591 		break;
2592 	case NFP_NFD_VER_NFDK:
2593 		netdev->netdev_ops = &nfp_nfdk_netdev_ops;
2594 		break;
2595 	}
2596 
2597 	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
2598 
2599 	/* MTU range: 68 - hw-specific max */
2600 	netdev->min_mtu = ETH_MIN_MTU;
2601 	netdev->max_mtu = nn->max_mtu;
2602 
2603 	netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
2604 
2605 	netif_carrier_off(netdev);
2606 
2607 	nfp_net_set_ethtool_ops(netdev);
2608 }
2609 
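/* Read the read-only capability fields from the control BAR and
 * sanitize them for the datapath version and vNIC type in use.
 */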
2610 static int nfp_net_read_caps(struct nfp_net *nn)
2611 {
2612 	/* Get some of the read-only fields from the BAR */
2613 	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
2614 	nn->cap_w1 = nn_readl(nn, NFP_NET_CFG_CAP_WORD1);
2615 	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
2616 
2617 	/* ABI 4.x and ctrl vNICs always use chained metadata; in other cases
2618 	 * we allow use of non-chained metadata if RSS(v1) is the only
2619 	 * advertised capability requiring metadata.
2620 	 */
2621 	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
2622 					 !nn->dp.netdev ||
2623 					 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
2624 					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
2625 	/* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
2626 	 * it has the same meaning as RSSv2.
2627 	 */
2628 	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
2629 		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
2630 
2631 	/* Determine RX packet/metadata boundary offset */
2632 	if (nn->fw_ver.major >= 2) {
2633 		u32 reg;
2634 
2635 		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
2636 		if (reg > NFP_NET_MAX_PREPEND) {
2637 			nn_err(nn, "Invalid rx offset: %d\n", reg);
2638 			return -EINVAL;
2639 		}
2640 		nn->dp.rx_offset = reg;
2641 	} else {
2642 		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
2643 	}
2644 
2645 	/* Mask out NFD-version-specific features */
2646 	nn->cap &= nn->dp.ops->cap_mask;
2647 
2648 	/* For control vNICs mask out the capabilities app doesn't want. */
2649 	if (!nn->dp.netdev)
2650 		nn->cap &= nn->app->type->ctrl_cap_mask;
2651 
2652 	return 0;
2653 }
2654 
2655 /**
2656  * nfp_net_init() - Initialise/finalise the nfp_net structure
2657  * @nn:		NFP Net device structure
2658  *
2659  * Return: 0 on success or negative errno on error.
2660  */
2661 int nfp_net_init(struct nfp_net *nn)
2662 {
2663 	int err;
2664 
2665 	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
2666 
2667 	err = nfp_net_read_caps(nn);
2668 	if (err)
2669 		return err;
2670 
2671 	/* Set default MTU and Freelist buffer size */
2672 	if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
2673 		nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
2674 	} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
2675 		nn->dp.mtu = nn->max_mtu;
2676 	} else {
2677 		nn->dp.mtu = NFP_NET_DEFAULT_MTU;
2678 	}
2679 	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
2680 
2681 	if (nfp_app_ctrl_uses_data_vnics(nn->app))
2682 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;
2683 
2684 	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
2685 		nfp_net_rss_init(nn);
2686 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
2687 					 NFP_NET_CFG_CTRL_RSS;
2688 	}
2689 
2690 	/* Allow L2 Broadcast and Multicast through by default, if supported */
2691 	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
2692 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
2693 
2694 	/* Allow IRQ moderation, if supported */
2695 	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
2696 		nfp_net_irqmod_init(nn);
2697 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
2698 	}
2699 
2700 	/* Enable TX pointer writeback, if supported */
2701 	if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
2702 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;
2703 
2704 	if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
2705 		nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER;
2706 
2707 	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
2708 	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
2709 
2710 	/* Make sure the FW knows the netdev is supposed to be disabled here */
2711 	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
2712 	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2713 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2714 	nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, 0);
2715 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
2716 				   NFP_NET_CFG_UPDATE_GEN);
2717 	if (err)
2718 		return err;
2719 
2720 	if (nn->dp.netdev) {
2721 		nfp_net_netdev_init(nn);
2722 
2723 		err = nfp_ccm_mbox_init(nn);
2724 		if (err)
2725 			return err;
2726 
2727 		err = nfp_net_tls_init(nn);
2728 		if (err)
2729 			goto err_clean_mbox;
2730 
2731 		nfp_net_ipsec_init(nn);
2732 	}
2733 
2734 	nfp_net_vecs_init(nn);
2735 
2736 	if (!nn->dp.netdev)
2737 		return 0;
2738 
2739 	spin_lock_init(&nn->mbox_amsg.lock);
2740 	INIT_LIST_HEAD(&nn->mbox_amsg.list);
2741 	INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
2742 
2743 	return register_netdev(nn->dp.netdev);
2744 
2745 err_clean_mbox:
2746 	nfp_ccm_mbox_clean(nn);
2747 	return err;
2748 }
2749 
2750 /**
2751  * nfp_net_clean() - Undo what nfp_net_init() did.
2752  * @nn:		NFP Net device structure
2753  */
2754 void nfp_net_clean(struct nfp_net *nn)
2755 {
2756 	if (!nn->dp.netdev)
2757 		return;
2758 
2759 	unregister_netdev(nn->dp.netdev);
2760 	nfp_net_ipsec_clean(nn);
2761 	nfp_ccm_mbox_clean(nn);
2762 	flush_work(&nn->mbox_amsg.work);
2763 	nfp_net_reconfig_wait_posted(nn);
2764 }
2765