196de2506SJakub Kicinski // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
262d03330SJakub Kicinski /* Copyright (C) 2015-2019 Netronome Systems, Inc. */
34c352362SJakub Kicinski 
44c352362SJakub Kicinski /*
54c352362SJakub Kicinski  * nfp_net_common.c
64c352362SJakub Kicinski  * Netronome network device driver: Common functions between PF and VF
74c352362SJakub Kicinski  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
84c352362SJakub Kicinski  *          Jason McMullan <jason.mcmullan@netronome.com>
94c352362SJakub Kicinski  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
104c352362SJakub Kicinski  *          Brad Petrus <brad.petrus@netronome.com>
114c352362SJakub Kicinski  *          Chris Telfer <chris.telfer@netronome.com>
124c352362SJakub Kicinski  */
134c352362SJakub Kicinski 
149ff304bfSJakub Kicinski #include <linux/bitfield.h>
15ecd63a02SJakub Kicinski #include <linux/bpf.h>
164c352362SJakub Kicinski #include <linux/module.h>
174c352362SJakub Kicinski #include <linux/kernel.h>
184c352362SJakub Kicinski #include <linux/init.h>
194c352362SJakub Kicinski #include <linux/fs.h>
204c352362SJakub Kicinski #include <linux/netdevice.h>
214c352362SJakub Kicinski #include <linux/etherdevice.h>
224c352362SJakub Kicinski #include <linux/interrupt.h>
234c352362SJakub Kicinski #include <linux/ip.h>
244c352362SJakub Kicinski #include <linux/ipv6.h>
2546627170SJakub Kicinski #include <linux/mm.h>
265ea14712SJakub Kicinski #include <linux/overflow.h>
27c0f031bcSJakub Kicinski #include <linux/page_ref.h>
284c352362SJakub Kicinski #include <linux/pci.h>
294c352362SJakub Kicinski #include <linux/pci_regs.h>
304c352362SJakub Kicinski #include <linux/ethtool.h>
314c352362SJakub Kicinski #include <linux/log2.h>
324c352362SJakub Kicinski #include <linux/if_vlan.h>
33be801411SYinjun Zhang #include <linux/if_bridge.h>
344c352362SJakub Kicinski #include <linux/random.h>
35a7b1ad08SJakub Kicinski #include <linux/vmalloc.h>
364c352362SJakub Kicinski #include <linux/ktime.h>
374c352362SJakub Kicinski 
38c3991d39SDirk van der Merwe #include <net/tls.h>
394c352362SJakub Kicinski #include <net/vxlan.h>
409c91a365SNiklas Söderlund #include <net/xdp_sock_drv.h>
411cf78d4cSHuanhuan Wang #include <net/xfrm.h>
424c352362SJakub Kicinski 
43e900db70SJakub Kicinski #include "nfpcore/nfp_dev.h"
44ce22f5a2SJakub Kicinski #include "nfpcore/nfp_nsp.h"
45e2c7114aSJakub Kicinski #include "ccm.h"
46bb45e51cSJakub Kicinski #include "nfp_app.h"
474c352362SJakub Kicinski #include "nfp_net_ctrl.h"
484c352362SJakub Kicinski #include "nfp_net.h"
4962d03330SJakub Kicinski #include "nfp_net_dp.h"
5025528d90SPablo Cascón #include "nfp_net_sriov.h"
516402528bSNiklas Söderlund #include "nfp_net_xsk.h"
52eb488c26SJakub Kicinski #include "nfp_port.h"
53232eeb1fSJakub Kicinski #include "crypto/crypto.h"
546a35ddc5SJakub Kicinski #include "crypto/fw.h"
554c352362SJakub Kicinski 
56cc7eab25SYinjun Zhang static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr);
57cc7eab25SYinjun Zhang 
584c352362SJakub Kicinski /**
594c352362SJakub Kicinski  * nfp_net_get_fw_version() - Read and parse the FW version
604c352362SJakub Kicinski  * @fw_ver:	Output fw_version structure to read to
614c352362SJakub Kicinski  * @ctrl_bar:	Mapped address of the control BAR
624c352362SJakub Kicinski  */
nfp_net_get_fw_version(struct nfp_net_fw_version * fw_ver,void __iomem * ctrl_bar)634c352362SJakub Kicinski void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
644c352362SJakub Kicinski 			    void __iomem *ctrl_bar)
654c352362SJakub Kicinski {
664c352362SJakub Kicinski 	u32 reg;
674c352362SJakub Kicinski 
684c352362SJakub Kicinski 	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
694c352362SJakub Kicinski 	put_unaligned_le32(reg, fw_ver);
704c352362SJakub Kicinski }
714c352362SJakub Kicinski 
nfp_qcp_queue_offset(const struct nfp_dev_info * dev_info,u16 queue)72e900db70SJakub Kicinski u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
73e900db70SJakub Kicinski {
74e900db70SJakub Kicinski 	queue &= dev_info->qc_idx_mask;
75e900db70SJakub Kicinski 	return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
76e900db70SJakub Kicinski }
77e900db70SJakub Kicinski 
783d780b92SJakub Kicinski /* Firmware reconfig
793d780b92SJakub Kicinski  *
803d780b92SJakub Kicinski  * Firmware reconfig may take a while so we have two versions of it -
813d780b92SJakub Kicinski  * synchronous and asynchronous (posted).  All synchronous callers are holding
823d780b92SJakub Kicinski  * RTNL so we don't have to worry about serializing them.
833d780b92SJakub Kicinski  */
nfp_net_reconfig_start(struct nfp_net * nn,u32 update)843d780b92SJakub Kicinski static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
853d780b92SJakub Kicinski {
863d780b92SJakub Kicinski 	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
873d780b92SJakub Kicinski 	/* ensure update is written before pinging HW */
883d780b92SJakub Kicinski 	nn_pci_flush(nn);
893d780b92SJakub Kicinski 	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
906db3a9dcSJakub Kicinski 	nn->reconfig_in_progress_update = update;
913d780b92SJakub Kicinski }
923d780b92SJakub Kicinski 
933d780b92SJakub Kicinski /* Pass 0 as update to run posted reconfigs. */
nfp_net_reconfig_start_async(struct nfp_net * nn,u32 update)943d780b92SJakub Kicinski static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
953d780b92SJakub Kicinski {
963d780b92SJakub Kicinski 	update |= nn->reconfig_posted;
973d780b92SJakub Kicinski 	nn->reconfig_posted = 0;
983d780b92SJakub Kicinski 
993d780b92SJakub Kicinski 	nfp_net_reconfig_start(nn, update);
1003d780b92SJakub Kicinski 
1013d780b92SJakub Kicinski 	nn->reconfig_timer_active = true;
1023d780b92SJakub Kicinski 	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
1033d780b92SJakub Kicinski }
1043d780b92SJakub Kicinski 
/* Check whether the FW has acked the in-flight reconfig.
 * Returns true when done (cleanly, with error, or on final timeout check).
 */
static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 update_word;

	update_word = nn_readl(nn, NFP_NET_CFG_UPDATE);
	if (!update_word)
		return true;

	if (update_word & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       update_word, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	}

	if (last_check) {
		nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       update_word, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	}

	return false;
}
1263d780b92SJakub Kicinski 
__nfp_net_reconfig_wait(struct nfp_net * nn,unsigned long deadline)127e6471828SDirk van der Merwe static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
1283d780b92SJakub Kicinski {
1293d780b92SJakub Kicinski 	bool timed_out = false;
130e6471828SDirk van der Merwe 	int i;
1313d780b92SJakub Kicinski 
132e6471828SDirk van der Merwe 	/* Poll update field, waiting for NFP to ack the config.
133e6471828SDirk van der Merwe 	 * Do an opportunistic wait-busy loop, afterward sleep.
134e6471828SDirk van der Merwe 	 */
135e6471828SDirk van der Merwe 	for (i = 0; i < 50; i++) {
136e6471828SDirk van der Merwe 		if (nfp_net_reconfig_check_done(nn, false))
137e6471828SDirk van der Merwe 			return false;
138e6471828SDirk van der Merwe 		udelay(4);
139e6471828SDirk van der Merwe 	}
140e6471828SDirk van der Merwe 
1413d780b92SJakub Kicinski 	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
142e6471828SDirk van der Merwe 		usleep_range(250, 500);
1433d780b92SJakub Kicinski 		timed_out = time_is_before_eq_jiffies(deadline);
1443d780b92SJakub Kicinski 	}
1453d780b92SJakub Kicinski 
146e6471828SDirk van der Merwe 	return timed_out;
147e6471828SDirk van der Merwe }
148e6471828SDirk van der Merwe 
nfp_net_reconfig_wait(struct nfp_net * nn,unsigned long deadline)149e6471828SDirk van der Merwe static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
150e6471828SDirk van der Merwe {
151e6471828SDirk van der Merwe 	if (__nfp_net_reconfig_wait(nn, deadline))
152e6471828SDirk van der Merwe 		return -EIO;
153e6471828SDirk van der Merwe 
1543d780b92SJakub Kicinski 	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
1553d780b92SJakub Kicinski 		return -EIO;
1563d780b92SJakub Kicinski 
157e6471828SDirk van der Merwe 	return 0;
1583d780b92SJakub Kicinski }
1593d780b92SJakub Kicinski 
nfp_net_reconfig_timer(struct timer_list * t)1603248f77fSKees Cook static void nfp_net_reconfig_timer(struct timer_list *t)
1613d780b92SJakub Kicinski {
1623248f77fSKees Cook 	struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
1633d780b92SJakub Kicinski 
1643d780b92SJakub Kicinski 	spin_lock_bh(&nn->reconfig_lock);
1653d780b92SJakub Kicinski 
1663d780b92SJakub Kicinski 	nn->reconfig_timer_active = false;
1673d780b92SJakub Kicinski 
1683d780b92SJakub Kicinski 	/* If sync caller is present it will take over from us */
1693d780b92SJakub Kicinski 	if (nn->reconfig_sync_present)
1703d780b92SJakub Kicinski 		goto done;
1713d780b92SJakub Kicinski 
1723d780b92SJakub Kicinski 	/* Read reconfig status and report errors */
1733d780b92SJakub Kicinski 	nfp_net_reconfig_check_done(nn, true);
1743d780b92SJakub Kicinski 
1753d780b92SJakub Kicinski 	if (nn->reconfig_posted)
1763d780b92SJakub Kicinski 		nfp_net_reconfig_start_async(nn, 0);
1773d780b92SJakub Kicinski done:
1783d780b92SJakub Kicinski 	spin_unlock_bh(&nn->reconfig_lock);
1793d780b92SJakub Kicinski }
1803d780b92SJakub Kicinski 
1813d780b92SJakub Kicinski /**
1823d780b92SJakub Kicinski  * nfp_net_reconfig_post() - Post async reconfig request
1833d780b92SJakub Kicinski  * @nn:      NFP Net device to reconfigure
1843d780b92SJakub Kicinski  * @update:  The value for the update field in the BAR config
1853d780b92SJakub Kicinski  *
1863d780b92SJakub Kicinski  * Record FW reconfiguration request.  Reconfiguration will be kicked off
1873d780b92SJakub Kicinski  * whenever reconfiguration machinery is idle.  Multiple requests can be
1883d780b92SJakub Kicinski  * merged together!
1893d780b92SJakub Kicinski  */
nfp_net_reconfig_post(struct nfp_net * nn,u32 update)1903d780b92SJakub Kicinski static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
1913d780b92SJakub Kicinski {
1923d780b92SJakub Kicinski 	spin_lock_bh(&nn->reconfig_lock);
1933d780b92SJakub Kicinski 
1943d780b92SJakub Kicinski 	/* Sync caller will kick off async reconf when it's done, just post */
1953d780b92SJakub Kicinski 	if (nn->reconfig_sync_present) {
1963d780b92SJakub Kicinski 		nn->reconfig_posted |= update;
1973d780b92SJakub Kicinski 		goto done;
1983d780b92SJakub Kicinski 	}
1993d780b92SJakub Kicinski 
2003d780b92SJakub Kicinski 	/* Opportunistically check if the previous command is done */
2013d780b92SJakub Kicinski 	if (!nn->reconfig_timer_active ||
2023d780b92SJakub Kicinski 	    nfp_net_reconfig_check_done(nn, false))
2033d780b92SJakub Kicinski 		nfp_net_reconfig_start_async(nn, update);
2043d780b92SJakub Kicinski 	else
2053d780b92SJakub Kicinski 		nn->reconfig_posted |= update;
2063d780b92SJakub Kicinski done:
2073d780b92SJakub Kicinski 	spin_unlock_bh(&nn->reconfig_lock);
2083d780b92SJakub Kicinski }
2093d780b92SJakub Kicinski 
nfp_net_reconfig_sync_enter(struct nfp_net * nn)2109ad716b9SJakub Kicinski static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
2119ad716b9SJakub Kicinski {
2129ad716b9SJakub Kicinski 	bool cancelled_timer = false;
2139ad716b9SJakub Kicinski 	u32 pre_posted_requests;
2149ad716b9SJakub Kicinski 
2159ad716b9SJakub Kicinski 	spin_lock_bh(&nn->reconfig_lock);
2169ad716b9SJakub Kicinski 
217e2c7114aSJakub Kicinski 	WARN_ON(nn->reconfig_sync_present);
2189ad716b9SJakub Kicinski 	nn->reconfig_sync_present = true;
2199ad716b9SJakub Kicinski 
2209ad716b9SJakub Kicinski 	if (nn->reconfig_timer_active) {
2219ad716b9SJakub Kicinski 		nn->reconfig_timer_active = false;
2229ad716b9SJakub Kicinski 		cancelled_timer = true;
2239ad716b9SJakub Kicinski 	}
2249ad716b9SJakub Kicinski 	pre_posted_requests = nn->reconfig_posted;
2259ad716b9SJakub Kicinski 	nn->reconfig_posted = 0;
2269ad716b9SJakub Kicinski 
2279ad716b9SJakub Kicinski 	spin_unlock_bh(&nn->reconfig_lock);
2289ad716b9SJakub Kicinski 
2299ad716b9SJakub Kicinski 	if (cancelled_timer) {
2309ad716b9SJakub Kicinski 		del_timer_sync(&nn->reconfig_timer);
2319ad716b9SJakub Kicinski 		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
2329ad716b9SJakub Kicinski 	}
2339ad716b9SJakub Kicinski 
2349ad716b9SJakub Kicinski 	/* Run the posted reconfigs which were issued before we started */
2359ad716b9SJakub Kicinski 	if (pre_posted_requests) {
2369ad716b9SJakub Kicinski 		nfp_net_reconfig_start(nn, pre_posted_requests);
2379ad716b9SJakub Kicinski 		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
2389ad716b9SJakub Kicinski 	}
2399ad716b9SJakub Kicinski }
2409ad716b9SJakub Kicinski 
nfp_net_reconfig_wait_posted(struct nfp_net * nn)2419ad716b9SJakub Kicinski static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
2429ad716b9SJakub Kicinski {
2439ad716b9SJakub Kicinski 	nfp_net_reconfig_sync_enter(nn);
2449ad716b9SJakub Kicinski 
2459ad716b9SJakub Kicinski 	spin_lock_bh(&nn->reconfig_lock);
2469ad716b9SJakub Kicinski 	nn->reconfig_sync_present = false;
2479ad716b9SJakub Kicinski 	spin_unlock_bh(&nn->reconfig_lock);
2489ad716b9SJakub Kicinski }
2499ad716b9SJakub Kicinski 
2504c352362SJakub Kicinski /**
251dd5b2498SJakub Kicinski  * __nfp_net_reconfig() - Reconfigure the firmware
2524c352362SJakub Kicinski  * @nn:      NFP Net device to reconfigure
2534c352362SJakub Kicinski  * @update:  The value for the update field in the BAR config
2544c352362SJakub Kicinski  *
2554c352362SJakub Kicinski  * Write the update word to the BAR and ping the reconfig queue.  The
2564c352362SJakub Kicinski  * poll until the firmware has acknowledged the update by zeroing the
2574c352362SJakub Kicinski  * update word.
2584c352362SJakub Kicinski  *
2594c352362SJakub Kicinski  * Return: Negative errno on error, 0 on success
2604c352362SJakub Kicinski  */
__nfp_net_reconfig(struct nfp_net * nn,u32 update)261232eeb1fSJakub Kicinski int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
2624c352362SJakub Kicinski {
2633d780b92SJakub Kicinski 	int ret;
2644c352362SJakub Kicinski 
2659ad716b9SJakub Kicinski 	nfp_net_reconfig_sync_enter(nn);
2663d780b92SJakub Kicinski 
2673d780b92SJakub Kicinski 	nfp_net_reconfig_start(nn, update);
2683d780b92SJakub Kicinski 	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
2693d780b92SJakub Kicinski 
2703d780b92SJakub Kicinski 	spin_lock_bh(&nn->reconfig_lock);
2713d780b92SJakub Kicinski 
2723d780b92SJakub Kicinski 	if (nn->reconfig_posted)
2733d780b92SJakub Kicinski 		nfp_net_reconfig_start_async(nn, 0);
2743d780b92SJakub Kicinski 
2753d780b92SJakub Kicinski 	nn->reconfig_sync_present = false;
2763d780b92SJakub Kicinski 
2773d780b92SJakub Kicinski 	spin_unlock_bh(&nn->reconfig_lock);
2783d780b92SJakub Kicinski 
2794c352362SJakub Kicinski 	return ret;
2804c352362SJakub Kicinski }
2814c352362SJakub Kicinski 
/* Locked wrapper around __nfp_net_reconfig(). */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int err;

	nn_ctrl_bar_lock(nn);
	err = __nfp_net_reconfig(nn, update);
	nn_ctrl_bar_unlock(nn);

	return err;
}
292dd5b2498SJakub Kicinski 
nfp_net_mbox_lock(struct nfp_net * nn,unsigned int data_size)293dd5b2498SJakub Kicinski int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
294dd5b2498SJakub Kicinski {
295dd5b2498SJakub Kicinski 	if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
296dd5b2498SJakub Kicinski 		nn_err(nn, "mailbox too small for %u of data (%u)\n",
297dd5b2498SJakub Kicinski 		       data_size, nn->tlv_caps.mbox_len);
298dd5b2498SJakub Kicinski 		return -EIO;
299dd5b2498SJakub Kicinski 	}
300dd5b2498SJakub Kicinski 
301dd5b2498SJakub Kicinski 	nn_ctrl_bar_lock(nn);
302dd5b2498SJakub Kicinski 	return 0;
303dd5b2498SJakub Kicinski }
304dd5b2498SJakub Kicinski 
305b64052fcSPablo Cascón /**
306dd5b2498SJakub Kicinski  * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
307b64052fcSPablo Cascón  * @nn:        NFP Net device to reconfigure
308b64052fcSPablo Cascón  * @mbox_cmd:  The value for the mailbox command
309b64052fcSPablo Cascón  *
310b64052fcSPablo Cascón  * Helper function for mailbox updates
311b64052fcSPablo Cascón  *
312b64052fcSPablo Cascón  * Return: Negative errno on error, 0 on success
313b64052fcSPablo Cascón  */
nfp_net_mbox_reconfig(struct nfp_net * nn,u32 mbox_cmd)314dd5b2498SJakub Kicinski int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
315b64052fcSPablo Cascón {
316527d7d1bSJakub Kicinski 	u32 mbox = nn->tlv_caps.mbox_off;
317b64052fcSPablo Cascón 	int ret;
318b64052fcSPablo Cascón 
319527d7d1bSJakub Kicinski 	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
320b64052fcSPablo Cascón 
321dd5b2498SJakub Kicinski 	ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
322b64052fcSPablo Cascón 	if (ret) {
323b64052fcSPablo Cascón 		nn_err(nn, "Mailbox update error\n");
324b64052fcSPablo Cascón 		return ret;
325b64052fcSPablo Cascón 	}
326b64052fcSPablo Cascón 
327527d7d1bSJakub Kicinski 	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
328b64052fcSPablo Cascón }
329b64052fcSPablo Cascón 
/* Post a mailbox command through the async reconfig machinery; pair with
 * nfp_net_mbox_reconfig_wait_posted() to collect the result.
 */
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox_base = nn->tlv_caps.mbox_off;

	nn_writeq(nn, mbox_base + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
}
338e2c7114aSJakub Kicinski 
nfp_net_mbox_reconfig_wait_posted(struct nfp_net * nn)339e2c7114aSJakub Kicinski int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
340e2c7114aSJakub Kicinski {
341e2c7114aSJakub Kicinski 	u32 mbox = nn->tlv_caps.mbox_off;
342e2c7114aSJakub Kicinski 
343e2c7114aSJakub Kicinski 	nfp_net_reconfig_wait_posted(nn);
344e2c7114aSJakub Kicinski 
345e2c7114aSJakub Kicinski 	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
346e2c7114aSJakub Kicinski }
347e2c7114aSJakub Kicinski 
/* Run a mailbox command, then drop the ctrl BAR lock taken earlier by
 * nfp_net_mbox_lock().
 */
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
	int err;

	err = nfp_net_mbox_reconfig(nn, mbox_cmd);
	nn_ctrl_bar_unlock(nn);

	return err;
}
356dd5b2498SJakub Kicinski 
3574c352362SJakub Kicinski /* Interrupt configuration and handling
3584c352362SJakub Kicinski  */
3594c352362SJakub Kicinski 
3604c352362SJakub Kicinski /**
3614c352362SJakub Kicinski  * nfp_net_irqs_alloc() - allocates MSI-X irqs
362fdace6c2SJakub Kicinski  * @pdev:        PCI device structure
363fdace6c2SJakub Kicinski  * @irq_entries: Array to be initialized and used to hold the irq entries
364fdace6c2SJakub Kicinski  * @min_irqs:    Minimal acceptable number of interrupts
365fdace6c2SJakub Kicinski  * @wanted_irqs: Target number of interrupts to allocate
3664c352362SJakub Kicinski  *
3674c352362SJakub Kicinski  * Return: Number of irqs obtained or 0 on error.
3684c352362SJakub Kicinski  */
369fdace6c2SJakub Kicinski unsigned int
nfp_net_irqs_alloc(struct pci_dev * pdev,struct msix_entry * irq_entries,unsigned int min_irqs,unsigned int wanted_irqs)370fdace6c2SJakub Kicinski nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
371fdace6c2SJakub Kicinski 		   unsigned int min_irqs, unsigned int wanted_irqs)
3724c352362SJakub Kicinski {
373fdace6c2SJakub Kicinski 	unsigned int i;
374fdace6c2SJakub Kicinski 	int got_irqs;
3754c352362SJakub Kicinski 
376fdace6c2SJakub Kicinski 	for (i = 0; i < wanted_irqs; i++)
377fdace6c2SJakub Kicinski 		irq_entries[i].entry = i;
3784c352362SJakub Kicinski 
379fdace6c2SJakub Kicinski 	got_irqs = pci_enable_msix_range(pdev, irq_entries,
380fdace6c2SJakub Kicinski 					 min_irqs, wanted_irqs);
381fdace6c2SJakub Kicinski 	if (got_irqs < 0) {
382fdace6c2SJakub Kicinski 		dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
383fdace6c2SJakub Kicinski 			min_irqs, wanted_irqs, got_irqs);
3844c352362SJakub Kicinski 		return 0;
3854c352362SJakub Kicinski 	}
3864c352362SJakub Kicinski 
387fdace6c2SJakub Kicinski 	if (got_irqs < wanted_irqs)
388fdace6c2SJakub Kicinski 		dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
389fdace6c2SJakub Kicinski 			 wanted_irqs, got_irqs);
390fdace6c2SJakub Kicinski 
391fdace6c2SJakub Kicinski 	return got_irqs;
392fdace6c2SJakub Kicinski }
393fdace6c2SJakub Kicinski 
394fdace6c2SJakub Kicinski /**
395fdace6c2SJakub Kicinski  * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
396fdace6c2SJakub Kicinski  * @nn:		 NFP Network structure
397fdace6c2SJakub Kicinski  * @irq_entries: Table of allocated interrupts
398fdace6c2SJakub Kicinski  * @n:		 Size of @irq_entries (number of entries to grab)
399fdace6c2SJakub Kicinski  *
400fdace6c2SJakub Kicinski  * After interrupts are allocated with nfp_net_irqs_alloc() this function
401fdace6c2SJakub Kicinski  * should be called to assign them to a specific netdev (port).
402fdace6c2SJakub Kicinski  */
403fdace6c2SJakub Kicinski void
nfp_net_irqs_assign(struct nfp_net * nn,struct msix_entry * irq_entries,unsigned int n)404fdace6c2SJakub Kicinski nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
405fdace6c2SJakub Kicinski 		    unsigned int n)
406fdace6c2SJakub Kicinski {
40779c12a75SJakub Kicinski 	struct nfp_net_dp *dp = &nn->dp;
40879c12a75SJakub Kicinski 
409b33ae997SJakub Kicinski 	nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
41079c12a75SJakub Kicinski 	dp->num_r_vecs = nn->max_r_vecs;
4114c352362SJakub Kicinski 
412fdace6c2SJakub Kicinski 	memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
4134c352362SJakub Kicinski 
41479c12a75SJakub Kicinski 	if (dp->num_rx_rings > dp->num_r_vecs ||
41579c12a75SJakub Kicinski 	    dp->num_tx_rings > dp->num_r_vecs)
41687232d96SJakub Kicinski 		dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
41779c12a75SJakub Kicinski 			 dp->num_rx_rings, dp->num_tx_rings,
41879c12a75SJakub Kicinski 			 dp->num_r_vecs);
419fdace6c2SJakub Kicinski 
42079c12a75SJakub Kicinski 	dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
42179c12a75SJakub Kicinski 	dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
42279c12a75SJakub Kicinski 	dp->num_stack_tx_rings = dp->num_tx_rings;
4234c352362SJakub Kicinski }
4244c352362SJakub Kicinski 
/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev:        PCI device structure
 *
 * Undoes what @nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct pci_dev *pdev)
{
	pci_disable_msix(pdev);
}
4354c352362SJakub Kicinski 
4364c352362SJakub Kicinski /**
4374c352362SJakub Kicinski  * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
4384c352362SJakub Kicinski  * @irq:      Interrupt
4394c352362SJakub Kicinski  * @data:     Opaque data structure
4404c352362SJakub Kicinski  *
4414c352362SJakub Kicinski  * Return: Indicate if the interrupt has been handled.
4424c352362SJakub Kicinski  */
nfp_net_irq_rxtx(int irq,void * data)4434c352362SJakub Kicinski static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
4444c352362SJakub Kicinski {
4454c352362SJakub Kicinski 	struct nfp_net_r_vector *r_vec = data;
4464c352362SJakub Kicinski 
4479d32e4e7SYinjun Zhang 	/* Currently we cannot tell if it's a rx or tx interrupt,
4489d32e4e7SYinjun Zhang 	 * since dim does not need accurate event_ctr to calculate,
4499d32e4e7SYinjun Zhang 	 * we just use this counter for both rx and tx dim.
4509d32e4e7SYinjun Zhang 	 */
4519d32e4e7SYinjun Zhang 	r_vec->event_ctr++;
4529d32e4e7SYinjun Zhang 
4534c352362SJakub Kicinski 	napi_schedule_irqoff(&r_vec->napi);
4544c352362SJakub Kicinski 
4554c352362SJakub Kicinski 	/* The FW auto-masks any interrupt, either via the MASK bit in
4564c352362SJakub Kicinski 	 * the MSI-X table or via the per entry ICR field.  So there
4574c352362SJakub Kicinski 	 * is no need to disable interrupts here.
4584c352362SJakub Kicinski 	 */
4594c352362SJakub Kicinski 	return IRQ_HANDLED;
4604c352362SJakub Kicinski }
4614c352362SJakub Kicinski 
nfp_ctrl_irq_rxtx(int irq,void * data)46277ece8d5SJakub Kicinski static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
46377ece8d5SJakub Kicinski {
46477ece8d5SJakub Kicinski 	struct nfp_net_r_vector *r_vec = data;
46577ece8d5SJakub Kicinski 
46677ece8d5SJakub Kicinski 	tasklet_schedule(&r_vec->tasklet);
46777ece8d5SJakub Kicinski 
46877ece8d5SJakub Kicinski 	return IRQ_HANDLED;
46977ece8d5SJakub Kicinski }
47077ece8d5SJakub Kicinski 
4714c352362SJakub Kicinski /**
4724c352362SJakub Kicinski  * nfp_net_read_link_status() - Reread link status from control BAR
4734c352362SJakub Kicinski  * @nn:       NFP Network structure
4744c352362SJakub Kicinski  */
nfp_net_read_link_status(struct nfp_net * nn)4754c352362SJakub Kicinski static void nfp_net_read_link_status(struct nfp_net *nn)
4764c352362SJakub Kicinski {
4774c352362SJakub Kicinski 	unsigned long flags;
4784c352362SJakub Kicinski 	bool link_up;
47962fad9e6SYinjun Zhang 	u16 sts;
4804c352362SJakub Kicinski 
4814c352362SJakub Kicinski 	spin_lock_irqsave(&nn->link_status_lock, flags);
4824c352362SJakub Kicinski 
48362fad9e6SYinjun Zhang 	sts = nn_readw(nn, NFP_NET_CFG_STS);
4844c352362SJakub Kicinski 	link_up = !!(sts & NFP_NET_CFG_STS_LINK);
4854c352362SJakub Kicinski 
4864c352362SJakub Kicinski 	if (nn->link_up == link_up)
4874c352362SJakub Kicinski 		goto out;
4884c352362SJakub Kicinski 
4894c352362SJakub Kicinski 	nn->link_up = link_up;
49062fad9e6SYinjun Zhang 	if (nn->port) {
4916d4f8cbaSJakub Kicinski 		set_bit(NFP_PORT_CHANGED, &nn->port->flags);
49262fad9e6SYinjun Zhang 		if (nn->port->link_cb)
49362fad9e6SYinjun Zhang 			nn->port->link_cb(nn->port);
49462fad9e6SYinjun Zhang 	}
4954c352362SJakub Kicinski 
4964c352362SJakub Kicinski 	if (nn->link_up) {
49779c12a75SJakub Kicinski 		netif_carrier_on(nn->dp.netdev);
49879c12a75SJakub Kicinski 		netdev_info(nn->dp.netdev, "NIC Link is Up\n");
4994c352362SJakub Kicinski 	} else {
50079c12a75SJakub Kicinski 		netif_carrier_off(nn->dp.netdev);
50179c12a75SJakub Kicinski 		netdev_info(nn->dp.netdev, "NIC Link is Down\n");
5024c352362SJakub Kicinski 	}
5034c352362SJakub Kicinski out:
5044c352362SJakub Kicinski 	spin_unlock_irqrestore(&nn->link_status_lock, flags);
5054c352362SJakub Kicinski }
5064c352362SJakub Kicinski 
5074c352362SJakub Kicinski /**
5084c352362SJakub Kicinski  * nfp_net_irq_lsc() - Interrupt service routine for link state changes
5094c352362SJakub Kicinski  * @irq:      Interrupt
5104c352362SJakub Kicinski  * @data:     Opaque data structure
5114c352362SJakub Kicinski  *
5124c352362SJakub Kicinski  * Return: Indicate if the interrupt has been handled.
5134c352362SJakub Kicinski  */
nfp_net_irq_lsc(int irq,void * data)5144c352362SJakub Kicinski static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
5154c352362SJakub Kicinski {
5164c352362SJakub Kicinski 	struct nfp_net *nn = data;
517fdace6c2SJakub Kicinski 	struct msix_entry *entry;
518fdace6c2SJakub Kicinski 
519fdace6c2SJakub Kicinski 	entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
5204c352362SJakub Kicinski 
5214c352362SJakub Kicinski 	nfp_net_read_link_status(nn);
5224c352362SJakub Kicinski 
523fdace6c2SJakub Kicinski 	nfp_net_irq_unmask(nn, entry->entry);
5244c352362SJakub Kicinski 
5254c352362SJakub Kicinski 	return IRQ_HANDLED;
5264c352362SJakub Kicinski }
5274c352362SJakub Kicinski 
5284c352362SJakub Kicinski /**
5294c352362SJakub Kicinski  * nfp_net_irq_exn() - Interrupt service routine for exceptions
5304c352362SJakub Kicinski  * @irq:      Interrupt
5314c352362SJakub Kicinski  * @data:     Opaque data structure
5324c352362SJakub Kicinski  *
5334c352362SJakub Kicinski  * Return: Indicate if the interrupt has been handled.
5344c352362SJakub Kicinski  */
nfp_net_irq_exn(int irq,void * data)5354c352362SJakub Kicinski static irqreturn_t nfp_net_irq_exn(int irq, void *data)
5364c352362SJakub Kicinski {
5374c352362SJakub Kicinski 	struct nfp_net *nn = data;
5384c352362SJakub Kicinski 
5394c352362SJakub Kicinski 	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
5404c352362SJakub Kicinski 	/* XXX TO BE IMPLEMENTED */
5414c352362SJakub Kicinski 	return IRQ_HANDLED;
5424c352362SJakub Kicinski }
5434c352362SJakub Kicinski 
5444c352362SJakub Kicinski /**
5454c352362SJakub Kicinski  * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
5464c352362SJakub Kicinski  * @nn:		NFP Network structure
5474c352362SJakub Kicinski  * @ctrl_offset: Control BAR offset where IRQ configuration should be written
5484c352362SJakub Kicinski  * @format:	printf-style format to construct the interrupt name
5494c352362SJakub Kicinski  * @name:	Pointer to allocated space for interrupt name
5504c352362SJakub Kicinski  * @name_sz:	Size of space for interrupt name
5514c352362SJakub Kicinski  * @vector_idx:	Index of MSI-X vector used for this interrupt
5524c352362SJakub Kicinski  * @handler:	IRQ handler to register for this interrupt
5534c352362SJakub Kicinski  */
5544c352362SJakub Kicinski static int
nfp_net_aux_irq_request(struct nfp_net * nn,u32 ctrl_offset,const char * format,char * name,size_t name_sz,unsigned int vector_idx,irq_handler_t handler)5554c352362SJakub Kicinski nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
5564c352362SJakub Kicinski 			const char *format, char *name, size_t name_sz,
5574c352362SJakub Kicinski 			unsigned int vector_idx, irq_handler_t handler)
5584c352362SJakub Kicinski {
5594c352362SJakub Kicinski 	struct msix_entry *entry;
5604c352362SJakub Kicinski 	int err;
5614c352362SJakub Kicinski 
5624c352362SJakub Kicinski 	entry = &nn->irq_entries[vector_idx];
5634c352362SJakub Kicinski 
56477ece8d5SJakub Kicinski 	snprintf(name, name_sz, format, nfp_net_name(nn));
5654c352362SJakub Kicinski 	err = request_irq(entry->vector, handler, 0, name, nn);
5664c352362SJakub Kicinski 	if (err) {
5674c352362SJakub Kicinski 		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
5684c352362SJakub Kicinski 		       entry->vector, err);
5694c352362SJakub Kicinski 		return err;
5704c352362SJakub Kicinski 	}
571fdace6c2SJakub Kicinski 	nn_writeb(nn, ctrl_offset, entry->entry);
572fc233650SJakub Kicinski 	nfp_net_irq_unmask(nn, entry->entry);
5734c352362SJakub Kicinski 
5744c352362SJakub Kicinski 	return 0;
5754c352362SJakub Kicinski }
5764c352362SJakub Kicinski 
5774c352362SJakub Kicinski /**
5784c352362SJakub Kicinski  * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
5794c352362SJakub Kicinski  * @nn:		NFP Network structure
5804c352362SJakub Kicinski  * @ctrl_offset: Control BAR offset where IRQ configuration should be written
5814c352362SJakub Kicinski  * @vector_idx:	Index of MSI-X vector used for this interrupt
5824c352362SJakub Kicinski  */
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
				 unsigned int vector_idx)
{
	struct msix_entry *msix = &nn->irq_entries[vector_idx];

	/* Point the device at an invalid MSI-X entry and flush before
	 * releasing the IRQ so no interrupt fires after free_irq().
	 */
	nn_writeb(nn, ctrl_offset, 0xff);
	nn_pci_flush(nn);
	free_irq(msix->vector, nn);
}
5904c352362SJakub Kicinski 
/**
 * nfp_net_tls_tx() - Prepare a TX skb for kTLS device offload
 * @dp:		NFP Net data path structure
 * @r_vec:	ring vector this skb will go out on (used for statistics)
 * @skb:	skb queued for transmission
 * @tls_handle:	output, connection handle (ntls->fw_handle) for the device
 * @nr_frags:	in/out, frag count; zeroed when a linear software-encrypted
 *		fallback skb is returned in place of the original
 *
 * Return: the skb to transmit (possibly a software-encrypted fallback),
 * or NULL when the skb was consumed/dropped.
 */
struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	struct sk_buff *nskb;
	bool resync_pending;
	u32 datalen, seq;

	/* Fast exits: kTLS TX disabled, or skb not marked for offload. */
	if (likely(!dp->ktls_tx))
		return skb;
	if (!tls_is_skb_tx_device_offloaded(skb))
		return skb;

	datalen = skb->len - skb_tcp_all_headers(skb);
	seq = ntohl(tcp_hdr(skb)->seq);
	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	resync_pending = tls_offload_tx_resync_pending(skb->sk);
	if (unlikely(resync_pending || ntls->next_seq != seq)) {
		/* Pure ACK out of order already */
		if (!datalen)
			return skb;

		/* Out of sequence with device state - encrypt this record
		 * in software instead of on the NIC.
		 */
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tls_tx_fallback++;
		u64_stats_update_end(&r_vec->tx_sync);

		nskb = tls_encrypt_skb(skb);
		if (!nskb) {
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tls_tx_no_fallback++;
			u64_stats_update_end(&r_vec->tx_sync);
			return NULL;
		}
		/* encryption wasn't necessary */
		if (nskb == skb)
			return skb;
		/* we don't re-check ring space */
		if (unlikely(skb_is_nonlinear(nskb))) {
			nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tx_errors++;
			u64_stats_update_end(&r_vec->tx_sync);
			dev_kfree_skb_any(nskb);
			return NULL;
		}

		/* jump forward, a TX may have gotten lost, need to sync TX */
		if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
			tls_offload_tx_resync_request(nskb->sk, seq,
						      ntls->next_seq);

		/* Fallback skb is linear - report zero frags to the caller. */
		*nr_frags = 0;
		return nskb;
	}

	/* In-sequence: device will encrypt.  Count one unit per segment
	 * for GSO skbs, one for plain skbs carrying data.
	 */
	if (datalen) {
		u64_stats_update_begin(&r_vec->tx_sync);
		if (!skb_is_gso(skb))
			r_vec->hw_tls_tx++;
		else
			r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	ntls->next_seq += datalen;
#endif
	return skb;
}
662c3991d39SDirk van der Merwe 
/* Roll back the TLS sequence-number advance done by nfp_net_tls_tx()
 * when the skb ultimately was not transmitted.
 */
void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	u32 payload_len, start_seq;

	if (!tls_handle)
		return;
	if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb)))
		return;

	payload_len = skb->len - skb_tcp_all_headers(skb);
	start_seq = ntohl(tcp_hdr(skb)->seq);

	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	/* Only undo if the context still reflects exactly this skb. */
	if (WARN_ON_ONCE(ntls->next_seq != start_seq + payload_len))
		return;
	ntls->next_seq = start_seq;
#endif
}
6845a4cea28SJakub Kicinski 
/* Stack TX watchdog callback - only logs which ring stalled.
 * NOTE(review): no reset/recovery is initiated here; presumably
 * handled elsewhere (firmware or reconfig path) - confirm.
 */
static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct nfp_net *nn = netdev_priv(netdev);

	nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
6914c352362SJakub Kicinski 
69262d03330SJakub Kicinski /* Receive processing */
693bf187ea0SJakub Kicinski static unsigned int
nfp_net_calc_fl_bufsz_data(struct nfp_net_dp * dp)6949c91a365SNiklas Söderlund nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
695bf187ea0SJakub Kicinski {
6969c91a365SNiklas Söderlund 	unsigned int fl_bufsz = 0;
697bf187ea0SJakub Kicinski 
69879c12a75SJakub Kicinski 	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
699c0f031bcSJakub Kicinski 		fl_bufsz += NFP_NET_MAX_PREPEND;
700bf187ea0SJakub Kicinski 	else
70179c12a75SJakub Kicinski 		fl_bufsz += dp->rx_offset;
70276e1e1a8SJakub Kicinski 	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
703bf187ea0SJakub Kicinski 
7049c91a365SNiklas Söderlund 	return fl_bufsz;
7059c91a365SNiklas Söderlund }
7069c91a365SNiklas Söderlund 
nfp_net_calc_fl_bufsz(struct nfp_net_dp * dp)7079c91a365SNiklas Söderlund static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
7089c91a365SNiklas Söderlund {
7099c91a365SNiklas Söderlund 	unsigned int fl_bufsz;
7109c91a365SNiklas Söderlund 
7119c91a365SNiklas Söderlund 	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
7129c91a365SNiklas Söderlund 	fl_bufsz += dp->rx_dma_off;
7139c91a365SNiklas Söderlund 	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
7149c91a365SNiklas Söderlund 
715c0f031bcSJakub Kicinski 	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
716c0f031bcSJakub Kicinski 	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
717c0f031bcSJakub Kicinski 
718bf187ea0SJakub Kicinski 	return fl_bufsz;
719bf187ea0SJakub Kicinski }
7204c352362SJakub Kicinski 
nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp * dp)7219c91a365SNiklas Söderlund static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
7229c91a365SNiklas Söderlund {
7239c91a365SNiklas Söderlund 	unsigned int fl_bufsz;
7249c91a365SNiklas Söderlund 
7259c91a365SNiklas Söderlund 	fl_bufsz = XDP_PACKET_HEADROOM;
7269c91a365SNiklas Söderlund 	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
7279c91a365SNiklas Söderlund 
7289c91a365SNiklas Söderlund 	return fl_bufsz;
7299c91a365SNiklas Söderlund }
7309c91a365SNiklas Söderlund 
7314c352362SJakub Kicinski /* Setup and Configuration
7324c352362SJakub Kicinski  */
7334c352362SJakub Kicinski 
/**
 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
 * @nn:		NFP Network structure
 *
 * Maps each ring vector to its MSI-X entry (offset past the non-queue
 * vectors), picks the IRQ handler, and spreads affinity across the
 * device's NUMA node.  Control vNICs (no netdev) are serviced from a
 * tasklet instead of NAPI; the tasklet starts disabled.
 */
static void nfp_net_vecs_init(struct nfp_net *nn)
{
	int numa_node = dev_to_node(&nn->pdev->dev);
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	nn->lsc_handler = nfp_net_irq_lsc;
	nn->exn_handler = nfp_net_irq_exn;

	for (r = 0; r < nn->max_r_vecs; r++) {
		struct msix_entry *entry;

		/* Queue vectors come after the LSC/EXN vectors. */
		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];

		r_vec = &nn->r_vecs[r];
		r_vec->nfp_net = nn;
		r_vec->irq_entry = entry->entry;
		r_vec->irq_vector = entry->vector;

		if (nn->dp.netdev) {
			r_vec->handler = nfp_net_irq_rxtx;
		} else {
			/* Control vNIC: queue + tasklet instead of NAPI. */
			r_vec->handler = nfp_ctrl_irq_rxtx;

			__skb_queue_head_init(&r_vec->queue);
			spin_lock_init(&r_vec->lock);
			tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
			tasklet_disable(&r_vec->tasklet);
		}

		cpumask_set_cpu(cpumask_local_spread(r, numa_node), &r_vec->affinity_mask);
	}
}
771cd083ce1SJakub Kicinski 
772e31230f9SJakub Kicinski static void
nfp_net_napi_add(struct nfp_net_dp * dp,struct nfp_net_r_vector * r_vec,int idx)7736402528bSNiklas Söderlund nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
77458eb4363SJakub Kicinski {
77558eb4363SJakub Kicinski 	if (dp->netdev)
77658eb4363SJakub Kicinski 		netif_napi_add(dp->netdev, &r_vec->napi,
777b48b89f9SJakub Kicinski 			       nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll);
77858eb4363SJakub Kicinski 	else
77958eb4363SJakub Kicinski 		tasklet_enable(&r_vec->tasklet);
78058eb4363SJakub Kicinski }
78158eb4363SJakub Kicinski 
78258eb4363SJakub Kicinski static void
nfp_net_napi_del(struct nfp_net_dp * dp,struct nfp_net_r_vector * r_vec)78358eb4363SJakub Kicinski nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
78458eb4363SJakub Kicinski {
78558eb4363SJakub Kicinski 	if (dp->netdev)
78658eb4363SJakub Kicinski 		netif_napi_del(&r_vec->napi);
78758eb4363SJakub Kicinski 	else
78858eb4363SJakub Kicinski 		tasklet_disable(&r_vec->tasklet);
78958eb4363SJakub Kicinski }
79058eb4363SJakub Kicinski 
/* Attach RX/TX/XDP rings to a ring vector by index.  Vectors beyond
 * the count of a given ring kind get NULL for that kind.
 */
static void
nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec, int idx)
{
	r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
	r_vec->tx_ring =
		idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;

	/* XDP TX rings live after the stack TX rings in dp->tx_rings. */
	r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
		&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;

	/* Entered both when a pool is being attached and when one was
	 * previously attached (r_vec->xsk_pool set) and must be cleared.
	 */
	if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
		r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;

		if (r_vec->xsk_pool)
			xsk_pool_set_rxq_info(r_vec->xsk_pool,
					      &r_vec->rx_ring->xdp_rxq);

		/* Poll callback depends on XSK state - re-register NAPI. */
		nfp_net_napi_del(dp, r_vec);
		nfp_net_napi_add(dp, r_vec, idx);
	}
}
813e31230f9SJakub Kicinski 
/**
 * nfp_net_prepare_vector() - Set up NAPI and request the IRQ for a vector
 * @nn:		NFP Network structure
 * @r_vec:	ring vector to prepare
 * @idx:	index of the vector
 *
 * Return: 0 on success, negative errno from request_irq() on failure.
 */
static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		       int idx)
{
	int err;

	nfp_net_napi_add(&nn->dp, r_vec, idx);

	snprintf(r_vec->name, sizeof(r_vec->name),
		 "%s-rxtx-%d", nfp_net_name(nn), idx);
	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
			  r_vec);
	if (err) {
		/* Undo the NAPI/tasklet registration on failure. */
		nfp_net_napi_del(&nn->dp, r_vec);
		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
		return err;
	}
	/* Leave the IRQ disabled for now; NOTE(review): presumably enabled
	 * on the open path, outside this chunk - confirm.
	 */
	disable_irq(r_vec->irq_vector);

	irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);

	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
	       r_vec->irq_entry);

	return 0;
}
8404c352362SJakub Kicinski 
8410afbfb18SJakub Kicinski static void
nfp_net_cleanup_vector(struct nfp_net * nn,struct nfp_net_r_vector * r_vec)8420afbfb18SJakub Kicinski nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
8430afbfb18SJakub Kicinski {
844fdace6c2SJakub Kicinski 	irq_set_affinity_hint(r_vec->irq_vector, NULL);
84558eb4363SJakub Kicinski 	nfp_net_napi_del(&nn->dp, r_vec);
846fdace6c2SJakub Kicinski 	free_irq(r_vec->irq_vector, r_vec);
8474c352362SJakub Kicinski }
8484c352362SJakub Kicinski 
8494c352362SJakub Kicinski /**
8504c352362SJakub Kicinski  * nfp_net_rss_write_itbl() - Write RSS indirection table to device
8514c352362SJakub Kicinski  * @nn:      NFP Net device to reconfigure
8524c352362SJakub Kicinski  */
nfp_net_rss_write_itbl(struct nfp_net * nn)8534c352362SJakub Kicinski void nfp_net_rss_write_itbl(struct nfp_net *nn)
8544c352362SJakub Kicinski {
8554c352362SJakub Kicinski 	int i;
8564c352362SJakub Kicinski 
8574c352362SJakub Kicinski 	for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
8584c352362SJakub Kicinski 		nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
8594c352362SJakub Kicinski 			  get_unaligned_le32(nn->rss_itbl + i));
8604c352362SJakub Kicinski }
8614c352362SJakub Kicinski 
8624c352362SJakub Kicinski /**
8634c352362SJakub Kicinski  * nfp_net_rss_write_key() - Write RSS hash key to device
8644c352362SJakub Kicinski  * @nn:      NFP Net device to reconfigure
8654c352362SJakub Kicinski  */
nfp_net_rss_write_key(struct nfp_net * nn)8664c352362SJakub Kicinski void nfp_net_rss_write_key(struct nfp_net *nn)
8674c352362SJakub Kicinski {
8684c352362SJakub Kicinski 	int i;
8694c352362SJakub Kicinski 
8709ff304bfSJakub Kicinski 	for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
8714c352362SJakub Kicinski 		nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
8724c352362SJakub Kicinski 			  get_unaligned_le32(nn->rss_key + i));
8734c352362SJakub Kicinski }
8744c352362SJakub Kicinski 
8754c352362SJakub Kicinski /**
8764c352362SJakub Kicinski  * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
8774c352362SJakub Kicinski  * @nn:      NFP Net device to reconfigure
8784c352362SJakub Kicinski  */
nfp_net_coalesce_write_cfg(struct nfp_net * nn)8794c352362SJakub Kicinski void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
8804c352362SJakub Kicinski {
8814c352362SJakub Kicinski 	u8 i;
8824c352362SJakub Kicinski 	u32 factor;
8834c352362SJakub Kicinski 	u32 value;
8844c352362SJakub Kicinski 
8854c352362SJakub Kicinski 	/* Compute factor used to convert coalesce '_usecs' parameters to
8864c352362SJakub Kicinski 	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
8874c352362SJakub Kicinski 	 * count.
8884c352362SJakub Kicinski 	 */
889ce991ab6SJakub Kicinski 	factor = nn->tlv_caps.me_freq_mhz / 16;
8904c352362SJakub Kicinski 
8914c352362SJakub Kicinski 	/* copy RX interrupt coalesce parameters */
8924c352362SJakub Kicinski 	value = (nn->rx_coalesce_max_frames << 16) |
8934c352362SJakub Kicinski 		(factor * nn->rx_coalesce_usecs);
89479c12a75SJakub Kicinski 	for (i = 0; i < nn->dp.num_rx_rings; i++)
8954c352362SJakub Kicinski 		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
8964c352362SJakub Kicinski 
8974c352362SJakub Kicinski 	/* copy TX interrupt coalesce parameters */
8984c352362SJakub Kicinski 	value = (nn->tx_coalesce_max_frames << 16) |
8994c352362SJakub Kicinski 		(factor * nn->tx_coalesce_usecs);
90079c12a75SJakub Kicinski 	for (i = 0; i < nn->dp.num_tx_rings; i++)
9014c352362SJakub Kicinski 		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
9024c352362SJakub Kicinski }
9034c352362SJakub Kicinski 
/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:      NFP Net device to reconfigure
 * @addr:    MAC address to write
 *
 * Writes the MAC address from the netdev to the device control BAR.  Does not
 * perform the required reconfig.  We do a bit of byte swapping dance because
 * firmware is LE.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
{
	/* First four bytes at +0, last two at +6 - the gap matches the
	 * LE layout the firmware expects.  NOTE(review): offsets taken
	 * as-is from the control BAR definitions; confirm against
	 * nfp_net_ctrl.h before changing.
	 */
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
	nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
9184c352362SJakub Kicinski 
/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 *
 * Warning: must be fully idempotent.
 */
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
	u32 new_ctrl, new_ctrl_w1, update;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;

	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	/* Without FREELIST_EN the ring-enable masks are cleared together
	 * with the ENABLE bit in a single reconfig.
	 */
	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)) {
		nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
		nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	}

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err)
		nn_err(nn, "Could not disable device: %d\n", err);

	/* With FREELIST_EN capable firmware the rings and the freelist
	 * enable bit are cleared in a second reconfig step.
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
		new_ctrl_w1 = nn->dp.ctrl_w1;
		new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_FREELIST_EN;
		nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
		nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);

		nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
		err = nfp_net_reconfig(nn, update);
		if (err)
			nn_err(nn, "Could not disable FREELIST_EN: %d\n", err);
		nn->dp.ctrl_w1 = new_ctrl_w1;
	}

	/* Reset all rings and per-vector ring registers. */
	for (r = 0; r < nn->dp.num_rx_rings; r++) {
		nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
		if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
			nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
	}
	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_vec_clear_ring_data(nn, r);

	nn->dp.ctrl = new_ctrl;
}
9754c352362SJakub Kicinski 
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn:      NFP Net device to reconfigure
 *
 * Return: 0 on success, negative errno on failure (the device is left
 * disabled via nfp_net_clear_config_and_disable()).
 */
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
	u32 bufsz, new_ctrl, new_ctrl_w1, update = 0;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl_w1 = nn->dp.ctrl_w1;

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_write_key(nn);
		nfp_net_rss_write_itbl(nn);
		nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
		update |= NFP_NET_CFG_UPDATE_RSS;
	}

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_coalesce_write_cfg(nn);
		update |= NFP_NET_CFG_UPDATE_IRQMOD;
	}

	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);

	/* Enable rings 0..num_rings-1 via a bit mask. */
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
		  U64_MAX >> (64 - nn->dp.num_tx_rings));

	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
		  U64_MAX >> (64 - nn->dp.num_rx_rings));

	if (nn->dp.netdev)
		nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);

	/* Advertised freelist buffer size excludes headroom/tailroom. */
	bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);

	/* Enable device
	 * Step 1: Replace the CTRL_ENABLE by NFP_NET_CFG_CTRL_FREELIST_EN if
	 * FREELIST_EN exits.
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)
		new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN;
	else
		new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
	update |= NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;
	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	/* Step 2: Send the configuration and write the freelist.
	 * - The freelist only need to be written once.
	 */
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
	err = nfp_net_reconfig(nn, update);
	if (err) {
		nfp_net_clear_config_and_disable(nn);
		return err;
	}

	nn->dp.ctrl = new_ctrl;
	nn->dp.ctrl_w1 = new_ctrl_w1;

	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);

	/* Step 3: Do the NFP_NET_CFG_CTRL_ENABLE. Send the configuration.
	 */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
		new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);

		err = nfp_net_reconfig(nn, update);
		if (err) {
			nfp_net_clear_config_and_disable(nn);
			return err;
		}
		nn->dp.ctrl = new_ctrl;
	}

	return 0;
}
10671cd0cfc4SJakub Kicinski 
/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:	     NFP Net device to reconfigure
 *
 * Stops link-state interrupts, NAPI, interrupt moderation (dim) work
 * and the TX queues, in that order.
 */
static void nfp_net_close_stack(struct nfp_net *nn)
{
	struct nfp_net_r_vector *r_vec;
	unsigned int r;

	/* Silence link-state changes first, then take the carrier down. */
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	netif_carrier_off(nn->dp.netdev);
	nn->link_up = false;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		r_vec = &nn->r_vecs[r];

		disable_irq(r_vec->irq_vector);
		napi_disable(&r_vec->napi);

		/* dim work may still be queued - flush it after NAPI stops. */
		if (r_vec->rx_ring)
			cancel_work_sync(&r_vec->rx_dim.work);

		if (r_vec->tx_ring)
			cancel_work_sync(&r_vec->tx_dim.work);
	}

	netif_tx_disable(nn->dp.netdev);
}
1096d00ca2f3SJakub Kicinski 
1097d00ca2f3SJakub Kicinski /**
1098d00ca2f3SJakub Kicinski  * nfp_net_close_free_all() - Free all runtime resources
1099d00ca2f3SJakub Kicinski  * @nn:      NFP Net device to reconfigure
1100d00ca2f3SJakub Kicinski  */
nfp_net_close_free_all(struct nfp_net * nn)1101d00ca2f3SJakub Kicinski static void nfp_net_close_free_all(struct nfp_net *nn)
1102d00ca2f3SJakub Kicinski {
1103d00ca2f3SJakub Kicinski 	unsigned int r;
1104d00ca2f3SJakub Kicinski 
11054621199dSJakub Kicinski 	nfp_net_tx_rings_free(&nn->dp);
11064621199dSJakub Kicinski 	nfp_net_rx_rings_free(&nn->dp);
11074621199dSJakub Kicinski 
1108d00ca2f3SJakub Kicinski 	for (r = 0; r < nn->dp.num_r_vecs; r++)
1109d00ca2f3SJakub Kicinski 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1110d00ca2f3SJakub Kicinski 
1111d00ca2f3SJakub Kicinski 	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1112d00ca2f3SJakub Kicinski 	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
1113d00ca2f3SJakub Kicinski }
1114d00ca2f3SJakub Kicinski 
/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:      netdev structure
 *
 * Return: 0 (always succeeds).
 */
static int nfp_net_netdev_close(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
	nfp_net_close_stack(nn);

	/* Step 2: Tell NFP
	 */
	/* Drop multicast filter state before clearing device config. */
	if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
		__dev_mc_unsync(netdev, nfp_net_mc_unsync);

	nfp_net_clear_config_and_disable(nn);
	nfp_port_configure(netdev, false);

	/* Step 3: Free resources
	 */
	nfp_net_close_free_all(nn);

	nn_dbg(nn, "%s down", netdev->name);
	return 0;
}
1142d00ca2f3SJakub Kicinski 
nfp_ctrl_close(struct nfp_net * nn)114377ece8d5SJakub Kicinski void nfp_ctrl_close(struct nfp_net *nn)
114477ece8d5SJakub Kicinski {
114577ece8d5SJakub Kicinski 	int r;
114677ece8d5SJakub Kicinski 
114777ece8d5SJakub Kicinski 	rtnl_lock();
114877ece8d5SJakub Kicinski 
114977ece8d5SJakub Kicinski 	for (r = 0; r < nn->dp.num_r_vecs; r++) {
115077ece8d5SJakub Kicinski 		disable_irq(nn->r_vecs[r].irq_vector);
115177ece8d5SJakub Kicinski 		tasklet_disable(&nn->r_vecs[r].tasklet);
115277ece8d5SJakub Kicinski 	}
115377ece8d5SJakub Kicinski 
115477ece8d5SJakub Kicinski 	nfp_net_clear_config_and_disable(nn);
115577ece8d5SJakub Kicinski 
115677ece8d5SJakub Kicinski 	nfp_net_close_free_all(nn);
115777ece8d5SJakub Kicinski 
115877ece8d5SJakub Kicinski 	rtnl_unlock();
115977ece8d5SJakub Kicinski }
116077ece8d5SJakub Kicinski 
nfp_net_rx_dim_work(struct work_struct * work)11619d32e4e7SYinjun Zhang static void nfp_net_rx_dim_work(struct work_struct *work)
11629d32e4e7SYinjun Zhang {
11639d32e4e7SYinjun Zhang 	struct nfp_net_r_vector *r_vec;
11649d32e4e7SYinjun Zhang 	unsigned int factor, value;
11659d32e4e7SYinjun Zhang 	struct dim_cq_moder moder;
11669d32e4e7SYinjun Zhang 	struct nfp_net *nn;
11679d32e4e7SYinjun Zhang 	struct dim *dim;
11689d32e4e7SYinjun Zhang 
11699d32e4e7SYinjun Zhang 	dim = container_of(work, struct dim, work);
11709d32e4e7SYinjun Zhang 	moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
11719d32e4e7SYinjun Zhang 	r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
11729d32e4e7SYinjun Zhang 	nn = r_vec->nfp_net;
11739d32e4e7SYinjun Zhang 
11749d32e4e7SYinjun Zhang 	/* Compute factor used to convert coalesce '_usecs' parameters to
11759d32e4e7SYinjun Zhang 	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
11769d32e4e7SYinjun Zhang 	 * count.
11779d32e4e7SYinjun Zhang 	 */
11789d32e4e7SYinjun Zhang 	factor = nn->tlv_caps.me_freq_mhz / 16;
11799d32e4e7SYinjun Zhang 	if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
11809d32e4e7SYinjun Zhang 		return;
11819d32e4e7SYinjun Zhang 
11829d32e4e7SYinjun Zhang 	/* copy RX interrupt coalesce parameters */
11839d32e4e7SYinjun Zhang 	value = (moder.pkts << 16) | (factor * moder.usec);
11849d32e4e7SYinjun Zhang 	nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
11859d32e4e7SYinjun Zhang 	(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
11869d32e4e7SYinjun Zhang 
11879d32e4e7SYinjun Zhang 	dim->state = DIM_START_MEASURE;
11889d32e4e7SYinjun Zhang }
11899d32e4e7SYinjun Zhang 
nfp_net_tx_dim_work(struct work_struct * work)11909d32e4e7SYinjun Zhang static void nfp_net_tx_dim_work(struct work_struct *work)
11919d32e4e7SYinjun Zhang {
11929d32e4e7SYinjun Zhang 	struct nfp_net_r_vector *r_vec;
11939d32e4e7SYinjun Zhang 	unsigned int factor, value;
11949d32e4e7SYinjun Zhang 	struct dim_cq_moder moder;
11959d32e4e7SYinjun Zhang 	struct nfp_net *nn;
11969d32e4e7SYinjun Zhang 	struct dim *dim;
11979d32e4e7SYinjun Zhang 
11989d32e4e7SYinjun Zhang 	dim = container_of(work, struct dim, work);
11999d32e4e7SYinjun Zhang 	moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
12009d32e4e7SYinjun Zhang 	r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
12019d32e4e7SYinjun Zhang 	nn = r_vec->nfp_net;
12029d32e4e7SYinjun Zhang 
12039d32e4e7SYinjun Zhang 	/* Compute factor used to convert coalesce '_usecs' parameters to
12049d32e4e7SYinjun Zhang 	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
12059d32e4e7SYinjun Zhang 	 * count.
12069d32e4e7SYinjun Zhang 	 */
12079d32e4e7SYinjun Zhang 	factor = nn->tlv_caps.me_freq_mhz / 16;
12089d32e4e7SYinjun Zhang 	if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
12099d32e4e7SYinjun Zhang 		return;
12109d32e4e7SYinjun Zhang 
12119d32e4e7SYinjun Zhang 	/* copy TX interrupt coalesce parameters */
12129d32e4e7SYinjun Zhang 	value = (moder.pkts << 16) | (factor * moder.usec);
12139d32e4e7SYinjun Zhang 	nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
12149d32e4e7SYinjun Zhang 	(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
12159d32e4e7SYinjun Zhang 
12169d32e4e7SYinjun Zhang 	dim->state = DIM_START_MEASURE;
12179d32e4e7SYinjun Zhang }
12189d32e4e7SYinjun Zhang 
1219d00ca2f3SJakub Kicinski /**
12201cd0cfc4SJakub Kicinski  * nfp_net_open_stack() - Start the device from stack's perspective
12211cd0cfc4SJakub Kicinski  * @nn:      NFP Net device to reconfigure
12221cd0cfc4SJakub Kicinski  */
nfp_net_open_stack(struct nfp_net * nn)12231cd0cfc4SJakub Kicinski static void nfp_net_open_stack(struct nfp_net *nn)
12241cd0cfc4SJakub Kicinski {
12259d32e4e7SYinjun Zhang 	struct nfp_net_r_vector *r_vec;
12261cd0cfc4SJakub Kicinski 	unsigned int r;
12271cd0cfc4SJakub Kicinski 
122879c12a75SJakub Kicinski 	for (r = 0; r < nn->dp.num_r_vecs; r++) {
12299d32e4e7SYinjun Zhang 		r_vec = &nn->r_vecs[r];
12309d32e4e7SYinjun Zhang 
12319d32e4e7SYinjun Zhang 		if (r_vec->rx_ring) {
12329d32e4e7SYinjun Zhang 			INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
12339d32e4e7SYinjun Zhang 			r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
12349d32e4e7SYinjun Zhang 		}
12359d32e4e7SYinjun Zhang 
12369d32e4e7SYinjun Zhang 		if (r_vec->tx_ring) {
12379d32e4e7SYinjun Zhang 			INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
12389d32e4e7SYinjun Zhang 			r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
12399d32e4e7SYinjun Zhang 		}
12409d32e4e7SYinjun Zhang 
12419d32e4e7SYinjun Zhang 		napi_enable(&r_vec->napi);
12429d32e4e7SYinjun Zhang 		enable_irq(r_vec->irq_vector);
1243aba52df8SJakub Kicinski 	}
12441cd0cfc4SJakub Kicinski 
124579c12a75SJakub Kicinski 	netif_tx_wake_all_queues(nn->dp.netdev);
12461cd0cfc4SJakub Kicinski 
1247ce449ba7SJakub Kicinski 	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
12481cd0cfc4SJakub Kicinski 	nfp_net_read_link_status(nn);
12491cd0cfc4SJakub Kicinski }
12501cd0cfc4SJakub Kicinski 
/* Allocate everything needed to bring a vNIC up: request the EXN and
 * LSC auxiliary interrupts, prepare one interrupt vector per ring
 * vector, allocate RX and TX ring memory and assign rings to vectors.
 * On failure everything acquired so far is unwound in reverse order
 * via the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nfp_net_open_alloc_all(struct nfp_net *nn)
{
	int err, r;

	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
				      nn->exn_name, sizeof(nn->exn_name),
				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
	if (err)
		return err;
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
				      nn->lsc_name, sizeof(nn->lsc_name),
				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
	if (err)
		goto err_free_exn;
	/* Keep the link state IRQ masked until the stack is ready for it
	 * (it is re-enabled in nfp_net_open_stack()).
	 */
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err)
			goto err_cleanup_vec_p;
	}

	err = nfp_net_rx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_cleanup_vec;

	err = nfp_net_tx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_free_rx_rings;

	/* Assign rings across all vectors (max_r_vecs), not just the
	 * currently active num_r_vecs.
	 */
	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	return 0;

err_free_rx_rings:
	nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
	r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
	while (r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
	return err;
}
1298ee26756dSJakub Kicinski 
/**
 * nfp_net_netdev_open() - Called when the device is upped
 * @netdev:      netdev structure
 *
 * Return: 0 on success, negative errno on failure.  On failure all
 * resources acquired by nfp_net_open_alloc_all() are released again.
 */
static int nfp_net_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
	err = nfp_net_open_alloc_all(nn);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
	if (err)
		goto err_free_all;

	err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
	if (err)
		goto err_free_all;

	/* Step 2: Configure the NFP
	 * - Ifup the physical interface if it exists
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
	err = nfp_port_configure(netdev, true);
	if (err)
		goto err_free_all;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_port_disable;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
	nfp_net_open_stack(nn);

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
err_free_all:
	nfp_net_close_free_all(nn);
	return err;
}
13534c352362SJakub Kicinski 
nfp_ctrl_open(struct nfp_net * nn)135477ece8d5SJakub Kicinski int nfp_ctrl_open(struct nfp_net *nn)
135577ece8d5SJakub Kicinski {
135677ece8d5SJakub Kicinski 	int err, r;
135777ece8d5SJakub Kicinski 
135877ece8d5SJakub Kicinski 	/* ring dumping depends on vNICs being opened/closed under rtnl */
135977ece8d5SJakub Kicinski 	rtnl_lock();
136077ece8d5SJakub Kicinski 
136177ece8d5SJakub Kicinski 	err = nfp_net_open_alloc_all(nn);
136277ece8d5SJakub Kicinski 	if (err)
136377ece8d5SJakub Kicinski 		goto err_unlock;
136477ece8d5SJakub Kicinski 
136577ece8d5SJakub Kicinski 	err = nfp_net_set_config_and_enable(nn);
136677ece8d5SJakub Kicinski 	if (err)
136777ece8d5SJakub Kicinski 		goto err_free_all;
136877ece8d5SJakub Kicinski 
136977ece8d5SJakub Kicinski 	for (r = 0; r < nn->dp.num_r_vecs; r++)
137077ece8d5SJakub Kicinski 		enable_irq(nn->r_vecs[r].irq_vector);
137177ece8d5SJakub Kicinski 
137277ece8d5SJakub Kicinski 	rtnl_unlock();
137377ece8d5SJakub Kicinski 
137477ece8d5SJakub Kicinski 	return 0;
137577ece8d5SJakub Kicinski 
137677ece8d5SJakub Kicinski err_free_all:
137777ece8d5SJakub Kicinski 	nfp_net_close_free_all(nn);
137877ece8d5SJakub Kicinski err_unlock:
137977ece8d5SJakub Kicinski 	rtnl_unlock();
138077ece8d5SJakub Kicinski 	return err;
138177ece8d5SJakub Kicinski }
138277ece8d5SJakub Kicinski 
/* Queue an asynchronous mailbox message for deferred delivery by
 * nfp_net_mbox_amsg_work().  The payload is copied into the entry, so
 * @data need not remain valid after return.  Safe to call from atomic
 * context (allocation is GFP_ATOMIC).
 *
 * Return: 0 on success, -ENOMEM if the entry cannot be allocated.
 */
int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
				 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
{
	struct nfp_mbox_amsg_entry *msg;

	msg = kmalloc(sizeof(*msg) + len, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->cmd = cmd;
	msg->cfg = cb;
	memcpy(msg->msg, data, len);

	spin_lock_bh(&nn->mbox_amsg.lock);
	list_add_tail(&msg->list, &nn->mbox_amsg.list);
	spin_unlock_bh(&nn->mbox_amsg.lock);

	schedule_work(&nn->mbox_amsg.work);

	return 0;
}
140471f814cdSYinjun Zhang 
nfp_net_mbox_amsg_work(struct work_struct * work)140571f814cdSYinjun Zhang static void nfp_net_mbox_amsg_work(struct work_struct *work)
140671f814cdSYinjun Zhang {
140771f814cdSYinjun Zhang 	struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
140871f814cdSYinjun Zhang 	struct nfp_mbox_amsg_entry *entry, *tmp;
140971f814cdSYinjun Zhang 	struct list_head tmp_list;
141071f814cdSYinjun Zhang 
141171f814cdSYinjun Zhang 	INIT_LIST_HEAD(&tmp_list);
141271f814cdSYinjun Zhang 
141371f814cdSYinjun Zhang 	spin_lock_bh(&nn->mbox_amsg.lock);
141471f814cdSYinjun Zhang 	list_splice_init(&nn->mbox_amsg.list, &tmp_list);
141571f814cdSYinjun Zhang 	spin_unlock_bh(&nn->mbox_amsg.lock);
141671f814cdSYinjun Zhang 
141771f814cdSYinjun Zhang 	list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
141871f814cdSYinjun Zhang 		int err = entry->cfg(nn, entry);
141971f814cdSYinjun Zhang 
142071f814cdSYinjun Zhang 		if (err)
142171f814cdSYinjun Zhang 			nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
142271f814cdSYinjun Zhang 
142371f814cdSYinjun Zhang 		list_del(&entry->list);
142471f814cdSYinjun Zhang 		kfree(entry);
142571f814cdSYinjun Zhang 	}
142671f814cdSYinjun Zhang }
142771f814cdSYinjun Zhang 
nfp_net_mc_cfg(struct nfp_net * nn,struct nfp_mbox_amsg_entry * entry)142871f814cdSYinjun Zhang static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
142971f814cdSYinjun Zhang {
143071f814cdSYinjun Zhang 	unsigned char *addr = entry->msg;
1431de624864SDiana Wang 	int ret;
1432de624864SDiana Wang 
1433de624864SDiana Wang 	ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
1434de624864SDiana Wang 	if (ret)
1435de624864SDiana Wang 		return ret;
1436de624864SDiana Wang 
1437de624864SDiana Wang 	nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
1438de624864SDiana Wang 		  get_unaligned_be32(addr));
1439de624864SDiana Wang 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
1440de624864SDiana Wang 		  get_unaligned_be16(addr + 4));
1441de624864SDiana Wang 
144271f814cdSYinjun Zhang 	return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
1443e20aa071SYinjun Zhang }
1444e20aa071SYinjun Zhang 
nfp_net_mc_sync(struct net_device * netdev,const unsigned char * addr)1445de624864SDiana Wang static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
1446de624864SDiana Wang {
1447de624864SDiana Wang 	struct nfp_net *nn = netdev_priv(netdev);
1448de624864SDiana Wang 
1449de624864SDiana Wang 	if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
1450de624864SDiana Wang 		nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
1451de624864SDiana Wang 		       netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
1452de624864SDiana Wang 		return -EINVAL;
1453de624864SDiana Wang 	}
1454de624864SDiana Wang 
145571f814cdSYinjun Zhang 	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
145671f814cdSYinjun Zhang 					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
1457de624864SDiana Wang }
1458de624864SDiana Wang 
nfp_net_mc_unsync(struct net_device * netdev,const unsigned char * addr)1459de624864SDiana Wang static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
1460de624864SDiana Wang {
1461e20aa071SYinjun Zhang 	struct nfp_net *nn = netdev_priv(netdev);
1462e20aa071SYinjun Zhang 
146371f814cdSYinjun Zhang 	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
146471f814cdSYinjun Zhang 					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
1465de624864SDiana Wang }
1466de624864SDiana Wang 
nfp_net_set_rx_mode(struct net_device * netdev)14674c352362SJakub Kicinski static void nfp_net_set_rx_mode(struct net_device *netdev)
14684c352362SJakub Kicinski {
14694c352362SJakub Kicinski 	struct nfp_net *nn = netdev_priv(netdev);
1470de624864SDiana Wang 	u32 new_ctrl, new_ctrl_w1;
14714c352362SJakub Kicinski 
147279c12a75SJakub Kicinski 	new_ctrl = nn->dp.ctrl;
1473de624864SDiana Wang 	new_ctrl_w1 = nn->dp.ctrl_w1;
14744c352362SJakub Kicinski 
1475d0adb51eSJakub Kicinski 	if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
1476d0adb51eSJakub Kicinski 		new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
1477d0adb51eSJakub Kicinski 	else
1478d0adb51eSJakub Kicinski 		new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
1479d0adb51eSJakub Kicinski 
1480de624864SDiana Wang 	if (netdev->flags & IFF_ALLMULTI)
1481de624864SDiana Wang 		new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_MCAST_FILTER;
1482de624864SDiana Wang 	else
1483de624864SDiana Wang 		new_ctrl_w1 |= nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER;
1484de624864SDiana Wang 
14854c352362SJakub Kicinski 	if (netdev->flags & IFF_PROMISC) {
14864c352362SJakub Kicinski 		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
14874c352362SJakub Kicinski 			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
14884c352362SJakub Kicinski 		else
14894c352362SJakub Kicinski 			nn_warn(nn, "FW does not support promiscuous mode\n");
14904c352362SJakub Kicinski 	} else {
14914c352362SJakub Kicinski 		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
14924c352362SJakub Kicinski 	}
14934c352362SJakub Kicinski 
1494de624864SDiana Wang 	if ((nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) &&
1495de624864SDiana Wang 	    __dev_mc_sync(netdev, nfp_net_mc_sync, nfp_net_mc_unsync))
1496de624864SDiana Wang 		netdev_err(netdev, "Sync mc address failed\n");
1497de624864SDiana Wang 
1498de624864SDiana Wang 	if (new_ctrl == nn->dp.ctrl && new_ctrl_w1 == nn->dp.ctrl_w1)
14994c352362SJakub Kicinski 		return;
15004c352362SJakub Kicinski 
1501de624864SDiana Wang 	if (new_ctrl != nn->dp.ctrl)
15024c352362SJakub Kicinski 		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1503de624864SDiana Wang 	if (new_ctrl_w1 != nn->dp.ctrl_w1)
1504de624864SDiana Wang 		nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
15053d780b92SJakub Kicinski 	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
15064c352362SJakub Kicinski 
150779c12a75SJakub Kicinski 	nn->dp.ctrl = new_ctrl;
1508de624864SDiana Wang 	nn->dp.ctrl_w1 = new_ctrl_w1;
15094c352362SJakub Kicinski }
15104c352362SJakub Kicinski 
nfp_net_rss_init_itbl(struct nfp_net * nn)15111e9e10d0SJakub Kicinski static void nfp_net_rss_init_itbl(struct nfp_net *nn)
15121e9e10d0SJakub Kicinski {
15131e9e10d0SJakub Kicinski 	int i;
15141e9e10d0SJakub Kicinski 
15151e9e10d0SJakub Kicinski 	for (i = 0; i < sizeof(nn->rss_itbl); i++)
15161e9e10d0SJakub Kicinski 		nn->rss_itbl[i] =
151779c12a75SJakub Kicinski 			ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
15181e9e10d0SJakub Kicinski }
15191e9e10d0SJakub Kicinski 
nfp_net_dp_swap(struct nfp_net * nn,struct nfp_net_dp * dp)1520512e94dcSJakub Kicinski static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
1521512e94dcSJakub Kicinski {
1522512e94dcSJakub Kicinski 	struct nfp_net_dp new_dp = *dp;
1523512e94dcSJakub Kicinski 
1524512e94dcSJakub Kicinski 	*dp = nn->dp;
1525512e94dcSJakub Kicinski 	nn->dp = new_dp;
152676e1e1a8SJakub Kicinski 
152776e1e1a8SJakub Kicinski 	nn->dp.netdev->mtu = new_dp.mtu;
1528892a7f70SJakub Kicinski 
1529892a7f70SJakub Kicinski 	if (!netif_is_rxfh_configured(nn->dp.netdev))
1530892a7f70SJakub Kicinski 		nfp_net_rss_init_itbl(nn);
1531512e94dcSJakub Kicinski }
1532512e94dcSJakub Kicinski 
nfp_net_dp_swap_enable(struct nfp_net * nn,struct nfp_net_dp * dp)1533892a7f70SJakub Kicinski static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
15344c352362SJakub Kicinski {
1535e31230f9SJakub Kicinski 	unsigned int r;
1536164d1e9eSJakub Kicinski 	int err;
1537e31230f9SJakub Kicinski 
1538892a7f70SJakub Kicinski 	nfp_net_dp_swap(nn, dp);
1539164d1e9eSJakub Kicinski 
1540e31230f9SJakub Kicinski 	for (r = 0; r <	nn->max_r_vecs; r++)
154179c12a75SJakub Kicinski 		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
1542e31230f9SJakub Kicinski 
1543e874f455SJakub Kicinski 	err = netif_set_real_num_queues(nn->dp.netdev,
1544e874f455SJakub Kicinski 					nn->dp.num_stack_tx_rings,
1545e874f455SJakub Kicinski 					nn->dp.num_rx_rings);
1546164d1e9eSJakub Kicinski 	if (err)
1547164d1e9eSJakub Kicinski 		return err;
1548164d1e9eSJakub Kicinski 
1549ac0488efSJakub Kicinski 	return nfp_net_set_config_and_enable(nn);
15504c352362SJakub Kicinski }
15514c352362SJakub Kicinski 
nfp_net_clone_dp(struct nfp_net * nn)1552783496b0SJakub Kicinski struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
1553783496b0SJakub Kicinski {
1554783496b0SJakub Kicinski 	struct nfp_net_dp *new;
1555783496b0SJakub Kicinski 
1556783496b0SJakub Kicinski 	new = kmalloc(sizeof(*new), GFP_KERNEL);
1557783496b0SJakub Kicinski 	if (!new)
1558783496b0SJakub Kicinski 		return NULL;
1559783496b0SJakub Kicinski 
1560783496b0SJakub Kicinski 	*new = nn->dp;
1561783496b0SJakub Kicinski 
1562543bd14fSNiklas Söderlund 	new->xsk_pools = kmemdup(new->xsk_pools,
1563543bd14fSNiklas Söderlund 				 array_size(nn->max_r_vecs,
1564543bd14fSNiklas Söderlund 					    sizeof(new->xsk_pools)),
1565543bd14fSNiklas Söderlund 				 GFP_KERNEL);
1566543bd14fSNiklas Söderlund 	if (!new->xsk_pools) {
1567543bd14fSNiklas Söderlund 		kfree(new);
1568543bd14fSNiklas Söderlund 		return NULL;
1569543bd14fSNiklas Söderlund 	}
1570543bd14fSNiklas Söderlund 
1571783496b0SJakub Kicinski 	/* Clear things which need to be recomputed */
1572783496b0SJakub Kicinski 	new->fl_bufsz = 0;
1573783496b0SJakub Kicinski 	new->tx_rings = NULL;
1574783496b0SJakub Kicinski 	new->rx_rings = NULL;
1575783496b0SJakub Kicinski 	new->num_r_vecs = 0;
1576783496b0SJakub Kicinski 	new->num_stack_tx_rings = 0;
15770dcf7f50SJakub Kicinski 	new->txrwb = NULL;
15780dcf7f50SJakub Kicinski 	new->txrwb_dma = 0;
1579783496b0SJakub Kicinski 
1580783496b0SJakub Kicinski 	return new;
1581783496b0SJakub Kicinski }
1582783496b0SJakub Kicinski 
/* Release a data path clone made by nfp_net_clone_dp(): free the
 * deep-copied xsk_pools array first, then the dp itself.
 */
static void nfp_net_free_dp(struct nfp_net_dp *dp)
{
	kfree(dp->xsk_pools);
	kfree(dp);
}
1588543bd14fSNiklas Söderlund 
1589d957c0f7SJakub Kicinski static int
nfp_net_check_config(struct nfp_net * nn,struct nfp_net_dp * dp,struct netlink_ext_ack * extack)1590d957c0f7SJakub Kicinski nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
1591d957c0f7SJakub Kicinski 		     struct netlink_ext_ack *extack)
1592ecd63a02SJakub Kicinski {
15939c91a365SNiklas Söderlund 	unsigned int r, xsk_min_fl_bufsz;
15949c91a365SNiklas Söderlund 
1595ecd63a02SJakub Kicinski 	/* XDP-enabled tests */
15969dc6b116SJakub Kicinski 	if (!dp->xdp_prog)
1597ecd63a02SJakub Kicinski 		return 0;
15982195c263SJakub Kicinski 	if (dp->fl_bufsz > PAGE_SIZE) {
15994d463c4dSDaniel Borkmann 		NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
1600ecd63a02SJakub Kicinski 		return -EINVAL;
1601ecd63a02SJakub Kicinski 	}
1602892a7f70SJakub Kicinski 	if (dp->num_tx_rings > nn->max_tx_rings) {
16034d463c4dSDaniel Borkmann 		NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
1604ecd63a02SJakub Kicinski 		return -EINVAL;
1605ecd63a02SJakub Kicinski 	}
1606ecd63a02SJakub Kicinski 
16079c91a365SNiklas Söderlund 	xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
16089c91a365SNiklas Söderlund 	for (r = 0; r < nn->max_r_vecs; r++) {
16099c91a365SNiklas Söderlund 		if (!dp->xsk_pools[r])
16109c91a365SNiklas Söderlund 			continue;
16119c91a365SNiklas Söderlund 
16129c91a365SNiklas Söderlund 		if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
16139c91a365SNiklas Söderlund 			NL_SET_ERR_MSG_MOD(extack,
16140c1794c2SGuo Zhengkui 					   "XSK buffer pool chunk size too small");
16159c91a365SNiklas Söderlund 			return -EINVAL;
16169c91a365SNiklas Söderlund 		}
16179c91a365SNiklas Söderlund 	}
16189c91a365SNiklas Söderlund 
1619ecd63a02SJakub Kicinski 	return 0;
1620ecd63a02SJakub Kicinski }
1621ecd63a02SJakub Kicinski 
/**
 * nfp_net_ring_reconfig() - Reconfigure the device to a new data path
 * @nn:	    NFP Net device
 * @dp:	    candidate data path (from nfp_net_clone_dp(); always
 *	    consumed — freed on every exit path)
 * @extack: netlink extended ack for validation error messages
 *
 * If the netdev is down the new parameters are simply swapped in.
 * If it is running, the new vectors and rings are prepared first, the
 * device is stopped, the new configuration is swapped in and the
 * device restarted; on failure the old configuration is restored.
 *
 * Return: 0 on success, negative errno on failure.
 */
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
			  struct netlink_ext_ack *extack)
{
	int r, err;

	dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);

	/* XDP TX rings come out of the same pool, so fewer are left
	 * for the stack.
	 */
	dp->num_stack_tx_rings = dp->num_tx_rings;
	if (dp->xdp_prog)
		dp->num_stack_tx_rings -= dp->num_rx_rings;

	dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);

	err = nfp_net_check_config(nn, dp, extack);
	if (err)
		goto exit_free_dp;

	if (!netif_running(dp->netdev)) {
		nfp_net_dp_swap(nn, dp);
		err = 0;
		goto exit_free_dp;
	}

	/* Prepare new rings */
	for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err) {
			dp->num_r_vecs = r;
			goto err_cleanup_vecs;
		}
	}

	err = nfp_net_rx_rings_prepare(nn, dp);
	if (err)
		goto err_cleanup_vecs;

	err = nfp_net_tx_rings_prepare(nn, dp);
	if (err)
		goto err_free_rx;

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	err = nfp_net_dp_swap_enable(nn, dp);
	if (err) {
		int err2;

		nfp_net_clear_config_and_disable(nn);

		/* Try with old configuration and old rings: the failed
		 * swap_enable left the previous config in @dp, so a
		 * second swap restores it.
		 */
		err2 = nfp_net_dp_swap_enable(nn, dp);
		if (err2)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err2);
	}
	/* Tear down vectors that the final (possibly restored) config
	 * does not use; @dp now describes the rings being retired.
	 */
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_rx_rings_free(dp);
	nfp_net_tx_rings_free(dp);

	nfp_net_open_stack(nn);
exit_free_dp:
	nfp_net_free_dp(dp);

	return err;

err_free_rx:
	nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_free_dp(dp);
	return err;
}
169868453c7aSJakub Kicinski 
nfp_net_change_mtu(struct net_device * netdev,int new_mtu)169968453c7aSJakub Kicinski static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
170068453c7aSJakub Kicinski {
170168453c7aSJakub Kicinski 	struct nfp_net *nn = netdev_priv(netdev);
1702783496b0SJakub Kicinski 	struct nfp_net_dp *dp;
1703ccbdc596SJakub Kicinski 	int err;
1704ccbdc596SJakub Kicinski 
1705167cebefSJohn Hurley 	err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
1706ccbdc596SJakub Kicinski 	if (err)
1707ccbdc596SJakub Kicinski 		return err;
170868453c7aSJakub Kicinski 
1709783496b0SJakub Kicinski 	dp = nfp_net_clone_dp(nn);
1710783496b0SJakub Kicinski 	if (!dp)
1711783496b0SJakub Kicinski 		return -ENOMEM;
1712783496b0SJakub Kicinski 
171376e1e1a8SJakub Kicinski 	dp->mtu = new_mtu;
171476e1e1a8SJakub Kicinski 
1715d957c0f7SJakub Kicinski 	return nfp_net_ring_reconfig(nn, dp, NULL);
1716cc7c0333SJakub Kicinski }
1717cc7c0333SJakub Kicinski 
1718b64052fcSPablo Cascón static int
nfp_net_vlan_rx_add_vid(struct net_device * netdev,__be16 proto,u16 vid)1719b64052fcSPablo Cascón nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1720b64052fcSPablo Cascón {
1721dd5b2498SJakub Kicinski 	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
1722b64052fcSPablo Cascón 	struct nfp_net *nn = netdev_priv(netdev);
1723dd5b2498SJakub Kicinski 	int err;
1724b64052fcSPablo Cascón 
1725b64052fcSPablo Cascón 	/* Priority tagged packets with vlan id 0 are processed by the
1726b64052fcSPablo Cascón 	 * NFP as untagged packets
1727b64052fcSPablo Cascón 	 */
1728b64052fcSPablo Cascón 	if (!vid)
1729b64052fcSPablo Cascón 		return 0;
1730b64052fcSPablo Cascón 
1731dd5b2498SJakub Kicinski 	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
1732dd5b2498SJakub Kicinski 	if (err)
1733dd5b2498SJakub Kicinski 		return err;
1734dd5b2498SJakub Kicinski 
1735527d7d1bSJakub Kicinski 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
1736527d7d1bSJakub Kicinski 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
1737527d7d1bSJakub Kicinski 		  ETH_P_8021Q);
1738b64052fcSPablo Cascón 
1739dd5b2498SJakub Kicinski 	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
1740b64052fcSPablo Cascón }
1741b64052fcSPablo Cascón 
1742b64052fcSPablo Cascón static int
nfp_net_vlan_rx_kill_vid(struct net_device * netdev,__be16 proto,u16 vid)1743b64052fcSPablo Cascón nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
1744b64052fcSPablo Cascón {
1745dd5b2498SJakub Kicinski 	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
1746b64052fcSPablo Cascón 	struct nfp_net *nn = netdev_priv(netdev);
1747dd5b2498SJakub Kicinski 	int err;
1748b64052fcSPablo Cascón 
1749b64052fcSPablo Cascón 	/* Priority tagged packets with vlan id 0 are processed by the
1750b64052fcSPablo Cascón 	 * NFP as untagged packets
1751b64052fcSPablo Cascón 	 */
1752b64052fcSPablo Cascón 	if (!vid)
1753b64052fcSPablo Cascón 		return 0;
1754b64052fcSPablo Cascón 
1755dd5b2498SJakub Kicinski 	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
1756dd5b2498SJakub Kicinski 	if (err)
1757dd5b2498SJakub Kicinski 		return err;
1758dd5b2498SJakub Kicinski 
1759527d7d1bSJakub Kicinski 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
1760527d7d1bSJakub Kicinski 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
1761527d7d1bSJakub Kicinski 		  ETH_P_8021Q);
1762b64052fcSPablo Cascón 
1763dd5b2498SJakub Kicinski 	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
1764b64052fcSPablo Cascón }
1765b64052fcSPablo Cascón 
nfp_net_stat64(struct net_device * netdev,struct rtnl_link_stats64 * stats)1766bc1f4470Sstephen hemminger static void nfp_net_stat64(struct net_device *netdev,
17674c352362SJakub Kicinski 			   struct rtnl_link_stats64 *stats)
17684c352362SJakub Kicinski {
17694c352362SJakub Kicinski 	struct nfp_net *nn = netdev_priv(netdev);
17704c352362SJakub Kicinski 	int r;
17714c352362SJakub Kicinski 
1772eca09be8SJakub Kicinski 	/* Collect software stats */
177329f534c4SJakub Kicinski 	for (r = 0; r < nn->max_r_vecs; r++) {
17744c352362SJakub Kicinski 		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
17754c352362SJakub Kicinski 		u64 data[3];
17764c352362SJakub Kicinski 		unsigned int start;
17774c352362SJakub Kicinski 
17784c352362SJakub Kicinski 		do {
1779068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&r_vec->rx_sync);
17804c352362SJakub Kicinski 			data[0] = r_vec->rx_pkts;
17814c352362SJakub Kicinski 			data[1] = r_vec->rx_bytes;
17824c352362SJakub Kicinski 			data[2] = r_vec->rx_drops;
1783068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
17844c352362SJakub Kicinski 		stats->rx_packets += data[0];
17854c352362SJakub Kicinski 		stats->rx_bytes += data[1];
17864c352362SJakub Kicinski 		stats->rx_dropped += data[2];
17874c352362SJakub Kicinski 
17884c352362SJakub Kicinski 		do {
1789068c38adSThomas Gleixner 			start = u64_stats_fetch_begin(&r_vec->tx_sync);
17904c352362SJakub Kicinski 			data[0] = r_vec->tx_pkts;
17914c352362SJakub Kicinski 			data[1] = r_vec->tx_bytes;
17924c352362SJakub Kicinski 			data[2] = r_vec->tx_errors;
1793068c38adSThomas Gleixner 		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
17944c352362SJakub Kicinski 		stats->tx_packets += data[0];
17954c352362SJakub Kicinski 		stats->tx_bytes += data[1];
17964c352362SJakub Kicinski 		stats->tx_errors += data[2];
17974c352362SJakub Kicinski 	}
1798eca09be8SJakub Kicinski 
1799eca09be8SJakub Kicinski 	/* Add in device stats */
1800eca09be8SJakub Kicinski 	stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
1801eca09be8SJakub Kicinski 	stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
1802eca09be8SJakub Kicinski 	stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
1803eca09be8SJakub Kicinski 
1804eca09be8SJakub Kicinski 	stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
1805eca09be8SJakub Kicinski 	stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
18064c352362SJakub Kicinski }
18074c352362SJakub Kicinski 
/* ndo_set_features callback - translate requested netdev feature flags
 * into NFP control word bits and push the new control word to the
 * device via a GEN reconfig.
 * Returns 0 on success or a negative errno from the port feature
 * update or the device reconfig.
 */
static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	new_ctrl = nn->dp.ctrl;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			/* Enable whichever RX csum variants the device caps allow */
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
	}

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		/* "a ?: b" (GNU extension) - prefer the LSO2 bit when the
		 * device advertises it, otherwise fall back to LSO v1.
		 */
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
					      NFP_NET_CFG_CTRL_LSO;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		/* Prefer the v2 VLAN strip format when the device caps it */
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
				    NFP_NET_CFG_CTRL_RXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		/* Prefer the v2 VLAN insert format when the device caps it */
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
				    NFP_NET_CFG_CTRL_TXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
	}

	if (changed & NETIF_F_HW_VLAN_STAG_RX) {
		if (features & NETIF_F_HW_VLAN_STAG_RX)
			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
	}

	if (changed & NETIF_F_SG) {
		if (features & NETIF_F_SG)
			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
	}

	/* Let the port (e.g. representor) layer react first; abort on error
	 * before touching device state.
	 */
	err = nfp_port_set_features(netdev, features);
	if (err)
		return err;

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	/* Nothing to write to the device if the control word is unchanged */
	if (new_ctrl == nn->dp.ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* Only cache the new control word once the device accepted it */
	nn->dp.ctrl = new_ctrl;

	return 0;
}
18994c352362SJakub Kicinski 
19004c352362SJakub Kicinski static netdev_features_t
nfp_net_fix_features(struct net_device * netdev,netdev_features_t features)190167d2656bSDiana Wang nfp_net_fix_features(struct net_device *netdev,
190267d2656bSDiana Wang 		     netdev_features_t features)
190367d2656bSDiana Wang {
190467d2656bSDiana Wang 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
190567d2656bSDiana Wang 	    (features & NETIF_F_HW_VLAN_STAG_RX)) {
190667d2656bSDiana Wang 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
190767d2656bSDiana Wang 			features &= ~NETIF_F_HW_VLAN_CTAG_RX;
190867d2656bSDiana Wang 			netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
190967d2656bSDiana Wang 			netdev_warn(netdev,
191067d2656bSDiana Wang 				    "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
191167d2656bSDiana Wang 		} else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
191267d2656bSDiana Wang 			features &= ~NETIF_F_HW_VLAN_STAG_RX;
191367d2656bSDiana Wang 			netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
191467d2656bSDiana Wang 			netdev_warn(netdev,
191567d2656bSDiana Wang 				    "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
191667d2656bSDiana Wang 		}
191767d2656bSDiana Wang 	}
191867d2656bSDiana Wang 	return features;
191967d2656bSDiana Wang }
192067d2656bSDiana Wang 
192167d2656bSDiana Wang static netdev_features_t
nfp_net_features_check(struct sk_buff * skb,struct net_device * dev,netdev_features_t features)19224c352362SJakub Kicinski nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
19234c352362SJakub Kicinski 		       netdev_features_t features)
19244c352362SJakub Kicinski {
19254c352362SJakub Kicinski 	u8 l4_hdr;
19264c352362SJakub Kicinski 
19274c352362SJakub Kicinski 	/* We can't do TSO over double tagged packets (802.1AD) */
19284c352362SJakub Kicinski 	features &= vlan_features_check(skb, features);
19294c352362SJakub Kicinski 
19304c352362SJakub Kicinski 	if (!skb->encapsulation)
19314c352362SJakub Kicinski 		return features;
19324c352362SJakub Kicinski 
19334c352362SJakub Kicinski 	/* Ensure that inner L4 header offset fits into TX descriptor field */
19344c352362SJakub Kicinski 	if (skb_is_gso(skb)) {
19354c352362SJakub Kicinski 		u32 hdrlen;
19364c352362SJakub Kicinski 
1937504148feSEric Dumazet 		hdrlen = skb_inner_tcp_all_headers(skb);
19384c352362SJakub Kicinski 
1939d7cc8252SJakub Kicinski 		/* Assume worst case scenario of having longest possible
1940d7cc8252SJakub Kicinski 		 * metadata prepend - 8B
1941d7cc8252SJakub Kicinski 		 */
1942d7cc8252SJakub Kicinski 		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
19434c352362SJakub Kicinski 			features &= ~NETIF_F_GSO_MASK;
19444c352362SJakub Kicinski 	}
19454c352362SJakub Kicinski 
19461cf78d4cSHuanhuan Wang 	if (xfrm_offload(skb))
19471cf78d4cSHuanhuan Wang 		return features;
19481cf78d4cSHuanhuan Wang 
19494c352362SJakub Kicinski 	/* VXLAN/GRE check */
19504c352362SJakub Kicinski 	switch (vlan_get_protocol(skb)) {
19514c352362SJakub Kicinski 	case htons(ETH_P_IP):
19524c352362SJakub Kicinski 		l4_hdr = ip_hdr(skb)->protocol;
19534c352362SJakub Kicinski 		break;
19544c352362SJakub Kicinski 	case htons(ETH_P_IPV6):
19554c352362SJakub Kicinski 		l4_hdr = ipv6_hdr(skb)->nexthdr;
19564c352362SJakub Kicinski 		break;
19574c352362SJakub Kicinski 	default:
1958a188222bSTom Herbert 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
19594c352362SJakub Kicinski 	}
19604c352362SJakub Kicinski 
19614c352362SJakub Kicinski 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
19624c352362SJakub Kicinski 	    skb->inner_protocol != htons(ETH_P_TEB) ||
19634c352362SJakub Kicinski 	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
19644c352362SJakub Kicinski 	    (l4_hdr == IPPROTO_UDP &&
19654c352362SJakub Kicinski 	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
19664c352362SJakub Kicinski 	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
1967a188222bSTom Herbert 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
19684c352362SJakub Kicinski 
19694c352362SJakub Kicinski 	return features;
19704c352362SJakub Kicinski }
19714c352362SJakub Kicinski 
197251c1df83SJakub Kicinski static int
nfp_net_get_phys_port_name(struct net_device * netdev,char * name,size_t len)197351c1df83SJakub Kicinski nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
197451c1df83SJakub Kicinski {
197551c1df83SJakub Kicinski 	struct nfp_net *nn = netdev_priv(netdev);
197651c1df83SJakub Kicinski 	int n;
197751c1df83SJakub Kicinski 
1978f1fa719cSJiri Pirko 	/* If port is defined, devlink_port is registered and devlink core
1979f1fa719cSJiri Pirko 	 * is taking care of name formatting.
1980f1fa719cSJiri Pirko 	 */
198151c1df83SJakub Kicinski 	if (nn->port)
1982f1fa719cSJiri Pirko 		return -EOPNOTSUPP;
198351c1df83SJakub Kicinski 
1984fe06a64eSJakub Kicinski 	if (nn->dp.is_vf || nn->vnic_no_name)
19856fd1cfc0SJakub Kicinski 		return -EOPNOTSUPP;
19866fd1cfc0SJakub Kicinski 
1987ca145732SJakub Kicinski 	n = snprintf(name, len, "n%d", nn->id);
198851c1df83SJakub Kicinski 	if (n >= len)
198951c1df83SJakub Kicinski 		return -EINVAL;
199051c1df83SJakub Kicinski 
199151c1df83SJakub Kicinski 	return 0;
199251c1df83SJakub Kicinski }
199351c1df83SJakub Kicinski 
nfp_net_xdp_setup_drv(struct nfp_net * nn,struct netdev_bpf * bpf)19945f428401SJakub Kicinski static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
1995ecd63a02SJakub Kicinski {
19965f428401SJakub Kicinski 	struct bpf_prog *prog = bpf->prog;
1997783496b0SJakub Kicinski 	struct nfp_net_dp *dp;
19985f428401SJakub Kicinski 	int err;
19995f428401SJakub Kicinski 
20006a8ef542SJakub Kicinski 	if (!prog == !nn->dp.xdp_prog) {
20016a8ef542SJakub Kicinski 		WRITE_ONCE(nn->dp.xdp_prog, prog);
20025f428401SJakub Kicinski 		xdp_attachment_setup(&nn->xdp, bpf);
2003ecd63a02SJakub Kicinski 		return 0;
2004ecd63a02SJakub Kicinski 	}
2005ecd63a02SJakub Kicinski 
2006783496b0SJakub Kicinski 	dp = nfp_net_clone_dp(nn);
2007783496b0SJakub Kicinski 	if (!dp)
2008783496b0SJakub Kicinski 		return -ENOMEM;
2009783496b0SJakub Kicinski 
20109dc6b116SJakub Kicinski 	dp->xdp_prog = prog;
2011892a7f70SJakub Kicinski 	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
2012c487e6b1SJakub Kicinski 	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2013dbf637ffSJakub Kicinski 	dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
2014ecd63a02SJakub Kicinski 
2015ecd63a02SJakub Kicinski 	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
20165f428401SJakub Kicinski 	err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
2017c443b5acSJakub Kicinski 	if (err)
2018c443b5acSJakub Kicinski 		return err;
2019c443b5acSJakub Kicinski 
20205f428401SJakub Kicinski 	xdp_attachment_setup(&nn->xdp, bpf);
20215f428401SJakub Kicinski 	return 0;
20225f428401SJakub Kicinski }
20235f428401SJakub Kicinski 
nfp_net_xdp_setup_hw(struct nfp_net * nn,struct netdev_bpf * bpf)20245f428401SJakub Kicinski static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
20255f428401SJakub Kicinski {
20265f428401SJakub Kicinski 	int err;
20275f428401SJakub Kicinski 
20285f428401SJakub Kicinski 	err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
20295f428401SJakub Kicinski 	if (err)
2030cafa92acSJakub Kicinski 		return err;
20316a8ef542SJakub Kicinski 
20325f428401SJakub Kicinski 	xdp_attachment_setup(&nn->xdp_hw, bpf);
2033ecd63a02SJakub Kicinski 	return 0;
2034ecd63a02SJakub Kicinski }
2035ecd63a02SJakub Kicinski 
nfp_net_xdp(struct net_device * netdev,struct netdev_bpf * xdp)2036f4e63525SJakub Kicinski static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
2037ecd63a02SJakub Kicinski {
2038ecd63a02SJakub Kicinski 	struct nfp_net *nn = netdev_priv(netdev);
2039ecd63a02SJakub Kicinski 
2040ecd63a02SJakub Kicinski 	switch (xdp->command) {
2041ecd63a02SJakub Kicinski 	case XDP_SETUP_PROG:
20425f428401SJakub Kicinski 		return nfp_net_xdp_setup_drv(nn, xdp);
2043cafa92acSJakub Kicinski 	case XDP_SETUP_PROG_HW:
20445f428401SJakub Kicinski 		return nfp_net_xdp_setup_hw(nn, xdp);
20456402528bSNiklas Söderlund 	case XDP_SETUP_XSK_POOL:
20466402528bSNiklas Söderlund 		return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
20476402528bSNiklas Söderlund 					      xdp->xsk.queue_id);
2048ecd63a02SJakub Kicinski 	default:
2049af93d15aSJakub Kicinski 		return nfp_app_bpf(nn->app, nn, xdp);
2050ecd63a02SJakub Kicinski 	}
2051ecd63a02SJakub Kicinski }
2052ecd63a02SJakub Kicinski 
nfp_net_set_mac_address(struct net_device * netdev,void * addr)20539d372759SPablo Cascón static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
20549d372759SPablo Cascón {
20559d372759SPablo Cascón 	struct nfp_net *nn = netdev_priv(netdev);
20569d372759SPablo Cascón 	struct sockaddr *saddr = addr;
20579d372759SPablo Cascón 	int err;
20589d372759SPablo Cascón 
20599d372759SPablo Cascón 	err = eth_prepare_mac_addr_change(netdev, addr);
20609d372759SPablo Cascón 	if (err)
20619d372759SPablo Cascón 		return err;
20629d372759SPablo Cascón 
20639d372759SPablo Cascón 	nfp_net_write_mac_addr(nn, saddr->sa_data);
20649d372759SPablo Cascón 
20659d372759SPablo Cascón 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
20669d372759SPablo Cascón 	if (err)
20679d372759SPablo Cascón 		return err;
20689d372759SPablo Cascón 
20699d372759SPablo Cascón 	eth_commit_mac_addr_change(netdev, addr);
20709d372759SPablo Cascón 
20719d372759SPablo Cascón 	return 0;
20729d372759SPablo Cascón }
20739d372759SPablo Cascón 
/* ndo_bridge_getlink callback - report VEPA vs VEB mode based on the
 * current control word, if the device supports VEPA at all.
 */
static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct nfp_net *nn = netdev_priv(dev);
	bool vepa;

	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
		return -EOPNOTSUPP;

	vepa = nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       vepa ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
2090be801411SYinjun Zhang 
/* ndo_bridge_setlink callback - switch the device between VEPA and VEB
 * forwarding.  Acts on the first IFLA_BRIDGE_MODE attribute found in
 * the IFLA_AF_SPEC nest; -EINVAL if the nest or the attribute is absent.
 */
static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				  u16 flags, struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	u32 new_ctrl;
	int rem, err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		switch (mode) {
		case BRIDGE_MODE_VEPA:
			new_ctrl = nn->dp.ctrl | NFP_NET_CFG_CTRL_VEPA;
			break;
		case BRIDGE_MODE_VEB:
			new_ctrl = nn->dp.ctrl & ~NFP_NET_CFG_CTRL_VEPA;
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (new_ctrl == nn->dp.ctrl)
			return 0;

		nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
		err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
		if (!err)
			nn->dp.ctrl = new_ctrl;

		return err;
	}

	return -EINVAL;
}
2133be801411SYinjun Zhang 
/* Netdev ops table for vNICs using the NFD3 datapath.  Differs from
 * nfp_nfdk_netdev_ops only in also providing an AF_XDP wakeup handler
 * (.ndo_xsk_wakeup).
 */
const struct net_device_ops nfp_nfd3_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_fix_features	= nfp_net_fix_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_xsk_wakeup		= nfp_net_xsk_wakeup,
	.ndo_bridge_getlink     = nfp_net_bridge_getlink,
	.ndo_bridge_setlink     = nfp_net_bridge_setlink,
};
21644c352362SJakub Kicinski 
/* Netdev ops table for vNICs using the NFDK datapath.  Same callbacks
 * as nfp_nfd3_netdev_ops minus .ndo_xsk_wakeup (no AF_XDP wakeup here).
 */
const struct net_device_ops nfp_nfdk_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
	.ndo_set_vf_rate	= nfp_app_set_vf_rate,
	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_fix_features	= nfp_net_fix_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_bridge_getlink     = nfp_net_bridge_getlink,
	.ndo_bridge_setlink     = nfp_net_bridge_setlink,
};
2194c10d12e3SJakub Kicinski 
nfp_udp_tunnel_sync(struct net_device * netdev,unsigned int table)2195641ca085SJakub Kicinski static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
2196641ca085SJakub Kicinski {
2197641ca085SJakub Kicinski 	struct nfp_net *nn = netdev_priv(netdev);
2198641ca085SJakub Kicinski 	int i;
2199641ca085SJakub Kicinski 
2200641ca085SJakub Kicinski 	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
2201641ca085SJakub Kicinski 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2202641ca085SJakub Kicinski 		struct udp_tunnel_info ti0, ti1;
2203641ca085SJakub Kicinski 
2204641ca085SJakub Kicinski 		udp_tunnel_nic_get_port(netdev, table, i, &ti0);
2205641ca085SJakub Kicinski 		udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
2206641ca085SJakub Kicinski 
2207641ca085SJakub Kicinski 		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
2208641ca085SJakub Kicinski 			  be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
2209641ca085SJakub Kicinski 	}
2210641ca085SJakub Kicinski 
2211641ca085SJakub Kicinski 	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
2212641ca085SJakub Kicinski }
2213641ca085SJakub Kicinski 
/* UDP tunnel offload description handed to the udp_tunnel_nic core:
 * one VXLAN-only table; sync may sleep and ports are only programmed
 * while the device is open.
 */
static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
	.sync_table     = nfp_udp_tunnel_sync,
	.flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables         = {
		{
			.n_entries      = NFP_NET_N_VXLAN_PORTS,
			.tunnel_types   = UDP_TUNNEL_TYPE_VXLAN,
		},
	},
};
2225641ca085SJakub Kicinski 
22264c352362SJakub Kicinski /**
22274c352362SJakub Kicinski  * nfp_net_info() - Print general info about the NIC
22284c352362SJakub Kicinski  * @nn:      NFP Net device to reconfigure
22294c352362SJakub Kicinski  */
nfp_net_info(struct nfp_net * nn)22304c352362SJakub Kicinski void nfp_net_info(struct nfp_net *nn)
22314c352362SJakub Kicinski {
223234e244eaSYu Xiao 	nn_info(nn, "NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
223379c12a75SJakub Kicinski 		nn->dp.is_vf ? "VF " : "",
223479c12a75SJakub Kicinski 		nn->dp.num_tx_rings, nn->max_tx_rings,
223579c12a75SJakub Kicinski 		nn->dp.num_rx_rings, nn->max_rx_rings);
22364c352362SJakub Kicinski 	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
2237d9e3c299SJakub Kicinski 		nn->fw_ver.extend, nn->fw_ver.class,
22384c352362SJakub Kicinski 		nn->fw_ver.major, nn->fw_ver.minor,
22394c352362SJakub Kicinski 		nn->max_mtu);
2240de624864SDiana Wang 	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
22414c352362SJakub Kicinski 		nn->cap,
22424c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
22434c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
22444c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
22454c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
22464c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
22474c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
22484c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
224967d2656bSDiana Wang 		nn->cap & NFP_NET_CFG_CTRL_RXQINQ   ? "RXQINQ "   : "",
225067d2656bSDiana Wang 		nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 "   : "",
2251eca250b1SDiana Wang 		nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2   ? "TXVLANv2 "   : "",
22524c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
22534c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
225428063be6SEdwin Peer 		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO1 "     : "",
225528063be6SEdwin Peer 		nn->cap & NFP_NET_CFG_CTRL_LSO2     ? "TSO2 "     : "",
2256611bdd49SEdwin Peer 		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS1 "     : "",
2257611bdd49SEdwin Peer 		nn->cap & NFP_NET_CFG_CTRL_RSS2     ? "RSS2 "     : "",
2258b64052fcSPablo Cascón 		nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
22594c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
22604c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
22610dcf7f50SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_TXRWB    ? "TXRWB "    : "",
2262be801411SYinjun Zhang 		nn->cap & NFP_NET_CFG_CTRL_VEPA     ? "VEPA "     : "",
22634c352362SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
22647533fdc0SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "	  : "",
2265ddb98d94SJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
22669d372759SPablo Cascón 						      "RXCSUM_COMPLETE " : "",
2267bb45e51cSJakub Kicinski 		nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
2268de624864SDiana Wang 		nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
2269bb45e51cSJakub Kicinski 		nfp_app_extra_cap(nn->app, nn));
22704c352362SJakub Kicinski }
22714c352362SJakub Kicinski 
22724c352362SJakub Kicinski /**
2273beba69caSJakub Kicinski  * nfp_net_alloc() - Allocate netdev and related structure
22744c352362SJakub Kicinski  * @pdev:         PCI device
22759423d24bSJakub Kicinski  * @dev_info:     NFP ASIC params
2276e38f5d11SJakub Kicinski  * @ctrl_bar:     PCI IOMEM with vNIC config memory
2277a7b1ad08SJakub Kicinski  * @needs_netdev: Whether to allocate a netdev for this vNIC
22784c352362SJakub Kicinski  * @max_tx_rings: Maximum number of TX rings supported by device
22794c352362SJakub Kicinski  * @max_rx_rings: Maximum number of RX rings supported by device
22804c352362SJakub Kicinski  *
22814c352362SJakub Kicinski  * This function allocates a netdev device and fills in the initial
2282a7b1ad08SJakub Kicinski  * part of the @struct nfp_net structure.  In case of control device
2283a7b1ad08SJakub Kicinski  * nfp_net structure is allocated without the netdev.
22844c352362SJakub Kicinski  *
22854c352362SJakub Kicinski  * Return: NFP Net device structure, or ERR_PTR on error.
22864c352362SJakub Kicinski  */
2287e38f5d11SJakub Kicinski struct nfp_net *
nfp_net_alloc(struct pci_dev * pdev,const struct nfp_dev_info * dev_info,void __iomem * ctrl_bar,bool needs_netdev,unsigned int max_tx_rings,unsigned int max_rx_rings)22889423d24bSJakub Kicinski nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
22899423d24bSJakub Kicinski 	      void __iomem *ctrl_bar, bool needs_netdev,
2290e38f5d11SJakub Kicinski 	      unsigned int max_tx_rings, unsigned int max_rx_rings)
22914c352362SJakub Kicinski {
22925f30671dSYinjun Zhang 	u64 dma_mask = dma_get_mask(&pdev->dev);
22934c352362SJakub Kicinski 	struct nfp_net *nn;
229483ec8857SJakub Kicinski 	int err;
22954c352362SJakub Kicinski 
2296a7b1ad08SJakub Kicinski 	if (needs_netdev) {
2297a7b1ad08SJakub Kicinski 		struct net_device *netdev;
2298a7b1ad08SJakub Kicinski 
22994c352362SJakub Kicinski 		netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
23004c352362SJakub Kicinski 					    max_tx_rings, max_rx_rings);
23014c352362SJakub Kicinski 		if (!netdev)
23024c352362SJakub Kicinski 			return ERR_PTR(-ENOMEM);
23034c352362SJakub Kicinski 
23044c352362SJakub Kicinski 		SET_NETDEV_DEV(netdev, &pdev->dev);
23054c352362SJakub Kicinski 		nn = netdev_priv(netdev);
230679c12a75SJakub Kicinski 		nn->dp.netdev = netdev;
2307a7b1ad08SJakub Kicinski 	} else {
2308a7b1ad08SJakub Kicinski 		nn = vzalloc(sizeof(*nn));
2309a7b1ad08SJakub Kicinski 		if (!nn)
2310a7b1ad08SJakub Kicinski 			return ERR_PTR(-ENOMEM);
2311a7b1ad08SJakub Kicinski 	}
2312a7b1ad08SJakub Kicinski 
231379c12a75SJakub Kicinski 	nn->dp.dev = &pdev->dev;
2314e38f5d11SJakub Kicinski 	nn->dp.ctrl_bar = ctrl_bar;
23159423d24bSJakub Kicinski 	nn->dev_info = dev_info;
23164c352362SJakub Kicinski 	nn->pdev = pdev;
2317d9e3c299SJakub Kicinski 	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
2318d9e3c299SJakub Kicinski 
2319d9e3c299SJakub Kicinski 	switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
2320d9e3c299SJakub Kicinski 	case NFP_NET_CFG_VERSION_DP_NFD3:
23216fd86efaSJakub Kicinski 		nn->dp.ops = &nfp_nfd3_ops;
2322d9e3c299SJakub Kicinski 		break;
2323c10d12e3SJakub Kicinski 	case NFP_NET_CFG_VERSION_DP_NFDK:
2324c10d12e3SJakub Kicinski 		if (nn->fw_ver.major < 5) {
2325c10d12e3SJakub Kicinski 			dev_err(&pdev->dev,
2326c10d12e3SJakub Kicinski 				"NFDK must use ABI 5 or newer, found: %d\n",
2327c10d12e3SJakub Kicinski 				nn->fw_ver.major);
2328c10d12e3SJakub Kicinski 			err = -EINVAL;
2329c10d12e3SJakub Kicinski 			goto err_free_nn;
2330c10d12e3SJakub Kicinski 		}
2331c10d12e3SJakub Kicinski 		nn->dp.ops = &nfp_nfdk_ops;
2332c10d12e3SJakub Kicinski 		break;
2333d9e3c299SJakub Kicinski 	default:
2334d9e3c299SJakub Kicinski 		err = -EINVAL;
2335d9e3c299SJakub Kicinski 		goto err_free_nn;
2336d9e3c299SJakub Kicinski 	}
23374c352362SJakub Kicinski 
23385f30671dSYinjun Zhang 	if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
23395f30671dSYinjun Zhang 		dev_err(&pdev->dev,
23405f30671dSYinjun Zhang 			"DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
23415f30671dSYinjun Zhang 			nn->dp.ops->dma_mask, dma_mask);
23425f30671dSYinjun Zhang 		err = -EINVAL;
23435f30671dSYinjun Zhang 		goto err_free_nn;
23445f30671dSYinjun Zhang 	}
23455f30671dSYinjun Zhang 
23464c352362SJakub Kicinski 	nn->max_tx_rings = max_tx_rings;
23474c352362SJakub Kicinski 	nn->max_rx_rings = max_rx_rings;
23484c352362SJakub Kicinski 
234979c12a75SJakub Kicinski 	nn->dp.num_tx_rings = min_t(unsigned int,
235079c12a75SJakub Kicinski 				    max_tx_rings, num_online_cpus());
235179c12a75SJakub Kicinski 	nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
2352cbeaf7aaSJakub Kicinski 				 netif_get_num_default_rss_queues());
23534c352362SJakub Kicinski 
235479c12a75SJakub Kicinski 	nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
235579c12a75SJakub Kicinski 	nn->dp.num_r_vecs = min_t(unsigned int,
235679c12a75SJakub Kicinski 				  nn->dp.num_r_vecs, num_online_cpus());
23576402528bSNiklas Söderlund 	nn->max_r_vecs = nn->dp.num_r_vecs;
23586402528bSNiklas Söderlund 
23596402528bSNiklas Söderlund 	nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
23606402528bSNiklas Söderlund 				   GFP_KERNEL);
23616402528bSNiklas Söderlund 	if (!nn->dp.xsk_pools) {
23626402528bSNiklas Söderlund 		err = -ENOMEM;
23636402528bSNiklas Söderlund 		goto err_free_nn;
23646402528bSNiklas Söderlund 	}
23654b27a1ebSJakub Kicinski 
236679c12a75SJakub Kicinski 	nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
236779c12a75SJakub Kicinski 	nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
23684c352362SJakub Kicinski 
23693ed77bf7SJakub Kicinski 	sema_init(&nn->bar_lock, 1);
2370dd5b2498SJakub Kicinski 
23714c352362SJakub Kicinski 	spin_lock_init(&nn->reconfig_lock);
23724c352362SJakub Kicinski 	spin_lock_init(&nn->link_status_lock);
23734c352362SJakub Kicinski 
23743248f77fSKees Cook 	timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
23753d780b92SJakub Kicinski 
237683ec8857SJakub Kicinski 	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
237783ec8857SJakub Kicinski 				     &nn->tlv_caps);
237883ec8857SJakub Kicinski 	if (err)
237983ec8857SJakub Kicinski 		goto err_free_nn;
238083ec8857SJakub Kicinski 
2381e2c7114aSJakub Kicinski 	err = nfp_ccm_mbox_alloc(nn);
2382e2c7114aSJakub Kicinski 	if (err)
2383e2c7114aSJakub Kicinski 		goto err_free_nn;
2384e2c7114aSJakub Kicinski 
23854c352362SJakub Kicinski 	return nn;
238683ec8857SJakub Kicinski 
238783ec8857SJakub Kicinski err_free_nn:
238883ec8857SJakub Kicinski 	if (nn->dp.netdev)
238983ec8857SJakub Kicinski 		free_netdev(nn->dp.netdev);
239083ec8857SJakub Kicinski 	else
239183ec8857SJakub Kicinski 		vfree(nn);
239283ec8857SJakub Kicinski 	return ERR_PTR(err);
23934c352362SJakub Kicinski }
23944c352362SJakub Kicinski 
23954c352362SJakub Kicinski /**
2396beba69caSJakub Kicinski  * nfp_net_free() - Undo what @nfp_net_alloc() did
23974c352362SJakub Kicinski  * @nn:      NFP Net device to reconfigure
23984c352362SJakub Kicinski  */
nfp_net_free(struct nfp_net * nn)2399beba69caSJakub Kicinski void nfp_net_free(struct nfp_net *nn)
24004c352362SJakub Kicinski {
24019ad716b9SJakub Kicinski 	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
2402e2c7114aSJakub Kicinski 	nfp_ccm_mbox_free(nn);
2403dd5b2498SJakub Kicinski 
24046402528bSNiklas Söderlund 	kfree(nn->dp.xsk_pools);
2405a7b1ad08SJakub Kicinski 	if (nn->dp.netdev)
240679c12a75SJakub Kicinski 		free_netdev(nn->dp.netdev);
2407a7b1ad08SJakub Kicinski 	else
2408a7b1ad08SJakub Kicinski 		vfree(nn);
24094c352362SJakub Kicinski }
24104c352362SJakub Kicinski 
24114c352362SJakub Kicinski /**
24129ff304bfSJakub Kicinski  * nfp_net_rss_key_sz() - Get current size of the RSS key
24139ff304bfSJakub Kicinski  * @nn:		NFP Net device instance
24149ff304bfSJakub Kicinski  *
24159ff304bfSJakub Kicinski  * Return: size of the RSS key for currently selected hash function.
24169ff304bfSJakub Kicinski  */
nfp_net_rss_key_sz(struct nfp_net * nn)24179ff304bfSJakub Kicinski unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
24189ff304bfSJakub Kicinski {
24199ff304bfSJakub Kicinski 	switch (nn->rss_hfunc) {
24209ff304bfSJakub Kicinski 	case ETH_RSS_HASH_TOP:
24219ff304bfSJakub Kicinski 		return NFP_NET_CFG_RSS_KEY_SZ;
24229ff304bfSJakub Kicinski 	case ETH_RSS_HASH_XOR:
24239ff304bfSJakub Kicinski 		return 0;
24249ff304bfSJakub Kicinski 	case ETH_RSS_HASH_CRC32:
24259ff304bfSJakub Kicinski 		return 4;
24269ff304bfSJakub Kicinski 	}
24279ff304bfSJakub Kicinski 
24289ff304bfSJakub Kicinski 	nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
24299ff304bfSJakub Kicinski 	return 0;
24309ff304bfSJakub Kicinski }
24319ff304bfSJakub Kicinski 
/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:	     NFP Net device to reconfigure
 *
 * Selects a hash function from the device's advertised capabilities,
 * generates a random RSS key and programs the default RSS configuration
 * (IPv4/IPv6 TCP and UDP hashing enabled).
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	unsigned long func_bit, rss_cap_hfunc;
	u32 reg;

	/* Read the RSS function capability and select first supported func */
	reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
	rss_cap_hfunc =	FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
	if (!rss_cap_hfunc)
		/* No capability advertised - fall back to Toeplitz */
		rss_cap_hfunc =	FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
					  NFP_NET_CFG_RSS_TOEPLITZ);

	func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
	if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
		/* Capability word had no valid hash function bit set */
		dev_warn(nn->dp.dev,
			 "Bad RSS config, defaulting to Toeplitz hash\n");
		func_bit = ETH_RSS_HASH_TOP_BIT;
	}
	nn->rss_hfunc = 1 << func_bit;

	/* Key size depends on the hash function chosen above */
	netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));

	nfp_net_rss_init_itbl(nn);

	/* Enable IPv4/IPv6 TCP and UDP hashing by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      NFP_NET_CFG_RSS_IPV4_UDP |
		      NFP_NET_CFG_RSS_IPV6_UDP |
		      FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
		      NFP_NET_CFG_RSS_MASK;
}
24684c352362SJakub Kicinski 
24694c352362SJakub Kicinski /**
24704c352362SJakub Kicinski  * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
24714c352362SJakub Kicinski  * @nn:	     NFP Net device to reconfigure
24724c352362SJakub Kicinski  */
nfp_net_irqmod_init(struct nfp_net * nn)24734c352362SJakub Kicinski static void nfp_net_irqmod_init(struct nfp_net *nn)
24744c352362SJakub Kicinski {
24754c352362SJakub Kicinski 	nn->rx_coalesce_usecs      = 50;
24764c352362SJakub Kicinski 	nn->rx_coalesce_max_frames = 64;
24774c352362SJakub Kicinski 	nn->tx_coalesce_usecs      = 50;
24784c352362SJakub Kicinski 	nn->tx_coalesce_max_frames = 64;
24799d32e4e7SYinjun Zhang 
24809d32e4e7SYinjun Zhang 	nn->rx_coalesce_adapt_on   = true;
24819d32e4e7SYinjun Zhang 	nn->tx_coalesce_adapt_on   = true;
24824c352362SJakub Kicinski }
24834c352362SJakub Kicinski 
nfp_net_netdev_init(struct nfp_net * nn)2484a7b1ad08SJakub Kicinski static void nfp_net_netdev_init(struct nfp_net *nn)
24854c352362SJakub Kicinski {
2486beba69caSJakub Kicinski 	struct net_device *netdev = nn->dp.netdev;
2487611bdd49SEdwin Peer 
24889d372759SPablo Cascón 	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
24894c352362SJakub Kicinski 
2490a7b1ad08SJakub Kicinski 	netdev->mtu = nn->dp.mtu;
24914c352362SJakub Kicinski 
24924c352362SJakub Kicinski 	/* Advertise/enable offloads based on capabilities
24934c352362SJakub Kicinski 	 *
24944c352362SJakub Kicinski 	 * Note: netdev->features show the currently enabled features
24954c352362SJakub Kicinski 	 * and netdev->hw_features advertises which features are
24964c352362SJakub Kicinski 	 * supported.  By default we enable most features.
24974c352362SJakub Kicinski 	 */
24989d372759SPablo Cascón 	if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
24999d372759SPablo Cascón 		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
25009d372759SPablo Cascón 
25014c352362SJakub Kicinski 	netdev->hw_features = NETIF_F_HIGHDMA;
2502ddb98d94SJakub Kicinski 	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
25034c352362SJakub Kicinski 		netdev->hw_features |= NETIF_F_RXCSUM;
2504ddb98d94SJakub Kicinski 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
25054c352362SJakub Kicinski 	}
25064c352362SJakub Kicinski 	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
25074c352362SJakub Kicinski 		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
250879c12a75SJakub Kicinski 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
25094c352362SJakub Kicinski 	}
25104c352362SJakub Kicinski 	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
25114c352362SJakub Kicinski 		netdev->hw_features |= NETIF_F_SG;
251279c12a75SJakub Kicinski 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
25134c352362SJakub Kicinski 	}
251428063be6SEdwin Peer 	if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
251528063be6SEdwin Peer 	    nn->cap & NFP_NET_CFG_CTRL_LSO2) {
25164c352362SJakub Kicinski 		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
251728063be6SEdwin Peer 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
251828063be6SEdwin Peer 					 NFP_NET_CFG_CTRL_LSO;
25194c352362SJakub Kicinski 	}
2520a7b1ad08SJakub Kicinski 	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
25214c352362SJakub Kicinski 		netdev->hw_features |= NETIF_F_RXHASH;
2522859a497fSHuanhuan Wang 
2523859a497fSHuanhuan Wang #ifdef CONFIG_NFP_NET_IPSEC
2524859a497fSHuanhuan Wang 	if (nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC)
2525859a497fSHuanhuan Wang 		netdev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
2526859a497fSHuanhuan Wang #endif
2527859a497fSHuanhuan Wang 
25287848418eSJakub Kicinski 	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
2529ae664d9dSFei Qin 		if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
2530ae664d9dSFei Qin 			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
2531ae664d9dSFei Qin 					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
2532ae664d9dSFei Qin 					       NETIF_F_GSO_PARTIAL;
2533ae664d9dSFei Qin 			netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
2534ae664d9dSFei Qin 		}
2535641ca085SJakub Kicinski 		netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
25367848418eSJakub Kicinski 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
25374c352362SJakub Kicinski 	}
25387848418eSJakub Kicinski 	if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
25397848418eSJakub Kicinski 		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
25407848418eSJakub Kicinski 			netdev->hw_features |= NETIF_F_GSO_GRE;
25417848418eSJakub Kicinski 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
25427848418eSJakub Kicinski 	}
25437848418eSJakub Kicinski 	if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
25447848418eSJakub Kicinski 		netdev->hw_enc_features = netdev->hw_features;
25454c352362SJakub Kicinski 
25464c352362SJakub Kicinski 	netdev->vlan_features = netdev->hw_features;
25474c352362SJakub Kicinski 
254867d2656bSDiana Wang 	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
25494c352362SJakub Kicinski 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
255067d2656bSDiana Wang 		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
255167d2656bSDiana Wang 			       NFP_NET_CFG_CTRL_RXVLAN;
25524c352362SJakub Kicinski 	}
2553d80702ffSDiana Wang 	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
255428063be6SEdwin Peer 		if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
255528063be6SEdwin Peer 			nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
255628063be6SEdwin Peer 		} else {
25574c352362SJakub Kicinski 			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
2558d80702ffSDiana Wang 			nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
2559d80702ffSDiana Wang 				       NFP_NET_CFG_CTRL_TXVLAN;
25604c352362SJakub Kicinski 		}
256128063be6SEdwin Peer 	}
2562b64052fcSPablo Cascón 	if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
2563b64052fcSPablo Cascón 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2564b64052fcSPablo Cascón 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
2565b64052fcSPablo Cascón 	}
256667d2656bSDiana Wang 	if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
256767d2656bSDiana Wang 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
256867d2656bSDiana Wang 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
256967d2656bSDiana Wang 	}
25704c352362SJakub Kicinski 
25714c352362SJakub Kicinski 	netdev->features = netdev->hw_features;
25724c352362SJakub Kicinski 
25730b9de4caSJakub Kicinski 	if (nfp_app_has_tc(nn->app) && nn->port)
25747533fdc0SJakub Kicinski 		netdev->hw_features |= NETIF_F_HW_TC;
25757533fdc0SJakub Kicinski 
25767de8b691SSimon Horman 	/* C-Tag strip and S-Tag strip can't be supported simultaneously,
257767d2656bSDiana Wang 	 * so enable C-Tag strip and disable S-Tag strip by default.
257867d2656bSDiana Wang 	 */
25797de8b691SSimon Horman 	netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
25807de8b691SSimon Horman 	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
25814c352362SJakub Kicinski 
258266c0e13aSMarek Majtyka 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
258366c0e13aSMarek Majtyka 	if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
258466c0e13aSMarek Majtyka 		netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;
258566c0e13aSMarek Majtyka 
2586a7b1ad08SJakub Kicinski 	/* Finalise the netdev setup */
2587d9e3c299SJakub Kicinski 	switch (nn->dp.ops->version) {
2588d9e3c299SJakub Kicinski 	case NFP_NFD_VER_NFD3:
2589d9e3c299SJakub Kicinski 		netdev->netdev_ops = &nfp_nfd3_netdev_ops;
259066c0e13aSMarek Majtyka 		netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
2591*d194f1c7SJames Hershaw 		netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
2592d9e3c299SJakub Kicinski 		break;
2593c10d12e3SJakub Kicinski 	case NFP_NFD_VER_NFDK:
2594c10d12e3SJakub Kicinski 		netdev->netdev_ops = &nfp_nfdk_netdev_ops;
2595c10d12e3SJakub Kicinski 		break;
2596d9e3c299SJakub Kicinski 	}
2597d9e3c299SJakub Kicinski 
2598a7b1ad08SJakub Kicinski 	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
2599a7b1ad08SJakub Kicinski 
2600a7b1ad08SJakub Kicinski 	/* MTU range: 68 - hw-specific max */
2601a7b1ad08SJakub Kicinski 	netdev->min_mtu = ETH_MIN_MTU;
2602a7b1ad08SJakub Kicinski 	netdev->max_mtu = nn->max_mtu;
2603a7b1ad08SJakub Kicinski 
2604ee8b7a11SJakub Kicinski 	netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
26050d592e52SJakub Kicinski 
2606a7b1ad08SJakub Kicinski 	netif_carrier_off(netdev);
2607a7b1ad08SJakub Kicinski 
2608a7b1ad08SJakub Kicinski 	nfp_net_set_ethtool_ops(netdev);
2609a7b1ad08SJakub Kicinski }
2610a7b1ad08SJakub Kicinski 
nfp_net_read_caps(struct nfp_net * nn)2611545bfa7aSJakub Kicinski static int nfp_net_read_caps(struct nfp_net *nn)
2612a7b1ad08SJakub Kicinski {
2613a7b1ad08SJakub Kicinski 	/* Get some of the read-only fields from the BAR */
2614a7b1ad08SJakub Kicinski 	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
26151b0c84a3SHuanhuan Wang 	nn->cap_w1 = nn_readl(nn, NFP_NET_CFG_CAP_WORD1);
2616a7b1ad08SJakub Kicinski 	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
2617a7b1ad08SJakub Kicinski 
261864a919a9SJakub Kicinski 	/* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
261964a919a9SJakub Kicinski 	 * we allow use of non-chained metadata if RSS(v1) is the only
262064a919a9SJakub Kicinski 	 * advertised capability requiring metadata.
262164a919a9SJakub Kicinski 	 */
2622a7b1ad08SJakub Kicinski 	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
262377ece8d5SJakub Kicinski 					 !nn->dp.netdev ||
262464a919a9SJakub Kicinski 					 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
2625a7b1ad08SJakub Kicinski 					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
262664a919a9SJakub Kicinski 	/* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
262764a919a9SJakub Kicinski 	 * it has the same meaning as RSSv2.
262864a919a9SJakub Kicinski 	 */
2629a7b1ad08SJakub Kicinski 	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
2630a7b1ad08SJakub Kicinski 		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
2631a7b1ad08SJakub Kicinski 
2632a7b1ad08SJakub Kicinski 	/* Determine RX packet/metadata boundary offset */
2633a7b1ad08SJakub Kicinski 	if (nn->fw_ver.major >= 2) {
2634a7b1ad08SJakub Kicinski 		u32 reg;
2635a7b1ad08SJakub Kicinski 
2636a7b1ad08SJakub Kicinski 		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
2637a7b1ad08SJakub Kicinski 		if (reg > NFP_NET_MAX_PREPEND) {
2638a7b1ad08SJakub Kicinski 			nn_err(nn, "Invalid rx offset: %d\n", reg);
2639a7b1ad08SJakub Kicinski 			return -EINVAL;
2640a7b1ad08SJakub Kicinski 		}
2641a7b1ad08SJakub Kicinski 		nn->dp.rx_offset = reg;
2642a7b1ad08SJakub Kicinski 	} else {
2643a7b1ad08SJakub Kicinski 		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
2644a7b1ad08SJakub Kicinski 	}
2645a7b1ad08SJakub Kicinski 
2646b94b6a13SJakub Kicinski 	/* Mask out NFD-version-specific features */
2647b94b6a13SJakub Kicinski 	nn->cap &= nn->dp.ops->cap_mask;
2648b94b6a13SJakub Kicinski 
264978a0a65fSJakub Kicinski 	/* For control vNICs mask out the capabilities app doesn't want. */
265078a0a65fSJakub Kicinski 	if (!nn->dp.netdev)
265178a0a65fSJakub Kicinski 		nn->cap &= nn->app->type->ctrl_cap_mask;
265278a0a65fSJakub Kicinski 
2653545bfa7aSJakub Kicinski 	return 0;
2654545bfa7aSJakub Kicinski }
2655545bfa7aSJakub Kicinski 
/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn:		NFP Net device structure
 *
 * Reads device capabilities, sets defaults (MTU, RSS, IRQ moderation),
 * tells the FW the device starts disabled, and for data vNICs finishes
 * netdev setup and registers it.  Must be paired with nfp_net_clean().
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_init(struct nfp_net *nn)
{
	int err;

	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;

	err = nfp_net_read_caps(nn);
	if (err)
		return err;

	/* Set default MTU and Freelist buffer size */
	if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
		/* Control vNIC: honour app's requested MTU, capped by HW */
		nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
	} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
		nn->dp.mtu = nn->max_mtu;
	} else {
		nn->dp.mtu = NFP_NET_DEFAULT_MTU;
	}
	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);

	if (nfp_app_ctrl_uses_data_vnics(nn->app))
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;

	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_init(nn);
		/* Prefer RSSv2 over RSS(v1) when both are advertised */
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
					 NFP_NET_CFG_CTRL_RSS;
	}

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* Enable TX pointer writeback, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;

	if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
		nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER;

	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* netdev-only (data vNIC) setup: mailbox, TLS and IPsec offload */
	if (nn->dp.netdev) {
		nfp_net_netdev_init(nn);

		err = nfp_ccm_mbox_init(nn);
		if (err)
			return err;

		err = nfp_net_tls_init(nn);
		if (err)
			goto err_clean_mbox;

		nfp_net_ipsec_init(nn);
	}

	nfp_net_vecs_init(nn);

	/* Control vNICs are done - only data vNICs register a netdev */
	if (!nn->dp.netdev)
		return 0;

	spin_lock_init(&nn->mbox_amsg.lock);
	INIT_LIST_HEAD(&nn->mbox_amsg.list);
	INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);

	return register_netdev(nn->dp.netdev);

err_clean_mbox:
	nfp_ccm_mbox_clean(nn);
	return err;
}
27504c352362SJakub Kicinski 
27514c352362SJakub Kicinski /**
2752beba69caSJakub Kicinski  * nfp_net_clean() - Undo what nfp_net_init() did.
2753beba69caSJakub Kicinski  * @nn:		NFP Net device structure
27544c352362SJakub Kicinski  */
nfp_net_clean(struct nfp_net * nn)2755beba69caSJakub Kicinski void nfp_net_clean(struct nfp_net *nn)
27564c352362SJakub Kicinski {
2757a7b1ad08SJakub Kicinski 	if (!nn->dp.netdev)
2758a7b1ad08SJakub Kicinski 		return;
2759a7b1ad08SJakub Kicinski 
27606f14f443SDavid S. Miller 	unregister_netdev(nn->dp.netdev);
276157f273adSHuanhuan Wang 	nfp_net_ipsec_clean(nn);
2762e2c7114aSJakub Kicinski 	nfp_ccm_mbox_clean(nn);
276371f814cdSYinjun Zhang 	flush_work(&nn->mbox_amsg.work);
27649ad716b9SJakub Kicinski 	nfp_net_reconfig_wait_posted(nn);
27654c352362SJakub Kicinski }
2766